1 /* xircom_tulip_cb.c: A Xircom CBE-100 ethernet driver for Linux. */
3 Written/copyright 1994-1999 by Donald Becker.
5 This software may be used and distributed according to the terms
6 of the GNU General Public License, incorporated herein by reference.
8 The author may be reached as becker@scyld.com, or C/O
9 Scyld Computing Corporation
10 410 Severn Ave., Suite 210
13 -----------------------------------------------------------
15 Linux kernel-specific changes:
21 - Rewrite perfect filter/hash code
22 - Use interrupts for media changes
25 - Disallow negotiation of unsupported full-duplex modes
28 #define DRV_NAME "xircom_tulip_cb"
29 #define DRV_VERSION "0.91+LK1.1"
30 #define DRV_RELDATE "October 11, 2001"
34 /* A few user-configurable values. */
36 /* Maximum events (Rx packets, etc.) to handle at each interrupt. */
37 static int max_interrupt_work = 25;
40 /* Used to pass the full-duplex flag, etc. */
41 static int full_duplex[MAX_UNITS];
42 static int options[MAX_UNITS];
43 static int mtu[MAX_UNITS]; /* Jumbo MTU for interfaces. */
45 /* Keep the ring sizes a power of two for efficiency.
46 Making the Tx ring too large decreases the effectiveness of channel
47 bonding and packet priority.
48 There are no ill effects from too-large receive rings. */
49 #define TX_RING_SIZE 16
50 #define RX_RING_SIZE 32
52 /* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
54 static int rx_copybreak = 1518;
56 static int rx_copybreak = 100;
60 Set the bus performance register.
61 Typical: Set 16 longword cache alignment, no burst limit.
62 Cache alignment bits 15:14 Burst length 13:8
63 0000 No alignment 0x00000000 unlimited 0800 8 longwords
64 4000 8 longwords 0100 1 longword 1000 16 longwords
65 8000 16 longwords 0200 2 longwords 2000 32 longwords
66 C000 32 longwords 0400 4 longwords
67 Warning: many older 486 systems are broken and require setting 0x00A04800
68 8 longword cache alignment, 8 longword burst.
69 ToDo: Non-Intel setting could be better.
72 #if defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
73 static int csr0 = 0x01A00000 | 0xE000;
74 #elif defined(__powerpc__)
75 static int csr0 = 0x01B00000 | 0x8000;
76 #elif defined(__sparc__)
77 static int csr0 = 0x01B00080 | 0x8000;
78 #elif defined(__i386__)
79 static int csr0 = 0x01A00000 | 0x8000;
81 #warning Processor architecture undefined!
82 static int csr0 = 0x00A00000 | 0x4800;
85 /* Operational parameters that usually are not changed. */
86 /* Time in jiffies before concluding the transmitter is hung. */
87 #define TX_TIMEOUT (4 * HZ)
88 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
89 #define PKT_SETUP_SZ 192 /* Size of the setup frame */
92 #define PCI_POWERMGMT 0x40
94 #include <linux/config.h>
95 #include <linux/module.h>
96 #include <linux/kernel.h>
97 #include <linux/pci.h>
98 #include <linux/netdevice.h>
99 #include <linux/etherdevice.h>
100 #include <linux/delay.h>
101 #include <linux/init.h>
102 #include <linux/mii.h>
103 #include <linux/ethtool.h>
104 #include <linux/crc32.h>
107 #include <asm/processor.h> /* Processor type for cache alignment. */
108 #include <asm/uaccess.h>
111 /* These identify the driver base version and may not be removed. */
112 static char version[] __devinitdata =
113 KERN_INFO DRV_NAME ".c derived from tulip.c:v0.91 4/14/99 becker@scyld.com\n"
114 KERN_INFO " unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE "\n";
116 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
117 MODULE_DESCRIPTION("Xircom CBE-100 ethernet driver");
118 MODULE_LICENSE("GPL v2");
120 MODULE_PARM(debug, "i");
121 MODULE_PARM(max_interrupt_work, "i");
122 MODULE_PARM(rx_copybreak, "i");
123 MODULE_PARM(csr0, "i");
124 MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
125 MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
127 #define RUN_AT(x) (jiffies + (x))
129 #define xircom_debug debug
131 static int xircom_debug = XIRCOM_DEBUG;
133 static int xircom_debug = 1;
139 I. Board Compatibility
141 This device driver was forked from the driver for the DECchip "Tulip",
142 Digital's single-chip ethernet controllers for PCI. It supports Xircom's
143 almost-Tulip-compatible CBE-100 CardBus adapters.
145 II. Board-specific settings
147 PCI bus devices are configured by the system at boot time, so no jumpers
148 need to be set on the board. The system BIOS preferably should assign the
149 PCI INTA signal to an otherwise unused system IRQ line.
151 III. Driver operation
155 The Xircom can use either ring buffers or lists of Tx and Rx descriptors.
156 This driver uses statically allocated rings of Rx and Tx descriptors, set at
157 compile time by RX/TX_RING_SIZE. This version of the driver allocates skbuffs
158 for the Rx ring buffers at open() time and passes the skb->data field to the
159 Xircom as receive data buffers. When an incoming frame is less than
160 RX_COPYBREAK bytes long, a fresh skbuff is allocated and the frame is
161 copied to the new skbuff. When the incoming frame is larger, the skbuff is
162 passed directly up the protocol stack and replaced by a newly allocated
165 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
166 using a full-sized skbuff for small frames vs. the copying costs of larger
167 frames. For small frames the copying cost is negligible (esp. considering
168 that we are pre-loading the cache with immediately useful header
169 information). For large frames the copying cost is non-trivial, and the
170 larger copy might flush the cache of useful data. A subtle aspect of this
171 choice is that the Xircom only receives into longword aligned buffers, thus
172 the IP header at offset 14 isn't longword aligned for further processing.
173 Copied frames are put into the new skbuff at an offset of "+2", thus copying
174 has the beneficial effect of aligning the IP header and preloading the
177 IIIC. Synchronization
178 The driver runs as two independent, single-threaded flows of control. One
179 is the send-packet routine, which enforces single-threaded use by the
180 dev->tbusy flag. The other thread is the interrupt handler, which is single
181 threaded by the hardware and other software.
183 The send packet thread has partial control over the Tx ring and 'dev->tbusy'
184 flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
185 queue slot is empty, it clears the tbusy flag when finished otherwise it sets
186 the 'tp->tx_full' flag.
188 The interrupt handler has exclusive control over the Rx ring and records stats
189 from the Tx ring. (The Tx-done interrupt can't be selectively turned off, so
190 we can't avoid the interrupt overhead by having the Tx routine reap the Tx
191 stats.) After reaping the stats, it marks the queue entry as empty by setting
192 the 'base' to zero. Iff the 'tp->tx_full' flag is set, it clears both the
193 tx_full and tbusy flags.
199 http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
200 http://www.digital.com (search for current 21*4* datasheets and "21X4 SROM")
201 http://www.national.com/pf/DP/DP83840A.html
207 /* A full-duplex map for media types. */
209 MediaIsFD = 1, MediaAlwaysFD=2, MediaIsMII=4, MediaIsFx=8,
/* Per-media-type capability flags, indexed by the low 4 bits of if_port.
 * Each entry is a bitmask of the MediaIsFD/MediaAlwaysFD/MediaIsMII/MediaIsFx
 * flags declared above.  NOTE(review): table inherited from the tulip
 * driver -- entries assumed correct for the Xircom, not re-verified here. */
211 static const char media_cap[] =
212 {0,0,0,16, 3,19,16,24, 27,4,7,5, 0,20,23,20 };
214 /* Offsets to the Command and Status Registers, "CSRs". All accesses
215 must be longword instructions and quadword aligned. */
/* Register offsets from the I/O base (dev->base_addr).  CSR0..CSR15 are the
 * standard Tulip-style CSRs spaced 8 bytes apart; CSR16 at 0x04 is listed
 * out of numeric order -- presumably a Xircom-specific register squeezed
 * between CSR0 and CSR1 (TODO confirm against the datasheet). */
216 enum xircom_offsets {
217 CSR0=0, CSR1=0x08, CSR2=0x10, CSR3=0x18, CSR4=0x20, CSR5=0x28,
218 CSR6=0x30, CSR7=0x38, CSR8=0x40, CSR9=0x48, CSR10=0x50, CSR11=0x58,
219 CSR12=0x60, CSR13=0x68, CSR14=0x70, CSR15=0x78, CSR16=0x04, };
221 /* The bits in the CSR5 status registers, mostly interrupt sources. */
223 LinkChange=0x08000000,
224 NormalIntr=0x10000, NormalIntrMask=0x00014045,
225 AbnormalIntr=0x8000, AbnormalIntrMask=0x0a00a5a2,
226 ReservedIntrMask=0xe0001a18,
227 EarlyRxIntr=0x4000, BusErrorIntr=0x2000,
228 EarlyTxIntr=0x400, RxDied=0x100, RxNoBuf=0x80, RxIntr=0x40,
229 TxFIFOUnderflow=0x20, TxNoBuf=0x04, TxDied=0x02, TxIntr=0x01,
232 enum csr0_control_bits {
233 EnableMWI=0x01000000, EnableMRL=0x00800000,
234 EnableMRM=0x00200000, EqualBusPrio=0x02,
238 enum csr6_control_bits {
239 ReceiveAllBit=0x40000000, AllMultiBit=0x80, PromiscBit=0x40,
240 HashFilterBit=0x01, FullDuplexBit=0x0200,
241 TxThresh10=0x400000, TxStoreForw=0x200000,
242 TxThreshMask=0xc000, TxThreshShift=14,
243 EnableTx=0x2000, EnableRx=0x02,
244 ReservedZeroMask=0x8d930134, ReservedOneMask=0x320c0000,
245 EnableTxRx=(EnableTx | EnableRx),
250 HAS_MII=1, HAS_ACPI=2,
252 static struct xircom_chip_table {
254 int valid_intrs; /* CSR7 interrupt enable settings */
257 { "Xircom Cardbus Adapter",
258 LinkChange | NormalIntr | AbnormalIntr | BusErrorIntr |
259 RxDied | RxNoBuf | RxIntr | TxFIFOUnderflow | TxNoBuf | TxDied | TxIntr,
260 HAS_MII | HAS_ACPI, },
263 /* This matches the table above. */
269 /* The Xircom Rx and Tx buffer descriptors. */
270 struct xircom_rx_desc {
273 u32 buffer1, buffer2;
276 struct xircom_tx_desc {
279 u32 buffer1, buffer2; /* We use only buffer 1. */
282 enum tx_desc0_status_bits {
283 Tx0DescOwned=0x80000000, Tx0DescError=0x8000, Tx0NoCarrier=0x0800,
284 Tx0LateColl=0x0200, Tx0ManyColl=0x0100, Tx0Underflow=0x02,
286 enum tx_desc1_status_bits {
287 Tx1ComplIntr=0x80000000, Tx1LastSeg=0x40000000, Tx1FirstSeg=0x20000000,
288 Tx1SetupPkt=0x08000000, Tx1DisableCRC=0x04000000, Tx1RingWrap=0x02000000,
289 Tx1ChainDesc=0x01000000, Tx1NoPad=0x800000, Tx1HashSetup=0x400000,
290 Tx1WholePkt=(Tx1FirstSeg | Tx1LastSeg),
292 enum rx_desc0_status_bits {
293 Rx0DescOwned=0x80000000, Rx0DescError=0x8000, Rx0NoSpace=0x4000,
294 Rx0Runt=0x0800, Rx0McastPkt=0x0400, Rx0FirstSeg=0x0200, Rx0LastSeg=0x0100,
295 Rx0HugeFrame=0x80, Rx0CRCError=0x02,
296 Rx0WholePkt=(Rx0FirstSeg | Rx0LastSeg),
298 enum rx_desc1_status_bits {
299 Rx1RingWrap=0x02000000, Rx1ChainDesc=0x01000000,
302 struct xircom_private {
303 struct xircom_rx_desc rx_ring[RX_RING_SIZE];
304 struct xircom_tx_desc tx_ring[TX_RING_SIZE];
305 /* The saved address of a sent-in-place packet/buffer, for skfree(). */
306 struct sk_buff* tx_skbuff[TX_RING_SIZE];
308 /* The X3201-3 requires 4-byte aligned tx bufs */
309 struct sk_buff* tx_aligned_skbuff[TX_RING_SIZE];
311 /* The addresses of receive-in-place skbuffs. */
312 struct sk_buff* rx_skbuff[RX_RING_SIZE];
313 u16 setup_frame[PKT_SETUP_SZ / sizeof(u16)]; /* Pseudo-Tx frame to init address table. */
315 struct net_device_stats stats;
316 unsigned int cur_rx, cur_tx; /* The next free ring entry */
317 unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
318 unsigned int tx_full:1; /* The Tx queue is full. */
319 unsigned int speed100:1;
320 unsigned int full_duplex:1; /* Full-duplex operation requested. */
321 unsigned int autoneg:1;
322 unsigned int default_port:4; /* Last dev->if_port value. */
324 unsigned int csr0; /* CSR0 setting. */
325 unsigned int csr6; /* Current CSR6 control settings. */
326 u16 to_advertise; /* NWay capabilities advertised. */
328 signed char phys[4], mii_cnt; /* MII device addresses. */
330 struct pci_dev *pdev;
334 static int mdio_read(struct net_device *dev, int phy_id, int location);
335 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
336 static void xircom_up(struct net_device *dev);
337 static void xircom_down(struct net_device *dev);
338 static int xircom_open(struct net_device *dev);
339 static void xircom_tx_timeout(struct net_device *dev);
340 static void xircom_init_ring(struct net_device *dev);
341 static int xircom_start_xmit(struct sk_buff *skb, struct net_device *dev);
342 static int xircom_rx(struct net_device *dev);
343 static void xircom_media_change(struct net_device *dev);
344 static irqreturn_t xircom_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
345 static int xircom_close(struct net_device *dev);
346 static struct net_device_stats *xircom_get_stats(struct net_device *dev);
347 static int xircom_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
348 static void set_rx_mode(struct net_device *dev);
349 static void check_duplex(struct net_device *dev);
350 static struct ethtool_ops ops;
353 /* The Xircom cards are picky about when certain bits in CSR6 can be
354 manipulated. Keith Owens <kaos@ocs.com.au>. */
/*
 * outl_CSR6 - safely write a new value into the CSR6 operation-mode register.
 * @newcsr6: value to program (reserved bits are normalized below).
 * @ioaddr:  I/O base address of the chip; the CSR6 offset is added here,
 *           so callers must pass dev->base_addr, NOT base + CSR6.
 *
 * The Xircom is picky about when the "strict" CSR6 bits (Tx threshold,
 * store-and-forward, Tx/Rx enables, duplex) may change: the transmit and
 * receive engines must be stopped or suspended first.  This helper stops
 * them, polls the CSR5 state fields until both report stopped/suspended
 * (or gives up after 'attempts' tries), then performs the write.
 *
 * FIX(review): the "too many attempts" printk previously concatenated its
 * two string halves with no separator ("attempts,csr5=..."); a space has
 * been added.  No other code change.
 */
355 static void outl_CSR6(u32 newcsr6, long ioaddr)
357 const int strict_bits =
358 TxThresh10 | TxStoreForw | TxThreshMask | EnableTxRx | FullDuplexBit;
359 int csr5, csr5_22_20, csr5_19_17, currcsr6, attempts = 200;
363 /* mask out the reserved bits that always read 0 on the Xircom cards */
364 newcsr6 &= ~ReservedZeroMask;
365 /* or in the reserved bits that always read 1 */
366 newcsr6 |= ReservedOneMask;
367 currcsr6 = inl(ioaddr + CSR6);
/* Fast path: no strict bit changes, or the chip is already quiescent. */
368 if (((newcsr6 & strict_bits) == (currcsr6 & strict_bits)) ||
369 ((currcsr6 & ~EnableTxRx) == 0)) {
370 outl(newcsr6, ioaddr + CSR6); /* safe */
371 restore_flags(flags);
374 /* make sure the transmitter and receiver are stopped first */
375 currcsr6 &= ~EnableTxRx;
377 csr5 = inl(ioaddr + CSR5);
378 if (csr5 == 0xffffffff)
379 break; /* cannot read csr5, card removed? */
/* CSR5 bits 22:20 and 19:17 report the Tx/Rx process states --
 * presumably matching the Tulip state encodings; TODO confirm. */
380 csr5_22_20 = csr5 & 0x700000;
381 csr5_19_17 = csr5 & 0x0e0000;
382 if ((csr5_22_20 == 0 || csr5_22_20 == 0x600000) &&
383 (csr5_19_17 == 0 || csr5_19_17 == 0x80000 || csr5_19_17 == 0xc0000))
384 break; /* both are stopped or suspended */
386 printk(KERN_INFO DRV_NAME ": outl_CSR6 too many attempts,"
387 " csr5=0x%08x\n", csr5);
388 outl(newcsr6, ioaddr + CSR6); /* unsafe but do it anyway */
389 restore_flags(flags);
/* Restore the previous (stopped) enables before the final write. */
392 outl(currcsr6, ioaddr + CSR6);
395 /* now it is safe to change csr6 */
396 outl(newcsr6, ioaddr + CSR6);
397 restore_flags(flags);
/*
 * read_mac_address - extract the station MAC address from the card's CIS.
 * Walks the CIS tuple chain through the boot-ROM access registers
 * (CSR10 = address, CSR9 = data) looking for the LAN node-ID tuple and
 * copies its 6 data bytes into dev->dev_addr.
 * NOTE(review): tuple 0x22 with data_id 0x04 is presumably
 * CISTPL_FUNCE/lan_node_id per the PC Card spec -- confirm.
 */
401 static void __devinit read_mac_address(struct net_device *dev)
403 long ioaddr = dev->base_addr;
405 unsigned char tuple, link, data_id, data_count;
407 /* Xircom has its address stored in the CIS;
408 * we access it through the boot rom interface for now
409 * this might not work, as the CIS is not parsed but I
410 * (danilo) use the offset I found on my card's CIS !!!
412 * Doug Ledford: I changed this routine around so that it
413 * walks the CIS memory space, parsing the config items, and
414 * finds the proper lan_node_id tuple and uses the data
417 outl(1 << 12, ioaddr + CSR9); /* enable boot rom access */
/* Tuple layout at offset i: [code][link][data_id][data_count][data...];
 * 'link' advances to the next tuple. */
418 for (i = 0x100; i < 0x1f7; i += link+2) {
419 outl(i, ioaddr + CSR10);
420 tuple = inl(ioaddr + CSR9) & 0xff;
421 outl(i + 1, ioaddr + CSR10);
422 link = inl(ioaddr + CSR9) & 0xff;
423 outl(i + 2, ioaddr + CSR10);
424 data_id = inl(ioaddr + CSR9) & 0xff;
425 outl(i + 3, ioaddr + CSR10);
426 data_count = inl(ioaddr + CSR9) & 0xff;
427 if ( (tuple == 0x22) &&
428 (data_id == 0x04) && (data_count == 0x06) ) {
430 * This is it. We have the data we want.
/* Copy the six MAC address bytes that follow the header. */
432 for (j = 0; j < 6; j++) {
433 outl(i + j + 4, ioaddr + CSR10);
434 dev->dev_addr[j] = inl(ioaddr + CSR9) & 0xff;
/* link == 0 terminates the tuple chain. */
437 } else if (link == 0) {
445 * locate the MII interfaces and initialize them.
446 * we disable full-duplex modes here,
447 * because we don't know how to handle them.
/*
 * find_mii_transceivers - probe all 32 MII PHY addresses and record the
 * ones that respond, caching their addresses in tp->phys[] and the
 * (deliberately half-duplex-only) advertising word in tp->advertising[].
 * Full-duplex modes are masked out because the driver cannot handle them
 * (see the comment block above this function).
 */
449 static void find_mii_transceivers(struct net_device *dev)
451 struct xircom_private *tp = netdev_priv(dev);
/* If the user forced an MII media type, translate it to an NWay
 * advertising value (table indexed from media type 9 upward). */
454 if (media_cap[tp->default_port] & MediaIsMII) {
455 u16 media2advert[] = { 0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200 };
456 tp->to_advertise = media2advert[tp->default_port - 9];
/* Default advertisement: half-duplex only; FD modes commented out. */
459 /*ADVERTISE_100BASE4 | ADVERTISE_100FULL |*/ ADVERTISE_100HALF |
460 /*ADVERTISE_10FULL |*/ ADVERTISE_10HALF | ADVERTISE_CSMA;
462 /* Find the connected MII xcvrs.
463 Doing this in open() would allow detecting external xcvrs later,
464 but takes much time. */
465 for (phy = 0, phy_idx = 0; phy < 32 && phy_idx < sizeof(tp->phys); phy++) {
466 int mii_status = mdio_read(dev, phy, MII_BMSR);
/* Accept a PHY that reports 100BASE4-only, or any PHY that reports
 * at least one of the 10/100 half/full capabilities. */
467 if ((mii_status & (BMSR_100BASE4 | BMSR_100HALF | BMSR_10HALF)) == BMSR_100BASE4 ||
468 ((mii_status & BMSR_100BASE4) == 0 &&
469 (mii_status & (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL | BMSR_10HALF)) != 0)) {
470 int mii_reg0 = mdio_read(dev, phy, MII_BMCR);
471 int mii_advert = mdio_read(dev, phy, MII_ADVERTISE);
/* BMSR capability bits start at bit 11; >>6 aligns them with the
 * ADVERTISE_* bits so we only advertise what both sides allow. */
472 int reg4 = ((mii_status >> 6) & tp->to_advertise) | ADVERTISE_CSMA;
473 tp->phys[phy_idx] = phy;
474 tp->advertising[phy_idx++] = reg4;
475 printk(KERN_INFO "%s: MII transceiver #%d "
476 "config %4.4x status %4.4x advertising %4.4x.\n",
477 dev->name, phy, mii_reg0, mii_status, mii_advert);
480 tp->mii_cnt = phy_idx;
482 printk(KERN_INFO "%s: ***WARNING***: No MII transceiver found!\n",
490 * To quote Arjan van de Ven:
491 * transceiver_voodoo() enables the external UTP plug thingy.
492 * it's called voodoo as I stole this code and cannot cross-reference
493 * it with the specification.
494 * Actually it seems to go like this:
495 * - GPIO2 enables the MII itself so we can talk to it. The MII gets reset
496 * so any prior MII settings are lost.
497 * - GPIO0 enables the TP port so the MII can talk to the network.
498 * - a software reset will reset both GPIO pins.
499 * I also moved the software reset here, because doing it in xircom_up()
500 * required enabling the GPIO pins each time, which reset the MII each time.
501 * Thus we couldn't control the MII -- which sucks because we don't know
502 * how to handle full-duplex modes so we *must* disable them.
/*
 * transceiver_voodoo - software-reset the chip and enable the external
 * UTP plug via the CSR15 GPIO pins (see the explanatory comment above).
 * The exact CSR15 magic values are Xircom-recommended but undocumented.
 */
504 static void transceiver_voodoo(struct net_device *dev)
506 struct xircom_private *tp = netdev_priv(dev);
507 long ioaddr = dev->base_addr;
509 /* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
510 outl(SoftwareReset, ioaddr + CSR0);
513 /* Deassert reset. */
514 outl(tp->csr0, ioaddr + CSR0);
516 /* Reset the xcvr interface and turn on heartbeat. */
517 outl(0x0008, ioaddr + CSR15);
518 udelay(5); /* The delays are Xircom-recommended to give the
519 * chipset time to reset the actual hardware
/* GPIO writes: first enables the MII (GPIO2), then the TP port (GPIO0)
 * -- see the "voodoo" comment block above; values are opaque. */
522 outl(0xa8050000, ioaddr + CSR15);
524 outl(0xa00f0000, ioaddr + CSR15);
/* Leave CSR6 cleared (half-duplex); FD variant intentionally disabled. */
527 outl_CSR6(0, ioaddr);
528 //outl_CSR6(FullDuplexBit, ioaddr);
/*
 * xircom_init_one - PCI probe callback: enable the device, allocate and
 * wire up the net_device, quiesce the chip, read the MAC address from the
 * CIS, register with the network stack, and probe for MII transceivers.
 * Returns 0 on success or a negative errno (error paths partially visible).
 */
532 static int __devinit xircom_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
534 struct net_device *dev;
535 struct xircom_private *tp;
536 static int board_idx = -1;
537 int chip_idx = id->driver_data;
542 /* when built into the kernel, we only print version if device is found */
544 static int printed_version;
545 if (!printed_version++)
549 //printk(KERN_INFO "xircom_init_one(%s)\n", pci_name(pdev));
553 if (pci_enable_device(pdev))
556 pci_set_master(pdev);
558 ioaddr = pci_resource_start(pdev, 0);
559 dev = alloc_etherdev(sizeof(*tp));
561 printk (KERN_ERR DRV_NAME "%d: cannot alloc etherdev, aborting\n", board_idx);
564 SET_MODULE_OWNER(dev);
565 SET_NETDEV_DEV(dev, &pdev->dev);
567 dev->base_addr = ioaddr;
568 dev->irq = pdev->irq;
570 if (pci_request_regions(pdev, dev->name)) {
571 printk (KERN_ERR DRV_NAME " %d: cannot reserve PCI resources, aborting\n", board_idx);
572 goto err_out_free_netdev;
575 /* Bring the chip out of sleep mode.
576 Caution: Snooze mode does not work with some boards! */
577 if (xircom_tbl[chip_idx].flags & HAS_ACPI)
578 pci_write_config_dword(pdev, PCI_POWERMGMT, 0);
580 /* Stop the chip's Tx and Rx processes. */
581 outl_CSR6(inl(ioaddr + CSR6) & ~EnableTxRx, ioaddr);
582 /* Clear the missed-packet counter. */
583 (volatile int)inl(ioaddr + CSR8);
585 tp = netdev_priv(dev);
587 spin_lock_init(&tp->lock);
589 tp->chip_id = chip_idx;
590 /* BugFixes: The 21143-TD hangs with PCI Write-and-Invalidate cycles. */
591 /* XXX: is this necessary for Xircom? */
592 tp->csr0 = csr0 & ~EnableMWI;
594 pci_set_drvdata(pdev, dev);
596 /* The lower four bits are the media type. */
597 if (board_idx >= 0 && board_idx < MAX_UNITS) {
598 tp->default_port = options[board_idx] & 15;
/* 0x90 presumably encodes the forced-full-duplex media bits -- confirm. */
599 if ((options[board_idx] & 0x90) || full_duplex[board_idx] > 0)
601 if (mtu[board_idx] > 0)
602 dev->mtu = mtu[board_idx];
/* dev->mem_start may carry a media override from the boot line. */
605 tp->default_port = dev->mem_start;
606 if (tp->default_port) {
607 if (media_cap[tp->default_port] & MediaAlwaysFD)
616 /* The Xircom-specific entries in the device structure. */
617 dev->open = &xircom_open;
618 dev->hard_start_xmit = &xircom_start_xmit;
619 dev->stop = &xircom_close;
620 dev->get_stats = &xircom_get_stats;
621 dev->do_ioctl = &xircom_ioctl;
622 #ifdef HAVE_MULTICAST
623 dev->set_multicast_list = &set_rx_mode;
625 dev->tx_timeout = xircom_tx_timeout;
626 dev->watchdog_timeo = TX_TIMEOUT;
627 SET_ETHTOOL_OPS(dev, &ops);
629 transceiver_voodoo(dev);
631 read_mac_address(dev);
633 if (register_netdev(dev))
634 goto err_out_cleardev;
636 pci_read_config_byte(pdev, PCI_REVISION_ID, &chip_rev);
637 printk(KERN_INFO "%s: %s rev %d at %#3lx,",
638 dev->name, xircom_tbl[chip_idx].chip_name, chip_rev, ioaddr);
639 for (i = 0; i < 6; i++)
640 printk("%c%2.2X", i ? ':' : ' ', dev->dev_addr[i]);
641 printk(", IRQ %d.\n", dev->irq);
643 if (xircom_tbl[chip_idx].flags & HAS_MII) {
644 find_mii_transceivers(dev);
/* Error unwind labels (bodies partially visible in this listing). */
651 pci_set_drvdata(pdev, NULL);
652 pci_release_regions(pdev);
659 /* MII transceiver control section.
660 Read and write the MII registers using software-generated serial
661 MDIO protocol. See the MII specifications or DP83840A data sheet
664 /* The maximum data clock rate is 2.5 Mhz. The minimum timing is usually
665 met by back-to-back PCI I/O cycles, but we insert a delay to avoid
666 "overclocking" issues or future 66Mhz PCI. */
667 #define mdio_delay() inl(mdio_addr)
669 /* Read and write the MII registers using software-generated serial
670 MDIO protocol. It is just different enough from the EEPROM protocol
671 to not share code. The maxium data clock rate is 2.5 Mhz. */
672 #define MDIO_SHIFT_CLK 0x10000
673 #define MDIO_DATA_WRITE0 0x00000
674 #define MDIO_DATA_WRITE1 0x20000
675 #define MDIO_ENB 0x00000 /* Ignore the 0x02000 databook setting. */
676 #define MDIO_ENB_IN 0x40000
677 #define MDIO_DATA_READ 0x80000
/*
 * mdio_read - bit-bang a Clause-22 MII read over CSR9.
 * @phy_id:   PHY address (0..31).
 * @location: register number within the PHY.
 * Returns the 16-bit register value.
 */
679 static int mdio_read(struct net_device *dev, int phy_id, int location)
/* 0xf6 = start(01) + read opcode(10) + leading sync bits of the frame. */
682 int read_cmd = (0xf6 << 10) | (phy_id << 5) | location;
684 long ioaddr = dev->base_addr;
685 long mdio_addr = ioaddr + CSR9;
687 /* Establish sync by sending at least 32 logic ones. */
688 for (i = 32; i >= 0; i--) {
689 outl(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
691 outl(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
694 /* Shift the read command bits out. */
695 for (i = 15; i >= 0; i--) {
696 int dataval = (read_cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0;
698 outl(MDIO_ENB | dataval, mdio_addr);
700 outl(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr);
703 /* Read the two transition, 16 data, and wire-idle bits. */
704 for (i = 19; i > 0; i--) {
705 outl(MDIO_ENB_IN, mdio_addr);
707 retval = (retval << 1) | ((inl(mdio_addr) & MDIO_DATA_READ) ? 1 : 0);
708 outl(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
/* Drop the trailing idle bit; keep the 16 data bits. */
711 return (retval>>1) & 0xffff;
/*
 * mdio_write - bit-bang a Clause-22 MII write over CSR9.
 * @phy_id:   PHY address (0..31).
 * @location: register number within the PHY.
 * @value:    16-bit value to write.
 */
715 static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
/* 0x5002<<16 = start(01) + write opcode(01) + turnaround(10) pattern;
 * phy/register fields are OR'd into their frame positions. */
718 int cmd = (0x5002 << 16) | (phy_id << 23) | (location << 18) | value;
719 long ioaddr = dev->base_addr;
720 long mdio_addr = ioaddr + CSR9;
722 /* Establish sync by sending 32 logic ones. */
723 for (i = 32; i >= 0; i--) {
724 outl(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
726 outl(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
729 /* Shift the command bits out. */
730 for (i = 31; i >= 0; i--) {
731 int dataval = (cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0;
732 outl(MDIO_ENB | dataval, mdio_addr);
734 outl(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr);
737 /* Clear out extra bits. */
738 for (i = 2; i > 0; i--) {
739 outl(MDIO_ENB_IN, mdio_addr);
741 outl(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
/*
 * xircom_up - bring the interface up: initialize the descriptor rings,
 * give the chip their bus addresses, program CSR6, enable interrupts,
 * start the Tx/Rx engines and poll the current link state.
 * Called with the device quiescent (from open / resume paths).
 */
749 xircom_up(struct net_device *dev)
751 struct xircom_private *tp = netdev_priv(dev);
752 long ioaddr = dev->base_addr;
755 xircom_init_ring(dev);
756 /* Clear the tx ring */
757 for (i = 0; i < TX_RING_SIZE; i++) {
758 tp->tx_skbuff[i] = NULL;
759 tp->tx_ring[i].status = 0;
762 if (xircom_debug > 1)
763 printk(KERN_DEBUG "%s: xircom_up() irq %d.\n", dev->name, dev->irq);
/* CSR3/CSR4 take the bus addresses of the Rx and Tx descriptor lists. */
765 outl(virt_to_bus(tp->rx_ring), ioaddr + CSR3);
766 outl(virt_to_bus(tp->tx_ring), ioaddr + CSR4);
768 tp->saved_if_port = dev->if_port;
769 if (dev->if_port == 0)
770 dev->if_port = tp->default_port;
772 tp->csr6 = TxThresh10 /*| FullDuplexBit*/; /* XXX: why 10 and not 100? */
776 /* Start the chip's Tx to process setup frame. */
777 outl_CSR6(tp->csr6, ioaddr);
778 outl_CSR6(tp->csr6 | EnableTx, ioaddr);
780 /* Acknowledge all outstanding interrupts sources */
781 outl(xircom_tbl[tp->chip_id].valid_intrs, ioaddr + CSR5);
782 /* Enable interrupts by setting the interrupt mask. */
783 outl(xircom_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
785 outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
/* Writing CSR2 issues a receive poll demand. */
787 outl(0, ioaddr + CSR2);
789 /* Tell the net layer we're ready */
790 netif_start_queue (dev);
792 /* Check current media state */
793 xircom_media_change(dev);
795 if (xircom_debug > 2) {
796 printk(KERN_DEBUG "%s: Done xircom_up(), CSR0 %8.8x, CSR5 %8.8x CSR6 %8.8x.\n",
797 dev->name, inl(ioaddr + CSR0), inl(ioaddr + CSR5),
804 xircom_open(struct net_device *dev)
806 struct xircom_private *tp = netdev_priv(dev);
808 if (request_irq(dev->irq, &xircom_interrupt, SA_SHIRQ, dev->name, dev))
/*
 * xircom_tx_timeout - watchdog callback invoked by the net core when a
 * transmit has not completed within dev->watchdog_timeo.  Optionally dumps
 * the rings (debug-only), then restarts the Tx/Rx engines, re-issues a
 * transmit poll demand and wakes the queue.
 */
818 static void xircom_tx_timeout(struct net_device *dev)
820 struct xircom_private *tp = netdev_priv(dev);
821 long ioaddr = dev->base_addr;
823 if (media_cap[dev->if_port] & MediaIsMII) {
824 /* Do nothing -- the media monitor should handle this. */
825 if (xircom_debug > 1)
826 printk(KERN_WARNING "%s: Transmit timeout using MII device.\n",
/* Compiled out by default: verbose ring-state dump for debugging. */
830 #if defined(way_too_many_messages)
831 if (xircom_debug > 3) {
833 for (i = 0; i < RX_RING_SIZE; i++) {
834 u8 *buf = (u8 *)(tp->rx_ring[i].buffer1);
836 printk(KERN_DEBUG "%2d: %8.8x %8.8x %8.8x %8.8x "
837 "%2.2x %2.2x %2.2x.\n",
838 i, (unsigned int)tp->rx_ring[i].status,
839 (unsigned int)tp->rx_ring[i].length,
840 (unsigned int)tp->rx_ring[i].buffer1,
841 (unsigned int)tp->rx_ring[i].buffer2,
842 buf[0], buf[1], buf[2]);
843 for (j = 0; buf[j] != 0xee && j < 1600; j++)
844 if (j < 100) printk(" %2.2x", buf[j]);
845 printk(" j=%d.\n", j);
847 printk(KERN_DEBUG " Rx ring %8.8x: ", (int)tp->rx_ring);
848 for (i = 0; i < RX_RING_SIZE; i++)
849 printk(" %8.8x", (unsigned int)tp->rx_ring[i].status);
850 printk("\n" KERN_DEBUG " Tx ring %8.8x: ", (int)tp->tx_ring);
851 for (i = 0; i < TX_RING_SIZE; i++)
852 printk(" %8.8x", (unsigned int)tp->tx_ring[i].status);
857 /* Stop and restart the chip's Tx/Rx processes . */
858 outl_CSR6(tp->csr6 | EnableRx, ioaddr);
859 outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
860 /* Trigger an immediate transmit demand. */
861 outl(0, ioaddr + CSR1);
863 dev->trans_start = jiffies;
864 netif_wake_queue (dev);
865 tp->stats.tx_errors++;
869 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
/* Initialize the Rx and Tx descriptor rings: chain the descriptors via
 * buffer2, allocate an skb per Rx slot and hand ownership of the Rx
 * descriptors to the chip.  Tx descriptors start unowned/empty. */
870 static void xircom_init_ring(struct net_device *dev)
872 struct xircom_private *tp = netdev_priv(dev);
876 tp->cur_rx = tp->cur_tx = 0;
877 tp->dirty_rx = tp->dirty_tx = 0;
879 for (i = 0; i < RX_RING_SIZE; i++) {
880 tp->rx_ring[i].status = 0;
881 tp->rx_ring[i].length = PKT_BUF_SZ;
/* buffer2 chains each descriptor to the next one in the ring. */
882 tp->rx_ring[i].buffer2 = virt_to_bus(&tp->rx_ring[i+1]);
883 tp->rx_skbuff[i] = NULL;
885 /* Mark the last entry as wrapping the ring. */
886 tp->rx_ring[i-1].length = PKT_BUF_SZ | Rx1RingWrap;
887 tp->rx_ring[i-1].buffer2 = virt_to_bus(&tp->rx_ring[0]);
889 for (i = 0; i < RX_RING_SIZE; i++) {
890 /* Note the receive buffer must be longword aligned.
891 dev_alloc_skb() provides 16 byte alignment. But do *not*
892 use skb_reserve() to align the IP header! */
893 struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ);
894 tp->rx_skbuff[i] = skb;
897 skb->dev = dev; /* Mark as being used by this device. */
898 tp->rx_ring[i].status = Rx0DescOwned; /* Owned by Xircom chip */
899 tp->rx_ring[i].buffer1 = virt_to_bus(skb->tail);
/* Record how many Rx slots were actually filled (negative offset idiom). */
901 tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
903 /* The Tx buffer descriptor is filled in as needed, but we
904 do need to clear the ownership bit. */
905 for (i = 0; i < TX_RING_SIZE; i++) {
906 tp->tx_skbuff[i] = NULL;
907 tp->tx_ring[i].status = 0;
908 tp->tx_ring[i].buffer2 = virt_to_bus(&tp->tx_ring[i+1]);
/* X3201-3 needs 4-byte-aligned Tx buffers: pre-allocate bounce skbs. */
910 if (tp->chip_id == X3201_3)
911 tp->tx_aligned_skbuff[i] = dev_alloc_skb(PKT_BUF_SZ);
914 tp->tx_ring[i-1].buffer2 = virt_to_bus(&tp->tx_ring[0]);
/*
 * xircom_start_xmit - queue one skb on the Tx ring and poke the chip.
 * On the X3201-3 the payload is first copied into a pre-allocated
 * 4-byte-aligned bounce buffer.  Tx-done interrupts are only requested
 * periodically (half-full) or when the ring is nearly exhausted, to cut
 * interrupt load.  Returns 0 (packet accepted).
 */
919 xircom_start_xmit(struct sk_buff *skb, struct net_device *dev)
921 struct xircom_private *tp = netdev_priv(dev);
925 /* Caution: the write order is important here, set the base address
926 with the "ownership" bits last. */
928 /* Calculate the next Tx descriptor entry. */
929 entry = tp->cur_tx % TX_RING_SIZE;
931 tp->tx_skbuff[entry] = skb;
933 if (tp->chip_id == X3201_3) {
934 memcpy(tp->tx_aligned_skbuff[entry]->data,skb->data,skb->len);
935 tp->tx_ring[entry].buffer1 = virt_to_bus(tp->tx_aligned_skbuff[entry]->data);
938 tp->tx_ring[entry].buffer1 = virt_to_bus(skb->data);
/* Decide whether this descriptor should raise a Tx-done interrupt. */
940 if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */
941 flag = Tx1WholePkt; /* No interrupt */
942 } else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) {
943 flag = Tx1WholePkt | Tx1ComplIntr; /* Tx-done intr. */
944 } else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) {
945 flag = Tx1WholePkt; /* No Tx-done intr. */
947 /* Leave room for set_rx_mode() to fill entries. */
948 flag = Tx1WholePkt | Tx1ComplIntr; /* Tx-done intr. */
/* Last ring slot must also carry the ring-wrap bit. */
951 if (entry == TX_RING_SIZE - 1)
952 flag |= Tx1WholePkt | Tx1ComplIntr | Tx1RingWrap;
954 tp->tx_ring[entry].length = skb->len | flag;
955 tp->tx_ring[entry].status = Tx0DescOwned; /* Pass ownership to the chip. */
958 netif_stop_queue (dev);
960 netif_wake_queue (dev);
962 /* Trigger an immediate transmit demand. */
963 outl(0, dev->base_addr + CSR1);
965 dev->trans_start = jiffies;
/*
 * xircom_media_change - re-read the PHY link state and resynchronize the
 * driver/chip with it: update tp->speed100/tp->full_duplex from the
 * autonegotiation result (or the forced BMCR bits), update the carrier
 * state, and rewrite CSR6's duplex bit if it changed.
 *
 * FIX(review): outl_CSR6() takes the I/O *base* address and adds the CSR6
 * offset internally (see outl_CSR6 above); the old call passed
 * "ioaddr + CSR6", double-applying the offset so the write hit the wrong
 * register.  Now passes "ioaddr", matching every other caller in the file.
 */
971 static void xircom_media_change(struct net_device *dev)
973 struct xircom_private *tp = netdev_priv(dev);
974 long ioaddr = dev->base_addr;
975 u16 reg0, reg1, reg4, reg5;
976 u32 csr6 = inl(ioaddr + CSR6), newcsr6;
978 /* reset status first */
/* BMSR latches link-down events; read twice so the second read reflects
 * the current state. */
979 mdio_read(dev, tp->phys[0], MII_BMCR);
980 mdio_read(dev, tp->phys[0], MII_BMSR);
982 reg0 = mdio_read(dev, tp->phys[0], MII_BMCR);
983 reg1 = mdio_read(dev, tp->phys[0], MII_BMSR);
985 if (reg1 & BMSR_LSTATUS) {
987 if (reg0 & BMCR_ANENABLE) {
988 /* autonegotiation is enabled */
/* Resolve the highest mode both we and the link partner advertise. */
989 reg4 = mdio_read(dev, tp->phys[0], MII_ADVERTISE);
990 reg5 = mdio_read(dev, tp->phys[0], MII_LPA);
991 if (reg4 & ADVERTISE_100FULL && reg5 & LPA_100FULL) {
994 } else if (reg4 & ADVERTISE_100HALF && reg5 & LPA_100HALF) {
997 } else if (reg4 & ADVERTISE_10FULL && reg5 & LPA_10FULL) {
1002 tp->full_duplex = 0;
1005 /* autonegotiation is disabled */
1006 if (reg0 & BMCR_SPEED100)
1010 if (reg0 & BMCR_FULLDPLX)
1011 tp->full_duplex = 1;
1013 tp->full_duplex = 0;
1015 printk(KERN_DEBUG "%s: Link is up, running at %sMbit %s-duplex\n",
1017 tp->speed100 ? "100" : "10",
1018 tp->full_duplex ? "full" : "half");
1019 netif_carrier_on(dev);
/* Sync the chip's duplex setting with the negotiated/forced result. */
1020 newcsr6 = csr6 & ~FullDuplexBit;
1021 if (tp->full_duplex)
1022 newcsr6 |= FullDuplexBit;
1023 if (newcsr6 != csr6)
1024 outl_CSR6(newcsr6, ioaddr);
1026 printk(KERN_DEBUG "%s: Link is down\n", dev->name);
1027 netif_carrier_off(dev);
/*
 * check_duplex - reset the first PHY and reprogram it: either restart
 * autonegotiation with our cached advertising word, or force the
 * speed/duplex recorded in tp->speed100/tp->full_duplex.
 * NOTE(review): the busy-wait on BMCR_RESET has no timeout; a dead PHY
 * would spin forever -- confirm acceptable in this call context.
 */
1032 static void check_duplex(struct net_device *dev)
1034 struct xircom_private *tp = netdev_priv(dev);
1037 mdio_write(dev, tp->phys[0], MII_BMCR, BMCR_RESET);
/* Wait for the PHY to clear its self-resetting BMCR_RESET bit. */
1039 while (mdio_read(dev, tp->phys[0], MII_BMCR) & BMCR_RESET);
1041 reg0 = mdio_read(dev, tp->phys[0], MII_BMCR);
1042 mdio_write(dev, tp->phys[0], MII_ADVERTISE, tp->advertising[0]);
/* Autoneg path: clear forced bits, enable and restart negotiation. */
1045 reg0 &= ~(BMCR_SPEED100 | BMCR_FULLDPLX);
1046 reg0 |= BMCR_ANENABLE | BMCR_ANRESTART;
/* Forced path: disable autoneg and set speed/duplex explicitly. */
1048 reg0 &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
1050 reg0 |= BMCR_SPEED100;
1051 if (tp->full_duplex)
1052 reg0 |= BMCR_FULLDPLX;
1053 printk(KERN_DEBUG "%s: Link forced to %sMbit %s-duplex\n",
1055 tp->speed100 ? "100" : "10",
1056 tp->full_duplex ? "full" : "half");
1058 mdio_write(dev, tp->phys[0], MII_BMCR, reg0);
/*
 * xircom_interrupt - IRQ handler: services Rx, reclaims completed Tx
 * descriptors, and recovers from abnormal chip conditions, bounded by
 * the max_interrupt_work budget.
 * NOTE(review): chunk is line-sampled; the event loop's opening and
 * several closing braces are not visible here.
 */
1062 /* The interrupt handler does all of the Rx thread work and cleans up
1063 after the Tx thread. */
1064 static irqreturn_t xircom_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
1066 struct net_device *dev = dev_instance;
1067 struct xircom_private *tp = netdev_priv(dev);
1068 long ioaddr = dev->base_addr;
1069 int csr5, work_budget = max_interrupt_work;
/* Serialize against other users of the private state. */
1072 spin_lock (&tp->lock);
1075 csr5 = inl(ioaddr + CSR5);
1076 /* Acknowledge all of the current interrupt sources ASAP. */
1077 outl(csr5 & 0x0001ffff, ioaddr + CSR5);
1079 if (xircom_debug > 4)
1080 printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x.\n",
1081 dev->name, csr5, inl(dev->base_addr + CSR5));
/* Reading all-ones from CSR5 means the hardware is gone (hot unplug). */
1083 if (csr5 == 0xffffffff)
1084 break; /* all bits set, assume PCMCIA card removed */
1086 if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
1091 if (csr5 & (RxIntr | RxNoBuf))
1092 work_budget -= xircom_rx(dev);
/* Tx completion: walk descriptors from dirty_tx up toward cur_tx. */
1094 if (csr5 & (TxNoBuf | TxDied | TxIntr)) {
1095 unsigned int dirty_tx;
1097 for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
1099 int entry = dirty_tx % TX_RING_SIZE;
1100 int status = tp->tx_ring[entry].status;
1103 break; /* It still hasn't been Txed */
1104 /* Check for Rx filter setup frames. */
1105 if (tp->tx_skbuff[entry] == NULL)
1108 if (status & Tx0DescError) {
1109 /* There was an major error, log it. */
1110 #ifndef final_version
1111 if (xircom_debug > 1)
1112 printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
/* Classify the Tx error into the standard net_device_stats buckets. */
1115 tp->stats.tx_errors++;
1116 if (status & Tx0ManyColl) {
1117 tp->stats.tx_aborted_errors++;
1119 if (status & Tx0NoCarrier) tp->stats.tx_carrier_errors++;
1120 if (status & Tx0LateColl) tp->stats.tx_window_errors++;
1121 if (status & Tx0Underflow) tp->stats.tx_fifo_errors++;
/* Successful transmit: count bytes (low 11 bits of length) and colls. */
1123 tp->stats.tx_bytes += tp->tx_ring[entry].length & 0x7ff;
1124 tp->stats.collisions += (status >> 3) & 15;
1125 tp->stats.tx_packets++;
1128 /* Free the original skb. */
1129 dev_kfree_skb_irq(tp->tx_skbuff[entry]);
1130 tp->tx_skbuff[entry] = NULL;
1133 #ifndef final_version
/* Sanity check: dirty pointer should never lag a full ring behind. */
1134 if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
1135 printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
1136 dev->name, dirty_tx, tp->cur_tx, tp->tx_full);
1137 dirty_tx += TX_RING_SIZE;
/* Wake the queue once enough ring slots have been reclaimed. */
1142 tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
1143 /* The ring is no longer full */
1147 netif_stop_queue (dev);
1149 netif_wake_queue (dev);
1151 tp->dirty_tx = dirty_tx;
/* Transmitter stopped: restart it by re-writing CSR6 enables. */
1152 if (csr5 & TxDied) {
1153 if (xircom_debug > 2)
1154 printk(KERN_WARNING "%s: The transmitter stopped."
1155 " CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
1156 dev->name, csr5, inl(ioaddr + CSR6), tp->csr6);
1157 outl_CSR6(tp->csr6 | EnableRx, ioaddr);
1158 outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
/* Abnormal summary: link change, Tx FIFO underflow, missed Rx frames. */
1163 if (csr5 & AbnormalIntr) { /* Abnormal error summary bit. */
1164 if (csr5 & LinkChange)
1165 xircom_media_change(dev);
1166 if (csr5 & TxFIFOUnderflow) {
1167 if ((tp->csr6 & TxThreshMask) != TxThreshMask)
1168 tp->csr6 += (1 << TxThreshShift); /* Bump up the Tx threshold */
1170 tp->csr6 |= TxStoreForw; /* Store-n-forward. */
1171 /* Restart the transmit process. */
1172 outl_CSR6(tp->csr6 | EnableRx, ioaddr);
1173 outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
1175 if (csr5 & RxDied) { /* Missed a Rx frame. */
1176 tp->stats.rx_errors++;
1177 tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
1178 outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
1180 /* Clear all error sources, included undocumented ones! */
1181 outl(0x0800f7ba, ioaddr + CSR5);
/* Bail out if this interrupt has already done too much work. */
1183 if (--work_budget < 0) {
1184 if (xircom_debug > 1)
1185 printk(KERN_WARNING "%s: Too much work during an interrupt, "
1186 "csr5=0x%8.8x.\n", dev->name, csr5);
1187 /* Acknowledge all interrupt sources. */
1188 outl(0x8001ffff, ioaddr + CSR5);
1193 if (xircom_debug > 3)
1194 printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
1195 dev->name, inl(ioaddr + CSR5));
1197 spin_unlock (&tp->lock);
1198 return IRQ_RETVAL(handled);
/*
 * xircom_rx - pull completed frames off the Rx ring, hand them to the
 * network stack, and refill the ring with fresh buffers.
 * NOTE(review): sampled chunk; the function's return-value computation
 * is not visible here (the caller subtracts it from its work budget).
 */
1203 xircom_rx(struct net_device *dev)
1205 struct xircom_private *tp = netdev_priv(dev);
1206 int entry = tp->cur_rx % RX_RING_SIZE;
/* Limit work to one full ring's worth of descriptors per call. */
1207 int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
1210 if (xircom_debug > 4)
1211 printk(KERN_DEBUG " In xircom_rx(), entry %d %8.8x.\n", entry,
1212 tp->rx_ring[entry].status);
1213 /* If we own the next entry, it's a new packet. Send it up. */
/* status >= 0 means the OWN (sign) bit is clear: the CPU owns it. */
1214 while (tp->rx_ring[entry].status >= 0) {
1215 s32 status = tp->rx_ring[entry].status;
1217 if (xircom_debug > 5)
1218 printk(KERN_DEBUG " In xircom_rx(), entry %d %8.8x.\n", entry,
1219 tp->rx_ring[entry].status);
1220 if (--rx_work_limit < 0)
/* Error path: classify the descriptor failure and count it. */
1222 if ((status & 0x38008300) != 0x0300) {
1223 if ((status & 0x38000300) != 0x0300) {
1224 /* Ignore earlier buffers. */
1225 if ((status & 0xffff) != 0x7fff) {
1226 if (xircom_debug > 1)
1227 printk(KERN_WARNING "%s: Oversized Ethernet frame "
1228 "spanned multiple buffers, status %8.8x!\n",
1230 tp->stats.rx_length_errors++;
1232 } else if (status & Rx0DescError) {
1233 /* There was a fatal error. */
1234 if (xircom_debug > 2)
1235 printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
1237 tp->stats.rx_errors++; /* end of a packet.*/
1238 if (status & (Rx0Runt | Rx0HugeFrame)) tp->stats.rx_length_errors++;
1239 if (status & Rx0CRCError) tp->stats.rx_crc_errors++;
/* Good frame: length lives in bits 16..26 of the status word. */
1242 /* Omit the four octet CRC from the length. */
1243 short pkt_len = ((status >> 16) & 0x7ff) - 4;
1244 struct sk_buff *skb;
1246 #ifndef final_version
1247 if (pkt_len > 1518) {
1248 printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
1249 dev->name, pkt_len, pkt_len);
1251 tp->stats.rx_length_errors++;
1254 /* Check if the packet is long enough to accept without copying
1255 to a minimally-sized skbuff. */
/* Small packet: copy into a fresh skb and leave the ring buffer. */
1256 if (pkt_len < rx_copybreak
1257 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1259 skb_reserve(skb, 2); /* 16 byte align the IP header */
1260 #if ! defined(__alpha__)
1261 eth_copy_and_sum(skb, bus_to_virt(tp->rx_ring[entry].buffer1),
1263 skb_put(skb, pkt_len);
1265 memcpy(skb_put(skb, pkt_len),
1266 bus_to_virt(tp->rx_ring[entry].buffer1), pkt_len);
1269 } else { /* Pass up the skb already on the Rx ring. */
1270 skb_put(skb = tp->rx_skbuff[entry], pkt_len);
1271 tp->rx_skbuff[entry] = NULL;
1273 skb->protocol = eth_type_trans(skb, dev);
1275 dev->last_rx = jiffies;
1276 tp->stats.rx_packets++;
1277 tp->stats.rx_bytes += pkt_len;
1279 entry = (++tp->cur_rx) % RX_RING_SIZE;
1282 /* Refill the Rx ring buffers. */
1283 for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
1284 entry = tp->dirty_rx % RX_RING_SIZE;
1285 if (tp->rx_skbuff[entry] == NULL) {
1286 struct sk_buff *skb;
1287 skb = tp->rx_skbuff[entry] = dev_alloc_skb(PKT_BUF_SZ);
1290 skb->dev = dev; /* Mark as being used by this device. */
1291 tp->rx_ring[entry].buffer1 = virt_to_bus(skb->tail);
/* Hand the descriptor back to the chip. */
1294 tp->rx_ring[entry].status = Rx0DescOwned;
/*
 * xircom_down - quiesce the chip: mask interrupts, stop Tx/Rx DMA,
 * harvest the final missed-frame count, and restore the saved port.
 */
1302 xircom_down(struct net_device *dev)
1304 long ioaddr = dev->base_addr;
1305 struct xircom_private *tp = netdev_priv(dev);
1307 /* Disable interrupts by clearing the interrupt mask. */
1308 outl(0, ioaddr + CSR7);
1309 /* Stop the chip's Tx and Rx processes. */
1310 outl_CSR6(inl(ioaddr + CSR6) & ~EnableTxRx, ioaddr);
/* All-ones from CSR6 means the card is gone; skip the stats read. */
1312 if (inl(ioaddr + CSR6) != 0xffffffff)
1313 tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
1315 dev->if_port = tp->saved_if_port;
/*
 * xircom_close - netdev ->stop: stop the queue, shut the chip down,
 * release the IRQ, and free the socket buffers on both rings.
 */
1320 xircom_close(struct net_device *dev)
1322 long ioaddr = dev->base_addr;
1323 struct xircom_private *tp = netdev_priv(dev);
1326 if (xircom_debug > 1)
1327 printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
1328 dev->name, inl(ioaddr + CSR5));
1330 netif_stop_queue(dev);
/* Only touch the hardware if the card is still physically present. */
1332 if (netif_device_present(dev))
1335 free_irq(dev->irq, dev);
1337 /* Free all the skbuffs in the Rx queue. */
1338 for (i = 0; i < RX_RING_SIZE; i++) {
1339 struct sk_buff *skb = tp->rx_skbuff[i];
1340 tp->rx_skbuff[i] = NULL;
/* Poison the descriptor so a late DMA cannot touch freed memory. */
1341 tp->rx_ring[i].status = 0; /* Not owned by Xircom chip. */
1342 tp->rx_ring[i].length = 0;
1343 tp->rx_ring[i].buffer1 = 0xBADF00D0; /* An invalid address. */
/* Release any packets still pending on the Tx ring. */
1348 for (i = 0; i < TX_RING_SIZE; i++) {
1349 if (tp->tx_skbuff[i])
1350 dev_kfree_skb(tp->tx_skbuff[i]);
1351 tp->tx_skbuff[i] = NULL;
/*
 * xircom_get_stats - fold the chip's missed-frame counter (CSR8)
 * into the software statistics and return them.
 */
1359 static struct net_device_stats *xircom_get_stats(struct net_device *dev)
1361 struct xircom_private *tp = netdev_priv(dev);
1362 long ioaddr = dev->base_addr;
/* Only read hardware registers while the device is present. */
1364 if (netif_device_present(dev))
1365 tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
/*
 * Ethtool ->get_settings: report supported/advertised modes, current
 * speed/duplex, and autoneg state from the driver's cached MII state.
 */
1370 static int xircom_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1372 struct xircom_private *tp = netdev_priv(dev);
1374 SUPPORTED_10baseT_Half |
1375 SUPPORTED_10baseT_Full |
1376 SUPPORTED_100baseT_Half |
1377 SUPPORTED_100baseT_Full |
/* Translate the cached MII advertising mask into ethtool bits. */
1381 ecmd->advertising = ADVERTISED_MII;
1382 if (tp->advertising[0] & ADVERTISE_10HALF)
1383 ecmd->advertising |= ADVERTISED_10baseT_Half;
1384 if (tp->advertising[0] & ADVERTISE_10FULL)
1385 ecmd->advertising |= ADVERTISED_10baseT_Full;
1386 if (tp->advertising[0] & ADVERTISE_100HALF)
1387 ecmd->advertising |= ADVERTISED_100baseT_Half;
1388 if (tp->advertising[0] & ADVERTISE_100FULL)
1389 ecmd->advertising |= ADVERTISED_100baseT_Full;
1391 ecmd->advertising |= ADVERTISED_Autoneg;
1392 ecmd->autoneg = AUTONEG_ENABLE;
1394 ecmd->autoneg = AUTONEG_DISABLE;
1396 ecmd->port = PORT_MII;
1397 ecmd->transceiver = XCVR_INTERNAL;
1398 ecmd->phy_address = tp->phys[0];
/* Speed/duplex come from cached state, not a live PHY read. */
1399 ecmd->speed = tp->speed100 ? SPEED_100 : SPEED_10;
1400 ecmd->duplex = tp->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
1401 ecmd->maxtxpkt = TX_RING_SIZE / 2;
/*
 * Ethtool ->set_settings: record the requested autoneg/speed/duplex
 * and rebuild the MII advertising mask to match.
 */
1406 static int xircom_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1408 struct xircom_private *tp = netdev_priv(dev);
1409 u16 autoneg, speed100, full_duplex;
/* Normalize the ethtool request to boolean driver flags. */
1411 autoneg = (ecmd->autoneg == AUTONEG_ENABLE);
1412 speed100 = (ecmd->speed == SPEED_100);
1413 full_duplex = (ecmd->duplex == DUPLEX_FULL);
1415 tp->autoneg = autoneg;
/* Only rewrite the advertising mask when speed or duplex changed. */
1416 if (speed100 != tp->speed100 ||
1417 full_duplex != tp->full_duplex) {
1418 tp->speed100 = speed100;
1419 tp->full_duplex = full_duplex;
1420 /* change advertising bits */
1421 tp->advertising[0] &= ~(ADVERTISE_10HALF |
1425 ADVERTISE_100BASE4);
/* Advertise exactly the one mode matching the requested settings. */
1428 tp->advertising[0] |= ADVERTISE_100FULL;
1430 tp->advertising[0] |= ADVERTISE_100HALF;
1433 tp->advertising[0] |= ADVERTISE_10FULL;
1435 tp->advertising[0] |= ADVERTISE_10HALF;
/*
 * Ethtool ->get_drvinfo: fill in driver name, version and PCI bus id.
 * NOTE(review): strcpy into the fixed-size ethtool_drvinfo fields is
 * only safe while DRV_NAME/DRV_VERSION stay short; a length-bounded
 * copy would be the defensive choice.
 */
1442 static void xircom_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1444 struct xircom_private *tp = netdev_priv(dev);
1445 strcpy(info->driver, DRV_NAME);
1446 strcpy(info->version, DRV_VERSION);
1447 strcpy(info->bus_info, pci_name(tp->pdev));
/* Ethtool method table wired into the netdev by the probe code. */
1450 static struct ethtool_ops ops = {
1451 .get_settings = xircom_get_settings,
1452 .set_settings = xircom_set_settings,
1453 .get_drvinfo = xircom_get_drvinfo,
/*
 * xircom_ioctl - legacy SIOC[GS]MII* interface for raw MII access.
 * Writes addressed to our own PHY also update the cached duplex and
 * advertising state so the driver stays consistent with the hardware.
 * NOTE(review): uses the long-deprecated save_flags/cli/restore_flags
 * primitives; the matching save_flags calls are outside this sampled view.
 */
1456 /* Provide ioctl() calls to examine the MII xcvr state. */
1457 static int xircom_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1459 struct xircom_private *tp = netdev_priv(dev);
/* data[0]=phy id, data[1]=register, data[2]=value in, data[3]=value out. */
1460 u16 *data = (u16 *)&rq->ifr_ifru;
1461 int phy = tp->phys[0] & 0x1f;
1462 unsigned long flags;
1465 /* Legacy mii-diag interface */
1466 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
1472 case SIOCGMIIREG: /* Read MII PHY register. */
1475 data[3] = mdio_read(dev, data[0] & 0x1f, data[1] & 0x1f);
1476 restore_flags(flags);
1478 case SIOCSMIIREG: /* Write MII PHY register. */
/* PHY writes can reconfigure the link; require CAP_NET_ADMIN. */
1479 if (!capable(CAP_NET_ADMIN))
1483 if (data[0] == tp->phys[0]) {
1484 u16 value = data[2];
/* Mirror BMCR/ADVERTISE writes into our cached link settings. */
1487 if (value & (BMCR_RESET | BMCR_ANENABLE))
1488 /* Autonegotiation. */
1491 tp->full_duplex = (value & BMCR_FULLDPLX) ? 1 : 0;
1496 tp->advertising[0] = value;
1501 mdio_write(dev, data[0] & 0x1f, data[1] & 0x1f, data[2]);
1502 restore_flags(flags);
1511 /* Set or clear the multicast filter for this adaptor.
1512 Note that we only use exclusion around actually queueing the
1513 new frame, not around filling tp->setup_frame. This is non-deterministic
1514 when re-entered but still correct. */
1515 static void set_rx_mode(struct net_device *dev)
1517 struct xircom_private *tp = netdev_priv(dev);
1518 struct dev_mc_list *mclist;
1519 long ioaddr = dev->base_addr;
1520 int csr6 = inl(ioaddr + CSR6);
1521 u16 *eaddrs, *setup_frm;
/* Start from a clean filter mode in both the cached and live CSR6. */
1525 tp->csr6 &= ~(AllMultiBit | PromiscBit | HashFilterBit);
1526 csr6 &= ~(AllMultiBit | PromiscBit | HashFilterBit);
1527 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1528 tp->csr6 |= PromiscBit;
1533 if ((dev->mc_count > 1000) || (dev->flags & IFF_ALLMULTI)) {
1534 /* Too many to filter well -- accept all multicasts. */
1535 tp->csr6 |= AllMultiBit;
1536 csr6 |= AllMultiBit;
/* Otherwise build a setup frame: perfect filter or hash filter. */
1540 tx_flags = Tx1WholePkt | Tx1SetupPkt | PKT_SETUP_SZ;
1542 /* Note that only the low-address shortword of setup_frame is valid! */
1543 setup_frm = tp->setup_frame;
1544 mclist = dev->mc_list;
1546 /* Fill the first entry with our physical address. */
1547 eaddrs = (u16 *)dev->dev_addr;
1548 *setup_frm = cpu_to_le16(eaddrs[0]); setup_frm += 2;
1549 *setup_frm = cpu_to_le16(eaddrs[1]); setup_frm += 2;
1550 *setup_frm = cpu_to_le16(eaddrs[2]); setup_frm += 2;
1552 if (dev->mc_count > 14) { /* Must use a multicast hash table. */
1553 u32 *hash_table = (u32 *)(tp->setup_frame + 4 * 12);
1556 tx_flags |= Tx1HashSetup;
1557 tp->csr6 |= HashFilterBit;
1558 csr6 |= HashFilterBit;
1560 /* Fill the unused 3 entries with the broadcast address.
1561 At least one entry *must* contain the broadcast address!!!*/
1562 for (i = 0; i < 3; i++) {
1563 *setup_frm = 0xffff; setup_frm += 2;
1564 *setup_frm = 0xffff; setup_frm += 2;
1565 *setup_frm = 0xffff; setup_frm += 2;
1568 /* Truly brain-damaged hash filter layout */
1569 /* XXX: not sure if I should take the last or the first 9 bits */
/* 9-bit CRC of each multicast address indexes the chip's hash table. */
1570 for (i = 0; i < dev->mc_count; i++, mclist = mclist->next) {
1572 hash = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x1ff;
1574 hash2 = hash + ((hash >> 4) << 4) +
1578 hash2 = 64 + hash + (hash >> 4) * 80;
1580 hptr = &hash_table[hash2 & ~0x1f];
1581 *hptr |= cpu_to_le32(1 << (hash2 & 0x1f));
1584 /* We have <= 14 mcast addresses so we can use Xircom's
1585 wonderful 16-address perfect filter. */
1586 for (i = 0; i < dev->mc_count; i++, mclist = mclist->next) {
1587 eaddrs = (u16 *)mclist->dmi_addr;
1588 *setup_frm = cpu_to_le16(eaddrs[0]); setup_frm += 2;
1589 *setup_frm = cpu_to_le16(eaddrs[1]); setup_frm += 2;
1590 *setup_frm = cpu_to_le16(eaddrs[2]); setup_frm += 2;
1592 /* Fill the unused entries with the broadcast address.
1593 At least one entry *must* contain the broadcast address!!!*/
1594 for (; i < 15; i++) {
1595 *setup_frm = 0xffff; setup_frm += 2;
1596 *setup_frm = 0xffff; setup_frm += 2;
1597 *setup_frm = 0xffff; setup_frm += 2;
1601 /* Now add this frame to the Tx list. */
1602 if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) {
1603 /* Same setup recently queued, we need not add it. */
1604 /* XXX: Huh? All it means is that the Tx list is full...*/
1606 unsigned long flags;
/* Queue the setup frame; interrupts off while touching the ring. */
1610 save_flags(flags); cli();
1611 entry = tp->cur_tx++ % TX_RING_SIZE;
1614 /* Avoid a chip errata by prefixing a dummy entry. */
1615 tp->tx_skbuff[entry] = NULL;
1616 tp->tx_ring[entry].length =
1617 (entry == TX_RING_SIZE - 1) ? Tx1RingWrap : 0;
1618 tp->tx_ring[entry].buffer1 = 0;
1619 /* race with chip, set Tx0DescOwned later */
1621 entry = tp->cur_tx++ % TX_RING_SIZE;
1624 tp->tx_skbuff[entry] = NULL;
1625 /* Put the setup frame on the Tx list. */
1626 if (entry == TX_RING_SIZE - 1)
1627 tx_flags |= Tx1RingWrap; /* Wrap ring. */
1628 tp->tx_ring[entry].length = tx_flags;
1629 tp->tx_ring[entry].buffer1 = virt_to_bus(tp->setup_frame);
1630 tp->tx_ring[entry].status = Tx0DescOwned;
1631 if (tp->cur_tx - tp->dirty_tx >= TX_RING_SIZE - 2) {
1633 netif_stop_queue (dev);
/* Only now hand the dummy descriptor to the chip (see errata note). */
1636 tp->tx_ring[dummy].status = Tx0DescOwned;
1637 restore_flags(flags);
1638 /* Trigger an immediate transmit demand. */
1639 outl(0, ioaddr + CSR1);
/* Finally commit the new filter-mode bits to CSR6. */
1643 outl_CSR6(csr6, ioaddr);
/* PCI IDs this driver binds to: Xircom (0x115D) device 0x0003, X3201-3. */
1647 static struct pci_device_id xircom_pci_table[] = {
1648 { 0x115D, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, X3201_3 },
1651 MODULE_DEVICE_TABLE(pci, xircom_pci_table);
/*
 * xircom_suspend - PM callback: save PCI config state, disable the
 * device, and drop it into power state D3.
 * NOTE(review): sampled chunk; any netif_device_detach/xircom_down
 * call before the state save is not visible here — confirm in full file.
 */
1655 static int xircom_suspend(struct pci_dev *pdev, u32 state)
1657 struct net_device *dev = pci_get_drvdata(pdev);
1658 struct xircom_private *tp = netdev_priv(dev);
1659 printk(KERN_INFO "xircom_suspend(%s)\n", dev->name);
1663 pci_save_state(pdev);
1664 pci_disable_device(pdev);
1665 pci_set_power_state(pdev, 3);
/*
 * xircom_resume - PM callback: restore PCI state, wake the chip out of
 * ACPI sleep, and redo the transceiver initialisation.
 */
1671 static int xircom_resume(struct pci_dev *pdev)
1673 struct net_device *dev = pci_get_drvdata(pdev);
1674 struct xircom_private *tp = netdev_priv(dev);
1675 printk(KERN_INFO "xircom_resume(%s)\n", dev->name);
/* Back to full power (D0) before touching the device. */
1677 pci_set_power_state(pdev,0);
1678 pci_enable_device(pdev);
1679 pci_restore_state(pdev);
1681 /* Bring the chip out of sleep mode.
1682 Caution: Snooze mode does not work with some boards! */
1683 if (xircom_tbl[tp->chip_id].flags & HAS_ACPI)
1684 pci_write_config_dword(tp->pdev, PCI_POWERMGMT, 0);
1686 transceiver_voodoo(dev);
1687 if (xircom_tbl[tp->chip_id].flags & HAS_MII)
1694 #endif /* CONFIG_PM */
/*
 * xircom_remove_one - PCI ->remove: unregister the netdev, release the
 * device's I/O regions, and clear the drvdata pointer.
 */
1697 static void __devexit xircom_remove_one(struct pci_dev *pdev)
1699 struct net_device *dev = pci_get_drvdata(pdev);
1701 printk(KERN_INFO "xircom_remove_one(%s)\n", dev->name);
1702 unregister_netdev(dev);
1703 pci_release_regions(pdev);
1705 pci_set_drvdata(pdev, NULL);
/* PCI driver glue; suspend/resume hooks only under CONFIG_PM. */
1709 static struct pci_driver xircom_driver = {
1711 .id_table = xircom_pci_table,
1712 .probe = xircom_init_one,
1713 .remove = __devexit_p(xircom_remove_one),
1715 .suspend = xircom_suspend,
1716 .resume = xircom_resume
1717 #endif /* CONFIG_PM */
/* Module entry point: register the PCI driver with the core. */
1721 static int __init xircom_init(void)
1723 /* when a module, this is printed whether or not devices are found in probe */
1727 return pci_module_init(&xircom_driver);
/* Module exit point: unregister the PCI driver. */
1731 static void __exit xircom_exit(void)
1733 pci_unregister_driver(&xircom_driver);
1736 module_init(xircom_init)
1737 module_exit(xircom_exit)