1 /* xircom_tulip_cb.c: A Xircom CBE-100 ethernet driver for Linux. */
3 Written/copyright 1994-1999 by Donald Becker.
5 This software may be used and distributed according to the terms
6 of the GNU General Public License, incorporated herein by reference.
8 The author may be reached as becker@scyld.com, or C/O
9 Scyld Computing Corporation
10 410 Severn Ave., Suite 210
13 -----------------------------------------------------------
15 Linux kernel-specific changes:
21 - Rewrite perfect filter/hash code
22 - Use interrupts for media changes
25 - Disallow negotiation of unsupported full-duplex modes
28 #define DRV_NAME "xircom_tulip_cb"
29 #define DRV_VERSION "0.91+LK1.1"
30 #define DRV_RELDATE "October 11, 2001"
34 /* A few user-configurable values. */
36 /* Maximum events (Rx packets, etc.) to handle at each interrupt. */
37 static int max_interrupt_work = 25;
40 /* Used to pass the full-duplex flag, etc. */
41 static int full_duplex[MAX_UNITS];
42 static int options[MAX_UNITS];
43 static int mtu[MAX_UNITS]; /* Jumbo MTU for interfaces. */
45 /* Keep the ring sizes a power of two for efficiency.
46 Making the Tx ring too large decreases the effectiveness of channel
47 bonding and packet priority.
48 There are no ill effects from too-large receive rings. */
49 #define TX_RING_SIZE 16
50 #define RX_RING_SIZE 32
52 /* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
54 static int rx_copybreak = 1518;
56 static int rx_copybreak = 100;
60 Set the bus performance register.
61 Typical: Set 16 longword cache alignment, no burst limit.
62 Cache alignment bits 15:14 Burst length 13:8
63 0000 No alignment 0x00000000 unlimited 0800 8 longwords
64 4000 8 longwords 0100 1 longword 1000 16 longwords
65 8000 16 longwords 0200 2 longwords 2000 32 longwords
66 C000 32 longwords 0400 4 longwords
67 Warning: many older 486 systems are broken and require setting 0x00A04800
68 8 longword cache alignment, 8 longword burst.
69 ToDo: Non-Intel setting could be better.
72 #if defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
73 static int csr0 = 0x01A00000 | 0xE000;
74 #elif defined(__powerpc__)
75 static int csr0 = 0x01B00000 | 0x8000;
76 #elif defined(__sparc__)
77 static int csr0 = 0x01B00080 | 0x8000;
78 #elif defined(__i386__)
79 static int csr0 = 0x01A00000 | 0x8000;
81 #warning Processor architecture undefined!
82 static int csr0 = 0x00A00000 | 0x4800;
85 /* Operational parameters that usually are not changed. */
86 /* Time in jiffies before concluding the transmitter is hung. */
87 #define TX_TIMEOUT (4 * HZ)
88 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
89 #define PKT_SETUP_SZ 192 /* Size of the setup frame */
92 #define PCI_POWERMGMT 0x40
94 #include <linux/config.h>
95 #include <linux/module.h>
96 #include <linux/kernel.h>
97 #include <linux/pci.h>
98 #include <linux/netdevice.h>
99 #include <linux/etherdevice.h>
100 #include <linux/delay.h>
101 #include <linux/init.h>
102 #include <linux/mii.h>
103 #include <linux/ethtool.h>
104 #include <linux/crc32.h>
107 #include <asm/processor.h> /* Processor type for cache alignment. */
108 #include <asm/uaccess.h>
111 /* These identify the driver base version and may not be removed. */
112 static char version[] __devinitdata =
113 KERN_INFO DRV_NAME ".c derived from tulip.c:v0.91 4/14/99 becker@scyld.com\n"
114 KERN_INFO " unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE "\n";
116 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
117 MODULE_DESCRIPTION("Xircom CBE-100 ethernet driver");
118 MODULE_LICENSE("GPL v2");
119 MODULE_VERSION(DRV_VERSION);
121 MODULE_PARM(debug, "i");
122 MODULE_PARM(max_interrupt_work, "i");
123 MODULE_PARM(rx_copybreak, "i");
124 MODULE_PARM(csr0, "i");
125 MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
126 MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
128 #define RUN_AT(x) (jiffies + (x))
130 #define xircom_debug debug
132 static int xircom_debug = XIRCOM_DEBUG;
134 static int xircom_debug = 1;
140 I. Board Compatibility
142 This device driver was forked from the driver for the DECchip "Tulip",
143 Digital's single-chip ethernet controllers for PCI. It supports Xircom's
144 almost-Tulip-compatible CBE-100 CardBus adapters.
146 II. Board-specific settings
148 PCI bus devices are configured by the system at boot time, so no jumpers
149 need to be set on the board. The system BIOS preferably should assign the
150 PCI INTA signal to an otherwise unused system IRQ line.
152 III. Driver operation
156 The Xircom can use either ring buffers or lists of Tx and Rx descriptors.
157 This driver uses statically allocated rings of Rx and Tx descriptors, set at
158 compile time by RX/TX_RING_SIZE. This version of the driver allocates skbuffs
159 for the Rx ring buffers at open() time and passes the skb->data field to the
160 Xircom as receive data buffers. When an incoming frame is less than
161 RX_COPYBREAK bytes long, a fresh skbuff is allocated and the frame is
162 copied to the new skbuff. When the incoming frame is larger, the skbuff is
163 passed directly up the protocol stack and replaced by a newly allocated
166 The RX_COPYBREAK value is chosen to trade off the memory wasted by
167 using a full-sized skbuff for small frames vs. the copying costs of larger
168 frames. For small frames the copying cost is negligible (esp. considering
169 that we are pre-loading the cache with immediately useful header
170 information). For large frames the copying cost is non-trivial, and the
171 larger copy might flush the cache of useful data. A subtle aspect of this
172 choice is that the Xircom only receives into longword aligned buffers, thus
173 the IP header at offset 14 isn't longword aligned for further processing.
174 Copied frames are put into the new skbuff at an offset of "+2", thus copying
175 has the beneficial effect of aligning the IP header and preloading the
178 IIIC. Synchronization
179 The driver runs as two independent, single-threaded flows of control. One
180 is the send-packet routine, which enforces single-threaded use by the
181 dev->tbusy flag. The other thread is the interrupt handler, which is single
182 threaded by the hardware and other software.
184 The send packet thread has partial control over the Tx ring and 'dev->tbusy'
185 flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
186 queue slot is empty, it clears the tbusy flag when finished otherwise it sets
187 the 'tp->tx_full' flag.
189 The interrupt handler has exclusive control over the Rx ring and records stats
190 from the Tx ring. (The Tx-done interrupt can't be selectively turned off, so
191 we can't avoid the interrupt overhead by having the Tx routine reap the Tx
192 stats.) After reaping the stats, it marks the queue entry as empty by setting
193 the 'base' to zero. Iff the 'tp->tx_full' flag is set, it clears both the
194 tx_full and tbusy flags.
200 http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
201 http://www.digital.com (search for current 21*4* datasheets and "21X4 SROM")
202 http://www.national.com/pf/DP/DP83840A.html
208 /* A full-duplex map for media types. */
210 MediaIsFD = 1, MediaAlwaysFD=2, MediaIsMII=4, MediaIsFx=8,
212 static const char media_cap[] =
213 {0,0,0,16, 3,19,16,24, 27,4,7,5, 0,20,23,20 };
215 /* Offsets to the Command and Status Registers, "CSRs". All accesses
216 must be longword instructions and quadword aligned. */
217 enum xircom_offsets {
218 CSR0=0, CSR1=0x08, CSR2=0x10, CSR3=0x18, CSR4=0x20, CSR5=0x28,
219 CSR6=0x30, CSR7=0x38, CSR8=0x40, CSR9=0x48, CSR10=0x50, CSR11=0x58,
220 CSR12=0x60, CSR13=0x68, CSR14=0x70, CSR15=0x78, CSR16=0x04, };
222 /* The bits in the CSR5 status registers, mostly interrupt sources. */
224 LinkChange=0x08000000,
225 NormalIntr=0x10000, NormalIntrMask=0x00014045,
226 AbnormalIntr=0x8000, AbnormalIntrMask=0x0a00a5a2,
227 ReservedIntrMask=0xe0001a18,
228 EarlyRxIntr=0x4000, BusErrorIntr=0x2000,
229 EarlyTxIntr=0x400, RxDied=0x100, RxNoBuf=0x80, RxIntr=0x40,
230 TxFIFOUnderflow=0x20, TxNoBuf=0x04, TxDied=0x02, TxIntr=0x01,
233 enum csr0_control_bits {
234 EnableMWI=0x01000000, EnableMRL=0x00800000,
235 EnableMRM=0x00200000, EqualBusPrio=0x02,
239 enum csr6_control_bits {
240 ReceiveAllBit=0x40000000, AllMultiBit=0x80, PromiscBit=0x40,
241 HashFilterBit=0x01, FullDuplexBit=0x0200,
242 TxThresh10=0x400000, TxStoreForw=0x200000,
243 TxThreshMask=0xc000, TxThreshShift=14,
244 EnableTx=0x2000, EnableRx=0x02,
245 ReservedZeroMask=0x8d930134, ReservedOneMask=0x320c0000,
246 EnableTxRx=(EnableTx | EnableRx),
251 HAS_MII=1, HAS_ACPI=2,
253 static struct xircom_chip_table {
255 int valid_intrs; /* CSR7 interrupt enable settings */
258 { "Xircom Cardbus Adapter",
259 LinkChange | NormalIntr | AbnormalIntr | BusErrorIntr |
260 RxDied | RxNoBuf | RxIntr | TxFIFOUnderflow | TxNoBuf | TxDied | TxIntr,
261 HAS_MII | HAS_ACPI, },
264 /* This matches the table above. */
270 /* The Xircom Rx and Tx buffer descriptors. */
271 struct xircom_rx_desc {
274 u32 buffer1, buffer2;
277 struct xircom_tx_desc {
280 u32 buffer1, buffer2; /* We use only buffer 1. */
283 enum tx_desc0_status_bits {
284 Tx0DescOwned=0x80000000, Tx0DescError=0x8000, Tx0NoCarrier=0x0800,
285 Tx0LateColl=0x0200, Tx0ManyColl=0x0100, Tx0Underflow=0x02,
287 enum tx_desc1_status_bits {
288 Tx1ComplIntr=0x80000000, Tx1LastSeg=0x40000000, Tx1FirstSeg=0x20000000,
289 Tx1SetupPkt=0x08000000, Tx1DisableCRC=0x04000000, Tx1RingWrap=0x02000000,
290 Tx1ChainDesc=0x01000000, Tx1NoPad=0x800000, Tx1HashSetup=0x400000,
291 Tx1WholePkt=(Tx1FirstSeg | Tx1LastSeg),
293 enum rx_desc0_status_bits {
294 Rx0DescOwned=0x80000000, Rx0DescError=0x8000, Rx0NoSpace=0x4000,
295 Rx0Runt=0x0800, Rx0McastPkt=0x0400, Rx0FirstSeg=0x0200, Rx0LastSeg=0x0100,
296 Rx0HugeFrame=0x80, Rx0CRCError=0x02,
297 Rx0WholePkt=(Rx0FirstSeg | Rx0LastSeg),
299 enum rx_desc1_status_bits {
300 Rx1RingWrap=0x02000000, Rx1ChainDesc=0x01000000,
303 struct xircom_private {
304 struct xircom_rx_desc rx_ring[RX_RING_SIZE];
305 struct xircom_tx_desc tx_ring[TX_RING_SIZE];
306 /* The saved address of a sent-in-place packet/buffer, for skfree(). */
307 struct sk_buff* tx_skbuff[TX_RING_SIZE];
309 /* The X3201-3 requires 4-byte aligned tx bufs */
310 struct sk_buff* tx_aligned_skbuff[TX_RING_SIZE];
312 /* The addresses of receive-in-place skbuffs. */
313 struct sk_buff* rx_skbuff[RX_RING_SIZE];
314 u16 setup_frame[PKT_SETUP_SZ / sizeof(u16)]; /* Pseudo-Tx frame to init address table. */
316 struct net_device_stats stats;
317 unsigned int cur_rx, cur_tx; /* The next free ring entry */
318 unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
319 unsigned int tx_full:1; /* The Tx queue is full. */
320 unsigned int speed100:1;
321 unsigned int full_duplex:1; /* Full-duplex operation requested. */
322 unsigned int autoneg:1;
323 unsigned int default_port:4; /* Last dev->if_port value. */
325 unsigned int csr0; /* CSR0 setting. */
326 unsigned int csr6; /* Current CSR6 control settings. */
327 u16 to_advertise; /* NWay capabilities advertised. */
329 signed char phys[4], mii_cnt; /* MII device addresses. */
331 struct pci_dev *pdev;
338 static int mdio_read(struct net_device *dev, int phy_id, int location);
339 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
340 static void xircom_up(struct net_device *dev);
341 static void xircom_down(struct net_device *dev);
342 static int xircom_open(struct net_device *dev);
343 static void xircom_tx_timeout(struct net_device *dev);
344 static void xircom_init_ring(struct net_device *dev);
345 static int xircom_start_xmit(struct sk_buff *skb, struct net_device *dev);
346 static int xircom_rx(struct net_device *dev);
347 static void xircom_media_change(struct net_device *dev);
348 static irqreturn_t xircom_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
349 static int xircom_close(struct net_device *dev);
350 static struct net_device_stats *xircom_get_stats(struct net_device *dev);
351 static int xircom_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
352 static void set_rx_mode(struct net_device *dev);
353 static void check_duplex(struct net_device *dev);
356 /* The Xircom cards are picky about when certain bits in CSR6 can be
357 manipulated. Keith Owens <kaos@ocs.com.au>. */
/* Safely write CSR6 (operation mode).  The "strict" bits below may only
 * be changed while both the transmit and receive processes are stopped,
 * so when they change we stop Tx/Rx and poll CSR5 until the chip reports
 * both processes idle (or we give up after 'attempts' tries). */
static void outl_CSR6(u32 newcsr6, long ioaddr)
	/* Bits that must not change while Tx/Rx are running. */
	const int strict_bits =
		TxThresh10 | TxStoreForw | TxThreshMask | EnableTxRx | FullDuplexBit;
	int csr5, csr5_22_20, csr5_19_17, currcsr6, attempts = 200;
	/* mask out the reserved bits that always read 0 on the Xircom cards */
	newcsr6 &= ~ReservedZeroMask;
	/* or in the reserved bits that always read 1 */
	newcsr6 |= ReservedOneMask;
	currcsr6 = inl(ioaddr + CSR6);
	/* Fast path: no strict bit changes, or the chip is already fully
	   stopped -- the write can be done immediately. */
	if (((newcsr6 & strict_bits) == (currcsr6 & strict_bits)) ||
	    ((currcsr6 & ~EnableTxRx) == 0)) {
		outl(newcsr6, ioaddr + CSR6);	/* safe */
		restore_flags(flags);
	/* make sure the transmitter and receiver are stopped first */
	currcsr6 &= ~EnableTxRx;
		csr5 = inl(ioaddr + CSR5);
		if (csr5 == 0xffffffff)
			break;	/* cannot read csr5, card removed? */
		/* CSR5 bit-fields 22:20 and 19:17 hold the Tx and Rx process
		   states (21x4x databook); wait for stopped/suspended codes. */
		csr5_22_20 = csr5 & 0x700000;
		csr5_19_17 = csr5 & 0x0e0000;
		if ((csr5_22_20 == 0 || csr5_22_20 == 0x600000) &&
		    (csr5_19_17 == 0 || csr5_19_17 == 0x80000 || csr5_19_17 == 0xc0000))
			break;	/* both are stopped or suspended */
	/* Polling gave up -- warn and write anyway. */
	printk(KERN_INFO DRV_NAME ": outl_CSR6 too many attempts,"
	       "csr5=0x%08x\n", csr5);
	outl(newcsr6, ioaddr + CSR6);	/* unsafe but do it anyway */
	restore_flags(flags);
	/* Restore the (stopped) old mode before switching to the new one. */
	outl(currcsr6, ioaddr + CSR6);
	/* now it is safe to change csr6 */
	outl(newcsr6, ioaddr + CSR6);
	restore_flags(flags);
/* Read the station MAC address out of the card's CIS, accessed through
 * the boot-ROM interface (address in CSR10, data in CSR9). */
static void __devinit read_mac_address(struct net_device *dev)
	long ioaddr = dev->base_addr;
	unsigned char tuple, link, data_id, data_count;

	/* Xircom has its address stored in the CIS;
	 * we access it through the boot rom interface for now
	 * this might not work, as the CIS is not parsed but I
	 * (danilo) use the offset I found on my card's CIS !!!
	 *
	 * Doug Ledford: I changed this routine around so that it
	 * walks the CIS memory space, parsing the config items, and
	 * finds the proper lan_node_id tuple and uses the data.
	 */
	outl(1 << 12, ioaddr + CSR9);	/* enable boot rom access */
	/* Walk the tuple chain: read the tuple code, the link byte (offset
	   to the next tuple) and the data id/count bytes for each tuple. */
	for (i = 0x100; i < 0x1f7; i += link+2) {
		outl(i, ioaddr + CSR10);
		tuple = inl(ioaddr + CSR9) & 0xff;
		outl(i + 1, ioaddr + CSR10);
		link = inl(ioaddr + CSR9) & 0xff;
		outl(i + 2, ioaddr + CSR10);
		data_id = inl(ioaddr + CSR9) & 0xff;
		outl(i + 3, ioaddr + CSR10);
		data_count = inl(ioaddr + CSR9) & 0xff;
		/* 0x22/0x04/6 = CISTPL_FUNCE holding a 6-byte LAN_NODE_ID:
		   that is the ethernet address we want. */
		if ( (tuple == 0x22) &&
		     (data_id == 0x04) && (data_count == 0x06) ) {
			/*
			 * This is it. We have the data we want.
			 */
			for (j = 0; j < 6; j++) {
				outl(i + j + 4, ioaddr + CSR10);
				dev->dev_addr[j] = inl(ioaddr + CSR9) & 0xff;
		} else if (link == 0) {
/*
 * locate the MII interfaces and initialize them.
 * we disable full-duplex modes here,
 * because we don't know how to handle them.
 */
static void find_mii_transceivers(struct net_device *dev)
	struct xircom_private *tp = dev->priv;

	/* A forced MII media type (from the 'options' module parameter)
	   selects a fixed advertising mask; the table is indexed from
	   media type 9 upward. */
	if (media_cap[tp->default_port] & MediaIsMII) {
		u16 media2advert[] = { 0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200 };
		tp->to_advertise = media2advert[tp->default_port - 9];
		/* Full-duplex abilities are deliberately commented out --
		   see the note at the top of this function. */
		/*ADVERTISE_100BASE4 | ADVERTISE_100FULL |*/ ADVERTISE_100HALF |
		/*ADVERTISE_10FULL |*/ ADVERTISE_10HALF | ADVERTISE_CSMA;
	/* Find the connected MII xcvrs.
	   Doing this in open() would allow detecting external xcvrs later,
	   but takes much time. */
	for (phy = 0, phy_idx = 0; phy < 32 && phy_idx < sizeof(tp->phys); phy++) {
		int mii_status = mdio_read(dev, phy, MII_BMSR);
		/* Accept a PHY reporting 100base4 only, or any 10/100
		   ability when 100base4 is absent. */
		if ((mii_status & (BMSR_100BASE4 | BMSR_100HALF | BMSR_10HALF)) == BMSR_100BASE4 ||
		    ((mii_status & BMSR_100BASE4) == 0 &&
		     (mii_status & (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL | BMSR_10HALF)) != 0)) {
			int mii_reg0 = mdio_read(dev, phy, MII_BMCR);
			int mii_advert = mdio_read(dev, phy, MII_ADVERTISE);
			/* BMSR ability bits shifted right by 6 line up with
			   the ADVERTISE register ability bits. */
			int reg4 = ((mii_status >> 6) & tp->to_advertise) | ADVERTISE_CSMA;
			tp->phys[phy_idx] = phy;
			tp->advertising[phy_idx++] = reg4;
			printk(KERN_INFO "%s: MII transceiver #%d "
			       "config %4.4x status %4.4x advertising %4.4x.\n",
			       dev->name, phy, mii_reg0, mii_status, mii_advert);
	tp->mii_cnt = phy_idx;
		printk(KERN_INFO "%s: ***WARNING***: No MII transceiver found!\n",
/*
 * To quote Arjan van de Ven:
 * transceiver_voodoo() enables the external UTP plug thingy.
 * it's called voodoo as I stole this code and cannot cross-reference
 * it with the specification.
 * Actually it seems to go like this:
 * - GPIO2 enables the MII itself so we can talk to it. The MII gets reset
 *   so any prior MII settings are lost.
 * - GPIO0 enables the TP port so the MII can talk to the network.
 * - a software reset will reset both GPIO pins.
 * I also moved the software reset here, because doing it in xircom_up()
 * required enabling the GPIO pins each time, which reset the MII each time.
 * Thus we couldn't control the MII -- which sucks because we don't know
 * how to handle full-duplex modes so we *must* disable them.
 */
static void transceiver_voodoo(struct net_device *dev)
	struct xircom_private *tp = dev->priv;
	long ioaddr = dev->base_addr;

	/* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
	outl(SoftwareReset, ioaddr + CSR0);
	/* Deassert reset. */
	outl(tp->csr0, ioaddr + CSR0);
	/* Reset the xcvr interface and turn on heartbeat. */
	outl(0x0008, ioaddr + CSR15);
	udelay(5);	/* The delays are Xircom-recommended to give the
			 * chipset time to reset the actual hardware. */
	/* Magic GPIO writes; values are not cross-referenced with any
	   documentation -- see the comment block above. */
	outl(0xa8050000, ioaddr + CSR15);
	outl(0xa00f0000, ioaddr + CSR15);
	/* Leave CSR6 cleared: full duplex intentionally stays off. */
	outl_CSR6(0, ioaddr);
	//outl_CSR6(FullDuplexBit, ioaddr);
/* Probe one Xircom CardBus device: enable it on the PCI bus, allocate
 * the net_device, wake the chip from ACPI sleep, stop its DMA engines,
 * read the CIS-stored MAC address and register the interface. */
static int __devinit xircom_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
	struct net_device *dev;
	struct xircom_private *tp;
	static int board_idx = -1;
	int chip_idx = id->driver_data;

	/* when built into the kernel, we only print version if device is found */
	static int printed_version;
	if (!printed_version++)

	//printk(KERN_INFO "xircom_init_one(%s)\n", pci_name(pdev));

	if (pci_enable_device(pdev))
	pci_set_master(pdev);
	ioaddr = pci_resource_start(pdev, 0);
	dev = alloc_etherdev(sizeof(*tp));
		printk (KERN_ERR DRV_NAME "%d: cannot alloc etherdev, aborting\n", board_idx);
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);
	dev->base_addr = ioaddr;
	dev->irq = pdev->irq;
	if (pci_request_regions(pdev, dev->name)) {
		printk (KERN_ERR DRV_NAME " %d: cannot reserve PCI resources, aborting\n", board_idx);
		goto err_out_free_netdev;
	/* Bring the chip out of sleep mode.
	   Caution: Snooze mode does not work with some boards! */
	if (xircom_tbl[chip_idx].flags & HAS_ACPI)
		pci_write_config_dword(pdev, PCI_POWERMGMT, 0);
	/* Stop the chip's Tx and Rx processes. */
	outl_CSR6(inl(ioaddr + CSR6) & ~EnableTxRx, ioaddr);
	/* Clear the missed-packet counter (CSR8 is clear-on-read). */
	(volatile int)inl(ioaddr + CSR8);
	tp->lock = SPIN_LOCK_UNLOCKED;
	tp->chip_id = chip_idx;
	/* BugFixes: The 21143-TD hangs with PCI Write-and-Invalidate cycles. */
	/* XXX: is this necessary for Xircom? */
	tp->csr0 = csr0 & ~EnableMWI;
	pci_set_drvdata(pdev, dev);
	/* The lower four bits are the media type. */
	if (board_idx >= 0 && board_idx < MAX_UNITS) {
		tp->default_port = options[board_idx] & 15;
		if ((options[board_idx] & 0x90) || full_duplex[board_idx] > 0)
		if (mtu[board_idx] > 0)
			dev->mtu = mtu[board_idx];
	/* Fall back to boot-time media settings passed via dev->mem_start. */
	tp->default_port = dev->mem_start;
	if (tp->default_port) {
		if (media_cap[tp->default_port] & MediaAlwaysFD)
	/* The Xircom-specific entries in the device structure. */
	dev->open = &xircom_open;
	dev->hard_start_xmit = &xircom_start_xmit;
	dev->stop = &xircom_close;
	dev->get_stats = &xircom_get_stats;
	dev->do_ioctl = &xircom_ioctl;
#ifdef HAVE_MULTICAST
	dev->set_multicast_list = &set_rx_mode;
	dev->tx_timeout = xircom_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
	/* Enable the transceiver before touching the CIS/MII. */
	transceiver_voodoo(dev);
	read_mac_address(dev);
	if (register_netdev(dev))
		goto err_out_cleardev;
	pci_read_config_byte(pdev, PCI_REVISION_ID, &chip_rev);
	printk(KERN_INFO "%s: %s rev %d at %#3lx,",
	       dev->name, xircom_tbl[chip_idx].chip_name, chip_rev, ioaddr);
	for (i = 0; i < 6; i++)
		printk("%c%2.2X", i ? ':' : ' ', dev->dev_addr[i]);
	printk(", IRQ %d.\n", dev->irq);
	if (xircom_tbl[chip_idx].flags & HAS_MII) {
		find_mii_transceivers(dev);
	/* Error unwind path. */
	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
661 /* MII transceiver control section.
662 Read and write the MII registers using software-generated serial
663 MDIO protocol. See the MII specifications or DP83840A data sheet
/* The maximum data clock rate is 2.5 MHz. The minimum timing is usually
met by back-to-back PCI I/O cycles, but we insert a delay to avoid
"overclocking" issues or future 66 MHz PCI. */
669 #define mdio_delay() inl(mdio_addr)
/* Read and write the MII registers using software-generated serial
MDIO protocol. It is just different enough from the EEPROM protocol
to not share code. The maximum data clock rate is 2.5 MHz. */
674 #define MDIO_SHIFT_CLK 0x10000
675 #define MDIO_DATA_WRITE0 0x00000
676 #define MDIO_DATA_WRITE1 0x20000
677 #define MDIO_ENB 0x00000 /* Ignore the 0x02000 databook setting. */
678 #define MDIO_ENB_IN 0x40000
679 #define MDIO_DATA_READ 0x80000
/* Read a 16-bit MII management register from PHY 'phy_id' by bit-banging
 * the MDIO protocol through CSR9.  Returns the register value. */
static int mdio_read(struct net_device *dev, int phy_id, int location)
	/* 0xf6<<10 frames the preamble/start/read-opcode bits of the
	   management command. */
	int read_cmd = (0xf6 << 10) | (phy_id << 5) | location;
	long ioaddr = dev->base_addr;
	long mdio_addr = ioaddr + CSR9;

	/* Establish sync by sending at least 32 logic ones. */
	for (i = 32; i >= 0; i--) {
		outl(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
		outl(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
	/* Shift the read command bits out. */
	for (i = 15; i >= 0; i--) {
		int dataval = (read_cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0;
		outl(MDIO_ENB | dataval, mdio_addr);
		outl(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr);
	/* Read the two transition, 16 data, and wire-idle bits. */
	for (i = 19; i > 0; i--) {
		outl(MDIO_ENB_IN, mdio_addr);
		retval = (retval << 1) | ((inl(mdio_addr) & MDIO_DATA_READ) ? 1 : 0);
		outl(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
	/* Drop the trailing idle bit, keep the 16 data bits. */
	return (retval>>1) & 0xffff;
/* Write a 16-bit value to MII register 'location' of PHY 'phy_id' by
 * bit-banging the MDIO protocol through CSR9. */
static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
	/* 0x5002<<16 frames the start/write-opcode and turnaround bits of
	   the 32-bit management command. */
	int cmd = (0x5002 << 16) | (phy_id << 23) | (location << 18) | value;
	long ioaddr = dev->base_addr;
	long mdio_addr = ioaddr + CSR9;

	/* Establish sync by sending 32 logic ones. */
	for (i = 32; i >= 0; i--) {
		outl(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
		outl(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
	/* Shift the command bits out. */
	for (i = 31; i >= 0; i--) {
		int dataval = (cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0;
		outl(MDIO_ENB | dataval, mdio_addr);
		outl(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr);
	/* Clear out extra bits. */
	for (i = 2; i > 0; i--) {
		outl(MDIO_ENB_IN, mdio_addr);
		outl(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
/* Bring the interface up: build the descriptor rings, point the chip at
 * them (CSR3/CSR4), enable Tx then Tx+Rx, unmask interrupts and start
 * the transmit queue. */
xircom_up(struct net_device *dev)
	struct xircom_private *tp = dev->priv;
	long ioaddr = dev->base_addr;

	xircom_init_ring(dev);
	/* Clear the tx ring */
	for (i = 0; i < TX_RING_SIZE; i++) {
		tp->tx_skbuff[i] = NULL;
		tp->tx_ring[i].status = 0;
	if (xircom_debug > 1)
		printk(KERN_DEBUG "%s: xircom_up() irq %d.\n", dev->name, dev->irq);
	/* Tell the chip where the Rx and Tx descriptor lists live. */
	outl(virt_to_bus(tp->rx_ring), ioaddr + CSR3);
	outl(virt_to_bus(tp->tx_ring), ioaddr + CSR4);
	tp->saved_if_port = dev->if_port;
	if (dev->if_port == 0)
		dev->if_port = tp->default_port;
	tp->csr6 = TxThresh10 /*| FullDuplexBit*/;	/* XXX: why 10 and not 100? */
	/* Start the chip's Tx to process setup frame. */
	outl_CSR6(tp->csr6, ioaddr);
	outl_CSR6(tp->csr6 | EnableTx, ioaddr);
	/* Acknowledge all outstanding interrupts sources */
	outl(xircom_tbl[tp->chip_id].valid_intrs, ioaddr + CSR5);
	/* Enable interrupts by setting the interrupt mask. */
	outl(xircom_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
	outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
	/* Receive poll demand -- kick the Rx process. */
	outl(0, ioaddr + CSR2);
	/* Tell the net layer we're ready */
	netif_start_queue (dev);
	/* Check current media state */
	xircom_media_change(dev);
	if (xircom_debug > 2) {
		printk(KERN_DEBUG "%s: Done xircom_up(), CSR0 %8.8x, CSR5 %8.8x CSR6 %8.8x.\n",
		       dev->name, inl(ioaddr + CSR0), inl(ioaddr + CSR5),
/* net_device open() hook: grab the (shared) IRQ, then bring the
 * hardware up via xircom_up(). */
xircom_open(struct net_device *dev)
	struct xircom_private *tp = dev->priv;

	/* SA_SHIRQ: the CardBus IRQ line may be shared with other devices. */
	if (request_irq(dev->irq, &xircom_interrupt, SA_SHIRQ, dev->name, dev))
/* Watchdog callback when a transmit has not completed within
 * TX_TIMEOUT jiffies: optionally dump ring state, restart the chip's
 * Tx/Rx processes and requeue. */
static void xircom_tx_timeout(struct net_device *dev)
	struct xircom_private *tp = dev->priv;
	long ioaddr = dev->base_addr;

	if (media_cap[dev->if_port] & MediaIsMII) {
		/* Do nothing -- the media monitor should handle this. */
		if (xircom_debug > 1)
			printk(KERN_WARNING "%s: Transmit timeout using MII device.\n",
	/* Verbose ring dump, normally compiled out. */
#if defined(way_too_many_messages)
	if (xircom_debug > 3) {
		for (i = 0; i < RX_RING_SIZE; i++) {
			u8 *buf = (u8 *)(tp->rx_ring[i].buffer1);
			printk(KERN_DEBUG "%2d: %8.8x %8.8x %8.8x %8.8x "
			       "%2.2x %2.2x %2.2x.\n",
			       i, (unsigned int)tp->rx_ring[i].status,
			       (unsigned int)tp->rx_ring[i].length,
			       (unsigned int)tp->rx_ring[i].buffer1,
			       (unsigned int)tp->rx_ring[i].buffer2,
			       buf[0], buf[1], buf[2]);
			for (j = 0; buf[j] != 0xee && j < 1600; j++)
				if (j < 100) printk(" %2.2x", buf[j]);
			printk(" j=%d.\n", j);
		printk(KERN_DEBUG " Rx ring %8.8x: ", (int)tp->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++)
			printk(" %8.8x", (unsigned int)tp->rx_ring[i].status);
		printk("\n" KERN_DEBUG " Tx ring %8.8x: ", (int)tp->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(" %8.8x", (unsigned int)tp->tx_ring[i].status);
	/* Stop and restart the chip's Tx/Rx processes . */
	outl_CSR6(tp->csr6 | EnableRx, ioaddr);
	outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
	/* Trigger an immediate transmit demand. */
	outl(0, ioaddr + CSR1);
	dev->trans_start = jiffies;
	netif_wake_queue (dev);
	tp->stats.tx_errors++;
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void xircom_init_ring(struct net_device *dev)
	struct xircom_private *tp = dev->priv;

	tp->cur_rx = tp->cur_tx = 0;
	tp->dirty_rx = tp->dirty_tx = 0;
	/* Chain the Rx descriptors into a ring via their buffer2 links. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		tp->rx_ring[i].status = 0;
		tp->rx_ring[i].length = PKT_BUF_SZ;
		tp->rx_ring[i].buffer2 = virt_to_bus(&tp->rx_ring[i+1]);
		tp->rx_skbuff[i] = NULL;
	/* Mark the last entry as wrapping the ring. */
	tp->rx_ring[i-1].length = PKT_BUF_SZ | Rx1RingWrap;
	tp->rx_ring[i-1].buffer2 = virt_to_bus(&tp->rx_ring[0]);
	/* Allocate an skbuff per Rx descriptor and hand it to the chip. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		/* Note the receive buffer must be longword aligned.
		   dev_alloc_skb() provides 16 byte alignment. But do *not*
		   use skb_reserve() to align the IP header! */
		struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ);
		tp->rx_skbuff[i] = skb;
		skb->dev = dev;			/* Mark as being used by this device. */
		tp->rx_ring[i].status = Rx0DescOwned;	/* Owned by Xircom chip */
		tp->rx_ring[i].buffer1 = virt_to_bus(skb->tail);
	tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
	/* The Tx buffer descriptor is filled in as needed, but we
	   do need to clear the ownership bit. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		tp->tx_skbuff[i] = NULL;
		tp->tx_ring[i].status = 0;
		tp->tx_ring[i].buffer2 = virt_to_bus(&tp->tx_ring[i+1]);
		/* The X3201-3 needs 4-byte aligned Tx buffers, so keep a
		   pre-allocated bounce skbuff per entry. */
		if (tp->chip_id == X3201_3)
			tp->tx_aligned_skbuff[i] = dev_alloc_skb(PKT_BUF_SZ);
	tp->tx_ring[i-1].buffer2 = virt_to_bus(&tp->tx_ring[0]);
/* hard_start_xmit hook: queue one skb on the Tx ring, choosing whether
 * to request a Tx-done interrupt based on ring occupancy, then kick the
 * transmitter. */
xircom_start_xmit(struct sk_buff *skb, struct net_device *dev)
	struct xircom_private *tp = dev->priv;

	/* Caution: the write order is important here, set the base address
	   with the "ownership" bits last. */

	/* Calculate the next Tx descriptor entry. */
	entry = tp->cur_tx % TX_RING_SIZE;

	tp->tx_skbuff[entry] = skb;
	/* X3201-3 requires 4-byte aligned buffers: bounce the data through
	   the pre-allocated aligned skbuff. */
	if (tp->chip_id == X3201_3) {
		memcpy(tp->tx_aligned_skbuff[entry]->data,skb->data,skb->len);
		tp->tx_ring[entry].buffer1 = virt_to_bus(tp->tx_aligned_skbuff[entry]->data);
	tp->tx_ring[entry].buffer1 = virt_to_bus(skb->data);
	/* Interrupt mitigation: only ask for a Tx-done interrupt at the
	   half-full mark and when the ring is nearly exhausted. */
	if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */
		flag = Tx1WholePkt; /* No interrupt */
	} else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) {
		flag = Tx1WholePkt | Tx1ComplIntr; /* Tx-done intr. */
	} else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) {
		flag = Tx1WholePkt; /* No Tx-done intr. */
		/* Leave room for set_rx_mode() to fill entries. */
		flag = Tx1WholePkt | Tx1ComplIntr; /* Tx-done intr. */
	if (entry == TX_RING_SIZE - 1)
		flag |= Tx1WholePkt | Tx1ComplIntr | Tx1RingWrap;

	tp->tx_ring[entry].length = skb->len | flag;
	tp->tx_ring[entry].status = Tx0DescOwned;	/* Pass ownership to the chip. */
	netif_stop_queue (dev);
	netif_wake_queue (dev);
	/* Trigger an immediate transmit demand. */
	outl(0, dev->base_addr + CSR1);
	dev->trans_start = jiffies;
973 static void xircom_media_change(struct net_device *dev)
975 struct xircom_private *tp = dev->priv;
976 long ioaddr = dev->base_addr;
977 u16 reg0, reg1, reg4, reg5;
978 u32 csr6 = inl(ioaddr + CSR6), newcsr6;
980 /* reset status first */
981 mdio_read(dev, tp->phys[0], MII_BMCR);
982 mdio_read(dev, tp->phys[0], MII_BMSR);
984 reg0 = mdio_read(dev, tp->phys[0], MII_BMCR);
985 reg1 = mdio_read(dev, tp->phys[0], MII_BMSR);
987 if (reg1 & BMSR_LSTATUS) {
989 if (reg0 & BMCR_ANENABLE) {
990 /* autonegotiation is enabled */
991 reg4 = mdio_read(dev, tp->phys[0], MII_ADVERTISE);
992 reg5 = mdio_read(dev, tp->phys[0], MII_LPA);
993 if (reg4 & ADVERTISE_100FULL && reg5 & LPA_100FULL) {
996 } else if (reg4 & ADVERTISE_100HALF && reg5 & LPA_100HALF) {
999 } else if (reg4 & ADVERTISE_10FULL && reg5 & LPA_10FULL) {
1001 tp->full_duplex = 1;
1004 tp->full_duplex = 0;
1007 /* autonegotiation is disabled */
1008 if (reg0 & BMCR_SPEED100)
1012 if (reg0 & BMCR_FULLDPLX)
1013 tp->full_duplex = 1;
1015 tp->full_duplex = 0;
1017 printk(KERN_DEBUG "%s: Link is up, running at %sMbit %s-duplex\n",
1019 tp->speed100 ? "100" : "10",
1020 tp->full_duplex ? "full" : "half");
1021 netif_carrier_on(dev);
1022 newcsr6 = csr6 & ~FullDuplexBit;
1023 if (tp->full_duplex)
1024 newcsr6 |= FullDuplexBit;
1025 if (newcsr6 != csr6)
1026 outl_CSR6(newcsr6, ioaddr + CSR6);
1028 printk(KERN_DEBUG "%s: Link is down\n", dev->name);
1029 netif_carrier_off(dev);
/* Reset the PHY, reload our advertising mask, then either restart
 * autonegotiation or force the configured speed/duplex from the module
 * options. */
static void check_duplex(struct net_device *dev)
	struct xircom_private *tp = dev->priv;

	mdio_write(dev, tp->phys[0], MII_BMCR, BMCR_RESET);
	/* Busy-wait for the PHY to clear its self-clearing RESET bit. */
	while (mdio_read(dev, tp->phys[0], MII_BMCR) & BMCR_RESET);
	reg0 = mdio_read(dev, tp->phys[0], MII_BMCR);
	mdio_write(dev, tp->phys[0], MII_ADVERTISE, tp->advertising[0]);
	/* Autonegotiate: drop any forced speed/duplex, (re)start nway. */
	reg0 &= ~(BMCR_SPEED100 | BMCR_FULLDPLX);
	reg0 |= BMCR_ANENABLE | BMCR_ANRESTART;
		/* Forced mode: disable nway and set speed/duplex directly. */
		reg0 &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
			reg0 |= BMCR_SPEED100;
		if (tp->full_duplex)
			reg0 |= BMCR_FULLDPLX;
		printk(KERN_DEBUG "%s: Link forced to %sMbit %s-duplex\n",
		       tp->speed100 ? "100" : "10",
		       tp->full_duplex ? "full" : "half");
	mdio_write(dev, tp->phys[0], MII_BMCR, reg0);
1064 /* The interrupt handler does all of the Rx thread work and cleans up
1065 after the Tx thread. */
/* ISR contract: runs with tp->lock held; loops reading CSR5 until no
   (Normal|Abnormal) interrupt bits remain or the work budget is spent.
   NOTE(review): the surrounding loop construct and several closing braces
   are elided in this view. */
1066 static irqreturn_t xircom_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
1068 struct net_device *dev = dev_instance;
1069 struct xircom_private *tp = dev->priv;
1070 long ioaddr = dev->base_addr;
/* Budget caps total events serviced per invocation (module parameter). */
1071 int csr5, work_budget = max_interrupt_work;
1074 spin_lock (&tp->lock);
1077 csr5 = inl(ioaddr + CSR5);
1078 /* Acknowledge all of the current interrupt sources ASAP. */
1079 outl(csr5 & 0x0001ffff, ioaddr + CSR5);
1081 if (xircom_debug > 4)
1082 printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x.\n",
1083 dev->name, csr5, inl(dev->base_addr + CSR5));
1085 if (csr5 == 0xffffffff)
1086 break; /* all bits set, assume PCMCIA card removed */
1088 if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
/* Receive work: hand off to xircom_rx(), which returns the number of
   events it consumed from the budget. */
1093 if (csr5 & (RxIntr | RxNoBuf))
1094 work_budget -= xircom_rx(dev);
/* Transmit completion: reap descriptors between dirty_tx and cur_tx. */
1096 if (csr5 & (TxNoBuf | TxDied | TxIntr)) {
1097 unsigned int dirty_tx;
1099 for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
1101 int entry = dirty_tx % TX_RING_SIZE;
1102 int status = tp->tx_ring[entry].status;
/* Descriptor still owned by the chip (status < 0 checks the Owned bit --
   TODO confirm, the test itself is elided here). */
1105 break; /* It still hasn't been Txed */
1106 /* Check for Rx filter setup frames. */
1107 if (tp->tx_skbuff[entry] == NULL)
1110 if (status & Tx0DescError) {
1111 /* There was an major error, log it. */
1112 #ifndef final_version
1113 if (xircom_debug > 1)
1114 printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
/* Classify the failure into the standard net_device_stats counters. */
1117 tp->stats.tx_errors++;
1118 if (status & Tx0ManyColl) {
1119 tp->stats.tx_aborted_errors++;
1121 if (status & Tx0NoCarrier) tp->stats.tx_carrier_errors++;
1122 if (status & Tx0LateColl) tp->stats.tx_window_errors++;
1123 if (status & Tx0Underflow) tp->stats.tx_fifo_errors++;
/* Success path: low 11 bits of length are the byte count; bits 3..6 of
   status hold the collision count for this frame. */
1125 tp->stats.tx_bytes += tp->tx_ring[entry].length & 0x7ff;
1126 tp->stats.collisions += (status >> 3) & 15;
1127 tp->stats.tx_packets++;
1130 /* Free the original skb. */
1131 dev_kfree_skb_irq(tp->tx_skbuff[entry]);
1132 tp->tx_skbuff[entry] = NULL;
1135 #ifndef final_version
/* Sanity check: the reap pointer must never lag cur_tx by more than a
   full ring; resynchronize if it does. */
1136 if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
1137 printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
1138 dev->name, dirty_tx, tp->cur_tx, tp->tx_full);
1139 dirty_tx += TX_RING_SIZE;
/* Flow control: wake the queue once at least two free slots exist
   (the "- 2" keeps headroom for the setup-frame dummy entry). */
1144 tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
1145 /* The ring is no longer full */
1149 netif_stop_queue (dev);
1151 netif_wake_queue (dev);
1153 tp->dirty_tx = dirty_tx;
/* Transmitter death: restart Rx alone, then Tx+Rx, via CSR6. */
1154 if (csr5 & TxDied) {
1155 if (xircom_debug > 2)
1156 printk(KERN_WARNING "%s: The transmitter stopped."
1157 " CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
1158 dev->name, csr5, inl(ioaddr + CSR6), tp->csr6);
1159 outl_CSR6(tp->csr6 | EnableRx, ioaddr);
1160 outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
1165 if (csr5 & AbnormalIntr) { /* Abnormal error summary bit. */
1166 if (csr5 & LinkChange)
1167 xircom_media_change(dev);
/* Underflow recovery: raise the Tx FIFO threshold a notch each time;
   once maxed, fall back to full store-and-forward. */
1168 if (csr5 & TxFIFOUnderflow) {
1169 if ((tp->csr6 & TxThreshMask) != TxThreshMask)
1170 tp->csr6 += (1 << TxThreshShift); /* Bump up the Tx threshold */
1172 tp->csr6 |= TxStoreForw; /* Store-n-forward. */
1173 /* Restart the transmit process. */
1174 outl_CSR6(tp->csr6 | EnableRx, ioaddr);
1175 outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
1177 if (csr5 & RxDied) { /* Missed a Rx frame. */
1178 tp->stats.rx_errors++;
/* CSR8 low 16 bits: hardware missed-frame counter (clears on read). */
1179 tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
1180 outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
1182 /* Clear all error sources, included undocumented ones! */
1183 outl(0x0800f7ba, ioaddr + CSR5);
/* Budget exhausted: ack everything and bail; remaining work will
   re-raise the interrupt. */
1185 if (--work_budget < 0) {
1186 if (xircom_debug > 1)
1187 printk(KERN_WARNING "%s: Too much work during an interrupt, "
1188 "csr5=0x%8.8x.\n", dev->name, csr5);
1189 /* Acknowledge all interrupt sources. */
1190 outl(0x8001ffff, ioaddr + CSR5);
1195 if (xircom_debug > 3)
1196 printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
1197 dev->name, inl(ioaddr + CSR5));
1199 spin_unlock (&tp->lock);
1200 return IRQ_RETVAL(handled);
/* Drain completed Rx descriptors, pass good frames up the stack, then
   refill the ring with fresh skbuffs.  Returns a work count (TODO
   confirm: return statement elided in this view).  Called from the
   interrupt handler with tp->lock held. */
1205 xircom_rx(struct net_device *dev)
1207 struct xircom_private *tp = dev->priv;
1208 int entry = tp->cur_rx % RX_RING_SIZE;
/* Never process more entries than are outstanding on the ring. */
1209 int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
1212 if (xircom_debug > 4)
1213 printk(KERN_DEBUG " In xircom_rx(), entry %d %8.8x.\n", entry,
1214 tp->rx_ring[entry].status);
1215 /* If we own the next entry, it's a new packet. Send it up. */
/* status >= 0 means the Owned (sign) bit is clear: the chip is done
   with this descriptor. */
1216 while (tp->rx_ring[entry].status >= 0) {
1217 s32 status = tp->rx_ring[entry].status;
1219 if (xircom_debug > 5)
1220 printk(KERN_DEBUG " In xircom_rx(), entry %d %8.8x.\n", entry,
1221 tp->rx_ring[entry].status);
1222 if (--rx_work_limit < 0)
/* Error path: magic masks select the error/first/last-descriptor bits
   of the Rx status word. */
1224 if ((status & 0x38008300) != 0x0300) {
1225 if ((status & 0x38000300) != 0x0300) {
1226 /* Ignore earlier buffers. */
1227 if ((status & 0xffff) != 0x7fff) {
1228 if (xircom_debug > 1)
1229 printk(KERN_WARNING "%s: Oversized Ethernet frame "
1230 "spanned multiple buffers, status %8.8x!\n",
1232 tp->stats.rx_length_errors++;
1234 } else if (status & Rx0DescError) {
1235 /* There was a fatal error. */
1236 if (xircom_debug > 2)
1237 printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
1239 tp->stats.rx_errors++; /* end of a packet.*/
1240 if (status & (Rx0Runt | Rx0HugeFrame)) tp->stats.rx_length_errors++;
1241 if (status & Rx0CRCError) tp->stats.rx_crc_errors++;
1244 /* Omit the four octet CRC from the length. */
1245 short pkt_len = ((status >> 16) & 0x7ff) - 4;
1246 struct sk_buff *skb;
1248 #ifndef final_version
/* Defensive check: length field must not exceed max Ethernet frame. */
1249 if (pkt_len > 1518) {
1250 printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
1251 dev->name, pkt_len, pkt_len);
1253 tp->stats.rx_length_errors++;
1256 /* Check if the packet is long enough to accept without copying
1257 to a minimally-sized skbuff. */
1258 if (pkt_len < rx_copybreak
1259 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1261 skb_reserve(skb, 2); /* 16 byte align the IP header */
1262 #if ! defined(__alpha__)
/* eth_copy_and_sum folds the checksum into the copy where supported;
   plain memcpy on platforms (alpha) where it is not. */
1263 eth_copy_and_sum(skb, bus_to_virt(tp->rx_ring[entry].buffer1),
1265 skb_put(skb, pkt_len);
1267 memcpy(skb_put(skb, pkt_len),
1268 bus_to_virt(tp->rx_ring[entry].buffer1), pkt_len);
1271 } else { /* Pass up the skb already on the Rx ring. */
1272 skb_put(skb = tp->rx_skbuff[entry], pkt_len);
1273 tp->rx_skbuff[entry] = NULL;
1275 skb->protocol = eth_type_trans(skb, dev);
1277 dev->last_rx = jiffies;
1278 tp->stats.rx_packets++;
1279 tp->stats.rx_bytes += pkt_len;
1281 entry = (++tp->cur_rx) % RX_RING_SIZE;
1284 /* Refill the Rx ring buffers. */
1285 for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
1286 entry = tp->dirty_rx % RX_RING_SIZE;
1287 if (tp->rx_skbuff[entry] == NULL) {
1288 struct sk_buff *skb;
/* NOTE(review): allocation-failure handling for this dev_alloc_skb is
   elided in this view -- verify against the full source. */
1289 skb = tp->rx_skbuff[entry] = dev_alloc_skb(PKT_BUF_SZ);
1292 skb->dev = dev; /* Mark as being used by this device. */
1293 tp->rx_ring[entry].buffer1 = virt_to_bus(skb->tail);
/* Hand the descriptor back to the chip. */
1296 tp->rx_ring[entry].status = Rx0DescOwned;
/* Quiesce the hardware: mask interrupts, stop the Tx/Rx engines, and
   fold the final missed-frame count into the stats. */
1304 xircom_down(struct net_device *dev)
1306 long ioaddr = dev->base_addr;
1307 struct xircom_private *tp = dev->priv;
1309 /* Disable interrupts by clearing the interrupt mask. */
1310 outl(0, ioaddr + CSR7);
1311 /* Stop the chip's Tx and Rx processes. */
1312 outl_CSR6(inl(ioaddr + CSR6) & ~EnableTxRx, ioaddr);
/* 0xffffffff readback means the card has been ejected; skip the
   counter read in that case. */
1314 if (inl(ioaddr + CSR6) != 0xffffffff)
1315 tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
1317 dev->if_port = tp->saved_if_port;
/* net_device close (stop) handler: stop the queue, shut the chip down
   if still present, release the IRQ, and free both rings' skbuffs. */
1322 xircom_close(struct net_device *dev)
1324 long ioaddr = dev->base_addr;
1325 struct xircom_private *tp = dev->priv;
1328 if (xircom_debug > 1)
1329 printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
1330 dev->name, inl(ioaddr + CSR5));
1332 netif_stop_queue(dev);
/* Only touch the hardware if the (hot-pluggable) card is still there. */
1334 if (netif_device_present(dev))
1337 free_irq(dev->irq, dev);
1339 /* Free all the skbuffs in the Rx queue. */
1340 for (i = 0; i < RX_RING_SIZE; i++) {
1341 struct sk_buff *skb = tp->rx_skbuff[i];
1342 tp->rx_skbuff[i] = NULL;
1343 tp->rx_ring[i].status = 0; /* Not owned by Xircom chip. */
1344 tp->rx_ring[i].length = 0;
1345 tp->rx_ring[i].buffer1 = 0xBADF00D0; /* An invalid address. */
/* Tx ring: setup-frame entries have a NULL skbuff, hence the check. */
1350 for (i = 0; i < TX_RING_SIZE; i++) {
1351 if (tp->tx_skbuff[i])
1352 dev_kfree_skb(tp->tx_skbuff[i]);
1353 tp->tx_skbuff[i] = NULL;
/* Return the accumulated statistics, first folding in the hardware
   missed-frame counter (CSR8) if the card is still present. */
1361 static struct net_device_stats *xircom_get_stats(struct net_device *dev)
1363 struct xircom_private *tp = dev->priv;
1364 long ioaddr = dev->base_addr;
1366 if (netif_device_present(dev))
1367 tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
/* ethtool sub-ioctl dispatcher (pre-ethtool_ops era): handles GSET
   (report link settings), SSET (change autoneg/speed/duplex) and
   GDRVINFO (driver identification).  NOTE(review): the switch statement
   itself and some case labels are elided in this view. */
1373 static int xircom_ethtool_ioctl(struct net_device *dev, void __user *useraddr)
1375 struct ethtool_cmd ecmd;
1376 struct xircom_private *tp = dev->priv;
1378 if (copy_from_user(&ecmd, useraddr, sizeof(ecmd)))
/* GSET: advertise the full 10/100 half/full capability set. */
1384 SUPPORTED_10baseT_Half |
1385 SUPPORTED_10baseT_Full |
1386 SUPPORTED_100baseT_Half |
1387 SUPPORTED_100baseT_Full |
/* Translate the cached MII advertising word into ethtool flags. */
1391 ecmd.advertising = ADVERTISED_MII;
1392 if (tp->advertising[0] & ADVERTISE_10HALF)
1393 ecmd.advertising |= ADVERTISED_10baseT_Half;
1394 if (tp->advertising[0] & ADVERTISE_10FULL)
1395 ecmd.advertising |= ADVERTISED_10baseT_Full;
1396 if (tp->advertising[0] & ADVERTISE_100HALF)
1397 ecmd.advertising |= ADVERTISED_100baseT_Half;
1398 if (tp->advertising[0] & ADVERTISE_100FULL)
1399 ecmd.advertising |= ADVERTISED_100baseT_Full;
1401 ecmd.advertising |= ADVERTISED_Autoneg;
1402 ecmd.autoneg = AUTONEG_ENABLE;
1404 ecmd.autoneg = AUTONEG_DISABLE;
1406 ecmd.port = PORT_MII;
1407 ecmd.transceiver = XCVR_INTERNAL;
1408 ecmd.phy_address = tp->phys[0];
1409 ecmd.speed = tp->speed100 ? SPEED_100 : SPEED_10;
1410 ecmd.duplex = tp->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
1411 ecmd.maxtxpkt = TX_RING_SIZE / 2;
1414 if (copy_to_user(useraddr, &ecmd, sizeof(ecmd)))
/* SSET: update the driver's cached link policy from userspace. */
1418 case ETHTOOL_SSET: {
1419 u16 autoneg, speed100, full_duplex;
1421 autoneg = (ecmd.autoneg == AUTONEG_ENABLE);
1422 speed100 = (ecmd.speed == SPEED_100);
1423 full_duplex = (ecmd.duplex == DUPLEX_FULL);
1425 tp->autoneg = autoneg;
1426 if (speed100 != tp->speed100 ||
1427 full_duplex != tp->full_duplex) {
1428 tp->speed100 = speed100;
1429 tp->full_duplex = full_duplex;
1430 /* change advertising bits */
1431 tp->advertising[0] &= ~(ADVERTISE_10HALF |
1435 ADVERTISE_100BASE4);
/* Re-enable exactly the one mode implied by speed100/full_duplex. */
1438 tp->advertising[0] |= ADVERTISE_100FULL;
1440 tp->advertising[0] |= ADVERTISE_100HALF;
1443 tp->advertising[0] |= ADVERTISE_10FULL;
1445 tp->advertising[0] |= ADVERTISE_10HALF;
/* GDRVINFO: report driver name/version and PCI bus location. */
1452 case ETHTOOL_GDRVINFO: {
1453 struct ethtool_drvinfo info;
1454 memset(&info, 0, sizeof(info));
1455 info.cmd = ecmd.cmd;
1456 strcpy(info.driver, DRV_NAME);
1457 strcpy(info.version, DRV_VERSION);
1458 *info.fw_version = 0;
1459 strcpy(info.bus_info, pci_name(tp->pdev));
1460 if (copy_to_user(useraddr, &info, sizeof(info)))
1471 /* Provide ioctl() calls to examine the MII xcvr state. */
/* Device ioctl handler: delegates SIOCETHTOOL, and implements the
   legacy SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG MII access interface.
   Writes to the active PHY also update the driver's cached duplex and
   advertising state so software stays in sync with the hardware. */
1472 static int xircom_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1474 struct xircom_private *tp = dev->priv;
/* The MII request words live in the ifreq union, accessed as u16[]. */
1475 u16 *data = (u16 *)&rq->ifr_ifru;
1476 int phy = tp->phys[0] & 0x1f;
1477 unsigned long flags;
1481 return xircom_ethtool_ioctl(dev, rq->ifr_data);
1483 /* Legacy mii-diag interface */
1484 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
1490 case SIOCGMIIREG: /* Read MII PHY register. */
/* NOTE(review): the matching save_flags()/cli() is elided here;
   restore_flags below implies the read is done with irqs off. */
1493 data[3] = mdio_read(dev, data[0] & 0x1f, data[1] & 0x1f);
1494 restore_flags(flags);
1496 case SIOCSMIIREG: /* Write MII PHY register. */
1497 if (!capable(CAP_NET_ADMIN))
/* Only mirror writes aimed at our own PHY into driver state. */
1501 if (data[0] == tp->phys[0]) {
1502 u16 value = data[2];
1505 if (value & (BMCR_RESET | BMCR_ANENABLE))
1506 /* Autonegotiation. */
1509 tp->full_duplex = (value & BMCR_FULLDPLX) ? 1 : 0;
1514 tp->advertising[0] = value;
1519 mdio_write(dev, data[0] & 0x1f, data[1] & 0x1f, data[2]);
1520 restore_flags(flags);
1529 /* Set or clear the multicast filter for this adaptor.
1530 Note that we only use exclusion around actually queueing the
1531 new frame, not around filling tp->setup_frame. This is non-deterministic
1532 when re-entered but still correct. */
/* Three filtering strategies, chosen by flags and mc_count:
   promiscuous (CSR6 bit), accept-all-multicast (CSR6 bit), or a setup
   frame queued on the Tx ring -- perfect filter for <= 14 addresses,
   hash table above that. */
1533 static void set_rx_mode(struct net_device *dev)
1535 struct xircom_private *tp = dev->priv;
1536 struct dev_mc_list *mclist;
1537 long ioaddr = dev->base_addr;
1538 int csr6 = inl(ioaddr + CSR6);
1539 u16 *eaddrs, *setup_frm;
/* Start from a clean slate: clear all three filter-mode bits in both
   the cached and the live CSR6 value. */
1543 tp->csr6 &= ~(AllMultiBit | PromiscBit | HashFilterBit);
1544 csr6 &= ~(AllMultiBit | PromiscBit | HashFilterBit);
1545 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1546 tp->csr6 |= PromiscBit;
1551 if ((dev->mc_count > 1000) || (dev->flags & IFF_ALLMULTI)) {
1552 /* Too many to filter well -- accept all multicasts. */
1553 tp->csr6 |= AllMultiBit;
1554 csr6 |= AllMultiBit;
/* Otherwise build a setup frame to program the chip's address filter. */
1558 tx_flags = Tx1WholePkt | Tx1SetupPkt | PKT_SETUP_SZ;
1560 /* Note that only the low-address shortword of setup_frame is valid! */
1561 setup_frm = tp->setup_frame;
1562 mclist = dev->mc_list;
1564 /* Fill the first entry with our physical address. */
/* Each 16-bit chunk occupies a 32-bit slot, hence setup_frm += 2. */
1565 eaddrs = (u16 *)dev->dev_addr;
1566 *setup_frm = cpu_to_le16(eaddrs[0]); setup_frm += 2;
1567 *setup_frm = cpu_to_le16(eaddrs[1]); setup_frm += 2;
1568 *setup_frm = cpu_to_le16(eaddrs[2]); setup_frm += 2;
1570 if (dev->mc_count > 14) { /* Must use a multicast hash table. */
1571 u32 *hash_table = (u32 *)(tp->setup_frame + 4 * 12);
1574 tx_flags |= Tx1HashSetup;
1575 tp->csr6 |= HashFilterBit;
1576 csr6 |= HashFilterBit;
1578 /* Fill the unused 3 entries with the broadcast address.
1579 At least one entry *must* contain the broadcast address!!!*/
1580 for (i = 0; i < 3; i++) {
1581 *setup_frm = 0xffff; setup_frm += 2;
1582 *setup_frm = 0xffff; setup_frm += 2;
1583 *setup_frm = 0xffff; setup_frm += 2;
1586 /* Truly brain-damaged hash filter layout */
1587 /* XXX: not sure if I should take the last or the first 9 bits */
/* 9-bit CRC of each multicast address indexes the hash table; hash2
   remaps it into the chip's scattered bit layout. */
1588 for (i = 0; i < dev->mc_count; i++, mclist = mclist->next) {
1590 hash = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x1ff;
1592 hash2 = hash + ((hash >> 4) << 4) +
1596 hash2 = 64 + hash + (hash >> 4) * 80;
1598 hptr = &hash_table[hash2 & ~0x1f];
1599 *hptr |= cpu_to_le32(1 << (hash2 & 0x1f));
1602 /* We have <= 14 mcast addresses so we can use Xircom's
1603 wonderful 16-address perfect filter. */
1604 for (i = 0; i < dev->mc_count; i++, mclist = mclist->next) {
1605 eaddrs = (u16 *)mclist->dmi_addr;
1606 *setup_frm = cpu_to_le16(eaddrs[0]); setup_frm += 2;
1607 *setup_frm = cpu_to_le16(eaddrs[1]); setup_frm += 2;
1608 *setup_frm = cpu_to_le16(eaddrs[2]); setup_frm += 2;
1610 /* Fill the unused entries with the broadcast address.
1611 At least one entry *must* contain the broadcast address!!!*/
1612 for (; i < 15; i++) {
1613 *setup_frm = 0xffff; setup_frm += 2;
1614 *setup_frm = 0xffff; setup_frm += 2;
1615 *setup_frm = 0xffff; setup_frm += 2;
1619 /* Now add this frame to the Tx list. */
/* Need two free descriptors: one dummy (chip-errata workaround) plus
   the setup frame itself. */
1620 if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) {
1621 /* Same setup recently queued, we need not add it. */
1622 /* XXX: Huh? All it means is that the Tx list is full...*/
1624 unsigned long flags;
1628 save_flags(flags); cli();
1629 entry = tp->cur_tx++ % TX_RING_SIZE;
1632 /* Avoid a chip errata by prefixing a dummy entry. */
1633 tp->tx_skbuff[entry] = NULL;
1634 tp->tx_ring[entry].length =
1635 (entry == TX_RING_SIZE - 1) ? Tx1RingWrap : 0;
1636 tp->tx_ring[entry].buffer1 = 0;
1637 /* race with chip, set Tx0DescOwned later */
1639 entry = tp->cur_tx++ % TX_RING_SIZE;
1642 tp->tx_skbuff[entry] = NULL;
1643 /* Put the setup frame on the Tx list. */
1644 if (entry == TX_RING_SIZE - 1)
1645 tx_flags |= Tx1RingWrap; /* Wrap ring. */
1646 tp->tx_ring[entry].length = tx_flags;
1647 tp->tx_ring[entry].buffer1 = virt_to_bus(tp->setup_frame);
1648 tp->tx_ring[entry].status = Tx0DescOwned;
1649 if (tp->cur_tx - tp->dirty_tx >= TX_RING_SIZE - 2) {
1651 netif_stop_queue (dev);
/* Only now hand the earlier dummy descriptor to the chip, so it cannot
   race ahead of the setup frame. */
1654 tp->tx_ring[dummy].status = Tx0DescOwned;
1655 restore_flags(flags);
1656 /* Trigger an immediate transmit demand. */
1657 outl(0, ioaddr + CSR1);
/* Finally commit the (possibly changed) filter-mode bits to CSR6. */
1661 outl_CSR6(csr6, ioaddr);
/* PCI IDs this driver binds to: Xircom (vendor 0x115D) device 0x0003,
   mapped to chip type X3201_3. */
1665 static struct pci_device_id xircom_pci_table[] = {
1666 { 0x115D, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, X3201_3 },
1669 MODULE_DEVICE_TABLE(pci, xircom_pci_table);
/* PM suspend hook: save PCI config space, disable the device, and drop
   it into power state D3. */
1673 static int xircom_suspend(struct pci_dev *pdev, u32 state)
1675 struct net_device *dev = pci_get_drvdata(pdev);
1676 struct xircom_private *tp = dev->priv;
1677 printk(KERN_INFO "xircom_suspend(%s)\n", dev->name);
1681 pci_save_state(pdev, tp->pci_state);
1682 pci_disable_device(pdev);
1683 pci_set_power_state(pdev, 3);
/* PM resume hook: restore power state and PCI config, wake the chip out
   of ACPI sleep, and rerun the transceiver init voodoo. */
1689 static int xircom_resume(struct pci_dev *pdev)
1691 struct net_device *dev = pci_get_drvdata(pdev);
1692 struct xircom_private *tp = dev->priv;
1693 printk(KERN_INFO "xircom_resume(%s)\n", dev->name);
/* Back to D0 before touching config space or BARs. */
1695 pci_set_power_state(pdev,0);
1696 pci_enable_device(pdev);
1697 pci_restore_state(pdev, tp->pci_state);
1699 /* Bring the chip out of sleep mode.
1700 Caution: Snooze mode does not work with some boards! */
1701 if (xircom_tbl[tp->chip_id].flags & HAS_ACPI)
1702 pci_write_config_dword(tp->pdev, PCI_POWERMGMT, 0);
1704 transceiver_voodoo(dev);
1705 if (xircom_tbl[tp->chip_id].flags & HAS_MII)
/* PCI removal hook: unregister the net device, release the I/O regions,
   and clear the driver data pointer.  NOTE(review): the free_netdev/
   kfree of dev appears to be elided from this view -- verify. */
1715 static void __devexit xircom_remove_one(struct pci_dev *pdev)
1717 struct net_device *dev = pci_get_drvdata(pdev);
1719 printk(KERN_INFO "xircom_remove_one(%s)\n", dev->name);
1720 unregister_netdev(dev);
1721 pci_release_regions(pdev);
1723 pci_set_drvdata(pdev, NULL);
/* PCI driver registration: probe/remove always, suspend/resume only
   when CONFIG_PM is set (the #ifdef open is elided from this view). */
1727 static struct pci_driver xircom_driver = {
1729 .id_table = xircom_pci_table,
1730 .probe = xircom_init_one,
1731 .remove = __devexit_p(xircom_remove_one),
1733 .suspend = xircom_suspend,
1734 .resume = xircom_resume
1735 #endif /* CONFIG_PM */
/* Module entry point: register the PCI driver with the core. */
1739 static int __init xircom_init(void)
1741 /* when a module, this is printed whether or not devices are found in probe */
1745 return pci_module_init(&xircom_driver);
/* Module exit point: unregister the PCI driver (this triggers
   xircom_remove_one for each bound device). */
1749 static void __exit xircom_exit(void)
1751 pci_unregister_driver(&xircom_driver);
/* Wire the entry/exit points into the module loader. */
1754 module_init(xircom_init)
1755 module_exit(xircom_exit)