1 /* yellowfin.c: A Packet Engines G-NIC ethernet driver for linux. */
3 Written 1997-2001 by Donald Becker.
5 This software may be used and distributed according to the terms of
6 the GNU General Public License (GPL), incorporated herein by reference.
7 Drivers based on or derived from this code fall under the GPL and must
8 retain the authorship, copyright and license notice. This file is not
9 a complete program and may only be used when the entire operating
10 system is licensed under the GPL.
12 This driver is for the Packet Engines G-NIC PCI Gigabit Ethernet adapter.
13 It also supports the Symbios Logic version of the same chip core.
15 The author may be reached as becker@scyld.com, or C/O
16 Scyld Computing Corporation
17 410 Severn Ave., Suite 210
20 Support and updates available at
21 http://www.scyld.com/network/yellowfin.html
24 Linux kernel changelog:
25 -----------------------
27 LK1.1.1 (jgarzik): Port to 2.4 kernel
30 * Merge in becker version 1.05
34 * Update yellowfin_timer to correctly calculate duplex.
35 (suggested by Manfred Spraul)
37 LK1.1.4 (val@nmt.edu):
38 * Fix three endian-ness bugs
39 * Support dual function SYM53C885E ethernet chip
41 LK1.1.5 (val@nmt.edu):
42 * Fix forced full-duplex bug I introduced
44 LK1.1.6 (val@nmt.edu):
45 * Only print warning on truly "oversized" packets
46 * Fix theoretical bug on gigabit cards - return to 1.1.3 behavior
50 #define DRV_NAME "yellowfin"
51 #define DRV_VERSION "1.05+LK1.1.6"
52 #define DRV_RELDATE "Feb 11, 2002"
/* Prefix for driver printk messages. */
54 #define PFX DRV_NAME ": "
56 /* The user-configurable values.
57 These may be modified when a driver module is loaded.*/
59 static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
60 /* Maximum events (Rx packets, etc.) to handle at each interrupt. */
61 static int max_interrupt_work = 20;
/* The #ifdef chain below selects DMA and FIFO register settings per board
revision; the #else arm for standard hardware is not visible in this
excerpt — NOTE(review): confirm the full conditional against the original. */
63 #ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */
64 /* System-wide count of bogus-rx frames. */
66 static int dma_ctrl = 0x004A0263; /* Constrained by errata */
67 static int fifo_cfg = 0x0020; /* Bypass external Tx FIFO. */
68 #elif YF_NEW /* A future perfect board :->. */
69 static int dma_ctrl = 0x00CAC277; /* Override when loading module! */
70 static int fifo_cfg = 0x0028;
72 static int dma_ctrl = 0x004A0263; /* Constrained by errata */
73 static int fifo_cfg = 0x0020; /* Bypass external Tx FIFO. */
76 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
77 Setting to > 1514 effectively disables this feature. */
78 static int rx_copybreak;
80 /* Used to pass the media type, etc.
81 No media types are currently defined. These exist for driver
84 #define MAX_UNITS 8 /* More are supported, limit only on options */
/* Per-unit module options; -1 means "not specified, use autodetection". */
85 static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
86 static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
88 /* Do ugly workaround for GX server chipset errata. */
91 /* Operational parameters that are set at compile time. */
93 /* Keep the ring sizes a power of two for efficiency.
94 Making the Tx ring too long decreases the effectiveness of channel
95 bonding and packet priority.
96 There are no ill effects from too-large receive rings. */
97 #define TX_RING_SIZE 16
98 #define TX_QUEUE_SIZE 12 /* Must be > 4 && <= TX_RING_SIZE */
99 #define RX_RING_SIZE 64
/* Byte sizes of the DMA-coherent rings. The Tx ring is doubled because each
transmitted frame uses a pair of descriptors (data + status write-back). */
100 #define STATUS_TOTAL_SIZE TX_RING_SIZE*sizeof(struct tx_status_words)
101 #define TX_TOTAL_SIZE 2*TX_RING_SIZE*sizeof(struct yellowfin_desc)
102 #define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct yellowfin_desc)
104 /* Operational parameters that usually are not changed. */
105 /* Time in jiffies before concluding the transmitter is hung. */
106 #define TX_TIMEOUT (2*HZ)
107 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
/* Alias so the driver's debug tests read naturally as yellowfin_debug. */
109 #define yellowfin_debug debug
/* Refuse to build unoptimized: the driver relies on -O for inline expansion.
NOTE(review): the matching #endif is outside this excerpt. */
111 #if !defined(__OPTIMIZE__)
112 #warning You must compile this file with the correct options!
113 #warning See the last lines of the source file.
114 #error You must compile this driver with "-O".
117 #include <linux/module.h>
118 #include <linux/kernel.h>
119 #include <linux/string.h>
120 #include <linux/timer.h>
121 #include <linux/errno.h>
122 #include <linux/ioport.h>
123 #include <linux/slab.h>
124 #include <linux/interrupt.h>
125 #include <linux/pci.h>
126 #include <linux/init.h>
127 #include <linux/mii.h>
128 #include <linux/netdevice.h>
129 #include <linux/etherdevice.h>
130 #include <linux/skbuff.h>
131 #include <linux/ethtool.h>
132 #include <linux/crc32.h>
133 #include <asm/uaccess.h>
134 #include <asm/processor.h> /* Processor type for cache alignment. */
135 #include <asm/unaligned.h>
136 #include <asm/bitops.h>
139 /* These identify the driver base version and may not be removed. */
140 static char version[] __devinitdata =
141 KERN_INFO DRV_NAME ".c:v1.05 1/09/2001 Written by Donald Becker <becker@scyld.com>\n"
142 KERN_INFO " http://www.scyld.com/network/yellowfin.html\n"
143 KERN_INFO " (unofficial 2.4.x port, " DRV_VERSION ", " DRV_RELDATE ")\n";
/* NOTE(review): stray #endif — the matching #ifdef USE_IO_OPS block is not
visible in this excerpt. */
158 #endif /* !USE_IO_OPS */
159 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
160 MODULE_DESCRIPTION("Packet Engines Yellowfin G-NIC Gigabit Ethernet driver");
161 MODULE_LICENSE("GPL");
/* Module parameter declarations (old-style 2.4 MODULE_PARM interface). */
163 MODULE_PARM(max_interrupt_work, "i");
164 MODULE_PARM(mtu, "i");
165 MODULE_PARM(debug, "i");
166 MODULE_PARM(rx_copybreak, "i");
167 MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
168 MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
169 MODULE_PARM(gx_fix, "i");
170 MODULE_PARM_DESC(max_interrupt_work, "G-NIC maximum events handled per interrupt");
171 MODULE_PARM_DESC(mtu, "G-NIC MTU (all boards)");
172 MODULE_PARM_DESC(debug, "G-NIC debug level (0-7)");
173 MODULE_PARM_DESC(rx_copybreak, "G-NIC copy breakpoint for copy-only-tiny-frames");
174 MODULE_PARM_DESC(options, "G-NIC: Bits 0-3: media type, bit 17: full duplex");
175 MODULE_PARM_DESC(full_duplex, "G-NIC full duplex setting(s) (1)");
176 MODULE_PARM_DESC(gx_fix, "G-NIC: enable GX server chipset bug workaround (0-1)");
181 I. Board Compatibility
183 This device driver is designed for the Packet Engines "Yellowfin" Gigabit
184 Ethernet adapter. The G-NIC 64-bit PCI card is supported, as well as the
185 Symbios 53C885E dual function chip.
187 II. Board-specific settings
189 PCI bus devices are configured by the system at boot time, so no jumpers
190 need to be set on the board. The system BIOS preferably should assign the
191 PCI INTA signal to an otherwise unused system IRQ line.
192 Note: Kernel versions earlier than 1.3.73 do not support shared PCI
195 III. Driver operation
199 The Yellowfin uses the Descriptor Based DMA Architecture specified by Apple.
200 This is a descriptor list scheme similar to that used by the EEPro100 and
201 Tulip. This driver uses two statically allocated fixed-size descriptor lists
202 formed into rings by a branch from the final descriptor to the beginning of
203 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
205 The driver allocates full frame size skbuffs for the Rx ring buffers at
206 open() time and passes the skb->data field to the Yellowfin as receive data
207 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
208 a fresh skbuff is allocated and the frame is copied to the new skbuff.
209 When the incoming frame is larger, the skbuff is passed directly up the
210 protocol stack and replaced by a newly allocated skbuff.
212 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
213 using a full-sized skbuff for small frames vs. the copying costs of larger
214 frames. For small frames the copying cost is negligible (esp. considering
215 that we are pre-loading the cache with immediately useful header
216 information). For large frames the copying cost is non-trivial, and the
217 larger copy might flush the cache of useful data.
219 IIIC. Synchronization
221 The driver runs as two independent, single-threaded flows of control. One
222 is the send-packet routine, which enforces single-threaded use by the
223 dev->tbusy flag. The other thread is the interrupt handler, which is single
224 threaded by the hardware and other software.
226 The send packet thread has partial control over the Tx ring and 'dev->tbusy'
227 flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
228 queue slot is empty, it clears the tbusy flag when finished otherwise it sets
229 the 'yp->tx_full' flag.
231 The interrupt handler has exclusive control over the Rx ring and records stats
232 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
233 empty by incrementing the dirty_tx mark. Iff the 'yp->tx_full' flag is set, it
234 clears both the tx_full and tbusy flags.
238 Thanks to Kim Stearns of Packet Engines for providing a pair of G-NIC boards.
239 Thanks to Bruce Faust of Digitalscape for providing both their SYM53C885 board
240 and an AlphaStation to verify the Alpha port!
244 Yellowfin Engineering Design Specification, 4/23/97 Preliminary/Confidential
245 Symbios SYM53C885 PCI-SCSI/Fast Ethernet Multifunction Controller Preliminary
247 http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
248 http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html
252 See Packet Engines confidential appendix (prototype chips only).
/* Flags describing how a board's PCI BARs should be mapped before probing. */
257 enum pci_id_flags_bits {
258 /* Set PCI command register bits before calling probe1(). */
259 PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
260 /* Read and map the single following PCI BAR. */
261 PCI_ADDR0=0<<4, PCI_ADDR1=1<<4, PCI_ADDR2=2<<4, PCI_ADDR3=3<<4,
262 PCI_ADDR_64BITS=0x100, PCI_NO_ACPI_WAKE=0x200, PCI_NO_MIN_LATENCY=0x400,
263 PCI_UNUSED_IRQ=0x800,
/* Per-chip capability bits, stored in yellowfin_private.drv_flags. */
265 enum capability_flags {
266 HasMII=1, FullTxStatus=2, IsGigabit=4, HasMulticastBug=8, FullRxStatus=16,
267 HasMACAddrBug=32, /* Only on early revs. */
268 DontUseEeprom=64, /* Don't read the MAC from the EEPROM. */
270 /* The PCI I/O space extent. */
271 #define YELLOWFIN_SIZE 0x100
/* Two PCI_IOTYPE definitions: I/O-port vs. memory-mapped access — the
selecting #ifdef lines are not visible in this excerpt. */
273 #define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_IO | PCI_ADDR0)
275 #define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR1)
/* Fields of struct pci_id_info (the struct header is not visible here). */
281 int pci, pci_mask, subsystem, subsystem_mask;
282 int revision, revision_mask; /* Only 8 bits. */
284 enum pci_id_flags_bits pci_flags;
285 int io_size; /* Needed for I/O region check or ioremap(). */
286 int drv_flags; /* Driver use, intended as capability flags. */
/* Table of supported chips; index matches driver_data in the PCI table. */
289 static struct pci_id_info pci_id_tbl[] = {
290 {"Yellowfin G-NIC Gigabit Ethernet", { 0x07021000, 0xffffffff},
291 PCI_IOTYPE, YELLOWFIN_SIZE,
292 FullTxStatus | IsGigabit | HasMulticastBug | HasMACAddrBug | DontUseEeprom},
293 {"Symbios SYM83C885", { 0x07011000, 0xffffffff},
294 PCI_IOTYPE, YELLOWFIN_SIZE, HasMII | DontUseEeprom },
/* PCI hotplug match table: vendor 0x1000 (Symbios), devices 0x0702/0x0701;
driver_data (last field) indexes pci_id_tbl above. */
298 static struct pci_device_id yellowfin_pci_tbl[] = {
299 { 0x1000, 0x0702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
300 { 0x1000, 0x0701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
303 MODULE_DEVICE_TABLE (pci, yellowfin_pci_tbl);
306 /* Offsets to the Yellowfin registers. Various sizes and alignments. */
307 enum yellowfin_offsets {
308 TxCtrl=0x00, TxStatus=0x04, TxPtr=0x0C,
309 TxIntrSel=0x10, TxBranchSel=0x14, TxWaitSel=0x18,
310 RxCtrl=0x40, RxStatus=0x44, RxPtr=0x4C,
311 RxIntrSel=0x50, RxBranchSel=0x54, RxWaitSel=0x58,
312 EventStatus=0x80, IntrEnb=0x82, IntrClear=0x84, IntrStatus=0x86,
313 ChipRev=0x8C, DMACtrl=0x90, TxThreshold=0x94,
314 Cnfg=0xA0, FrameGap0=0xA2, FrameGap1=0xA4,
315 MII_Cmd=0xA6, MII_Addr=0xA8, MII_Wr_Data=0xAA, MII_Rd_Data=0xAC,
317 RxDepth=0xB8, FlowCtrl=0xBC,
318 AddrMode=0xD0, StnAddr=0xD2, HashTbl=0xD8, FIFOcfg=0xF8,
319 EEStatus=0xF0, EECtrl=0xF1, EEAddr=0xF2, EERead=0xF3, EEWrite=0xF4,
323 /* The Yellowfin Rx and Tx buffer descriptors.
324 Elements are written as 32 bit for endian portability. */
/* NOTE(review): the members of these two structs, and the #ifdef __BIG_ENDIAN
opening the conditional below, are not visible in this excerpt. */
325 struct yellowfin_desc {
332 struct tx_status_words {
338 #else /* Little endian chips. */
343 #endif /* __BIG_ENDIAN */
346 /* Bits in yellowfin_desc.cmd */
348 CMD_TX_PKT=0x10000000, CMD_RX_BUF=0x20000000, CMD_TXSTATUS=0x30000000,
349 CMD_NOP=0x60000000, CMD_STOP=0x70000000,
350 BRANCH_ALWAYS=0x0C0000, INTR_ALWAYS=0x300000, WAIT_ALWAYS=0x030000,
351 BRANCH_IFTRUE=0x040000,
354 /* Bits in yellowfin_desc.status */
355 enum desc_status_bits { RX_EOP=0x0040, };
357 /* Bits in the interrupt status/mask registers. */
358 enum intr_status_bits {
359 IntrRxDone=0x01, IntrRxInvalid=0x02, IntrRxPCIFault=0x04,IntrRxPCIErr=0x08,
360 IntrTxDone=0x10, IntrTxInvalid=0x20, IntrTxPCIFault=0x40,IntrTxPCIErr=0x80,
361 IntrEarlyRx=0x100, IntrWakeup=0x200, };
363 #define PRIV_ALIGN 31 /* Required alignment mask */
/* Per-device driver state, hung off net_device->priv. */
365 struct yellowfin_private {
366 /* Descriptor rings first for alignment.
367 Tx requires a second descriptor for status. */
368 struct yellowfin_desc *rx_ring;
369 struct yellowfin_desc *tx_ring;
370 struct sk_buff* rx_skbuff[RX_RING_SIZE];
371 struct sk_buff* tx_skbuff[TX_RING_SIZE];
/* Bus addresses of the DMA-coherent rings, programmed into RxPtr/TxPtr. */
372 dma_addr_t rx_ring_dma;
373 dma_addr_t tx_ring_dma;
/* Separate status-write area: the chip DMAs Tx completion words here. */
375 struct tx_status_words *tx_status;
376 dma_addr_t tx_status_dma;
378 struct timer_list timer; /* Media selection timer. */
379 struct net_device_stats stats;
380 /* Frequently used and paired value: keep adjacent for cache effect. */
381 int chip_id, drv_flags;
382 struct pci_dev *pci_dev;
383 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
384 unsigned int rx_buf_sz; /* Based on MTU+slack. */
385 struct tx_status_words *tx_tail_desc;
386 unsigned int cur_tx, dirty_tx;
388 unsigned int tx_full:1; /* The Tx queue is full. */
389 unsigned int full_duplex:1; /* Full-duplex operation requested. */
390 unsigned int duplex_lock:1;
391 unsigned int medialock:1; /* Do not sense media. */
392 unsigned int default_port:4; /* Last dev->if_port value. */
393 /* MII transceiver section. */
394 int mii_cnt; /* MII device addresses. */
395 u16 advertising; /* NWay media advertisement */
396 unsigned char phys[MII_CNT]; /* MII device addresses, only first one used */
/* Forward declarations for the driver's internal entry points. */
400 static int read_eeprom(long ioaddr, int location);
401 static int mdio_read(long ioaddr, int phy_id, int location);
402 static void mdio_write(long ioaddr, int phy_id, int location, int value);
403 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
404 static int yellowfin_open(struct net_device *dev);
405 static void yellowfin_timer(unsigned long data);
406 static void yellowfin_tx_timeout(struct net_device *dev);
407 static void yellowfin_init_ring(struct net_device *dev);
408 static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev);
409 static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
410 static int yellowfin_rx(struct net_device *dev);
411 static void yellowfin_error(struct net_device *dev, int intr_status);
412 static int yellowfin_close(struct net_device *dev);
413 static struct net_device_stats *yellowfin_get_stats(struct net_device *dev);
414 static void set_rx_mode(struct net_device *dev);
/* PCI probe: enable the device, map its registers, read the MAC address,
allocate the DMA rings, and register the net_device. Error paths unwind
in reverse order via the goto labels at the bottom. */
417 static int __devinit yellowfin_init_one(struct pci_dev *pdev,
418 const struct pci_device_id *ent)
420 struct net_device *dev;
421 struct yellowfin_private *np;
423 int chip_idx = ent->driver_data;
425 long ioaddr, real_ioaddr;
426 int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
427 int drv_flags = pci_id_tbl[chip_idx].drv_flags;
431 /* when built into the kernel, we only print version if device is found */
433 static int printed_version;
434 if (!printed_version++)
438 i = pci_enable_device(pdev);
441 dev = alloc_etherdev(sizeof(*np));
443 printk (KERN_ERR PFX "cannot allocate ethernet device\n");
446 SET_MODULE_OWNER(dev);
447 SET_NETDEV_DEV(dev, &pdev->dev);
451 if (pci_request_regions(pdev, dev->name))
452 goto err_out_free_netdev;
454 pci_set_master (pdev);
/* BAR 0 for I/O-port access, BAR 1 when memory-mapped (USE_IO_OPS not
defined); the selecting #ifdef lines are outside this excerpt. */
457 real_ioaddr = ioaddr = pci_resource_start (pdev, 0);
459 real_ioaddr = ioaddr = pci_resource_start (pdev, 1);
460 ioaddr = (long) ioremap(ioaddr, YELLOWFIN_SIZE);
462 goto err_out_free_res;
/* MAC address: either read back from the station-address registers, or
fetched from the serial EEPROM (with a 0x100 offset on some boards). */
466 if (drv_flags & DontUseEeprom)
467 for (i = 0; i < 6; i++)
468 dev->dev_addr[i] = inb(ioaddr + StnAddr + i);
470 int ee_offset = (read_eeprom(ioaddr, 6) == 0xff ? 0x100 : 0);
471 for (i = 0; i < 6; i++)
472 dev->dev_addr[i] = read_eeprom(ioaddr, ee_offset + i);
475 /* Reset the chip. */
476 outl(0x80000000, ioaddr + DMACtrl);
478 dev->base_addr = ioaddr;
481 pci_set_drvdata(pdev, dev);
482 spin_lock_init(&np->lock);
485 np->chip_id = chip_idx;
486 np->drv_flags = drv_flags;
/* Allocate the three DMA-coherent regions: Tx ring, Rx ring, Tx status. */
488 ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
490 goto err_out_cleardev;
491 np->tx_ring = (struct yellowfin_desc *)ring_space;
492 np->tx_ring_dma = ring_dma;
494 ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
496 goto err_out_unmap_tx;
497 np->rx_ring = (struct yellowfin_desc *)ring_space;
498 np->rx_ring_dma = ring_dma;
500 ring_space = pci_alloc_consistent(pdev, STATUS_TOTAL_SIZE, &ring_dma);
502 goto err_out_unmap_rx;
503 np->tx_status = (struct tx_status_words *)ring_space;
504 np->tx_status_dma = ring_dma;
/* Kernel-mode boot parameters can override the module option. */
507 option = dev->mem_start;
509 /* The lower four bits are the media type. */
513 np->default_port = option & 15;
514 if (np->default_port)
517 if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0)
523 /* The Yellowfin-specific entries in the device structure. */
524 dev->open = &yellowfin_open;
525 dev->hard_start_xmit = &yellowfin_start_xmit;
526 dev->stop = &yellowfin_close;
527 dev->get_stats = &yellowfin_get_stats;
528 dev->set_multicast_list = &set_rx_mode;
529 dev->do_ioctl = &netdev_ioctl;
530 dev->tx_timeout = yellowfin_tx_timeout;
531 dev->watchdog_timeo = TX_TIMEOUT;
536 i = register_netdev(dev);
538 goto err_out_unmap_status;
540 printk(KERN_INFO "%s: %s type %8x at 0x%lx, ",
541 dev->name, pci_id_tbl[chip_idx].name, inl(ioaddr + ChipRev), ioaddr);
542 for (i = 0; i < 5; i++)
543 printk("%2.2x:", dev->dev_addr[i]);
544 printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
/* Scan all 32 MII addresses for responding PHYs; 0x0000/0xffff mean none. */
546 if (np->drv_flags & HasMII) {
547 int phy, phy_idx = 0;
548 for (phy = 0; phy < 32 && phy_idx < MII_CNT; phy++) {
549 int mii_status = mdio_read(ioaddr, phy, 1);
550 if (mii_status != 0xffff && mii_status != 0x0000) {
551 np->phys[phy_idx++] = phy;
552 np->advertising = mdio_read(ioaddr, phy, 4);
553 printk(KERN_INFO "%s: MII PHY found at address %d, status "
554 "0x%4.4x advertising %4.4x.\n",
555 dev->name, phy, mii_status, np->advertising);
558 np->mii_cnt = phy_idx;
/* Error unwinding: free resources in reverse order of acquisition. */
565 err_out_unmap_status:
566 pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status,
569 pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
571 pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
573 pci_set_drvdata(pdev, NULL);
575 iounmap((void *)ioaddr);
578 pci_release_regions(pdev);
/* Read one byte from the serial EEPROM at 'location'. Issues a read command
and polls the busy bit with a bounded spin before fetching the data byte. */
584 static int __devinit read_eeprom(long ioaddr, int location)
586 int bogus_cnt = 10000; /* Typical 33Mhz: 1050 ticks */
588 outb(location, ioaddr + EEAddr);
/* 0x30 = read command; the high 3 bits of the address go in EECtrl. */
589 outb(0x30 | ((location >> 8) & 7), ioaddr + EECtrl);
/* Bit 0x80 of EEStatus is busy; bail out after bogus_cnt polls. */
590 while ((inb(ioaddr + EEStatus) & 0x80) && --bogus_cnt > 0)
592 return inb(ioaddr + EERead);
595 /* MII Management Data I/O accesses.
596 These routines assume the MDIO controller is idle, and do not exit until
597 the command is finished. */
/* Read one 16-bit MII register from the PHY at phy_id. */
599 static int mdio_read(long ioaddr, int phy_id, int location)
603 outw((phy_id<<8) + location, ioaddr + MII_Addr)
604 outw(1, ioaddr + MII_Cmd);
/* Poll the busy bit (bit 0) with a bounded spin. */
605 for (i = 10000; i >= 0; i--)
606 if ((inw(ioaddr + MII_Status) & 1) == 0)
608 return inw(ioaddr + MII_Rd_Data);
/* Write one 16-bit value to an MII register of the PHY at phy_id. */
611 static void mdio_write(long ioaddr, int phy_id, int location, int value)
615 outw((phy_id<<8) + location, ioaddr + MII_Addr);
616 outw(value, ioaddr + MII_Wr_Data);
618 /* Wait for the command to finish. */
619 for (i = 10000; i >= 0; i--)
620 if ((inw(ioaddr + MII_Status) & 1) == 0)
/* net_device open(): reset the chip, grab the (shared) IRQ, set up the DMA
rings and chip registers, start the Rx/Tx channels, and arm the media
timer. Returns 0 on success. */
626 static int yellowfin_open(struct net_device *dev)
628 struct yellowfin_private *yp = dev->priv;
629 long ioaddr = dev->base_addr;
632 /* Reset the chip. */
633 outl(0x80000000, ioaddr + DMACtrl);
635 i = request_irq(dev->irq, &yellowfin_interrupt, SA_SHIRQ, dev->name, dev);
638 if (yellowfin_debug > 1)
639 printk(KERN_DEBUG "%s: yellowfin_open() irq %d.\n",
640 dev->name, dev->irq);
642 yellowfin_init_ring(dev);
/* Point the chip at the freshly initialized descriptor rings. */
644 outl(yp->rx_ring_dma, ioaddr + RxPtr);
645 outl(yp->tx_ring_dma, ioaddr + TxPtr);
647 for (i = 0; i < 6; i++)
648 outb(dev->dev_addr[i], ioaddr + StnAddr + i);
650 /* Set up various condition 'select' registers.
651 There are no options here. */
652 outl(0x00800080, ioaddr + TxIntrSel); /* Interrupt on Tx abort */
653 outl(0x00800080, ioaddr + TxBranchSel); /* Branch on Tx abort */
654 outl(0x00400040, ioaddr + TxWaitSel); /* Wait on Tx status */
655 outl(0x00400040, ioaddr + RxIntrSel); /* Interrupt on Rx done */
656 outl(0x00400040, ioaddr + RxBranchSel); /* Branch on Rx error */
657 outl(0x00400040, ioaddr + RxWaitSel); /* Wait on Rx done */
659 /* Initialize other registers: with so many, this will eventually be
660 converted to an offset/value list. */
661 outl(dma_ctrl, ioaddr + DMACtrl);
662 outw(fifo_cfg, ioaddr + FIFOcfg);
663 /* Enable automatic generation of flow control frames, period 0xffff. */
664 outl(0x0030FFFF, ioaddr + FlowCtrl);
666 yp->tx_threshold = 32;
667 outl(yp->tx_threshold, ioaddr + TxThreshold);
669 if (dev->if_port == 0)
670 dev->if_port = yp->default_port;
672 netif_start_queue(dev);
674 /* Setting the Rx mode will start the Rx process. */
675 if (yp->drv_flags & IsGigabit) {
676 /* We are always in full-duplex mode with gigabit! */
678 outw(0x01CF, ioaddr + Cnfg);
680 outw(0x0018, ioaddr + FrameGap0); /* 0060/4060 for non-MII 10baseT */
681 outw(0x1018, ioaddr + FrameGap1);
682 outw(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
686 /* Enable interrupts by setting the interrupt mask. */
687 outw(0x81ff, ioaddr + IntrEnb); /* See enum intr_status_bits */
688 outw(0x0000, ioaddr + EventStatus); /* Clear non-interrupting events */
689 outl(0x80008000, ioaddr + RxCtrl); /* Start Rx and Tx channels. */
690 outl(0x80008000, ioaddr + TxCtrl);
692 if (yellowfin_debug > 2) {
693 printk(KERN_DEBUG "%s: Done yellowfin_open().\n",
697 /* Set the timer to check for link beat. */
698 init_timer(&yp->timer);
699 yp->timer.expires = jiffies + 3*HZ;
700 yp->timer.data = (unsigned long)dev;
701 yp->timer.function = &yellowfin_timer; /* timer handler */
702 add_timer(&yp->timer);
/* Periodic media timer: polls the MII PHY for link state and negotiated
duplex, updates the Cnfg register to match, then re-arms itself. */
707 static void yellowfin_timer(unsigned long data)
709 struct net_device *dev = (struct net_device *)data;
710 struct yellowfin_private *yp = dev->priv;
711 long ioaddr = dev->base_addr;
712 int next_tick = 60*HZ;
714 if (yellowfin_debug > 3) {
715 printk(KERN_DEBUG "%s: Yellowfin timer tick, status %8.8x.\n",
716 dev->name, inw(ioaddr + IntrStatus));
/* Read link status and the partner's advertised abilities from PHY 0. */
720 int bmsr = mdio_read(ioaddr, yp->phys[0], MII_BMSR);
721 int lpa = mdio_read(ioaddr, yp->phys[0], MII_LPA);
722 int negotiated = lpa & yp->advertising;
723 if (yellowfin_debug > 1)
724 printk(KERN_DEBUG "%s: MII #%d status register is %4.4x, "
725 "link partner capability %4.4x.\n",
726 dev->name, yp->phys[0], bmsr, lpa);
/* Derive duplex from negotiation and push it into the chip config. */
728 yp->full_duplex = mii_duplex(yp->duplex_lock, negotiated);
730 outw(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
732 if (bmsr & BMSR_LSTATUS)
738 yp->timer.expires = jiffies + next_tick;
739 add_timer(&yp->timer);
/* Watchdog callback: the transmitter has been idle for TX_TIMEOUT jiffies.
Logs ring state, kicks the Tx channel, and wakes the queue if not full. */
742 static void yellowfin_tx_timeout(struct net_device *dev)
744 struct yellowfin_private *yp = dev->priv;
745 long ioaddr = dev->base_addr;
747 printk(KERN_WARNING "%s: Yellowfin transmit timed out at %d/%d Tx "
748 "status %4.4x, Rx status %4.4x, resetting...\n",
749 dev->name, yp->cur_tx, yp->dirty_tx,
750 inl(ioaddr + TxStatus), inl(ioaddr + RxStatus));
752 /* Note: these should be KERN_DEBUG. */
753 if (yellowfin_debug) {
755 printk(KERN_WARNING " Rx ring %p: ", yp->rx_ring);
756 for (i = 0; i < RX_RING_SIZE; i++)
757 printk(" %8.8x", yp->rx_ring[i].result_status);
758 printk("\n"KERN_WARNING" Tx ring %p: ", yp->tx_ring);
759 for (i = 0; i < TX_RING_SIZE; i++)
760 printk(" %4.4x /%8.8x", yp->tx_status[i].tx_errs,
761 yp->tx_ring[i].result_status);
765 /* If the hardware is found to hang regularly, we will update the code
766 to reinitialize the chip here. */
769 /* Wake the potentially-idle transmit channel. */
770 outl(0x10001000, dev->base_addr + TxCtrl);
771 if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
772 netif_wake_queue (dev); /* Typical path */
774 dev->trans_start = jiffies;
775 yp->stats.tx_errors++;
778 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
779 static void yellowfin_init_ring(struct net_device *dev)
781 struct yellowfin_private *yp = dev->priv;
785 yp->cur_rx = yp->cur_tx = 0;
/* Rx buffers must hold a full frame plus slack; larger MTUs get mtu+32. */
788 yp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
/* Chain every Rx descriptor to its successor, wrapping at the ring end. */
790 for (i = 0; i < RX_RING_SIZE; i++) {
791 yp->rx_ring[i].dbdma_cmd =
792 cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
793 yp->rx_ring[i].branch_addr = cpu_to_le32(yp->rx_ring_dma +
794 ((i+1)%RX_RING_SIZE)*sizeof(struct yellowfin_desc));
/* Allocate an skb per Rx slot and hand its data area to the chip for DMA. */
797 for (i = 0; i < RX_RING_SIZE; i++) {
798 struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz);
799 yp->rx_skbuff[i] = skb;
802 skb->dev = dev; /* Mark as being used by this device. */
803 skb_reserve(skb, 2); /* 16 byte align the IP header. */
804 yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
805 skb->tail, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
807 yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);
808 yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
812 /* In this mode the Tx ring needs only a single descriptor. */
813 for (i = 0; i < TX_RING_SIZE; i++) {
814 yp->tx_skbuff[i] = 0;
815 yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
816 yp->tx_ring[i].branch_addr = cpu_to_le32(yp->tx_ring_dma +
817 ((i+1)%TX_RING_SIZE)*sizeof(struct yellowfin_desc));
/* Wrap the ring: the final descriptor branches back unconditionally. */
820 yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS);
825 /* Tx ring needs a pair of descriptors, the second for the status. */
826 for (i = 0; i < TX_RING_SIZE; i++) {
828 yp->tx_skbuff[i] = 0;
829 /* Branch on Tx error. */
830 yp->tx_ring[j].dbdma_cmd = cpu_to_le32(CMD_STOP);
/* FIX: restore the closing parenthesis dropped from this statement. */
831 yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
832 (j+1)*sizeof(struct yellowfin_desc));
/* FIX: yp->drv_flags, not yp->flags -- the private struct declares
drv_flags, and all other capability tests here (HasMII, IsGigabit)
read yp->drv_flags. */
834 if (yp->drv_flags & FullTxStatus) {
835 yp->tx_ring[j].dbdma_cmd =
836 cpu_to_le32(CMD_TXSTATUS | sizeof(*yp->tx_status));
837 yp->tx_ring[j].request_cnt = sizeof(*yp->tx_status);
/* FIX: restore the closing parenthesis dropped from this statement. */
838 yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
839 i*sizeof(struct tx_status_words));
841 /* Symbios chips write only tx_errs word. */
842 yp->tx_ring[j].dbdma_cmd =
843 cpu_to_le32(CMD_TXSTATUS | INTR_ALWAYS | 2);
844 yp->tx_ring[j].request_cnt = 2;
845 /* Om pade ummmmm... */
/* FIX: subtracting a struct pointer from a u16 pointer is ill-formed C;
offsetof() expresses the intended byte offset of tx_errs within the
status record directly (offsetof comes via <linux/kernel.h>). */
846 yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
847 i*sizeof(struct tx_status_words) +
848 offsetof(struct tx_status_words, tx_errs));
851 yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
852 ((j+1)%(2*TX_RING_SIZE))*sizeof(struct yellowfin_desc));
/* Interrupt and branch on the status descriptor of the last ring entry. */
855 yp->tx_ring[++j].dbdma_cmd |= cpu_to_le32(BRANCH_ALWAYS | INTR_ALWAYS);
858 yp->tx_tail_desc = &yp->tx_status[0];
/* Queue one frame for transmission. Fills the next Tx descriptor (or pair),
writes the ownership/command word last, then kicks the Tx channel. The
queue is stopped up front and re-enabled only if room remains. */
862 static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev)
864 struct yellowfin_private *yp = dev->priv;
868 netif_stop_queue (dev);
870 /* Note: Ordering is important here, set the field with the
871 "ownership" bit last, and only then increment cur_tx. */
873 /* Calculate the next Tx descriptor entry. */
874 entry = yp->cur_tx % TX_RING_SIZE;
876 if (gx_fix) { /* Note: only works for paddable protocols e.g. IP. */
877 int cacheline_end = ((unsigned long)skb->data + skb->len) % 32;
878 /* Fix GX chipset errata. */
/* Pad so the frame does not end in the last 8 bytes of a cache line. */
879 if (cacheline_end > 24 || cacheline_end == 0) {
880 len = skb->len + 32 - cacheline_end + 1;
882 skb = skb_padto(skb, len);
/* skb_padto failed (freed the skb): drop the slot and keep going. */
885 yp->tx_skbuff[entry] = NULL;
886 netif_wake_queue(dev);
890 yp->tx_skbuff[entry] = skb;
/* Single-descriptor mode: map the data and chain a fresh stop command. */
893 yp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
894 skb->data, len, PCI_DMA_TODEVICE));
895 yp->tx_ring[entry].result_status = 0;
896 if (entry >= TX_RING_SIZE-1) {
897 /* New stop command. */
898 yp->tx_ring[0].dbdma_cmd = cpu_to_le32(CMD_STOP);
899 yp->tx_ring[TX_RING_SIZE-1].dbdma_cmd =
900 cpu_to_le32(CMD_TX_PKT|BRANCH_ALWAYS | len);
902 yp->tx_ring[entry+1].dbdma_cmd = cpu_to_le32(CMD_STOP);
903 yp->tx_ring[entry].dbdma_cmd =
904 cpu_to_le32(CMD_TX_PKT | BRANCH_IFTRUE | len);
/* Paired-descriptor mode: data descriptor is at entry*2 (entry<<1). */
908 yp->tx_ring[entry<<1].request_cnt = len;
909 yp->tx_ring[entry<<1].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
910 skb->data, len, PCI_DMA_TODEVICE));
911 /* The input_last (status-write) command is constant, but we must
912 rewrite the subsequent 'stop' command. */
916 unsigned next_entry = yp->cur_tx % TX_RING_SIZE;
917 yp->tx_ring[next_entry<<1].dbdma_cmd = cpu_to_le32(CMD_STOP);
919 /* Final step -- overwrite the old 'stop' command. */
/* Interrupt only every 6th frame to cut down interrupt load. */
921 yp->tx_ring[entry<<1].dbdma_cmd =
922 cpu_to_le32( ((entry % 6) == 0 ? CMD_TX_PKT|INTR_ALWAYS|BRANCH_IFTRUE :
923 CMD_TX_PKT | BRANCH_IFTRUE) | len);
926 /* Non-x86 Todo: explicitly flush cache lines here. */
928 /* Wake the potentially-idle transmit channel. */
929 outl(0x10001000, dev->base_addr + TxCtrl);
931 if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
932 netif_start_queue (dev); /* Typical path */
935 dev->trans_start = jiffies;
937 if (yellowfin_debug > 4) {
938 printk(KERN_DEBUG "%s: Yellowfin transmit frame #%d queued in slot %d.\n",
939 dev->name, yp->cur_tx, entry);
944 /* The interrupt handler does all of the Rx thread work and cleans up
945 after the Tx thread. */
946 static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
948 struct net_device *dev = dev_instance;
949 struct yellowfin_private *yp;
951 int boguscnt = max_interrupt_work;
952 unsigned int handled = 0;
954 #ifndef final_version /* Can never occur. */
956 printk (KERN_ERR "yellowfin_interrupt(): irq %d for unknown device.\n", irq);
961 ioaddr = dev->base_addr;
/* Serialize against the timer and transmit paths. */
964 spin_lock (&yp->lock);
/* Reading IntrClear both returns and acknowledges the pending events. */
967 u16 intr_status = inw(ioaddr + IntrClear);
969 if (yellowfin_debug > 4)
970 printk(KERN_DEBUG "%s: Yellowfin interrupt, status %4.4x.\n",
971 dev->name, intr_status);
973 if (intr_status == 0)
977 if (intr_status & (IntrRxDone | IntrEarlyRx)) {
979 outl(0x10001000, ioaddr + RxCtrl); /* Wake Rx engine. */
/* Fast Tx reap (single-descriptor mode): free skbs whose descriptors
the chip has marked complete via result_status. */
983 for (; yp->cur_tx - yp->dirty_tx > 0; yp->dirty_tx++) {
984 int entry = yp->dirty_tx % TX_RING_SIZE;
987 if (yp->tx_ring[entry].result_status == 0)
989 skb = yp->tx_skbuff[entry];
990 yp->stats.tx_packets++;
991 yp->stats.tx_bytes += skb->len;
992 /* Free the original skb. */
993 pci_unmap_single(yp->pci_dev, yp->tx_ring[entry].addr,
994 skb->len, PCI_DMA_TODEVICE);
995 dev_kfree_skb_irq(skb);
996 yp->tx_skbuff[entry] = 0;
999 && yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) {
1000 /* The ring is no longer full, clear tbusy. */
1002 netif_wake_queue(dev);
/* Full-status Tx reap (paired-descriptor mode): walk completed status
words, classify errors, free skbs, and advance dirty_tx. */
1005 if ((intr_status & IntrTxDone) || (yp->tx_tail_desc->tx_errs)) {
1006 unsigned dirty_tx = yp->dirty_tx;
1008 for (dirty_tx = yp->dirty_tx; yp->cur_tx - dirty_tx > 0;
1010 /* Todo: optimize this. */
1011 int entry = dirty_tx % TX_RING_SIZE;
1012 u16 tx_errs = yp->tx_status[entry].tx_errs;
1013 struct sk_buff *skb;
1015 #ifndef final_version
1016 if (yellowfin_debug > 5)
1017 printk(KERN_DEBUG "%s: Tx queue %d check, Tx status "
1018 "%4.4x %4.4x %4.4x %4.4x.\n",
1020 yp->tx_status[entry].tx_cnt,
1021 yp->tx_status[entry].tx_errs,
1022 yp->tx_status[entry].total_tx_cnt,
1023 yp->tx_status[entry].paused)
1026 break; /* It still hasn't been Txed */
1027 skb = yp->tx_skbuff[entry];
1028 if (tx_errs & 0xF810) {
1029 /* There was a major error, log it. */
1030 #ifndef final_version
1031 if (yellowfin_debug > 1)
1032 printk(KERN_DEBUG "%s: Transmit error, Tx status %4.4x.\n",
1033 dev->name, tx_errs);
1035 yp->stats.tx_errors++;
1036 if (tx_errs & 0xF800) yp->stats.tx_aborted_errors++;
1037 if (tx_errs & 0x0800) yp->stats.tx_carrier_errors++;
1038 if (tx_errs & 0x2000) yp->stats.tx_window_errors++;
1039 if (tx_errs & 0x8000) yp->stats.tx_fifo_errors++;
1041 #ifndef final_version
1042 if (yellowfin_debug > 4)
1043 printk(KERN_DEBUG "%s: Normal transmit, Tx status %4.4x.\n",
1044 dev->name, tx_errs);
1046 yp->stats.tx_bytes += skb->len;
/* Low 4 bits of the status word hold the collision count. */
1047 yp->stats.collisions += tx_errs & 15;
1048 yp->stats.tx_packets++;
1050 /* Free the original skb. */
1051 pci_unmap_single(yp->pci_dev,
1052 yp->tx_ring[entry<<1].addr, skb->len,
1054 dev_kfree_skb_irq(skb);
1055 yp->tx_skbuff[entry] = 0;
1056 /* Mark status as empty. */
1057 yp->tx_status[entry].tx_errs = 0;
1060 #ifndef final_version
1061 if (yp->cur_tx - dirty_tx > TX_RING_SIZE) {
1062 printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
1063 dev->name, dirty_tx, yp->cur_tx, yp->tx_full);
1064 dirty_tx += TX_RING_SIZE;
1069 && yp->cur_tx - dirty_tx < TX_QUEUE_SIZE - 2) {
1070 /* The ring is no longer full, clear tbusy. */
1072 netif_wake_queue(dev);
1075 yp->dirty_tx = dirty_tx;
1076 yp->tx_tail_desc = &yp->tx_status[dirty_tx % TX_RING_SIZE];
1080 /* Log errors and other uncommon events. */
1081 if (intr_status & 0x2ee) /* Abnormal error summary. */
1082 yellowfin_error(dev, intr_status);
/* Bound the per-interrupt work so a storm cannot starve the system. */
1084 if (--boguscnt < 0) {
1085 printk(KERN_WARNING "%s: Too much work at interrupt, "
1086 "status=0x%4.4x.\n",
1087 dev->name, intr_status);
1092 if (yellowfin_debug > 3)
1093 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1094 dev->name, inw(ioaddr + IntrStatus));
1096 spin_unlock (&yp->lock);
1097 return IRQ_RETVAL(handled);
1100 /* This routine is logically part of the interrupt handler, but separated
1101 for clarity and better register allocation. */
/*
 * Drain completed descriptors from the Rx ring: classify each finished
 * frame (spanned-buffer overrun, gigabit status error, 10/100 status
 * error, or good), deliver good frames to the stack (copying small ones
 * per rx_copybreak), then refill the ring with fresh skbuffs.
 * NOTE(review): this chunk is a partial extraction -- several original
 * lines (declarations, braces, a few statements) are missing here.
 */
1102 static int yellowfin_rx(struct net_device *dev)
1104 struct yellowfin_private *yp = dev->priv;
1105 int entry = yp->cur_rx % RX_RING_SIZE;
/* Cap one pass at a full ring's worth of packets. */
1106 int boguscnt = yp->dirty_rx + RX_RING_SIZE - yp->cur_rx;
1108 if (yellowfin_debug > 4) {
1109 printk(KERN_DEBUG " In yellowfin_rx(), entry %d status %8.8x.\n",
1110 entry, yp->rx_ring[entry].result_status);
1111 printk(KERN_DEBUG " #%d desc. %8.8x %8.8x %8.8x.\n",
1112 entry, yp->rx_ring[entry].dbdma_cmd, yp->rx_ring[entry].addr,
1113 yp->rx_ring[entry].result_status);
1116 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1118 struct yellowfin_desc *desc = &yp->rx_ring[entry];
1119 struct sk_buff *rx_skb = yp->rx_skbuff[entry];
/* Zero result_status: the chip has not completed this slot yet. */
1125 if(!desc->result_status)
/* Make the DMA'd buffer visible to the CPU before inspecting it. */
1127 pci_dma_sync_single_for_cpu(yp->pci_dev, desc->addr,
1128 yp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1129 desc_status = le32_to_cpu(desc->result_status) >> 16;
1130 buf_addr = rx_skb->tail;
/* Byte count derived from the command vs. result descriptor words. */
1131 data_size = (le32_to_cpu(desc->dbdma_cmd) -
1132 le32_to_cpu(desc->result_status)) & 0xffff;
/* A 16-bit status word sits in the last two bytes of the buffer. */
1133 frame_status = le16_to_cpu(get_unaligned((s16*)&(buf_addr[data_size - 2])));
1134 if (yellowfin_debug > 4)
1135 printk(KERN_DEBUG " yellowfin_rx() status was %4.4x.\n",
/* No EOP: frame overflowed a single buffer -- count as length error. */
1139 if ( ! (desc_status & RX_EOP)) {
1141 printk(KERN_WARNING "%s: Oversized Ethernet frame spanned multiple buffers,"
1142 " status %4.4x, data_size %d!\n", dev->name, desc_status, data_size);
1143 yp->stats.rx_length_errors++;
/* Gigabit chips: error bits are in the appended frame status word. */
1144 } else if ((yp->drv_flags & IsGigabit) && (frame_status & 0x0038)) {
1145 /* There was a error. */
1146 if (yellowfin_debug > 3)
1147 printk(KERN_DEBUG " yellowfin_rx() Rx error was %4.4x.\n",
1149 yp->stats.rx_errors++;
1150 if (frame_status & 0x0060) yp->stats.rx_length_errors++;
1151 if (frame_status & 0x0008) yp->stats.rx_frame_errors++;
1152 if (frame_status & 0x0010) yp->stats.rx_crc_errors++;
1153 if (frame_status < 0) yp->stats.rx_dropped++;
/* Non-gigabit chips: decode the two trailing status bytes instead. */
1154 } else if ( !(yp->drv_flags & IsGigabit) &&
1155 ((buf_addr[data_size-1] & 0x85) || buf_addr[data_size-2] & 0xC0)) {
1156 u8 status1 = buf_addr[data_size-2];
1157 u8 status2 = buf_addr[data_size-1];
1158 yp->stats.rx_errors++;
1159 if (status1 & 0xC0) yp->stats.rx_length_errors++;
1160 if (status2 & 0x03) yp->stats.rx_frame_errors++;
1161 if (status2 & 0x04) yp->stats.rx_crc_errors++;
1162 if (status2 & 0x80) yp->stats.rx_dropped++;
1163 #ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */
/* Prototype silicon may accept frames not addressed to us; drop them. */
1164 } else if ((yp->flags & HasMACAddrBug) &&
1165 memcmp(le32_to_cpu(yp->rx_ring_dma +
1166 entry*sizeof(struct yellowfin_desc)),
1167 dev->dev_addr, 6) != 0 &&
1168 memcmp(le32_to_cpu(yp->rx_ring_dma +
1169 entry*sizeof(struct yellowfin_desc)),
1170 "\377\377\377\377\377\377", 6) != 0) {
1171 if (bogus_rx++ == 0)
1172 printk(KERN_WARNING "%s: Bad frame to %2.2x:%2.2x:%2.2x:%2.2x:"
1174 dev->name, buf_addr[0], buf_addr[1], buf_addr[2],
1175 buf_addr[3], buf_addr[4], buf_addr[5]);
/* Good frame: trim the trailing status/CRC bytes, hand it upward. */
1178 struct sk_buff *skb;
1179 int pkt_len = data_size -
1180 (yp->chip_id ? 7 : 8 + buf_addr[data_size - 8]);
1181 /* To verify: Yellowfin Length should omit the CRC! */
1183 #ifndef final_version
1184 if (yellowfin_debug > 4)
1185 printk(KERN_DEBUG " yellowfin_rx() normal Rx pkt length %d"
1186 " of %d, bogus_cnt %d.\n",
1187 pkt_len, data_size, boguscnt);
1189 /* Check if the packet is long enough to just pass up the skbuff
1190 without copying to a properly sized skbuff. */
1191 if (pkt_len > rx_copybreak) {
/* Large packet: pass the ring buffer itself up and unmap it. */
1192 skb_put(skb = rx_skb, pkt_len);
1193 pci_unmap_single(yp->pci_dev,
1194 yp->rx_ring[entry].addr,
1196 PCI_DMA_FROMDEVICE);
1197 yp->rx_skbuff[entry] = NULL;
/* Small packet: copy into a fresh, tightly-sized skbuff. */
1199 skb = dev_alloc_skb(pkt_len + 2);
1203 skb_reserve(skb, 2); /* 16 byte align the IP header */
1205 eth_copy_and_sum(skb, rx_skb->tail, pkt_len, 0);
1206 skb_put(skb, pkt_len);
1208 memcpy(skb_put(skb, pkt_len),
1209 rx_skb->tail, pkt_len);
/* Return the (still mapped) ring buffer to the device. */
1211 pci_dma_sync_single_for_device(yp->pci_dev, desc->addr,
1213 PCI_DMA_FROMDEVICE);
1215 skb->protocol = eth_type_trans(skb, dev);
1217 dev->last_rx = jiffies;
1218 yp->stats.rx_packets++;
1219 yp->stats.rx_bytes += pkt_len;
1221 entry = (++yp->cur_rx) % RX_RING_SIZE;
1224 /* Refill the Rx ring buffers. */
1225 for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) {
1226 entry = yp->dirty_rx % RX_RING_SIZE;
1227 if (yp->rx_skbuff[entry] == NULL) {
1228 struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz);
1230 break; /* Better luck next round. */
1231 yp->rx_skbuff[entry] = skb;
1232 skb->dev = dev; /* Mark as being used by this device. */
1233 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1234 yp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
1235 skb->tail, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
/* Park the new slot with CMD_STOP until its neighbor is re-armed. */
1237 yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP);
1238 yp->rx_ring[entry].result_status = 0; /* Clear complete bit. */
/* Re-arm the previous descriptor so the chip can use this buffer. */
1240 yp->rx_ring[entry - 1].dbdma_cmd =
1241 cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
/* Last ring slot branches back to slot 0, closing the ring. */
1243 yp->rx_ring[RX_RING_SIZE - 1].dbdma_cmd =
1244 cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | BRANCH_ALWAYS
1251 static void yellowfin_error(struct net_device *dev, int intr_status)
1253 struct yellowfin_private *yp = dev->priv;
1255 printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1256 dev->name, intr_status);
1257 /* Hmmmmm, it's not clear what to do here. */
1258 if (intr_status & (IntrTxPCIErr | IntrTxPCIFault))
1259 yp->stats.tx_errors++;
1260 if (intr_status & (IntrRxPCIErr | IntrRxPCIFault))
1261 yp->stats.rx_errors++;
/*
 * Shut the interface down: stop the queue, mask interrupts, halt the
 * chip's Tx/Rx engines, kill the media timer, release the IRQ, and free
 * every skbuff still held by the Rx and Tx rings.  An optional x86-only
 * debug section dumps the descriptor rings first.
 * NOTE(review): partial extraction -- some declarations ("int i;"),
 * braces, and the final "return 0;" are missing from this chunk.
 */
1264 static int yellowfin_close(struct net_device *dev)
1266 long ioaddr = dev->base_addr;
1267 struct yellowfin_private *yp = dev->priv;
1270 netif_stop_queue (dev);
1272 if (yellowfin_debug > 1) {
1273 printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %4.4x "
1274 "Rx %4.4x Int %2.2x.\n",
1275 dev->name, inw(ioaddr + TxStatus),
1276 inw(ioaddr + RxStatus), inw(ioaddr + IntrStatus));
1277 printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
1278 dev->name, yp->cur_tx, yp->dirty_tx, yp->cur_rx, yp->dirty_rx);
1281 /* Disable interrupts by clearing the interrupt mask. */
1282 outw(0x0000, ioaddr + IntrEnb);
1284 /* Stop the chip's Tx and Rx processes. */
1285 outl(0x80000000, ioaddr + RxCtrl);
1286 outl(0x80000000, ioaddr + TxCtrl);
/* Stop the periodic media/duplex check before tearing state down. */
1288 del_timer(&yp->timer);
/* Debug-only ring dump; compiled for x86 builds only. */
1290 #if defined(__i386__)
1291 if (yellowfin_debug > 2) {
1292 printk("\n"KERN_DEBUG" Tx ring at %8.8llx:\n",
1293 (unsigned long long)yp->tx_ring_dma);
1294 for (i = 0; i < TX_RING_SIZE*2; i++)
1295 printk(" %c #%d desc. %8.8x %8.8x %8.8x %8.8x.\n",
/* '>' marks the descriptor the chip's Tx pointer currently addresses. */
1296 inl(ioaddr + TxPtr) == (long)&yp->tx_ring[i] ? '>' : ' ',
1297 i, yp->tx_ring[i].dbdma_cmd, yp->tx_ring[i].addr,
1298 yp->tx_ring[i].branch_addr, yp->tx_ring[i].result_status);
1299 printk(KERN_DEBUG " Tx status %p:\n", yp->tx_status);
1300 for (i = 0; i < TX_RING_SIZE; i++)
1301 printk(" #%d status %4.4x %4.4x %4.4x %4.4x.\n",
1302 i, yp->tx_status[i].tx_cnt, yp->tx_status[i].tx_errs,
1303 yp->tx_status[i].total_tx_cnt, yp->tx_status[i].paused);
1305 printk("\n"KERN_DEBUG " Rx ring %8.8llx:\n",
1306 (unsigned long long)yp->rx_ring_dma);
1307 for (i = 0; i < RX_RING_SIZE; i++) {
1308 printk(KERN_DEBUG " %c #%d desc. %8.8x %8.8x %8.8x\n",
1309 inl(ioaddr + RxPtr) == (long)&yp->rx_ring[i] ? '>' : ' ',
1310 i, yp->rx_ring[i].dbdma_cmd, yp->rx_ring[i].addr,
1311 yp->rx_ring[i].result_status);
1312 if (yellowfin_debug > 6) {
1313 if (get_unaligned((u8*)yp->rx_ring[i].addr) != 0x69) {
1315 for (j = 0; j < 0x50; j++)
1317 get_unaligned(((u16*)yp->rx_ring[i].addr) + j));
1323 #endif /* __i386__ debugging only */
1325 free_irq(dev->irq, dev);
1327 /* Free all the skbuffs in the Rx queue. */
1328 for (i = 0; i < RX_RING_SIZE; i++) {
1329 yp->rx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
/* Poison the address so late DMA is obvious in a crash dump. */
1330 yp->rx_ring[i].addr = 0xBADF00D0; /* An invalid address. */
1331 if (yp->rx_skbuff[i]) {
1332 dev_kfree_skb(yp->rx_skbuff[i]);
1334 yp->rx_skbuff[i] = 0;
/* Release any Tx skbuffs still awaiting completion. */
1336 for (i = 0; i < TX_RING_SIZE; i++) {
1337 if (yp->tx_skbuff[i])
1338 dev_kfree_skb(yp->tx_skbuff[i]);
1339 yp->tx_skbuff[i] = 0;
1342 #ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */
1343 if (yellowfin_debug > 0) {
1344 printk(KERN_DEBUG "%s: Received %d frames that we should not have.\n",
1345 dev->name, bogus_rx);
/* Return the interface statistics block maintained by this driver.
   NOTE(review): the tail of this function is missing from the extracted
   chunk; presumably it ends "return &yp->stats;" -- confirm against the
   full file. */
1352 static struct net_device_stats *yellowfin_get_stats(struct net_device *dev)
1354 struct yellowfin_private *yp = dev->priv;
1358 /* Set or clear the multicast filter for this adaptor. */
/*
 * Program the chip's address-match mode from dev->flags and the
 * multicast list: promiscuous (0x000F), all-multicast (0x000B), hashed
 * multicast (0x0003 plus a 64-bit hash table), or unicast/broadcast
 * only (0x0001).  Rx is paused via the Cnfg register while changing.
 * NOTE(review): partial extraction -- the declarations of hash_table,
 * i and bit, plus some closing braces, are missing from this chunk.
 */
1360 static void set_rx_mode(struct net_device *dev)
1362 struct yellowfin_private *yp = dev->priv;
1363 long ioaddr = dev->base_addr;
1364 u16 cfg_value = inw(ioaddr + Cnfg);
1366 /* Stop the Rx process to change any value. */
1367 outw(cfg_value & ~0x1000, ioaddr + Cnfg);
1368 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1369 /* Unconditionally log net taps. */
1370 printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
1371 outw(0x000F, ioaddr + AddrMode);
1372 } else if ((dev->mc_count > 64) || (dev->flags & IFF_ALLMULTI)) {
1373 /* Too many to filter well, or accept all multicasts. */
1374 outw(0x000B, ioaddr + AddrMode);
1375 } else if (dev->mc_count > 0) { /* Must use the multicast hash table. */
1376 struct dev_mc_list *mclist;
1379 memset(hash_table, 0, sizeof(hash_table));
1380 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1381 i++, mclist = mclist->next) {
1384 /* Due to a bug in the early chip versions, multiple filter
1385 slots must be set for each address. */
1386 if (yp->drv_flags & HasMulticastBug) {
/* CRC over 3-, 4-, 5- and 6-byte prefixes so all buggy slots match. */
1387 bit = (ether_crc_le(3, mclist->dmi_addr) >> 3) & 0x3f;
1388 hash_table[bit >> 4] |= (1 << bit);
1389 bit = (ether_crc_le(4, mclist->dmi_addr) >> 3) & 0x3f;
1390 hash_table[bit >> 4] |= (1 << bit);
1391 bit = (ether_crc_le(5, mclist->dmi_addr) >> 3) & 0x3f;
1392 hash_table[bit >> 4] |= (1 << bit);
1394 bit = (ether_crc_le(6, mclist->dmi_addr) >> 3) & 0x3f;
1395 hash_table[bit >> 4] |= (1 << bit);
1397 /* Copy the hash table to the chip. */
1398 for (i = 0; i < 4; i++)
1399 outw(hash_table[i], ioaddr + HashTbl + i*2);
1400 outw(0x0003, ioaddr + AddrMode);
1401 } else { /* Normal, unicast/broadcast-only mode. */
1402 outw(0x0001, ioaddr + AddrMode);
1404 /* Restart the Rx process. */
1405 outw(cfg_value | 0x1000, ioaddr + Cnfg);
1408 static int netdev_ethtool_ioctl(struct net_device *dev, void *useraddr)
1410 struct yellowfin_private *np = dev->priv;
1413 if (copy_from_user(ðcmd, useraddr, sizeof(ethcmd)))
1417 case ETHTOOL_GDRVINFO: {
1418 struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO};
1419 strcpy(info.driver, DRV_NAME);
1420 strcpy(info.version, DRV_VERSION);
1421 strcpy(info.bus_info, pci_name(np->pci_dev));
1422 if (copy_to_user(useraddr, &info, sizeof(info)))
/*
 * Device ioctl handler: routes SIOCETHTOOL to netdev_ethtool_ioctl()
 * and services the standard MII PHY ioctls (get PHY id, read register,
 * write register).  Writes to PHY 0's control/advertise registers also
 * update the driver's cached media state.
 * NOTE(review): partial extraction -- the switch(cmd) statement, some
 * case labels/returns and closing braces are missing from this chunk.
 */
1432 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1434 struct yellowfin_private *np = dev->priv;
1435 long ioaddr = dev->base_addr;
1436 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&rq->ifr_data;
1440 return netdev_ethtool_ioctl(dev, (void *) rq->ifr_data);
1441 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
1442 data->phy_id = np->phys[0] & 0x1f;
1445 case SIOCGMIIREG: /* Read MII PHY register. */
1446 data->val_out = mdio_read(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f);
1449 case SIOCSMIIREG: /* Write MII PHY register. */
/* Register writes can disturb the link; require admin capability. */
1450 if (!capable(CAP_NET_ADMIN))
/* Mirror writes to our primary PHY into the driver's media state. */
1452 if (data->phy_id == np->phys[0]) {
1453 u16 value = data->val_in;
1454 switch (data->reg_num) {
1456 /* Check for autonegotiation on or reset. */
1457 np->medialock = (value & 0x9000) ? 0 : 1;
1459 np->full_duplex = (value & 0x0100) ? 1 : 0;
1461 case 4: np->advertising = value; break;
1463 /* Perhaps check_duplex(dev), depending on chip semantics. */
1465 mdio_write(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
/*
 * PCI hot-unplug / driver-unload teardown: free the DMA-consistent
 * status and descriptor rings, unregister the net device, release the
 * PCI regions and mapping, and clear the drvdata pointer.
 * NOTE(review): the rings are freed *before* unregister_netdev(); if
 * the interface could still be open at this point that is a potential
 * use-after-free window -- verify ordering against the full file.
 * (Partial extraction: "np = dev->priv;", the NULL-dev check, and
 * free_netdev() lines are missing from this chunk.)
 */
1473 static void __devexit yellowfin_remove_one (struct pci_dev *pdev)
1475 struct net_device *dev = pci_get_drvdata(pdev);
1476 struct yellowfin_private *np;
1482 pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status,
1484 pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
1485 pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
1486 unregister_netdev (dev);
1488 pci_release_regions (pdev);
1491 iounmap ((void *) dev->base_addr);
1495 pci_set_drvdata(pdev, NULL);
/* PCI driver glue: table of supported devices plus probe/remove hooks.
   NOTE(review): the ".name = DRV_NAME," initializer and closing "};"
   appear to be missing from this extracted chunk. */
1499 static struct pci_driver yellowfin_driver = {
1501 .id_table = yellowfin_pci_tbl,
1502 .probe = yellowfin_init_one,
1503 .remove = __devexit_p(yellowfin_remove_one),
/* Module entry point: register the PCI driver with the core.
   NOTE(review): a "#ifdef MODULE printk(version);" block from the
   original file is missing in this extracted chunk. */
1507 static int __init yellowfin_init (void)
1509 /* when a module, this is printed whether or not devices are found in probe */
1513 return pci_module_init (&yellowfin_driver);
/* Module exit point: unregister the PCI driver, detaching all devices. */
1517 static void __exit yellowfin_cleanup (void)
1519 pci_unregister_driver (&yellowfin_driver);
/* Hook the init/exit routines into the module loader. */
1523 module_init(yellowfin_init);
1524 module_exit(yellowfin_cleanup);
1528 * compile-command: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c yellowfin.c"
1529 * compile-command-alphaLX: "gcc -DMODULE -Wall -Wstrict-prototypes -O2 -c yellowfin.c -fomit-frame-pointer -fno-strength-reduce -mno-fp-regs -Wa,-m21164a -DBWX_USABLE -DBWIO_ENABLED"
1530 * simple-compile-command: "gcc -DMODULE -O6 -c yellowfin.c"