1 /* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
3 Written 1998-2001 by Donald Becker.
5 Current Maintainer: Roger Luethi <rl@hellgate.ch>
7 This software may be used and distributed according to the terms of
8 the GNU General Public License (GPL), incorporated herein by reference.
9 Drivers based on or derived from this code fall under the GPL and must
10 retain the authorship, copyright and license notice. This file is not
11 a complete program and may only be used when the entire operating
12 system is licensed under the GPL.
14 This driver is designed for the VIA VT86C100A Rhine-I.
15 It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
16 and management NIC 6105M).
18 The author may be reached as becker@scyld.com, or C/O
19 Scyld Computing Corporation
20 410 Severn Ave., Suite 210
24 This driver contains some changes from the original Donald Becker
25 version. He may or may not be interested in bug reports on this
26 code. You can find his versions at:
27 http://www.scyld.com/network/via-rhine.html
30 Linux kernel version history:
33 - Jeff Garzik: softnet 'n stuff
36 - Justin Guyett: softnet and locking fixes
37 - Jeff Garzik: use PCI interface
40 - Urban Widmark: minor cleanups, merges from Becker 1.03a/1.04 versions
43 - Urban Widmark: use PCI DMA interface (with thanks to the eepro100.c
44 code) update "Theory of Operation" with
45 softnet/locking changes
46 - Dave Miller: PCI DMA and endian fixups
47 - Jeff Garzik: MOD_xxx race fixes, updated PCI resource allocation
50 - Urban Widmark: fix gcc 2.95.2 problem and
51 remove writel's to fixed address 0x7c
54 - Urban Widmark: mdio locking, bounce buffer changes
55 merges from Beckers 1.05 version
56 added netif_running_on/off support
59 - Urban Widmark: merges from Beckers 1.08b version (VT6102 + mdio)
60 set netif_running_on/off on startup, del_timer_sync
63 - Manfred Spraul: added reset into tx_timeout
66 - Urban Widmark: merges from Beckers 1.10 version
67 (media selection + eeprom reload)
68 - David Vrabel: merges from D-Link "1.11" version
69 (disable WOL and PME on startup)
72 - Manfred Spraul: use "singlecopy" for unaligned buffers
73 don't allocate bounce buffers for !ReqTxAlign cards
76 - David Woodhouse: Set dev->base_addr before the first time we call
77 wait_for_reset(). It's a lot happier that way.
78 Free np->tx_bufs only if we actually allocated it.
81 - Martin Eriksson: Allow Memory-Mapped IO to be enabled.
85 - Replace some MII-related magic numbers with constants
88 - fixes comments for Rhine-III
89 - removes W_MAX_TIMEOUT (unused)
90 - adds HasDavicomPhy for Rhine-I (basis: linuxfet driver; my card
91 is R-I and has Davicom chip, flag is referenced in kernel driver)
92 - sends chip_id as a parameter to wait_for_reset since np is not
93 initialized on first call
94 - changes mmio "else if (chip_id==VT6102)" to "else" so it will work
95 for Rhine-III's (documentation says same bit is correct)
96 - transmit frame queue message is off by one - fixed
97 - adds IntrNormalSummary to "Something Wicked" exclusion list
98 so normal interrupts will not trigger the message (src: Donald Becker)
100 - show confused chip where to continue after Tx error
101 - location of collision counter is chip specific
102 - allow selecting backoff algorithm (module parameter)
105 - Use new MII lib helper generic_mii_ioctl
107 LK1.1.16 (Roger Luethi)
109 - Handle Tx buffer underrun
110 - Fix bugs in full duplex handling
111 - New reset code uses "force reset" cmd on Rhine-II
114 LK1.1.17 (Roger Luethi)
115 - Fix race in via_rhine_start_tx()
116 - On errors, wait for Tx engine to turn off before scavenging
117 - Handle Tx descriptor write-back race on Rhine-II
118 - Force flushing for PCI posted writes
119 - More reset code changes
121 LK1.1.18 (Roger Luethi)
122 - No filtering multicast in promisc mode (Edward Peng)
123 - Fix for Rhine-I Tx timeouts
125 LK1.1.19 (Roger Luethi)
126 - Increase Tx threshold for unspecified errors
130 #define DRV_NAME "via-rhine"
131 #define DRV_VERSION "1.1.20-2.6"
132 #define DRV_RELDATE "May-23-2004"
135 /* A few user-configurable values.
136 These may be modified when a driver module is loaded. */
138 static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
139 static int max_interrupt_work = 20;
141 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
142 Setting to > 1518 effectively disables this feature. */
143 static int rx_copybreak;
145 /* Select a backoff algorithm (Ethernet capture effect) */
148 /* Used to pass the media type, etc.
149 Both 'options[]' and 'full_duplex[]' should exist for driver
151 The media type is usually passed in 'options[]'.
152 The default is autonegotiation for speed and duplex.
153 This should rarely be overridden.
154 Use option values 0x10/0x20 for 10Mbps, 0x100,0x200 for 100Mbps.
155 Use option values 0x10 and 0x100 for forcing half duplex fixed speed.
156 Use option values 0x20 and 0x200 for forcing full duplex operation.
158 #define MAX_UNITS 8 /* More are supported, limit only on options */
159 static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
160 static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
162 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
163 The Rhine has a 64 element 8390-like hash table. */
164 static const int multicast_filter_limit = 32;
167 /* Operational parameters that are set at compile time. */
169 /* Keep the ring sizes a power of two for compile efficiency.
170 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
171 Making the Tx ring too large decreases the effectiveness of channel
172 bonding and packet priority.
173 There are no ill effects from too-large receive rings. */
174 #define TX_RING_SIZE 16
175 #define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
176 #define RX_RING_SIZE 16
179 /* Operational parameters that usually are not changed. */
181 /* Time in jiffies before concluding the transmitter is hung. */
182 #define TX_TIMEOUT (2*HZ)
184 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
186 #if !defined(__OPTIMIZE__) || !defined(__KERNEL__)
187 #warning You must compile this file with the correct options!
188 #warning See the last lines of the source file.
189 #error You must compile this driver with "-O".
192 #include <linux/module.h>
193 #include <linux/kernel.h>
194 #include <linux/string.h>
195 #include <linux/timer.h>
196 #include <linux/errno.h>
197 #include <linux/ioport.h>
198 #include <linux/slab.h>
199 #include <linux/interrupt.h>
200 #include <linux/pci.h>
201 #include <linux/netdevice.h>
202 #include <linux/etherdevice.h>
203 #include <linux/skbuff.h>
204 #include <linux/init.h>
205 #include <linux/delay.h>
206 #include <linux/mii.h>
207 #include <linux/ethtool.h>
208 #include <linux/crc32.h>
209 #include <asm/processor.h> /* Processor type for cache alignment. */
210 #include <asm/bitops.h>
213 #include <asm/uaccess.h>
215 /* These identify the driver base version and may not be removed. */
216 static char version[] __devinitdata =
217 KERN_INFO DRV_NAME ".c:v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n";
219 static char shortname[] = DRV_NAME;
222 /* This driver was written to use PCI memory space. Some early versions
223 of the Rhine may only work correctly with I/O space accesses. */
224 #ifdef CONFIG_VIA_RHINE_MMIO
241 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
242 MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
243 MODULE_LICENSE("GPL");
245 MODULE_PARM(max_interrupt_work, "i");
246 MODULE_PARM(debug, "i");
247 MODULE_PARM(rx_copybreak, "i");
248 MODULE_PARM(backoff, "i");
249 MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
250 MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
251 MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
252 MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
253 MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
254 MODULE_PARM_DESC(backoff, "VIA Rhine: Bits 0-3: backoff algorithm");
255 MODULE_PARM_DESC(options, "VIA Rhine: Bits 0-3: media type, bit 17: full duplex");
256 MODULE_PARM_DESC(full_duplex, "VIA Rhine full duplex setting(s) (1)");
261 I. Board Compatibility
This driver is designed for the VIA 86c100A Rhine PCI Fast Ethernet
266 II. Board-specific settings
268 Boards with this chip are functional only in a bus-master PCI slot.
270 Many operational settings are loaded from the EEPROM to the Config word at
271 offset 0x78. For most of these settings, this driver assumes that they are
273 If this driver is compiled to use PCI memory space operations the EEPROM
274 must be configured to enable memory ops.
276 III. Driver operation
280 This driver uses two statically allocated fixed-size descriptor lists
281 formed into rings by a branch from the final descriptor to the beginning of
282 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
284 IIIb/c. Transmit/Receive Structure
286 This driver attempts to use a zero-copy receive and transmit scheme.
288 Alas, all data buffers are required to start on a 32 bit boundary, so
289 the driver must often copy transmit packets into bounce buffers.
291 The driver allocates full frame size skbuffs for the Rx ring buffers at
292 open() time and passes the skb->data field to the chip as receive data
293 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
294 a fresh skbuff is allocated and the frame is copied to the new skbuff.
295 When the incoming frame is larger, the skbuff is passed directly up the
296 protocol stack. Buffers consumed this way are replaced by newly allocated
297 skbuffs in the last phase of rhine_rx().
299 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
300 using a full-sized skbuff for small frames vs. the copying costs of larger
301 frames. New boards are typically used in generously configured machines
302 and the underfilled buffers have negligible impact compared to the benefit of
303 a single allocation size, so the default value of zero results in never
304 copying packets. When copying is done, the cost is usually mitigated by using
305 a combined copy/checksum routine. Copying also preloads the cache, which is
306 most useful with small frames.
308 Since the VIA chips are only able to transfer data to buffers on 32 bit
309 boundaries, the IP header at offset 14 in an ethernet frame isn't
310 longword aligned for further processing. Copying these unaligned buffers
311 has the beneficial effect of 16-byte aligning the IP header.
313 IIId. Synchronization
315 The driver runs as two independent, single-threaded flows of control. One
316 is the send-packet routine, which enforces single-threaded use by the
317 dev->priv->lock spinlock. The other thread is the interrupt handler, which
318 is single threaded by the hardware and interrupt handling software.
320 The send packet thread has partial control over the Tx ring. It locks the
321 dev->priv->lock whenever it's queuing a Tx packet. If the next slot in the ring
322 is not available it stops the transmit queue by calling netif_stop_queue.
324 The interrupt handler has exclusive control over the Rx ring and records stats
325 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
326 empty by incrementing the dirty_tx mark. If at least half of the entries in
327 the Rx ring are available the transmit queue is woken up if it was stopped.
333 Preliminary VT86C100A manual from http://www.via.com.tw/
334 http://www.scyld.com/expert/100mbps.html
335 http://www.scyld.com/expert/NWay.html
336 ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
337 ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF
342 The VT86C100A manual is not reliable information.
343 The 3043 chip does not handle unaligned transmit or receive buffers, resulting
344 in significant performance degradation for bounce buffer copies on transmit
345 and unaligned IP headers on receive.
346 The chip does not pad to minimum transmit length.
351 /* This table drives the PCI probe routines. It's mostly boilerplate in all
352 of the drivers, and will likely be provided by some future kernel.
Note the matching code -- the first table entry matches all 56** cards but
354 second only the 1234 card.
358 PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
359 PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3,
369 struct rhine_chip_info {
377 enum chip_capability_flags {
378 CanHaveMII=1, HasESIPhy=2, HasDavicomPhy=4,
379 ReqTxAlign=0x10, HasWOL=0x20,
383 #define RHINE_IOTYPE (PCI_USES_MEM | PCI_USES_MASTER | PCI_ADDR1)
385 #define RHINE_IOTYPE (PCI_USES_IO | PCI_USES_MASTER | PCI_ADDR0)
387 /* Beware of PCI posted writes */
388 #define IOSYNC do { readb(dev->base_addr + StationAddr); } while (0)
390 /* directly indexed by enum rhine_chips, above */
391 static struct rhine_chip_info rhine_chip_info[] __devinitdata =
393 { "VIA VT86C100A Rhine", RHINE_IOTYPE, 128,
394 CanHaveMII | ReqTxAlign | HasDavicomPhy },
395 { "VIA VT6102 Rhine-II", RHINE_IOTYPE, 256,
396 CanHaveMII | HasWOL },
397 { "VIA VT6105 Rhine-III", RHINE_IOTYPE, 256,
398 CanHaveMII | HasWOL },
399 { "VIA VT6105M Rhine-III", RHINE_IOTYPE, 256,
400 CanHaveMII | HasWOL },
403 static struct pci_device_id rhine_pci_tbl[] =
405 {0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VT86C100A},
406 {0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VT6102},
407 {0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VT6105}, /* 6105{,L,LOM} */
408 {0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VT6105M},
409 {0,} /* terminate list */
411 MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
414 /* Offsets to the device registers. */
415 enum register_offsets {
416 StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
417 IntrStatus=0x0C, IntrEnable=0x0E,
418 MulticastFilter0=0x10, MulticastFilter1=0x14,
419 RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
420 MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E,
421 MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
422 ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
423 RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
424 StickyHW=0x83, IntrStatus2=0x84, WOLcrClr=0xA4, WOLcgClr=0xA7,
428 /* Bits in ConfigD */
430 BackOptional=0x01, BackModify=0x02,
431 BackCaptureEffect=0x04, BackRandom=0x08
435 /* Registers we check that mmio and reg are the same. */
436 int mmio_verify_registers[] = {
437 RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
442 /* Bits in the interrupt status/mask registers. */
443 enum intr_status_bits {
444 IntrRxDone=0x0001, IntrRxErr=0x0004, IntrRxEmpty=0x0020,
445 IntrTxDone=0x0002, IntrTxError=0x0008, IntrTxUnderrun=0x0210,
447 IntrStatsMax=0x0080, IntrRxEarly=0x0100,
448 IntrRxOverflow=0x0400, IntrRxDropped=0x0800, IntrRxNoBuf=0x1000,
449 IntrTxAborted=0x2000, IntrLinkChange=0x4000,
451 IntrNormalSummary=0x0003, IntrAbnormalSummary=0xC260,
452 IntrTxDescRace=0x080000, /* mapped from IntrStatus2 */
453 IntrTxErrSummary=0x082218,
456 /* The Rx and Tx buffer descriptors. */
459 u32 desc_length; /* Chain flag, Buffer/frame length */
465 u32 desc_length; /* Chain flag, Tx Config, Frame length */
470 /* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
471 #define TXDESC 0x00e08000
473 enum rx_status_bits {
474 RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
477 /* Bits in *_desc.*_status */
478 enum desc_status_bits {
482 /* Bits in ChipCmd. */
484 CmdInit=0x0001, CmdStart=0x0002, CmdStop=0x0004, CmdRxOn=0x0008,
485 CmdTxOn=0x0010, CmdTxDemand=0x0020, CmdRxDemand=0x0040,
486 CmdEarlyRx=0x0100, CmdEarlyTx=0x0200, CmdFDuplex=0x0400,
487 CmdNoTxPoll=0x0800, CmdReset=0x8000,
490 #define MAX_MII_CNT 4
/* Per-device driver state, stored in netdev_priv(dev). */
struct rhine_private {
	/* Descriptor rings */
	struct rx_desc *rx_ring;
	struct tx_desc *tx_ring;
	dma_addr_t rx_ring_dma;		/* bus address of rx_ring */
	dma_addr_t tx_ring_dma;		/* bus address of tx_ring */

	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	dma_addr_t rx_skbuff_dma[RX_RING_SIZE];

	/* The saved address of a sent-in-place packet/buffer, for later free(). */
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_skbuff_dma[TX_RING_SIZE];

	/* Tx bounce buffers (only allocated for ReqTxAlign chips) */
	unsigned char *tx_buf[TX_RING_SIZE];	/* per-entry slice of tx_bufs */
	unsigned char *tx_bufs;			/* single PKT_BUF_SZ * TX_RING_SIZE region */
	dma_addr_t tx_bufs_dma;

	struct pci_dev *pdev;
	struct net_device_stats stats;
	struct timer_list timer;	/* Media monitoring timer. */

	/* Frequently used values: keep some adjacent for cache effect. */
	int chip_id, drv_flags;
	struct rx_desc *rx_head_desc;
	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
	unsigned int cur_tx, dirty_tx;
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
	u16 chip_cmd;			/* Current setting for ChipCmd */

	/* These values track the transceiver/media in use. */
	unsigned int default_port:4;	/* Last dev->if_port value. */
	u8 tx_thresh, rx_thresh;	/* current Tx/Rx FIFO thresholds */

	/* MII transceiver section. */
	unsigned char phys[MAX_MII_CNT];	/* MII device addresses. */
	unsigned int mii_cnt;	/* number of MIIs found, but only the first one is used */
	u16 mii_status;		/* last read MII status */
	struct mii_if_info mii_if;
535 static int mdio_read(struct net_device *dev, int phy_id, int location);
536 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
537 static int rhine_open(struct net_device *dev);
538 static void rhine_check_duplex(struct net_device *dev);
539 static void rhine_timer(unsigned long data);
540 static void rhine_tx_timeout(struct net_device *dev);
541 static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev);
542 static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
543 static void rhine_tx(struct net_device *dev);
544 static void rhine_rx(struct net_device *dev);
545 static void rhine_error(struct net_device *dev, int intr_status);
546 static void rhine_set_rx_mode(struct net_device *dev);
547 static struct net_device_stats *rhine_get_stats(struct net_device *dev);
548 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
549 static struct ethtool_ops netdev_ethtool_ops;
550 static int rhine_close(struct net_device *dev);
/*
 * Gather the chip's interrupt status into one 32-bit word. On the
 * VT6102 (Rhine-II), the extra IntrStatus2 byte is shifted into bits
 * 16+ of the result (see IntrTxDescRace, "mapped from IntrStatus2").
 */
static inline u32 get_intr_status(struct net_device *dev)
	long ioaddr = dev->base_addr;
	struct rhine_private *rp = netdev_priv(dev);

	intr_status = readw(ioaddr + IntrStatus);
	/* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
	if (rp->chip_id == VT6102)
		intr_status |= readb(ioaddr + IntrStatus2) << 16;
/*
 * Wait for the CmdReset bit in ChipCmd to self-clear after a reset.
 * If reset has not completed on the first check, the VT6102 (Rhine-II)
 * is additionally forced via MiscCmd, then the chip is polled in a
 * loop bounded by boguscnt. Logs whether the reset succeeded.
 * chip_id is passed explicitly because the private struct may not be
 * initialized yet on the first call (see changelog above).
 */
static void wait_for_reset(struct net_device *dev, int chip_id, char *name)
	long ioaddr = dev->base_addr;

	if (readw(ioaddr + ChipCmd) & CmdReset) {
		printk(KERN_INFO "%s: Reset not complete yet. "
		       "Trying harder.\n", name);

		/* Rhine-II needs to be forced sometimes */
		if (chip_id == VT6102)
			writeb(0x40, ioaddr + MiscCmd);

		/* VT86C100A may need long delay after reset (dlink) */
		/* Seen on Rhine-II as well (rl) */
		while ((readw(ioaddr + ChipCmd) & CmdReset) && --boguscnt)

	printk(KERN_INFO "%s: Reset %s.\n", name,
	       boguscnt ? "succeeded" : "failed");
/*
 * Enable memory-mapped register access, using port I/O (the access
 * method that works before MMIO is on). The VT86C100A sets ConfigA
 * bit 0x20; the ConfigD|0x80 write presumably serves the later chips
 * (the branch structure is not fully shown here -- confirm).
 */
static void __devinit enable_mmio(long ioaddr, int chip_id)
	if (chip_id == VT86C100A) {
		/* More recent docs say that this bit is reserved ... */
		n = inb(ioaddr + ConfigA) | 0x20;
		outb(n, ioaddr + ConfigA);
		n = inb(ioaddr + ConfigD) | 0x80;
		outb(n, ioaddr + ConfigD);
/*
 * Trigger an EEPROM autoload (MACRegEEcsr bit 0x20) and poll, bounded,
 * until the bit clears. Uses port I/O: per the comment in the probe
 * routine, reloading the EEPROM overwrites the ConfigA-D bits that
 * enable MMIO, so PIO is the safe access method here.
 */
static void __devinit reload_eeprom(long ioaddr)
	outb(0x20, ioaddr + MACRegEEcsr);
	/* Typically 2 cycles to reload. */
	for (i = 0; i < 150; i++)
		if (! (inb(ioaddr + MACRegEEcsr) & 0x20))
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' for netpoll/netconsole: invoke the normal
 * interrupt handler with this device's IRQ line masked off.
 */
static void rhine_poll(struct net_device *dev)
	disable_irq(dev->irq);
	rhine_interrupt(dev->irq, (void *)dev, NULL);
	enable_irq(dev->irq);
/*
 * PCI probe routine: one-time setup of a single Rhine device.
 * Enables the PCI device, validates and maps the register BARs (PIO
 * or MMIO), resets the chip, reloads and reads the station address
 * from the EEPROM, fills in the net_device methods, registers the
 * device and scans for MII transceivers.
 */
static int __devinit rhine_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
	struct net_device *dev;
	struct rhine_private *rp;
	int chip_id = (int) ent->driver_data;
	static int card_idx = -1;	/* presumably incremented per probe -- increment not visible here */

	/* when built into the kernel, we only print version if device is found */
	static int printed_version;
	if (!printed_version++)

	option = card_idx < MAX_UNITS ? options[card_idx] : 0;
	io_size = rhine_chip_info[chip_id].io_size;
	pci_flags = rhine_chip_info[chip_id].pci_flags;

	if (pci_enable_device(pdev))

	/* this should always be supported */
	if (pci_set_dma_mask(pdev, 0xffffffff)) {
		printk(KERN_ERR "32-bit PCI DMA addresses not supported by "

	/* Both BARs must be large enough for the chip's register window. */
	if ((pci_resource_len(pdev, 0) < io_size) ||
	    (pci_resource_len(pdev, 1) < io_size)) {
		printk(KERN_ERR "Insufficient PCI resources, aborting\n");

	ioaddr = pci_resource_start(pdev, 0);	/* BAR 0: port I/O */
	memaddr = pci_resource_start(pdev, 1);	/* BAR 1: memory space */

	if (pci_flags & PCI_USES_MASTER)
		pci_set_master(pdev);

	dev = alloc_etherdev(sizeof(*rp));
		printk(KERN_ERR "init_ethernet failed for card #%d\n",
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	if (pci_request_regions(pdev, shortname))
		goto err_out_free_netdev;

	/* MMIO access must first be enabled through the PIO registers. */
	enable_mmio(ioaddr0, chip_id);

	ioaddr = (long) ioremap(memaddr, io_size);
		printk(KERN_ERR "ioremap failed for device %s, region 0x%X "
		       "@ 0x%lX\n", pci_name(pdev), io_size, memaddr);
		goto err_out_free_res;

	/* Check that selected MMIO registers match the PIO ones */
	while (mmio_verify_registers[i]) {
		int reg = mmio_verify_registers[i++];
		unsigned char a = inb(ioaddr0+reg);
		unsigned char b = readb(ioaddr+reg);
			printk(KERN_ERR "MMIO do not match PIO [%02x] "
			       "(%02x != %02x)\n", reg, a, b);
#endif /* USE_MMIO */

	/* D-Link provided reset code (with comment additions) */
	if (rhine_chip_info[chip_id].drv_flags & HasWOL) {
		unsigned char byOrgValue;

		/* clear sticky bit before reset & read ethernet address */
		byOrgValue = readb(ioaddr + StickyHW);
		byOrgValue = byOrgValue & 0xFC;
		writeb(byOrgValue, ioaddr + StickyHW);

		/* (bits written are cleared?) */
		/* disable force PME-enable */
		writeb(0x80, ioaddr + WOLcgClr);
		/* disable power-event config bit */
		writeb(0xFF, ioaddr + WOLcrClr);
		/* clear power status (undocumented in vt6102 docs?) */
		writeb(0xFF, ioaddr + PwrcsrClr);

	/* Reset the chip to erase previous misconfiguration. */
	writew(CmdReset, ioaddr + ChipCmd);

	/* base_addr must be set before wait_for_reset() reads registers */
	dev->base_addr = ioaddr;
	wait_for_reset(dev, chip_id, shortname);

	/* Reload the station address from the EEPROM. */
	reload_eeprom(ioaddr0);
	/* Reloading from eeprom overwrites cfgA-D, so we must re-enable MMIO.
	   If reload_eeprom() was done first this could be avoided, but it is
	   not known if that still works with the "win98-reboot" problem. */
	enable_mmio(ioaddr0, chip_id);
	reload_eeprom(ioaddr);

	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = readb(ioaddr + StationAddr + i);

	if (!is_valid_ether_addr(dev->dev_addr)) {
		printk(KERN_ERR "Invalid MAC address for card #%d\n", card_idx);

	if (chip_id == VT6102) {
		/*
		 * for 3065D, EEPROM reloaded will cause bit 0 in MAC_REG_CFGA
		 * turned on. it makes MAC receive magic packet
		 * automatically. So, we turn it off. (D-Link)
		 */
		writeb(readb(ioaddr + ConfigA) & 0xFE, ioaddr + ConfigA);

	/* Select backoff algorithm */
		writeb(readb(ioaddr + ConfigD) & (0xF0 | backoff),

	dev->irq = pdev->irq;

	rp = netdev_priv(dev);
	spin_lock_init(&rp->lock);
	rp->chip_id = chip_id;
	rp->drv_flags = rhine_chip_info[chip_id].drv_flags;
	rp->mii_if.dev = dev;
	rp->mii_if.mdio_read = mdio_read;
	rp->mii_if.mdio_write = mdio_write;
	rp->mii_if.phy_id_mask = 0x1f;
	rp->mii_if.reg_num_mask = 0x1f;

	option = dev->mem_start;

	/* The chip-specific entries in the device structure. */
	dev->open = rhine_open;
	dev->hard_start_xmit = rhine_start_tx;
	dev->stop = rhine_close;
	dev->get_stats = rhine_get_stats;
	dev->set_multicast_list = rhine_set_rx_mode;
	dev->do_ioctl = netdev_ioctl;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->tx_timeout = rhine_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = rhine_poll;
	/* ReqTxAlign chips copy into bounce buffers anyway, so SG/csum
	   offload can be advertised to the stack for free. */
	if (rp->drv_flags & ReqTxAlign)
		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;

	/* dev->name not defined before register_netdev()! */
	i = register_netdev(dev);

	/* The lower four bits are the media type. */
		rp->mii_if.full_duplex = 1;
		rp->default_port = option & 15;
	if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
		rp->mii_if.full_duplex = 1;

	if (rp->mii_if.full_duplex) {
		printk(KERN_INFO "%s: Set to forced full duplex, "
		       "autonegotiation disabled.\n", dev->name);
		rp->mii_if.force_media = 1;

	printk(KERN_INFO "%s: %s at 0x%lx, ",
	       dev->name, rhine_chip_info[chip_id].name,
	       (pci_flags & PCI_USES_IO) ? ioaddr : memaddr);

	for (i = 0; i < 5; i++)
		printk("%2.2x:", dev->dev_addr[i]);
	printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], pdev->irq);

	pci_set_drvdata(pdev, dev);

	if (rp->drv_flags & CanHaveMII) {
		int phy, phy_idx = 0;
		rp->phys[0] = 1;	/* Standard for this chip. */
		for (phy = 1; phy < 32 && phy_idx < MAX_MII_CNT; phy++) {
			int mii_status = mdio_read(dev, phy, 1);
			if (mii_status != 0xffff && mii_status != 0x0000) {
				rp->phys[phy_idx++] = phy;
				rp->mii_if.advertising = mdio_read(dev, phy, 4);
				printk(KERN_INFO "%s: MII PHY found at address "
				       "%d, status 0x%4.4x advertising %4.4x "
				       "Link %4.4x.\n", dev->name, phy,
				       mii_status, rp->mii_if.advertising,
				       mdio_read(dev, phy, 5));

				/* set IFF_RUNNING */
				if (mii_status & BMSR_LSTATUS)
					netif_carrier_on(dev);
					netif_carrier_off(dev);
		rp->mii_cnt = phy_idx;
		rp->mii_if.phy_id = rp->phys[0];

	/* Allow forcing the media type. */
		rp->mii_if.full_duplex = 1;
		rp->default_port = option & 0x3ff;
		if (option & 0x330) {
			/* FIXME: shouldn't someone check this variable? */
			/* rp->medialock = 1; */
			printk(KERN_INFO " Forcing %dMbs %s-duplex "
			       (option & 0x300 ? 100 : 10),
			       (option & 0x220 ? "full" : "half"));
			mdio_write(dev, rp->phys[0], MII_BMCR,
				   ((option & 0x300) ? 0x2000 : 0) |	/* 100mbps? */
				   ((option & 0x220) ? 0x0100 : 0));	/* Full duplex? */

	iounmap((void *)ioaddr);
	pci_release_regions(pdev);
/*
 * Allocate DMA-coherent memory for the Rx and Tx descriptor rings
 * (one region, Rx descriptors first) and, for ReqTxAlign chips, a
 * second region of TX_RING_SIZE bounce buffers of PKT_BUF_SZ each.
 */
static int alloc_ring(struct net_device* dev)
	struct rhine_private *rp = netdev_priv(dev);

	/* One allocation covers both rings. */
	ring = pci_alloc_consistent(rp->pdev,
				    RX_RING_SIZE * sizeof(struct rx_desc) +
				    TX_RING_SIZE * sizeof(struct tx_desc),
		printk(KERN_ERR "Could not allocate DMA memory.\n");

	if (rp->drv_flags & ReqTxAlign) {
		rp->tx_bufs = pci_alloc_consistent(rp->pdev,
						   PKT_BUF_SZ * TX_RING_SIZE,
		if (rp->tx_bufs == NULL) {
			/* Undo the ring allocation on bounce-buffer failure. */
			pci_free_consistent(rp->pdev,
					    RX_RING_SIZE * sizeof(struct rx_desc) +
					    TX_RING_SIZE * sizeof(struct tx_desc),

	/* Tx ring (and its bus address) follows the Rx ring in the region. */
	rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
	rp->rx_ring_dma = ring_dma;
	rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
/*
 * Free the coherent descriptor-ring region allocated by alloc_ring().
 * The Tx bounce-buffer region is presumably freed only when it was
 * allocated (the guard is not visible in this excerpt -- confirm).
 */
void free_ring(struct net_device* dev)
	struct rhine_private *rp = netdev_priv(dev);

	pci_free_consistent(rp->pdev,
			    RX_RING_SIZE * sizeof(struct rx_desc) +
			    TX_RING_SIZE * sizeof(struct tx_desc),
			    rp->rx_ring, rp->rx_ring_dma);

	pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
			    rp->tx_bufs, rp->tx_bufs_dma);
/*
 * Initialize the Rx descriptor ring and attach a fresh skbuff to each
 * entry. Descriptors are chained via next_desc, with the final entry
 * wrapping back to the ring base. Allocation failure is tolerated;
 * dirty_rx records how far the ring was actually filled.
 */
static void alloc_rbufs(struct net_device *dev)
	struct rhine_private *rp = netdev_priv(dev);

	rp->dirty_rx = rp->cur_rx = 0;

	/* Standard MTU fits PKT_BUF_SZ; larger MTUs get mtu + slack. */
	rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
	rp->rx_head_desc = &rp->rx_ring[0];
	next = rp->rx_ring_dma;

	/* Init the ring entries */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rp->rx_ring[i].rx_status = 0;
		rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
		next += sizeof(struct rx_desc);
		rp->rx_ring[i].next_desc = cpu_to_le32(next);
		rp->rx_skbuff[i] = 0;
	/* Mark the last entry as wrapping the ring. */
	rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);

	/* Fill in the Rx buffers. Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(rp->rx_buf_sz);
		rp->rx_skbuff[i] = skb;
		skb->dev = dev;	/* Mark as being used by this device. */

		rp->rx_skbuff_dma[i] =
			pci_map_single(rp->pdev, skb->tail, rp->rx_buf_sz,

		rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
		/* Setting DescOwn hands this descriptor to the chip. */
		rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
	rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
/*
 * Unmap and free every Rx skbuff, clearing the descriptor status and
 * poisoning the DMA address so a stale pointer cannot be reused.
 */
static void free_rbufs(struct net_device* dev)
	struct rhine_private *rp = netdev_priv(dev);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rp->rx_ring[i].rx_status = 0;
		rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0);	/* An invalid address. */
		if (rp->rx_skbuff[i]) {
			pci_unmap_single(rp->pdev,
					 rp->rx_skbuff_dma[i],
					 rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(rp->rx_skbuff[i]);
		rp->rx_skbuff[i] = 0;
/*
 * Initialize the Tx descriptor ring: clear statuses, preset each
 * desc_length to TXDESC, chain the descriptors (last wraps back to the
 * ring base) and point each tx_buf[] entry at its PKT_BUF_SZ slice of
 * the bounce-buffer region.
 */
static void alloc_tbufs(struct net_device* dev)
	struct rhine_private *rp = netdev_priv(dev);

	rp->dirty_tx = rp->cur_tx = 0;
	next = rp->tx_ring_dma;
	for (i = 0; i < TX_RING_SIZE; i++) {
		rp->tx_skbuff[i] = 0;
		rp->tx_ring[i].tx_status = 0;
		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
		next += sizeof(struct tx_desc);
		rp->tx_ring[i].next_desc = cpu_to_le32(next);
		/* NOTE(review): meaningful only when tx_bufs was allocated
		   (ReqTxAlign chips) -- confirm against alloc_ring(). */
		rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
	rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
/*
 * Reset every Tx descriptor and release any skbuff still attached.
 * Entries with a zero tx_skbuff_dma[] are not unmapped -- presumably
 * those were sent from the bounce buffer rather than mapped directly;
 * confirm against the transmit path.
 */
static void free_tbufs(struct net_device* dev)
	struct rhine_private *rp = netdev_priv(dev);

	for (i = 0; i < TX_RING_SIZE; i++) {
		rp->tx_ring[i].tx_status = 0;
		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
		rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0);	/* An invalid address. */
		if (rp->tx_skbuff[i]) {
			if (rp->tx_skbuff_dma[i]) {
				pci_unmap_single(rp->pdev,
						 rp->tx_skbuff_dma[i],
						 rp->tx_skbuff[i]->len,
			dev_kfree_skb(rp->tx_skbuff[i]);
		rp->tx_skbuff[i] = 0;
1049 static void init_registers(struct net_device *dev)
1051 struct rhine_private *rp = netdev_priv(dev);
1052 long ioaddr = dev->base_addr;
1055 for (i = 0; i < 6; i++)
1056 writeb(dev->dev_addr[i], ioaddr + StationAddr + i);
1058 /* Initialize other registers. */
1059 writew(0x0006, ioaddr + PCIBusConfig); /* Tune configuration??? */
1060 /* Configure initial FIFO thresholds. */
1061 writeb(0x20, ioaddr + TxConfig);
1062 rp->tx_thresh = 0x20;
1063 rp->rx_thresh = 0x60; /* Written in rhine_set_rx_mode(). */
1064 rp->mii_if.full_duplex = 0;
1066 if (dev->if_port == 0)
1067 dev->if_port = rp->default_port;
1069 writel(rp->rx_ring_dma, ioaddr + RxRingPtr);
1070 writel(rp->tx_ring_dma, ioaddr + TxRingPtr);
1072 rhine_set_rx_mode(dev);
1074 /* Enable interrupts by setting the interrupt mask. */
1075 writew(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
1076 IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
1077 IntrTxDone | IntrTxError | IntrTxUnderrun |
1078 IntrPCIErr | IntrStatsMax | IntrLinkChange,
1079 ioaddr + IntrEnable);
1081 rp->chip_cmd = CmdStart|CmdTxOn|CmdRxOn|CmdNoTxPoll;
1082 if (rp->mii_if.force_media)
1083 rp->chip_cmd |= CmdFDuplex;
1084 writew(rp->chip_cmd, ioaddr + ChipCmd);
1086 rhine_check_duplex(dev);
1088 /* The LED outputs of various MII xcvrs should be configured. */
1089 /* For NS or Mison phys, turn on bit 1 in register 0x17 */
1090 /* For ESI phys, turn on bit 7 in register 0x17. */
1091 mdio_write(dev, rp->phys[0], 0x17, mdio_read(dev, rp->phys[0], 0x17) |
1092 (rp->drv_flags & HasESIPhy) ? 0x0080 : 0x0001);
/* Read and write over the MII Management Data I/O (MDIO) interface. */

/*
 * Read one 16-bit PHY register. Busy-waits (bounded by boguscnt) for
 * any previous MDIO command to finish, programs the PHY and register
 * addresses, triggers the read (MIICmd bit 0x40) and waits for it to
 * complete before returning the MIIData word.
 */
static int mdio_read(struct net_device *dev, int phy_id, int regnum)
	long ioaddr = dev->base_addr;
	int boguscnt = 1024;

	/* Wait for a previous command to complete. */
	while ((readb(ioaddr + MIICmd) & 0x60) && --boguscnt > 0)

	writeb(0x00, ioaddr + MIICmd);
	writeb(phy_id, ioaddr + MIIPhyAddr);
	writeb(regnum, ioaddr + MIIRegAddr);
	writeb(0x40, ioaddr + MIICmd);	/* Trigger read */

	while ((readb(ioaddr + MIICmd) & 0x40) && --boguscnt > 0)

	return readw(ioaddr + MIIData);
/*
 * mdio_write - write one 16-bit PHY register via the chip's MDIO engine.
 * For the primary PHY (rp->phys[0]) it first mirrors user-forced
 * BMCR/advertising values into rp->mii_if so the driver's own duplex
 * bookkeeping stays in sync with what is programmed into the PHY.
 * Like mdio_read(), completion is busy-waited with a bounded counter.
 */
1115 static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
1117 struct rhine_private *rp = netdev_priv(dev);
1118 long ioaddr = dev->base_addr;
1119 int boguscnt = 1024;
1121 if (phy_id == rp->phys[0]) {
1123 case MII_BMCR: /* Is user forcing speed/duplex? */
/* 0x9000 = BMCR reset or autoneg-enable bits - either way autonegotiation takes over */
1124 if (value & 0x9000) /* Autonegotiation. */
1125 rp->mii_if.force_media = 0;
1127 rp->mii_if.full_duplex = (value & 0x0100) ? 1 : 0;
1130 rp->mii_if.advertising = value;
1135 /* Wait for a previous command to complete. */
1136 while ((readb(ioaddr + MIICmd) & 0x60) && --boguscnt > 0)
1138 writeb(0x00, ioaddr + MIICmd);
1139 writeb(phy_id, ioaddr + MIIPhyAddr);
1140 writeb(regnum, ioaddr + MIIRegAddr);
1141 writew(value, ioaddr + MIIData);
1142 writeb(0x20, ioaddr + MIICmd); /* Trigger write. */
/*
 * rhine_open - bring the interface up (net_device open method).
 * Resets the chip, requests the (shared) IRQ, allocates the Rx/Tx
 * descriptor rings, reprograms all chip registers, starts the Tx
 * queue and arms the periodic link-monitor timer.
 */
1146 static int rhine_open(struct net_device *dev)
1148 struct rhine_private *rp = netdev_priv(dev);
1149 long ioaddr = dev->base_addr;
1152 /* Reset the chip. */
1153 writew(CmdReset, ioaddr + ChipCmd);
1155 i = request_irq(rp->pdev->irq, &rhine_interrupt, SA_SHIRQ, dev->name,
1161 printk(KERN_DEBUG "%s: rhine_open() irq %d.\n",
1162 dev->name, rp->pdev->irq);
1164 i = alloc_ring(dev);
1169 wait_for_reset(dev, rp->chip_id, dev->name);
1170 init_registers(dev);
1172 printk(KERN_DEBUG "%s: Done rhine_open(), status %4.4x "
1173 "MII status: %4.4x.\n",
1174 dev->name, readw(ioaddr + ChipCmd),
1175 mdio_read(dev, rp->phys[0], MII_BMSR));
1177 netif_start_queue(dev);
1179 /* Set the timer to check for link beat. */
1180 init_timer(&rp->timer);
/* first tick after 2*HZ/100 jiffies (~20 ms); rhine_timer rearms itself */
1181 rp->timer.expires = jiffies + 2 * HZ/100;
1182 rp->timer.data = (unsigned long)dev;
1183 rp->timer.function = &rhine_timer; /* timer handler */
1184 add_timer(&rp->timer);
/*
 * rhine_check_duplex - derive the duplex mode from MII autonegotiation
 * and program the chip's CmdFDuplex bit accordingly.  Skipped when the
 * user forces the media type or when the link partner register reads
 * as all-ones (no PHY response).
 */
1189 static void rhine_check_duplex(struct net_device *dev)
1191 struct rhine_private *rp = netdev_priv(dev);
1192 long ioaddr = dev->base_addr;
1193 int mii_lpa = mdio_read(dev, rp->phys[0], MII_LPA);
1194 int negotiated = mii_lpa & rp->mii_if.advertising;
1197 if (rp->mii_if.force_media || mii_lpa == 0xffff)
/* full duplex if 100baseTx-FD negotiated, or 10baseT-FD with neither 100 mode set */
1199 duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
1200 if (rp->mii_if.full_duplex != duplex) {
1201 rp->mii_if.full_duplex = duplex;
1203 printk(KERN_INFO "%s: Setting %s-duplex based on "
1204 "MII #%d link partner capability of %4.4x.\n",
1205 dev->name, duplex ? "full" : "half",
1206 rp->phys[0], mii_lpa);
1208 rp->chip_cmd |= CmdFDuplex;
1210 rp->chip_cmd &= ~CmdFDuplex;
1211 writew(rp->chip_cmd, ioaddr + ChipCmd);
/*
 * rhine_timer - periodic link monitor (runs every 10 seconds).
 * Re-checks duplex and mirrors the PHY's "link established" bit into
 * the netif carrier state so IFF_RUNNING tracks the real link.
 */
1216 static void rhine_timer(unsigned long data)
1218 struct net_device *dev = (struct net_device *)data;
1219 struct rhine_private *rp = netdev_priv(dev);
1220 long ioaddr = dev->base_addr;
1221 int next_tick = 10*HZ;
1225 printk(KERN_DEBUG "%s: VIA Rhine monitor tick, status %4.4x.\n",
1226 dev->name, readw(ioaddr + IntrStatus));
/* irq-disabling lock: rp state is also touched from the interrupt handler */
1229 spin_lock_irq (&rp->lock);
1231 rhine_check_duplex(dev);
1233 /* make IFF_RUNNING follow the MII status bit "Link established" */
1234 mii_status = mdio_read(dev, rp->phys[0], MII_BMSR);
1235 if ((mii_status & BMSR_LSTATUS) != (rp->mii_status & BMSR_LSTATUS)) {
1236 if (mii_status & BMSR_LSTATUS)
1237 netif_carrier_on(dev);
1239 netif_carrier_off(dev);
1241 rp->mii_status = mii_status;
1243 spin_unlock_irq(&rp->lock);
1245 rp->timer.expires = jiffies + next_tick;
1246 add_timer(&rp->timer);
/*
 * rhine_tx_timeout - watchdog callback when transmission stalls.
 * Performs a full chip reset, reinitializes the descriptor rings and
 * registers, then wakes the queue.  The IRQ is disabled around the
 * reset so the interrupt handler cannot race with the ring rebuild.
 */
1250 static void rhine_tx_timeout(struct net_device *dev)
1252 struct rhine_private *rp = netdev_priv(dev);
1253 long ioaddr = dev->base_addr;
1255 printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
1256 "%4.4x, resetting...\n",
1257 dev->name, readw(ioaddr + IntrStatus),
1258 mdio_read(dev, rp->phys[0], MII_BMSR));
1262 /* protect against concurrent rx interrupts */
1263 disable_irq(rp->pdev->irq);
1265 spin_lock(&rp->lock);
1267 /* Reset the chip. */
1268 writew(CmdReset, ioaddr + ChipCmd);
1270 /* clear all descriptors */
1276 /* Reinitialize the hardware. */
1277 wait_for_reset(dev, rp->chip_id, dev->name);
1278 init_registers(dev);
1280 spin_unlock(&rp->lock);
1281 enable_irq(rp->pdev->irq);
1283 dev->trans_start = jiffies;
1284 rp->stats.tx_errors++;
1285 netif_wake_queue(dev);
/*
 * rhine_start_tx - queue one skb for transmission (hard_start_xmit).
 * Pads short frames to ETH_ZLEN, maps the buffer for DMA (or copies it
 * into a pre-allocated aligned bounce buffer for chips that require
 * dword-aligned Tx data), fills in the descriptor and finally hands
 * ownership to the chip.  The DescOwn write must come last - once set,
 * the hardware may consume the descriptor at any moment.
 */
1288 static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
1290 struct rhine_private *rp = netdev_priv(dev);
1294 /* Caution: the write order is important here, set the field
1295 with the "ownership" bits last. */
1297 /* Calculate the next Tx descriptor entry. */
1298 entry = rp->cur_tx % TX_RING_SIZE;
1300 if (skb->len < ETH_ZLEN) {
1301 skb = skb_padto(skb, ETH_ZLEN);
1306 rp->tx_skbuff[entry] = skb;
/* bounce-buffer path: unaligned data, fragments or pending checksum */
1308 if ((rp->drv_flags & ReqTxAlign) &&
1309 (((long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_HW)) {
1310 /* Must use alignment buffer. */
1311 if (skb->len > PKT_BUF_SZ) {
1312 /* packet too long, drop it */
1314 rp->tx_skbuff[entry] = NULL;
1315 rp->stats.tx_dropped++;
1318 skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
/* dma handle 0 marks "no pci_unmap needed" for the scavenger in rhine_tx() */
1319 rp->tx_skbuff_dma[entry] = 0;
1320 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
1321 (rp->tx_buf[entry] -
1324 rp->tx_skbuff_dma[entry] =
1325 pci_map_single(rp->pdev, skb->data, skb->len,
1327 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
1330 rp->tx_ring[entry].desc_length =
1331 cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
1334 spin_lock_irq(&rp->lock);
/* hand the descriptor to the hardware - nothing above may be reordered past this */
1336 rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1341 /* Non-x86 Todo: explicitly flush cache lines here. */
1344 * Wake the potentially-idle transmit channel unless errors are
1345 * pending (the ISR must sort them out first).
1347 intr_status = get_intr_status(dev);
1348 if ((intr_status & IntrTxErrSummary) == 0) {
1349 writew(CmdTxDemand | rp->chip_cmd, dev->base_addr + ChipCmd);
1353 if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
1354 netif_stop_queue(dev);
1356 dev->trans_start = jiffies;
1358 spin_unlock_irq(&rp->lock);
1361 printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
1362 dev->name, rp->cur_tx-1, entry);
1367 /* The interrupt handler does all of the Rx thread work and cleans up
1368 after the Tx thread. */
/*
 * rhine_interrupt - shared-IRQ handler.
 * Loops while the chip reports pending events (acking them first),
 * dispatching Rx work, Tx scavenging and the abnormal-event handler.
 * The loop is bounded by max_interrupt_work to avoid livelock on a
 * screaming interrupt.
 */
1369 static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *rgs)
1371 struct net_device *dev = dev_instance;
1374 int boguscnt = max_interrupt_work;
1377 ioaddr = dev->base_addr;
1379 while ((intr_status = get_intr_status(dev))) {
1382 /* Acknowledge all of the current interrupt sources ASAP. */
1383 if (intr_status & IntrTxDescRace)
1384 writeb(0x08, ioaddr + IntrStatus2);
1385 writew(intr_status & 0xffff, ioaddr + IntrStatus);
1389 printk(KERN_DEBUG "%s: Interrupt, status %8.8x.\n",
1390 dev->name, intr_status);
1392 if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
1393 IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf))
1396 if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
1397 if (intr_status & IntrTxErrSummary) {
1399 /* Avoid scavenging before Tx engine turned off */
1400 while ((readw(ioaddr+ChipCmd) & CmdTxOn) && --cnt)
1402 if (debug > 2 && !cnt)
1403 printk(KERN_WARNING "%s: "
1404 "rhine_interrupt() Tx engine"
1405 "still on.\n", dev->name)
1410 /* Abnormal error summary/uncommon events handlers. */
1411 if (intr_status & (IntrPCIErr | IntrLinkChange |
1412 IntrStatsMax | IntrTxError | IntrTxAborted |
1413 IntrTxUnderrun | IntrTxDescRace))
1414 rhine_error(dev, intr_status);
1416 if (--boguscnt < 0) {
1417 printk(KERN_WARNING "%s: Too much work at interrupt, "
1419 dev->name, intr_status);
1425 printk(KERN_DEBUG "%s: exiting interrupt, status=%8.8x.\n",
1426 dev->name, readw(ioaddr + IntrStatus));
1427 return IRQ_RETVAL(handled);
1430 /* This routine is logically part of the interrupt handler, but isolated
/*
 * rhine_tx - scavenge completed Tx descriptors (called from the ISR).
 * Walks dirty_tx..cur_tx, skipping descriptors the chip still owns,
 * accounts errors/collisions, unmaps and frees the transmitted skbs,
 * and wakes the queue once enough ring slots are free again.
 */
1432 static void rhine_tx(struct net_device *dev)
1434 struct rhine_private *rp = netdev_priv(dev);
1435 int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
1437 spin_lock(&rp->lock);
1439 /* find and cleanup dirty tx descriptors */
1440 while (rp->dirty_tx != rp->cur_tx) {
1441 txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
1443 printk(KERN_DEBUG " Tx scavenge %d status %8.8x.\n",
1445 if (txstatus & DescOwn)
1447 if (txstatus & 0x8000) {
1449 printk(KERN_DEBUG "%s: Transmit error, "
1450 "Tx status %8.8x.\n",
1451 dev->name, txstatus);
1452 rp->stats.tx_errors++;
1453 if (txstatus & 0x0400) rp->stats.tx_carrier_errors++;
1454 if (txstatus & 0x0200) rp->stats.tx_window_errors++;
1455 if (txstatus & 0x0100) rp->stats.tx_aborted_errors++;
1456 if (txstatus & 0x0080) rp->stats.tx_heartbeat_errors++;
1457 if (((rp->chip_id == VT86C100A) && txstatus & 0x0002) ||
1458 (txstatus & 0x0800) || (txstatus & 0x1000)) {
1459 rp->stats.tx_fifo_errors++;
/* underflow: give the descriptor back to the chip and retry the same skb */
1460 rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1461 break; /* Keep the skb - we try again */
1463 /* Transmitter restarted in 'abnormal' handler. */
/* the two chip families report the collision count in different bit positions */
1465 if (rp->chip_id == VT86C100A)
1466 rp->stats.collisions += (txstatus >> 3) & 0x0F;
1468 rp->stats.collisions += txstatus & 0x0F;
1470 printk(KERN_DEBUG "collisions: %1.1x:%1.1x\n",
1471 (txstatus >> 3) & 0xF,
1473 rp->stats.tx_bytes += rp->tx_skbuff[entry]->len;
1474 rp->stats.tx_packets++;
1476 /* Free the original skb. */
/* dma handle is 0 when the aligned bounce buffer was used - nothing to unmap */
1477 if (rp->tx_skbuff_dma[entry]) {
1478 pci_unmap_single(rp->pdev,
1479 rp->tx_skbuff_dma[entry],
1480 rp->tx_skbuff[entry]->len,
1483 dev_kfree_skb_irq(rp->tx_skbuff[entry]);
1484 rp->tx_skbuff[entry] = NULL;
1485 entry = (++rp->dirty_tx) % TX_RING_SIZE;
1487 if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
1488 netif_wake_queue(dev);
1490 spin_unlock(&rp->lock);
1493 /* This routine is logically part of the interrupt handler, but isolated
1494 for clarity and better register allocation. */
/*
 * rhine_rx - receive-path work (called from the ISR).
 * Drains completed Rx descriptors: error frames are accounted, good
 * frames are passed up (small frames are copied into a fresh skb per
 * rx_copybreak, large ones hand over the pre-mapped ring skb), then
 * the ring is refilled with new buffers and the Rx engine restarted.
 */
1495 static void rhine_rx(struct net_device *dev)
1497 struct rhine_private *rp = netdev_priv(dev);
1498 int entry = rp->cur_rx % RX_RING_SIZE;
/* process at most one full ring's worth of descriptors per call */
1499 int boguscnt = rp->dirty_rx + RX_RING_SIZE - rp->cur_rx;
1502 printk(KERN_DEBUG "%s: rhine_rx(), entry %d status %8.8x.\n",
1504 le32_to_cpu(rp->rx_head_desc->rx_status));
1507 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1508 while (!(rp->rx_head_desc->rx_status & cpu_to_le32(DescOwn))) {
1509 struct rx_desc *desc = rp->rx_head_desc;
1510 u32 desc_status = le32_to_cpu(desc->rx_status);
/* frame length lives in the upper 16 bits of the status word */
1511 int data_size = desc_status >> 16;
1514 printk(KERN_DEBUG " rhine_rx() status is %8.8x.\n",
1518 if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
1519 if ((desc_status & RxWholePkt) != RxWholePkt) {
1520 printk(KERN_WARNING "%s: Oversized Ethernet "
1521 "frame spanned multiple buffers, entry "
1522 "%#x length %d status %8.8x!\n",
1523 dev->name, entry, data_size,
1525 printk(KERN_WARNING "%s: Oversized Ethernet "
1526 "frame %p vs %p.\n", dev->name,
1527 rp->rx_head_desc, &rp->rx_ring[entry]);
1528 rp->stats.rx_length_errors++;
1529 } else if (desc_status & RxErr) {
1530 /* There was a error. */
1532 printk(KERN_DEBUG " rhine_rx() Rx "
1533 "error was %8.8x.\n",
1535 rp->stats.rx_errors++;
1536 if (desc_status & 0x0030) rp->stats.rx_length_errors++;
1537 if (desc_status & 0x0048) rp->stats.rx_fifo_errors++;
1538 if (desc_status & 0x0004) rp->stats.rx_frame_errors++;
1539 if (desc_status & 0x0002) {
1540 /* this can also be updated outside the interrupt handler */
1541 spin_lock(&rp->lock);
1542 rp->stats.rx_crc_errors++;
1543 spin_unlock(&rp->lock);
1547 struct sk_buff *skb;
1548 /* Length should omit the CRC */
1549 int pkt_len = data_size - 4;
1551 /* Check if the packet is long enough to accept without
1552 copying to a minimally-sized skbuff. */
1553 if (pkt_len < rx_copybreak &&
1554 (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1556 skb_reserve(skb, 2); /* 16 byte align the IP header */
1557 pci_dma_sync_single_for_cpu(rp->pdev,
1558 rp->rx_skbuff_dma[entry],
1560 PCI_DMA_FROMDEVICE);
1562 /* *_IP_COPYSUM isn't defined anywhere and
1563 eth_copy_and_sum is memcpy for all archs so
1564 this is kind of pointless right now
1566 #if HAS_IP_COPYSUM /* Call copy + cksum if available. */
1567 eth_copy_and_sum(skb,
1568 rp->rx_skbuff[entry]->tail,
1570 skb_put(skb, pkt_len);
1572 memcpy(skb_put(skb, pkt_len),
1573 rp->rx_skbuff[entry]->tail, pkt_len);
/* copy path: ring skb is kept and handed back to the device */
1575 pci_dma_sync_single_for_device(rp->pdev,
1576 rp->rx_skbuff_dma[entry],
1578 PCI_DMA_FROMDEVICE);
/* no-copy path: pass the mapped ring skb up and unmap it */
1580 skb = rp->rx_skbuff[entry];
1582 printk(KERN_ERR "%s: Inconsistent Rx "
1583 "descriptor chain.\n",
1587 rp->rx_skbuff[entry] = NULL;
1588 skb_put(skb, pkt_len);
1589 pci_unmap_single(rp->pdev,
1590 rp->rx_skbuff_dma[entry],
1592 PCI_DMA_FROMDEVICE);
1594 skb->protocol = eth_type_trans(skb, dev);
1596 dev->last_rx = jiffies;
1597 rp->stats.rx_bytes += pkt_len;
1598 rp->stats.rx_packets++;
1600 entry = (++rp->cur_rx) % RX_RING_SIZE;
1601 rp->rx_head_desc = &rp->rx_ring[entry];
1604 /* Refill the Rx ring buffers. */
1605 for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
1606 struct sk_buff *skb;
1607 entry = rp->dirty_rx % RX_RING_SIZE;
1608 if (rp->rx_skbuff[entry] == NULL) {
1609 skb = dev_alloc_skb(rp->rx_buf_sz);
1610 rp->rx_skbuff[entry] = skb;
1612 break; /* Better luck next round. */
1613 skb->dev = dev; /* Mark as being used by this device. */
1614 rp->rx_skbuff_dma[entry] =
1615 pci_map_single(rp->pdev, skb->tail,
1617 PCI_DMA_FROMDEVICE);
1618 rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
1620 rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
1623 /* Pre-emptively restart Rx engine. */
1624 writew(readw(dev->base_addr + ChipCmd) | CmdRxOn | CmdRxDemand,
1625 dev->base_addr + ChipCmd);
1629 * Clears the "tally counters" for CRC errors and missed frames(?).
1630 * It has been reported that some chips need a write of 0 to clear
1631 * these, for others the counters are set to 1 when written to and
1632 * instead cleared when read. So we clear them both ways ...
/* Callers (rhine_error, rhine_get_stats) fold the counter values into
 * rp->stats *before* clearing, so nothing is lost here. */
1634 static inline void clear_tally_counters(const long ioaddr)
1636 writel(0, ioaddr + RxMissed);
1637 readw(ioaddr + RxCRCErrs);
1638 readw(ioaddr + RxMissed);
/*
 * rhine_restart_tx - resume transmission after an error stop.
 * Repoints the chip's Tx ring pointer at the first not-yet-scavenged
 * descriptor and issues a Tx demand - but only if no new Tx error has
 * been raised in the meantime (the ISR must handle that first).
 */
1641 static void rhine_restart_tx(struct net_device *dev) {
1642 struct rhine_private *rp = netdev_priv(dev);
1643 long ioaddr = dev->base_addr;
1644 int entry = rp->dirty_tx % TX_RING_SIZE;
1648 * If new errors occurred, we need to sort them out before doing Tx.
1649 * In that case the ISR will be back here RSN anyway.
1651 intr_status = get_intr_status(dev);
1653 if ((intr_status & IntrTxErrSummary) == 0) {
1655 /* We know better than the chip where it should continue. */
1656 writel(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
1657 ioaddr + TxRingPtr);
1659 writew(CmdTxDemand | rp->chip_cmd, ioaddr + ChipCmd);
1663 /* This should never happen */
1665 printk(KERN_WARNING "%s: rhine_restart_tx() "
1666 "Another error occured %8.8x.\n",
1667 dev->name, intr_status);
/*
 * rhine_error - handle the uncommon/abnormal interrupt events.
 * Deals with link changes (restarting autonegotiation on Davicom
 * PHYs), statistics-counter overflow, Tx aborts/underruns and the
 * descriptor write-back race, bumping the Tx FIFO threshold and
 * restarting the transmitter where appropriate.
 */
1672 static void rhine_error(struct net_device *dev, int intr_status)
1674 struct rhine_private *rp = netdev_priv(dev);
1675 long ioaddr = dev->base_addr;
1677 spin_lock(&rp->lock);
1679 if (intr_status & (IntrLinkChange)) {
1680 if (readb(ioaddr + MIIStatus) & 0x02) {
1681 /* Link failed, restart autonegotiation. */
1682 if (rp->drv_flags & HasDavicomPhy)
1683 mdio_write(dev, rp->phys[0], MII_BMCR, 0x3300);
1685 rhine_check_duplex(dev);
1687 printk(KERN_ERR "%s: MII status changed: "
1688 "Autonegotiation advertising %4.4x partner "
1689 "%4.4x.\n", dev->name,
1690 mdio_read(dev, rp->phys[0], MII_ADVERTISE),
1691 mdio_read(dev, rp->phys[0], MII_LPA));
/* counters overflowed: harvest them into rp->stats and reset */
1693 if (intr_status & IntrStatsMax) {
1694 rp->stats.rx_crc_errors += readw(ioaddr + RxCRCErrs);
1695 rp->stats.rx_missed_errors += readw(ioaddr + RxMissed);
1696 clear_tally_counters(ioaddr);
1698 if (intr_status & IntrTxAborted) {
1700 printk(KERN_INFO "%s: Abort %8.8x, frame dropped.\n",
1701 dev->name, intr_status);
/* underrun: raise the Tx FIFO threshold (capped at 0xE0) and report */
1703 if (intr_status & IntrTxUnderrun) {
1704 if (rp->tx_thresh < 0xE0)
1705 writeb(rp->tx_thresh += 0x20, ioaddr + TxConfig);
1707 printk(KERN_INFO "%s: Transmitter underrun, Tx "
1708 "threshold now %2.2x.\n",
1709 dev->name, rp->tx_thresh);
1711 if (intr_status & IntrTxDescRace) {
1713 printk(KERN_INFO "%s: Tx descriptor write-back race.\n",
1716 if ((intr_status & IntrTxError) &&
1717 (intr_status & (IntrTxAborted |
1718 IntrTxUnderrun | IntrTxDescRace)) == 0) {
1719 if (rp->tx_thresh < 0xE0) {
1720 writeb(rp->tx_thresh += 0x20, ioaddr + TxConfig);
1723 printk(KERN_INFO "%s: Unspecified error. Tx "
1724 "threshold now %2.2x.\n",
1725 dev->name, rp->tx_thresh);
1727 if (intr_status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace |
1729 rhine_restart_tx(dev);
1731 if (intr_status & ~(IntrLinkChange | IntrStatsMax | IntrTxUnderrun |
1732 IntrTxError | IntrTxAborted | IntrNormalSummary |
1735 printk(KERN_ERR "%s: Something Wicked happened! "
1736 "%8.8x.\n", dev->name, intr_status);
1739 spin_unlock(&rp->lock);
/*
 * rhine_get_stats - net_device get_stats method.
 * Folds the chip's hardware tally counters into the software stats
 * under the lock (irqsave: also touched from interrupt context) and
 * clears the hardware counters.
 */
1742 static struct net_device_stats *rhine_get_stats(struct net_device *dev)
1744 struct rhine_private *rp = netdev_priv(dev);
1745 long ioaddr = dev->base_addr;
1746 unsigned long flags;
1748 spin_lock_irqsave(&rp->lock, flags);
1749 rp->stats.rx_crc_errors += readw(ioaddr + RxCRCErrs);
1750 rp->stats.rx_missed_errors += readw(ioaddr + RxMissed);
1751 clear_tally_counters(ioaddr);
1752 spin_unlock_irqrestore(&rp->lock, flags);
/*
 * rhine_set_rx_mode - program the receive filter.
 * Promiscuous mode and "too many / all multicast" both open the 64-bit
 * multicast hash filter completely; otherwise the filter is built from
 * the CRC-32 hash (top 6 bits) of each multicast address.  Finally the
 * RxConfig register is rewritten with the current Rx FIFO threshold
 * plus the chosen mode bits.
 */
1757 static void rhine_set_rx_mode(struct net_device *dev)
1759 struct rhine_private *rp = netdev_priv(dev);
1760 long ioaddr = dev->base_addr;
1761 u32 mc_filter[2]; /* Multicast hash filter */
1762 u8 rx_mode; /* Note: 0x02=accept runt, 0x01=accept errs */
1764 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1765 /* Unconditionally log net taps. */
1766 printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
1769 writel(0xffffffff, ioaddr + MulticastFilter0);
1770 writel(0xffffffff, ioaddr + MulticastFilter1);
1771 } else if ((dev->mc_count > multicast_filter_limit)
1772 || (dev->flags & IFF_ALLMULTI)) {
1773 /* Too many to match, or accept all multicasts. */
1774 writel(0xffffffff, ioaddr + MulticastFilter0);
1775 writel(0xffffffff, ioaddr + MulticastFilter1);
1778 struct dev_mc_list *mclist;
1780 memset(mc_filter, 0, sizeof(mc_filter));
1781 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1782 i++, mclist = mclist->next) {
1783 int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
1785 mc_filter[bit_nr >> 5] |= cpu_to_le32(1 << (bit_nr & 31));
1787 writel(mc_filter[0], ioaddr + MulticastFilter0);
1788 writel(mc_filter[1], ioaddr + MulticastFilter1);
/* NOTE(review): rx_mode is assigned per-branch above (not all shown here) - verify */
1791 writeb(rp->rx_thresh | rx_mode, ioaddr + RxConfig);
/*
 * netdev_get_drvinfo - ethtool GDRVINFO: report driver name, version
 * and PCI bus location.
 * NOTE(review): plain strcpy assumes the ethtool_drvinfo fixed-size
 * fields are large enough for these strings - confirm lengths.
 */
1794 static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1796 struct rhine_private *rp = netdev_priv(dev);
1798 strcpy(info->driver, DRV_NAME);
1799 strcpy(info->version, DRV_VERSION);
1800 strcpy(info->bus_info, pci_name(rp->pdev));
/*
 * netdev_get_settings - ethtool GSET: delegate to the generic MII
 * helper under the lock; refused for chips without a MII interface.
 */
1803 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1805 struct rhine_private *rp = netdev_priv(dev);
1808 if (!(rp->drv_flags & CanHaveMII))
1811 spin_lock_irq(&rp->lock);
1812 rc = mii_ethtool_gset(&rp->mii_if, cmd);
1813 spin_unlock_irq(&rp->lock);
/*
 * netdev_set_settings - ethtool SSET: delegate to the generic MII
 * helper under the lock; refused for chips without a MII interface.
 */
1818 static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1820 struct rhine_private *rp = netdev_priv(dev);
1823 if (!(rp->drv_flags & CanHaveMII))
1826 spin_lock_irq(&rp->lock);
1827 rc = mii_ethtool_sset(&rp->mii_if, cmd);
1828 spin_unlock_irq(&rp->lock);
/* ethtool NWAY_RST: restart MII autonegotiation (MII-capable chips only). */
1833 static int netdev_nway_reset(struct net_device *dev)
1835 struct rhine_private *rp = netdev_priv(dev);
1837 if (!(rp->drv_flags & CanHaveMII))
1840 return mii_nway_restart(&rp->mii_if);
/* ethtool GLINK: report MII link state; 0 when the chip has no MII. */
1843 static u32 netdev_get_link(struct net_device *dev)
1845 struct rhine_private *rp = netdev_priv(dev);
1847 if (!(rp->drv_flags & CanHaveMII))
1848 return 0; /* -EINVAL */
1850 return mii_link_ok(&rp->mii_if);
/* ethtool GMSGLVL/SMSGLVL accessors for the driver debug level. */
1853 static u32 netdev_get_msglevel(struct net_device *dev)
1858 static void netdev_set_msglevel(struct net_device *dev, u32 value)
/* ethtool operations table wired into the net_device at probe time. */
1863 static struct ethtool_ops netdev_ethtool_ops = {
1864 .get_drvinfo = netdev_get_drvinfo,
1865 .get_settings = netdev_get_settings,
1866 .set_settings = netdev_set_settings,
1867 .nway_reset = netdev_nway_reset,
1868 .get_link = netdev_get_link,
1869 .get_msglevel = netdev_get_msglevel,
1870 .set_msglevel = netdev_set_msglevel,
1871 .get_sg = ethtool_op_get_sg,
1872 .get_tx_csum = ethtool_op_get_tx_csum,
/*
 * netdev_ioctl - SIOCxMIIxxx ioctls, forwarded to the generic MII
 * handler under the lock.  Only valid while the interface is up.
 */
1875 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1877 struct rhine_private *rp = netdev_priv(dev);
1880 if (!netif_running(dev))
1883 spin_lock_irq(&rp->lock);
1884 rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
1885 spin_unlock_irq(&rp->lock);
/*
 * rhine_close - bring the interface down (net_device stop method).
 * Kills the link timer, quiesces the chip (loopback, masked
 * interrupts, Tx/Rx stopped) under the lock, then releases the IRQ.
 */
1890 static int rhine_close(struct net_device *dev)
1892 long ioaddr = dev->base_addr;
1893 struct rhine_private *rp = netdev_priv(dev);
/* _sync: guarantees rhine_timer is not still running on another CPU */
1895 del_timer_sync(&rp->timer);
1897 spin_lock_irq(&rp->lock);
1899 netif_stop_queue(dev);
1902 printk(KERN_DEBUG "%s: Shutting down ethercard, "
1903 "status was %4.4x.\n",
1904 dev->name, readw(ioaddr + ChipCmd));
1906 /* Switch to loopback mode to avoid hardware races. */
1907 writeb(rp->tx_thresh | 0x02, ioaddr + TxConfig);
1909 /* Disable interrupts by clearing the interrupt mask. */
1910 writew(0x0000, ioaddr + IntrEnable);
1912 /* Stop the chip's Tx and Rx processes. */
1913 writew(CmdStop, ioaddr + ChipCmd);
1915 spin_unlock_irq(&rp->lock);
1917 free_irq(rp->pdev->irq, dev);
/*
 * rhine_remove_one - PCI hot-unplug / driver-unload teardown.
 * Unregisters the netdev, releases the PCI regions and mapped I/O,
 * disables the device and clears the driver-data pointer.
 */
1926 static void __devexit rhine_remove_one(struct pci_dev *pdev)
1928 struct net_device *dev = pci_get_drvdata(pdev);
1930 unregister_netdev(dev);
1932 pci_release_regions(pdev);
/* base_addr holds the ioremap cookie; cast back for iounmap */
1935 iounmap((char *)(dev->base_addr));
1939 pci_disable_device(pdev);
1940 pci_set_drvdata(pdev, NULL);
/* PCI driver glue: ties the device-ID table to probe/remove. */
1944 static struct pci_driver rhine_driver = {
1945 .name = "via-rhine",
1946 .id_table = rhine_pci_tbl,
1947 .probe = rhine_init_one,
1948 .remove = __devexit_p(rhine_remove_one),
/* Module entry point: register the PCI driver. */
1952 static int __init rhine_init(void)
1954 /* when a module, this is printed whether or not devices are found in probe */
1958 return pci_module_init(&rhine_driver);
/* Module exit point: unregister the PCI driver (triggers remove for each device). */
1962 static void __exit rhine_cleanup(void)
1964 pci_unregister_driver(&rhine_driver);
1968 module_init(rhine_init);
1969 module_exit(rhine_cleanup);