1 /* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
3 Written 1998-2001 by Donald Becker.
5 Current Maintainer: Roger Luethi <rl@hellgate.ch>
7 This software may be used and distributed according to the terms of
8 the GNU General Public License (GPL), incorporated herein by reference.
9 Drivers based on or derived from this code fall under the GPL and must
10 retain the authorship, copyright and license notice. This file is not
11 a complete program and may only be used when the entire operating
12 system is licensed under the GPL.
14 This driver is designed for the VIA VT86C100A Rhine-I.
15 It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
16 and management NIC 6105M).
18 The author may be reached as becker@scyld.com, or C/O
19 Scyld Computing Corporation
20 410 Severn Ave., Suite 210
24 This driver contains some changes from the original Donald Becker
25 version. He may or may not be interested in bug reports on this
26 code. You can find his versions at:
27 http://www.scyld.com/network/via-rhine.html
30 Linux kernel version history:
33 - Jeff Garzik: softnet 'n stuff
36 - Justin Guyett: softnet and locking fixes
37 - Jeff Garzik: use PCI interface
40 - Urban Widmark: minor cleanups, merges from Becker 1.03a/1.04 versions
43 - Urban Widmark: use PCI DMA interface (with thanks to the eepro100.c
44 code) update "Theory of Operation" with
45 softnet/locking changes
46 - Dave Miller: PCI DMA and endian fixups
47 - Jeff Garzik: MOD_xxx race fixes, updated PCI resource allocation
50 - Urban Widmark: fix gcc 2.95.2 problem and
51 remove writel's to fixed address 0x7c
54 - Urban Widmark: mdio locking, bounce buffer changes
55 merges from Beckers 1.05 version
56 added netif_running_on/off support
59 - Urban Widmark: merges from Beckers 1.08b version (VT6102 + mdio)
60 set netif_running_on/off on startup, del_timer_sync
63 - Manfred Spraul: added reset into tx_timeout
66 - Urban Widmark: merges from Beckers 1.10 version
67 (media selection + eeprom reload)
68 - David Vrabel: merges from D-Link "1.11" version
69 (disable WOL and PME on startup)
72 - Manfred Spraul: use "singlecopy" for unaligned buffers
73 don't allocate bounce buffers for !ReqTxAlign cards
76 - David Woodhouse: Set dev->base_addr before the first time we call
77 wait_for_reset(). It's a lot happier that way.
78 Free np->tx_bufs only if we actually allocated it.
81 - Martin Eriksson: Allow Memory-Mapped IO to be enabled.
85 - Replace some MII-related magic numbers with constants
88 - fixes comments for Rhine-III
89 - removes W_MAX_TIMEOUT (unused)
90 - adds HasDavicomPhy for Rhine-I (basis: linuxfet driver; my card
91 is R-I and has Davicom chip, flag is referenced in kernel driver)
92 - sends chip_id as a parameter to wait_for_reset since np is not
93 initialized on first call
94 - changes mmio "else if (chip_id==VT6102)" to "else" so it will work
95 for Rhine-III's (documentation says same bit is correct)
96 - transmit frame queue message is off by one - fixed
97 - adds IntrNormalSummary to "Something Wicked" exclusion list
98 so normal interrupts will not trigger the message (src: Donald Becker)
100 - show confused chip where to continue after Tx error
101 - location of collision counter is chip specific
102 - allow selecting backoff algorithm (module parameter)
105 - Use new MII lib helper generic_mii_ioctl
107 LK1.1.16 (Roger Luethi)
109 - Handle Tx buffer underrun
110 - Fix bugs in full duplex handling
111 - New reset code uses "force reset" cmd on Rhine-II
114 LK1.1.17 (Roger Luethi)
115 - Fix race in via_rhine_start_tx()
116 - On errors, wait for Tx engine to turn off before scavenging
117 - Handle Tx descriptor write-back race on Rhine-II
118 - Force flushing for PCI posted writes
119 - More reset code changes
121 LK1.1.18 (Roger Luethi)
122 - No filtering multicast in promisc mode (Edward Peng)
123 - Fix for Rhine-I Tx timeouts
125 LK1.1.19 (Roger Luethi)
126 - Increase Tx threshold for unspecified errors
130 #define DRV_NAME "via-rhine"
131 #define DRV_VERSION "1.1.20-2.6"
132 #define DRV_RELDATE "May-23-2004"
135 /* A few user-configurable values.
136 These may be modified when a driver module is loaded. */
138 static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
139 static int max_interrupt_work = 20;
141 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
142 Setting to > 1518 effectively disables this feature. */
143 static int rx_copybreak;
145 /* Select a backoff algorithm (Ethernet capture effect) */
148 /* Used to pass the media type, etc.
149 Both 'options[]' and 'full_duplex[]' should exist for driver
151 The media type is usually passed in 'options[]'.
152 The default is autonegotiation for speed and duplex.
153 This should rarely be overridden.
154 Use option values 0x10/0x20 for 10Mbps, 0x100,0x200 for 100Mbps.
155 Use option values 0x10 and 0x100 for forcing half duplex fixed speed.
156 Use option values 0x20 and 0x200 for forcing full duplex operation.
158 #define MAX_UNITS 8 /* More are supported, limit only on options */
159 static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
160 static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
162 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
163 The Rhine has a 64 element 8390-like hash table. */
164 static const int multicast_filter_limit = 32;
167 /* Operational parameters that are set at compile time. */
169 /* Keep the ring sizes a power of two for compile efficiency.
170 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
171 Making the Tx ring too large decreases the effectiveness of channel
172 bonding and packet priority.
173 There are no ill effects from too-large receive rings. */
174 #define TX_RING_SIZE 16
175 #define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
176 #define RX_RING_SIZE 16
179 /* Operational parameters that usually are not changed. */
181 /* Time in jiffies before concluding the transmitter is hung. */
182 #define TX_TIMEOUT (2*HZ)
184 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
186 #if !defined(__OPTIMIZE__) || !defined(__KERNEL__)
187 #warning You must compile this file with the correct options!
188 #warning See the last lines of the source file.
189 #error You must compile this driver with "-O".
192 #include <linux/module.h>
193 #include <linux/kernel.h>
194 #include <linux/string.h>
195 #include <linux/timer.h>
196 #include <linux/errno.h>
197 #include <linux/ioport.h>
198 #include <linux/slab.h>
199 #include <linux/interrupt.h>
200 #include <linux/pci.h>
201 #include <linux/netdevice.h>
202 #include <linux/etherdevice.h>
203 #include <linux/skbuff.h>
204 #include <linux/init.h>
205 #include <linux/delay.h>
206 #include <linux/mii.h>
207 #include <linux/ethtool.h>
208 #include <linux/crc32.h>
209 #include <asm/processor.h> /* Processor type for cache alignment. */
210 #include <asm/bitops.h>
213 #include <asm/uaccess.h>
215 /* These identify the driver base version and may not be removed. */
216 static char version[] __devinitdata =
217 KERN_INFO DRV_NAME ".c:v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n";
219 static char shortname[] = DRV_NAME;
222 /* This driver was written to use PCI memory space. Some early versions
223 of the Rhine may only work correctly with I/O space accesses. */
224 #ifdef CONFIG_VIA_RHINE_MMIO
241 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
242 MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
243 MODULE_LICENSE("GPL");
245 MODULE_PARM(max_interrupt_work, "i");
246 MODULE_PARM(debug, "i");
247 MODULE_PARM(rx_copybreak, "i");
248 MODULE_PARM(backoff, "i");
249 MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
250 MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
251 MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
252 MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
253 MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
254 MODULE_PARM_DESC(backoff, "VIA Rhine: Bits 0-3: backoff algorithm");
255 MODULE_PARM_DESC(options, "VIA Rhine: Bits 0-3: media type, bit 17: full duplex");
256 MODULE_PARM_DESC(full_duplex, "VIA Rhine full duplex setting(s) (1)");
261 I. Board Compatibility
263 This driver is designed for the VIA 86c100A Rhine-II PCI Fast Ethernet
266 II. Board-specific settings
268 Boards with this chip are functional only in a bus-master PCI slot.
270 Many operational settings are loaded from the EEPROM to the Config word at
271 offset 0x78. For most of these settings, this driver assumes that they are
273 If this driver is compiled to use PCI memory space operations the EEPROM
274 must be configured to enable memory ops.
276 III. Driver operation
280 This driver uses two statically allocated fixed-size descriptor lists
281 formed into rings by a branch from the final descriptor to the beginning of
282 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
284 IIIb/c. Transmit/Receive Structure
286 This driver attempts to use a zero-copy receive and transmit scheme.
288 Alas, all data buffers are required to start on a 32 bit boundary, so
289 the driver must often copy transmit packets into bounce buffers.
291 The driver allocates full frame size skbuffs for the Rx ring buffers at
292 open() time and passes the skb->data field to the chip as receive data
293 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
294 a fresh skbuff is allocated and the frame is copied to the new skbuff.
295 When the incoming frame is larger, the skbuff is passed directly up the
296 protocol stack. Buffers consumed this way are replaced by newly allocated
297 skbuffs in the last phase of rhine_rx().
299 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
300 using a full-sized skbuff for small frames vs. the copying costs of larger
301 frames. New boards are typically used in generously configured machines
302 and the underfilled buffers have negligible impact compared to the benefit of
303 a single allocation size, so the default value of zero results in never
304 copying packets. When copying is done, the cost is usually mitigated by using
305 a combined copy/checksum routine. Copying also preloads the cache, which is
306 most useful with small frames.
308 Since the VIA chips are only able to transfer data to buffers on 32 bit
309 boundaries, the IP header at offset 14 in an ethernet frame isn't
310 longword aligned for further processing. Copying these unaligned buffers
311 has the beneficial effect of 16-byte aligning the IP header.
313 IIId. Synchronization
315 The driver runs as two independent, single-threaded flows of control. One
316 is the send-packet routine, which enforces single-threaded use by the
317 dev->priv->lock spinlock. The other thread is the interrupt handler, which
318 is single threaded by the hardware and interrupt handling software.
320 The send packet thread has partial control over the Tx ring. It locks the
321 dev->priv->lock whenever it's queuing a Tx packet. If the next slot in the ring
322 is not available it stops the transmit queue by calling netif_stop_queue.
324 The interrupt handler has exclusive control over the Rx ring and records stats
325 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
326 empty by incrementing the dirty_tx mark. If at least half of the entries in
327 the Rx ring are available the transmit queue is woken up if it was stopped.
333 Preliminary VT86C100A manual from http://www.via.com.tw/
334 http://www.scyld.com/expert/100mbps.html
335 http://www.scyld.com/expert/NWay.html
336 ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
337 ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF
342 The VT86C100A manual is not reliable information.
343 The 3043 chip does not handle unaligned transmit or receive buffers, resulting
344 in significant performance degradation for bounce buffer copies on transmit
345 and unaligned IP headers on receive.
346 The chip does not pad to minimum transmit length.
351 /* This table drives the PCI probe routines. It's mostly boilerplate in all
352 of the drivers, and will likely be provided by some future kernel.
Note the matching code -- the first table entry matches all 56** cards but
354 second only the 1234 card.
360 VT8231 = 0x50, /* Integrated MAC */
361 VT8233 = 0x60, /* Integrated MAC */
362 VT8235 = 0x74, /* Integrated MAC */
363 VT8237 = 0x78, /* Integrated MAC */
374 rqWOL = 0x0001, /* Wake-On-LAN support */
375 rqForceReset = 0x0002,
376 rqDavicomPhy = 0x0020,
377 rq6patterns = 0x0040, /* 6 instead of 4 patterns for WOL */
378 rqStatusWBRace = 0x0080, /* Tx Status Writeback Error possible */
379 rqRhineI = 0x0100, /* See comment below */
382 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
383 * MMIO as well as for the collision counter and the Tx FIFO underflow
* indicator. In addition, Tx and Rx buffers need to be 4 byte aligned.
387 /* Beware of PCI posted writes */
388 #define IOSYNC do { readb(dev->base_addr + StationAddr); } while (0)
/*
 * PCI IDs of all supported Rhine chips -- all VIA (vendor 0x1106) parts.
 * Finer-grained identification (Rhine-I/II/III, quirk flags) is done via
 * the PCI revision ID in rhine_init_one(), not via separate table entries.
 */
static struct pci_device_id rhine_pci_tbl[] =
	{0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, /* VT86C100A */
	{0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, /* VT6102 */
	{0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, /* 6105{,L,LOM} */
	{0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, /* VT6105M */
	{ }	/* terminate list */
MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
/* Offsets to the device registers.
   The same offsets are used for both PIO and MMIO access (the probe code
   verifies this via mmio_verify_registers[]). */
enum register_offsets {
	StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
	IntrStatus=0x0C, IntrEnable=0x0E,
	MulticastFilter0=0x10, MulticastFilter1=0x14,
	RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E,
	MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
	ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
	RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
	StickyHW=0x83, IntrStatus2=0x84,
	/* Power-event (WOL) registers; only meaningful on rqWOL chips. */
	WOLcrSet=0xA0, WOLcrClr=0xA4, WOLcrClr1=0xA6,
	PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
417 /* Bits in ConfigD */
419 BackOptional=0x01, BackModify=0x02,
420 BackCaptureEffect=0x04, BackRandom=0x08
424 /* Registers we check that mmio and reg are the same. */
425 int mmio_verify_registers[] = {
426 RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
/* Bits in the interrupt status/mask registers.
   Values above bit 15 do not exist in the 16-bit IntrStatus register;
   they are synthesized by get_intr_status(), which shifts the IntrStatus2
   byte into bits 16-23 on rqStatusWBRace (Rhine-II) chips. */
enum intr_status_bits {
	IntrRxDone=0x0001, IntrRxErr=0x0004, IntrRxEmpty=0x0020,
	IntrTxDone=0x0002, IntrTxError=0x0008, IntrTxUnderrun=0x0210,
	IntrStatsMax=0x0080, IntrRxEarly=0x0100,
	IntrRxOverflow=0x0400, IntrRxDropped=0x0800, IntrRxNoBuf=0x1000,
	IntrTxAborted=0x2000, IntrLinkChange=0x4000,
	IntrNormalSummary=0x0003, IntrAbnormalSummary=0xC260,
	IntrTxDescRace=0x080000,	/* mapped from IntrStatus2 */
	IntrTxErrSummary=0x082218,
445 /* The Rx and Tx buffer descriptors. */
448 u32 desc_length; /* Chain flag, Buffer/frame length */
454 u32 desc_length; /* Chain flag, Tx Config, Frame length */
459 /* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
460 #define TXDESC 0x00e08000
462 enum rx_status_bits {
463 RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
466 /* Bits in *_desc.*_status */
467 enum desc_status_bits {
471 /* Bits in ChipCmd. */
473 CmdInit=0x0001, CmdStart=0x0002, CmdStop=0x0004, CmdRxOn=0x0008,
474 CmdTxOn=0x0010, CmdTxDemand=0x0020, CmdRxDemand=0x0040,
475 CmdEarlyRx=0x0100, CmdEarlyTx=0x0200, CmdFDuplex=0x0400,
476 CmdNoTxPoll=0x0800, CmdReset=0x8000,
479 #define MAX_MII_CNT 4
/*
 * Per-device driver state, stored in the net_device private area
 * (accessed via netdev_priv()).  Holds the DMA descriptor rings, the
 * skbuffs and DMA handles attached to each ring slot, the Rhine-I Tx
 * bounce buffers, and the current MII/media bookkeeping.
 */
struct rhine_private {
	/* Descriptor rings */
	struct rx_desc *rx_ring;
	struct tx_desc *tx_ring;
	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;

	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	dma_addr_t rx_skbuff_dma[RX_RING_SIZE];

	/* The saved address of a sent-in-place packet/buffer, for later free(). */
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_skbuff_dma[TX_RING_SIZE];

	/* Tx bounce buffers (Rhine-I only; see alloc_ring()) */
	unsigned char *tx_buf[TX_RING_SIZE];
	unsigned char *tx_bufs;
	dma_addr_t tx_bufs_dma;

	struct pci_dev *pdev;
	struct net_device_stats stats;
	struct timer_list timer;	/* Media monitoring timer. */

	/* Frequently used values: keep some adjacent for cache effect. */
	struct rx_desc *rx_head_desc;
	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
	unsigned int cur_tx, dirty_tx;
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
	u16 chip_cmd;			/* Current setting for ChipCmd */

	/* These values keep track of the transceiver/media in use. */
	u8 tx_thresh, rx_thresh;

	/* MII transceiver section. */
	unsigned char phys[MAX_MII_CNT];	/* MII device addresses. */
	unsigned int mii_cnt;	/* number of MIIs found, but only the first one is used */
	u16 mii_status;		/* last read MII status */
	struct mii_if_info mii_if;
523 static int mdio_read(struct net_device *dev, int phy_id, int location);
524 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
525 static int rhine_open(struct net_device *dev);
526 static void rhine_check_duplex(struct net_device *dev);
527 static void rhine_timer(unsigned long data);
528 static void rhine_tx_timeout(struct net_device *dev);
529 static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev);
530 static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
531 static void rhine_tx(struct net_device *dev);
532 static void rhine_rx(struct net_device *dev);
533 static void rhine_error(struct net_device *dev, int intr_status);
534 static void rhine_set_rx_mode(struct net_device *dev);
535 static struct net_device_stats *rhine_get_stats(struct net_device *dev);
536 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
537 static struct ethtool_ops netdev_ethtool_ops;
538 static int rhine_close(struct net_device *dev);
/*
 * Read the chip's interrupt status.  On chips that suffer from the Tx
 * descriptor write-back race (rqStatusWBRace, i.e. Rhine-II), the extra
 * status byte in IntrStatus2 is folded into bits 16-23 so callers can
 * test every interrupt source in a single 32-bit value.
 */
static inline u32 get_intr_status(struct net_device *dev)
	long ioaddr = dev->base_addr;
	struct rhine_private *rp = netdev_priv(dev);

	intr_status = readw(ioaddr + IntrStatus);
	/* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
	if (rp->quirks & rqStatusWBRace)
		intr_status |= readb(ioaddr + IntrStatus2) << 16;
* Get power related registers into sane state.
* Clears the power-event (WOL) configuration and status registers so no
* stale wake-up events remain armed.
/*
 * Disarm any wake-on-LAN state a previous OS or BIOS may have left
 * behind.  Only chips with WOL support (rqWOL quirk) have these
 * registers, so everything is gated on that flag.
 */
static void rhine_power_init(struct net_device *dev)
	long ioaddr = dev->base_addr;
	struct rhine_private *rp = netdev_priv(dev);

	if (rp->quirks & rqWOL) {
		/* Make sure chip is in power state D0 */
		writeb(readb(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);

		/* Disable "force PME-enable" */
		writeb(0x80, ioaddr + WOLcgClr);

		/* Clear power-event config bits (WOL) */
		writeb(0xFF, ioaddr + WOLcrClr);
		/* More recent cards can manage two additional patterns */
		if (rp->quirks & rq6patterns)
			writeb(0x03, ioaddr + WOLcrClr1);

		/* Clear power-event status bits */
		writeb(0xFF, ioaddr + PwrcsrClr);
		if (rp->quirks & rq6patterns)
			writeb(0x03, ioaddr + PwrcsrClr1);
/*
 * Wait for a software reset (CmdReset in ChipCmd) to complete.  If the
 * reset bit has not cleared yet, Rhine-II chips are given an extra
 * "force reset" poke via MiscCmd, and the bit is then polled with a
 * bounded busy-wait.  @quirks and @name are passed explicitly because
 * netdev_priv() is not yet initialized the first time this runs during
 * probe (see the LK1.1.14 changelog note above).
 */
static void wait_for_reset(struct net_device *dev, u32 quirks, char *name)
	long ioaddr = dev->base_addr;

	if (readw(ioaddr + ChipCmd) & CmdReset) {
		printk(KERN_INFO "%s: Reset not complete yet. "
			"Trying harder.\n", name);

		/* Rhine-II needs to be forced sometimes */
		if (quirks & rqForceReset)
			writeb(0x40, ioaddr + MiscCmd);

		/* VT86C100A may need long delay after reset (dlink) */
		/* Seen on Rhine-II as well (rl) */
		while ((readw(ioaddr + ChipCmd) & CmdReset) && --boguscnt)

	/* boguscnt reaching zero means the chip never came out of reset. */
	printk(KERN_INFO "%s: Reset %s.\n", name,
	       boguscnt ? "succeeded" : "failed");
/*
 * Enable memory-mapped access to the chip registers.  Rhine-I uses bit 5
 * of ConfigA (newer datasheets call it reserved -- see comment below);
 * other chips use bit 7 of ConfigD.  Must use port I/O (inb/outb) since
 * MMIO is, by definition, not usable yet.
 */
static void __devinit enable_mmio(long ioaddr, u32 quirks)
	if (quirks & rqRhineI) {
		/* More recent docs say that this bit is reserved ... */
		n = inb(ioaddr + ConfigA) | 0x20;
		outb(n, ioaddr + ConfigA);
		n = inb(ioaddr + ConfigD) | 0x80;
		outb(n, ioaddr + ConfigD);
/*
 * Trigger an EEPROM reload (bit 5 of MACRegEEcsr) and poll until the
 * chip clears that bit again.  The 150-iteration bound is only a safety
 * net against a dead chip; reloading normally takes a couple of cycles.
 * Uses port I/O because a reload clobbers ConfigA-D, including the
 * MMIO-enable bits (callers must re-run enable_mmio() afterwards).
 */
static void __devinit reload_eeprom(long ioaddr)
	outb(0x20, ioaddr + MACRegEEcsr);
	/* Typically 2 cycles to reload. */
	for (i = 0; i < 150; i++)
		if (! (inb(ioaddr + MACRegEEcsr) & 0x20))
634 #ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' for netconsole/netpoll: run the normal interrupt
 * handler with the device's IRQ line disabled so it cannot race against
 * a real hardware interrupt.
 */
static void rhine_poll(struct net_device *dev)
	disable_irq(dev->irq);
	rhine_interrupt(dev->irq, (void *)dev, NULL);
	enable_irq(dev->irq);
/*
 * PCI probe routine: identify the exact Rhine flavor from the PCI
 * revision ID, enable and map the device, pull the station address from
 * the EEPROM, wire up the net_device operations and register it.
 * Returns 0 on success or a negative errno on any failure, unwinding
 * via the err_out_* labels.
 */
static int __devinit rhine_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
	struct net_device *dev;
	struct rhine_private *rp;
	static int card_idx = -1;	/* counts probed cards across calls */
	int phy, phy_idx = 0;

	/* when built into the kernel, we only print version if device is found */
	static int printed_version;
	if (!printed_version++)

	option = card_idx < MAX_UNITS ? options[card_idx] : 0;

	/* The PCI revision ID selects the quirk set and printable name. */
	pci_read_config_byte(pdev, PCI_REVISION_ID, &pci_rev);

	if (pci_rev < VT6102) {
		quirks = rqRhineI | rqDavicomPhy;
		name = "VT86C100A Rhine";
		quirks = rqWOL | rqForceReset;
		if (pci_rev < VT6105) {
			quirks |= rqStatusWBRace;	/* Rhine-II exclusive */
			if (pci_rev >= VT6105_B0)
				quirks |= rq6patterns;

	rc = pci_enable_device(pdev);

	/* this should always be supported */
	rc = pci_set_dma_mask(pdev, 0xffffffff);
		printk(KERN_ERR "32-bit PCI DMA addresses not supported by "

	/* Sanity check both the PIO (BAR 0) and MMIO (BAR 1) windows. */
	if ((pci_resource_len(pdev, 0) < io_size) ||
	    (pci_resource_len(pdev, 1) < io_size)) {
		printk(KERN_ERR "Insufficient PCI resources, aborting\n");

	ioaddr = pci_resource_start(pdev, 0);
	memaddr = pci_resource_start(pdev, 1);

	pci_set_master(pdev);

	dev = alloc_etherdev(sizeof(*rp));
		printk(KERN_ERR "init_ethernet failed for card #%d\n",
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	rc = pci_request_regions(pdev, shortname);
		goto err_out_free_netdev;

	enable_mmio(ioaddr0, quirks);

	ioaddr = (long) ioremap(memaddr, io_size);
		printk(KERN_ERR "ioremap failed for device %s, region 0x%X "
		       "@ 0x%lX\n", pci_name(pdev), io_size, memaddr);
		goto err_out_free_res;

	/* Check that selected MMIO registers match the PIO ones */
	while (mmio_verify_registers[i]) {
		int reg = mmio_verify_registers[i++];
		unsigned char a = inb(ioaddr0+reg);
		unsigned char b = readb(ioaddr+reg);
			printk(KERN_ERR "MMIO do not match PIO [%02x] "
			       "(%02x != %02x)\n", reg, a, b);
#endif /* USE_MMIO */
	dev->base_addr = ioaddr;

	rhine_power_init(dev);

	/* Reset the chip to erase previous misconfiguration. */
	writew(CmdReset, ioaddr + ChipCmd);

	wait_for_reset(dev, quirks, shortname);

	/* Reload the station address from the EEPROM. */
	reload_eeprom(ioaddr0);
	/* Reloading from eeprom overwrites cfgA-D, so we must re-enable MMIO.
	   If reload_eeprom() was done first this could be avoided, but it is
	   not known if that still works with the "win98-reboot" problem. */
	enable_mmio(ioaddr0, quirks);
	reload_eeprom(ioaddr);

	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = readb(ioaddr + StationAddr + i);

	if (!is_valid_ether_addr(dev->dev_addr)) {
		printk(KERN_ERR "Invalid MAC address for card #%d\n", card_idx);

	if (quirks & rqWOL) {
		 * for 3065D, EEPROM reloaded will cause bit 0 in MAC_REG_CFGA
		 * turned on. it makes MAC receive magic packet
		 * automatically. So, we turn it off. (D-Link)
		writeb(readb(ioaddr + ConfigA) & 0xFE, ioaddr + ConfigA);

	/* Select backoff algorithm (low nibble of ConfigD, module param) */
		writeb(readb(ioaddr + ConfigD) & (0xF0 | backoff),

	dev->irq = pdev->irq;

	rp = netdev_priv(dev);
	spin_lock_init(&rp->lock);
	rp->mii_if.dev = dev;
	rp->mii_if.mdio_read = mdio_read;
	rp->mii_if.mdio_write = mdio_write;
	rp->mii_if.phy_id_mask = 0x1f;
	rp->mii_if.reg_num_mask = 0x1f;

		option = dev->mem_start;

	/* The chip-specific entries in the device structure. */
	dev->open = rhine_open;
	dev->hard_start_xmit = rhine_start_tx;
	dev->stop = rhine_close;
	dev->get_stats = rhine_get_stats;
	dev->set_multicast_list = rhine_set_rx_mode;
	dev->do_ioctl = netdev_ioctl;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->tx_timeout = rhine_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = rhine_poll;
	/* Presumably safe because Rhine-I copies Tx data through bounce
	   buffers anyway (see alloc_ring) -- TODO confirm */
	if (rp->quirks & rqRhineI)
		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;

	/* dev->name not defined before register_netdev()! */
	rc = register_netdev(dev);

	/* The lower four bits are the media type. */
		rp->mii_if.full_duplex = 1;
	if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
		rp->mii_if.full_duplex = 1;

	if (rp->mii_if.full_duplex) {
		printk(KERN_INFO "%s: Set to forced full duplex, "
		       "autonegotiation disabled.\n", dev->name);
		rp->mii_if.force_media = 1;

	printk(KERN_INFO "%s: VIA %s at 0x%lx, ",
	for (i = 0; i < 5; i++)
		printk("%2.2x:", dev->dev_addr[i]);
	printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], pdev->irq);

	pci_set_drvdata(pdev, dev);

	/* Scan the MII bus; phys[0] keeps the chip default unless a PHY
	   responds with a sane (non-all-ones, non-zero) status. */
	rp->phys[0] = 1;	/* Standard for this chip. */
	for (phy = 1; phy < 32 && phy_idx < MAX_MII_CNT; phy++) {
		int mii_status = mdio_read(dev, phy, 1);
		if (mii_status != 0xffff && mii_status != 0x0000) {
			rp->phys[phy_idx++] = phy;
			rp->mii_if.advertising = mdio_read(dev, phy, 4);
			printk(KERN_INFO "%s: MII PHY found at address "
			       "%d, status 0x%4.4x advertising %4.4x "
			       "Link %4.4x.\n", dev->name, phy,
			       mii_status, rp->mii_if.advertising,
			       mdio_read(dev, phy, 5));

			/* set IFF_RUNNING */
			if (mii_status & BMSR_LSTATUS)
				netif_carrier_on(dev);
				netif_carrier_off(dev);
	rp->mii_cnt = phy_idx;
	rp->mii_if.phy_id = rp->phys[0];

	/* Allow forcing the media type. */
		rp->mii_if.full_duplex = 1;
		if (option & 0x330) {
			/* FIXME: shouldn't someone check this variable? */
			printk(KERN_INFO " Forcing %dMbs %s-duplex "
			       (option & 0x300 ? 100 : 10),
			       (option & 0x220 ? "full" : "half"));
			mdio_write(dev, rp->phys[0], MII_BMCR,
				   ((option & 0x300) ? 0x2000 : 0) | /* 100mbps? */
				   ((option & 0x220) ? 0x0100 : 0)); /* Full duplex? */

	/* Error unwind: release whatever was acquired above. */
	iounmap((void *)ioaddr);
	pci_release_regions(pdev);
/*
 * Allocate the DMA-coherent descriptor rings -- one allocation covers
 * both rings, Tx descriptors immediately following the Rx ones -- plus
 * the Tx bounce-buffer area that Rhine-I chips need for unaligned
 * transmit data.  Returns 0 on success, negative errno on failure.
 */
static int alloc_ring(struct net_device* dev)
	struct rhine_private *rp = netdev_priv(dev);

	ring = pci_alloc_consistent(rp->pdev,
				    RX_RING_SIZE * sizeof(struct rx_desc) +
				    TX_RING_SIZE * sizeof(struct tx_desc),
		printk(KERN_ERR "Could not allocate DMA memory.\n");
	if (rp->quirks & rqRhineI) {
		rp->tx_bufs = pci_alloc_consistent(rp->pdev,
						   PKT_BUF_SZ * TX_RING_SIZE,
		if (rp->tx_bufs == NULL) {
			/* Undo the combined ring allocation before bailing. */
			pci_free_consistent(rp->pdev,
					    RX_RING_SIZE * sizeof(struct rx_desc) +
					    TX_RING_SIZE * sizeof(struct tx_desc),

	/* Carve the single allocation into the two rings. */
	rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
	rp->rx_ring_dma = ring_dma;
	rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
/*
 * Release the descriptor rings allocated by alloc_ring(), and the
 * Rhine-I Tx bounce buffers if they were allocated.
 * NOTE(review): not declared static although it appears file-local --
 * consider making it static; confirm there are no external users.
 */
void free_ring(struct net_device* dev)
	struct rhine_private *rp = netdev_priv(dev);

	pci_free_consistent(rp->pdev,
			    RX_RING_SIZE * sizeof(struct rx_desc) +
			    TX_RING_SIZE * sizeof(struct tx_desc),
			    rp->rx_ring, rp->rx_ring_dma);

		pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
				    rp->tx_bufs, rp->tx_bufs_dma);
/*
 * Initialize the Rx descriptor ring and fill it with freshly allocated
 * full-size skbuffs, each DMA-mapped and handed to the chip (DescOwn).
 * skb allocation failures are tolerated: dirty_rx ends up recording how
 * far the ring actually got filled.
 */
static void alloc_rbufs(struct net_device *dev)
	struct rhine_private *rp = netdev_priv(dev);

	rp->dirty_rx = rp->cur_rx = 0;

	/* Standard MTU fits in PKT_BUF_SZ; jumbo-ish MTUs get MTU+slack. */
	rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
	rp->rx_head_desc = &rp->rx_ring[0];
	next = rp->rx_ring_dma;

	/* Init the ring entries */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rp->rx_ring[i].rx_status = 0;
		rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
		next += sizeof(struct rx_desc);
		rp->rx_ring[i].next_desc = cpu_to_le32(next);
		rp->rx_skbuff[i] = 0;
	/* Mark the last entry as wrapping the ring. */
	rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(rp->rx_buf_sz);
		rp->rx_skbuff[i] = skb;
		skb->dev = dev;	/* Mark as being used by this device. */

		rp->rx_skbuff_dma[i] =
			pci_map_single(rp->pdev, skb->tail, rp->rx_buf_sz,
				       PCI_DMA_FROMDEVICE);

		rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
		rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
	rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
/*
 * Unmap and free every Rx skbuff and poison the descriptor buffer
 * addresses, so any late DMA into a freed buffer hits an obviously
 * invalid address instead of silently corrupting memory.
 */
static void free_rbufs(struct net_device* dev)
	struct rhine_private *rp = netdev_priv(dev);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rp->rx_ring[i].rx_status = 0;
		rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0);	/* An invalid address. */
		if (rp->rx_skbuff[i]) {
			pci_unmap_single(rp->pdev,
					 rp->rx_skbuff_dma[i],
					 rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(rp->rx_skbuff[i]);
		rp->rx_skbuff[i] = 0;
/*
 * Initialize the Tx descriptor ring: no packets queued, all descriptors
 * in their idle state, chained into a circular list.  tx_buf[i] points
 * into the bounce-buffer area (tx_bufs is only allocated on Rhine-I --
 * see alloc_ring; presumably the pointers go unused on later chips).
 */
static void alloc_tbufs(struct net_device* dev)
	struct rhine_private *rp = netdev_priv(dev);

	rp->dirty_tx = rp->cur_tx = 0;
	next = rp->tx_ring_dma;
	for (i = 0; i < TX_RING_SIZE; i++) {
		rp->tx_skbuff[i] = 0;
		rp->tx_ring[i].tx_status = 0;
		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
		next += sizeof(struct tx_desc);
		rp->tx_ring[i].next_desc = cpu_to_le32(next);
		rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
	/* Close the ring: the last descriptor points back at the first. */
	rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
/*
 * Drop any packets still sitting in the Tx ring: reset each descriptor
 * to its idle state with a poisoned buffer address, unmap the streaming
 * DMA mapping if one was recorded (a zero tx_skbuff_dma presumably
 * marks a bounce-buffered packet -- confirm in rhine_start_tx), and
 * free the skb.
 */
static void free_tbufs(struct net_device* dev)
	struct rhine_private *rp = netdev_priv(dev);

	for (i = 0; i < TX_RING_SIZE; i++) {
		rp->tx_ring[i].tx_status = 0;
		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
		rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0);	/* An invalid address. */
		if (rp->tx_skbuff[i]) {
			if (rp->tx_skbuff_dma[i]) {
				pci_unmap_single(rp->pdev,
						 rp->tx_skbuff_dma[i],
						 rp->tx_skbuff[i]->len,
			dev_kfree_skb(rp->tx_skbuff[i]);
		rp->tx_skbuff[i] = 0;
/*
 * Program the chip for operation: station address, PCI bus tuning, FIFO
 * thresholds, descriptor ring base addresses, Rx filter mode, interrupt
 * mask -- then start the Tx/Rx engines.  Called from rhine_open() after
 * the chip has been reset.
 */
static void init_registers(struct net_device *dev)
	struct rhine_private *rp = netdev_priv(dev);
	long ioaddr = dev->base_addr;

	for (i = 0; i < 6; i++)
		writeb(dev->dev_addr[i], ioaddr + StationAddr + i);

	/* Initialize other registers. */
	writew(0x0006, ioaddr + PCIBusConfig);	/* Tune configuration??? */
	/* Configure initial FIFO thresholds. */
	writeb(0x20, ioaddr + TxConfig);
	rp->tx_thresh = 0x20;
	rp->rx_thresh = 0x60;		/* Written in rhine_set_rx_mode(). */
	rp->mii_if.full_duplex = 0;

	writel(rp->rx_ring_dma, ioaddr + RxRingPtr);
	writel(rp->tx_ring_dma, ioaddr + TxRingPtr);

	rhine_set_rx_mode(dev);

	/* Enable interrupts by setting the interrupt mask. */
	writew(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
	       IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
	       IntrTxDone | IntrTxError | IntrTxUnderrun |
	       IntrPCIErr | IntrStatsMax | IntrLinkChange,
	       ioaddr + IntrEnable);

	/* CmdNoTxPoll: Tx is kicked explicitly (CmdTxDemand), not polled. */
	rp->chip_cmd = CmdStart|CmdTxOn|CmdRxOn|CmdNoTxPoll;
	if (rp->mii_if.force_media)
		rp->chip_cmd |= CmdFDuplex;
	writew(rp->chip_cmd, ioaddr + ChipCmd);

	rhine_check_duplex(dev);

	/* The LED outputs of various MII xcvrs should be configured. */
	/* For NS or Mison phys, turn on bit 1 in register 0x17 */
	mdio_write(dev, rp->phys[0], 0x17, mdio_read(dev, rp->phys[0], 0x17) |
1116 /* Read and write over the MII Management Data I/O (MDIO) interface. */
/*
 * Read one 16-bit register from an MII PHY over the chip's MDIO
 * interface.  Both waits are bounded by a bogus counter so a wedged PHY
 * cannot hang the kernel; on timeout the (stale) MIIData is returned.
 */
static int mdio_read(struct net_device *dev, int phy_id, int regnum)
	long ioaddr = dev->base_addr;
	int boguscnt = 1024;

	/* Wait for a previous command to complete. */
	while ((readb(ioaddr + MIICmd) & 0x60) && --boguscnt > 0)

	writeb(0x00, ioaddr + MIICmd);
	writeb(phy_id, ioaddr + MIIPhyAddr);
	writeb(regnum, ioaddr + MIIRegAddr);
	writeb(0x40, ioaddr + MIICmd);		/* Trigger read */

	/* Wait for the read cycle to finish (bit self-clears). */
	while ((readb(ioaddr + MIICmd) & 0x40) && --boguscnt > 0)

	return readw(ioaddr + MIIData);
/*
 * mdio_write - write a 16-bit value to a PHY register via MII.
 * For the bound PHY (rp->phys[0]) the user-visible BMCR/advertising
 * settings are first shadowed into rp->mii_if so the driver's idea of
 * forced media / duplex stays in sync with what is written to the PHY.
 * NOTE(review): extract is missing braces, the switch header and some
 * case labels (MII_ADVERTISE presumably); code left byte-identical.
 */
1136 static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
1138 struct rhine_private *rp = netdev_priv(dev);
1139 long ioaddr = dev->base_addr;
1140 int boguscnt = 1024;
/* Shadow writes to our own PHY into the mii_if bookkeeping. */
1142 if (phy_id == rp->phys[0]) {
1144 case MII_BMCR: /* Is user forcing speed/duplex? */
1145 if (value & 0x9000) /* Autonegotiation. */
1146 rp->mii_if.force_media = 0;
1148 rp->mii_if.full_duplex = (value & 0x0100) ? 1 : 0;
1151 rp->mii_if.advertising = value;
1156 /* Wait for a previous command to complete. */
1157 while ((readb(ioaddr + MIICmd) & 0x60) && --boguscnt > 0)
1159 writeb(0x00, ioaddr + MIICmd);
1160 writeb(phy_id, ioaddr + MIIPhyAddr);
1161 writeb(regnum, ioaddr + MIIRegAddr);
/* Load the 16-bit payload, then trigger the write (bit 0x20). */
1162 writew(value, ioaddr + MIIData);
1163 writeb(0x20, ioaddr + MIICmd); /* Trigger write. */
/*
 * rhine_open - net_device open() hook.
 * Resets the chip, requests the (shared) PCI interrupt, allocates the
 * DMA descriptor rings, programs the chip registers, starts the Tx
 * queue and arms the link-monitor timer (first tick after 2*HZ/100).
 * NOTE(review): extract is missing the error-return paths after
 * request_irq()/alloc_ring() and the final 'return 0'; code left
 * byte-identical.
 */
1167 static int rhine_open(struct net_device *dev)
1169 struct rhine_private *rp = netdev_priv(dev);
1170 long ioaddr = dev->base_addr;
1173 /* Reset the chip. */
1174 writew(CmdReset, ioaddr + ChipCmd);
/* Shared IRQ: the handler must tolerate other devices' interrupts. */
1176 i = request_irq(rp->pdev->irq, &rhine_interrupt, SA_SHIRQ, dev->name,
1182 printk(KERN_DEBUG "%s: rhine_open() irq %d.\n",
1183 dev->name, rp->pdev->irq);
/* Allocate and initialize the Rx/Tx descriptor rings. */
1185 i = alloc_ring(dev);
1190 wait_for_reset(dev, rp->quirks, dev->name);
1191 init_registers(dev);
1193 printk(KERN_DEBUG "%s: Done rhine_open(), status %4.4x "
1194 "MII status: %4.4x.\n",
1195 dev->name, readw(ioaddr + ChipCmd),
1196 mdio_read(dev, rp->phys[0], MII_BMSR));
1198 netif_start_queue(dev);
1200 /* Set the timer to check for link beat. */
1201 init_timer(&rp->timer);
1202 rp->timer.expires = jiffies + 2 * HZ/100;
1203 rp->timer.data = (unsigned long)dev;
1204 rp->timer.function = &rhine_timer; /* timer handler */
1205 add_timer(&rp->timer);
/*
 * rhine_check_duplex - track the negotiated duplex mode.
 * Reads the link partner ability (MII_LPA), ANDs it with our
 * advertised capabilities and, if the resulting duplex differs from
 * the cached one, toggles CmdFDuplex in the chip command register.
 * Bails out when media is user-forced or LPA reads 0xffff (no PHY
 * response).
 * NOTE(review): extract is missing braces, the 'duplex' declaration
 * and the early 'return'; code left byte-identical.
 */
1210 static void rhine_check_duplex(struct net_device *dev)
1212 struct rhine_private *rp = netdev_priv(dev);
1213 long ioaddr = dev->base_addr;
1214 int mii_lpa = mdio_read(dev, rp->phys[0], MII_LPA);
1215 int negotiated = mii_lpa & rp->mii_if.advertising;
1218 if (rp->mii_if.force_media || mii_lpa == 0xffff)
/* Full duplex if 100Base-TX FD bit set, or 10Base-T FD only. */
1220 duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
1221 if (rp->mii_if.full_duplex != duplex) {
1222 rp->mii_if.full_duplex = duplex;
1224 printk(KERN_INFO "%s: Setting %s-duplex based on "
1225 "MII #%d link partner capability of %4.4x.\n",
1226 dev->name, duplex ? "full" : "half",
1227 rp->phys[0], mii_lpa);
1229 rp->chip_cmd |= CmdFDuplex;
1231 rp->chip_cmd &= ~CmdFDuplex;
1232 writew(rp->chip_cmd, ioaddr + ChipCmd);
/*
 * rhine_timer - periodic link monitor (re-armed every 10 seconds).
 * Under rp->lock: re-checks duplex and mirrors the PHY's BMSR link
 * status into netif_carrier_on/off so IFF_RUNNING follows the link.
 * NOTE(review): extract is missing braces and the 'mii_status'
 * declaration; code left byte-identical.
 */
1237 static void rhine_timer(unsigned long data)
1239 struct net_device *dev = (struct net_device *)data;
1240 struct rhine_private *rp = netdev_priv(dev);
1241 long ioaddr = dev->base_addr;
1242 int next_tick = 10*HZ;
1246 printk(KERN_DEBUG "%s: VIA Rhine monitor tick, status %4.4x.\n",
1247 dev->name, readw(ioaddr + IntrStatus));
1250 spin_lock_irq (&rp->lock);
1252 rhine_check_duplex(dev);
1254 /* make IFF_RUNNING follow the MII status bit "Link established" */
1255 mii_status = mdio_read(dev, rp->phys[0], MII_BMSR);
/* Only act on a change of the link bit, and remember the new state. */
1256 if ((mii_status & BMSR_LSTATUS) != (rp->mii_status & BMSR_LSTATUS)) {
1257 if (mii_status & BMSR_LSTATUS)
1258 netif_carrier_on(dev);
1260 netif_carrier_off(dev);
1262 rp->mii_status = mii_status;
1264 spin_unlock_irq(&rp->lock);
/* Re-arm ourselves for the next tick. */
1266 rp->timer.expires = jiffies + next_tick;
1267 add_timer(&rp->timer);
/*
 * rhine_tx_timeout - watchdog hook for a stalled transmitter.
 * Logs chip and PHY status, then — with the IRQ disabled and rp->lock
 * held — resets the chip, reinitializes the descriptor rings and
 * registers, bumps tx_errors and restarts the queue.
 * NOTE(review): extract is missing braces and the ring-reinit calls
 * between "clear all descriptors" and the reset wait; code left
 * byte-identical.
 */
1271 static void rhine_tx_timeout(struct net_device *dev)
1273 struct rhine_private *rp = netdev_priv(dev);
1274 long ioaddr = dev->base_addr;
1276 printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
1277 "%4.4x, resetting...\n",
1278 dev->name, readw(ioaddr + IntrStatus),
1279 mdio_read(dev, rp->phys[0], MII_BMSR));
1281 /* protect against concurrent rx interrupts */
1282 disable_irq(rp->pdev->irq);
1284 spin_lock(&rp->lock);
1286 /* Reset the chip. */
1287 writew(CmdReset, ioaddr + ChipCmd);
1289 /* clear all descriptors */
1295 /* Reinitialize the hardware. */
1296 wait_for_reset(dev, rp->quirks, dev->name);
1297 init_registers(dev);
1299 spin_unlock(&rp->lock);
1300 enable_irq(rp->pdev->irq);
/* Mark activity so the watchdog doesn't refire immediately. */
1302 dev->trans_start = jiffies;
1303 rp->stats.tx_errors++;
1304 netif_wake_queue(dev);
/*
 * rhine_start_tx - hard_start_xmit hook: queue one skb for DMA.
 * Pads short frames to ETH_ZLEN, maps the skb for PCI DMA (or, for
 * Rhine-I quirks with misaligned/fragmented/checksum-offload data,
 * copies into a pre-allocated aligned bounce buffer), fills in the
 * descriptor, hands it to the chip with DescOwn and kicks TxDemand
 * unless a Tx error is pending.  Stops the queue when the ring is
 * full (TX_QUEUE_LEN outstanding).
 * NOTE(review): extract is missing braces, local declarations
 * ('entry', 'intr_status'), the skb_padto failure return, the drop
 * path's kfree/return and the final 'return 0'; code left
 * byte-identical.
 */
1307 static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
1309 struct rhine_private *rp = netdev_priv(dev);
1313 /* Caution: the write order is important here, set the field
1314 with the "ownership" bits last. */
1316 /* Calculate the next Tx descriptor entry. */
1317 entry = rp->cur_tx % TX_RING_SIZE;
/* Pad runt frames; skb_padto may return a new (or NULL) skb. */
1319 if (skb->len < ETH_ZLEN) {
1320 skb = skb_padto(skb, ETH_ZLEN);
1325 rp->tx_skbuff[entry] = skb;
1327 if ((rp->quirks & rqRhineI) &&
1328 (((long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_HW)) {
1329 /* Must use alignment buffer. */
1330 if (skb->len > PKT_BUF_SZ) {
1331 /* packet too long, drop it */
1333 rp->tx_skbuff[entry] = NULL;
1334 rp->stats.tx_dropped++;
/* Copy (and checksum, if needed) into the aligned bounce buffer;
   tx_skbuff_dma==0 marks "no pci_unmap needed" at scavenge time. */
1337 skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
1338 rp->tx_skbuff_dma[entry] = 0;
1339 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
1340 (rp->tx_buf[entry] -
/* Normal path: map the skb data directly for DMA. */
1343 rp->tx_skbuff_dma[entry] =
1344 pci_map_single(rp->pdev, skb->data, skb->len,
1346 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
1349 rp->tx_ring[entry].desc_length =
1350 cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
/* lock to protect the ring/ownership hand-off against the ISR */
1353 spin_lock_irq(&rp->lock);
1355 rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1360 /* Non-x86 Todo: explicitly flush cache lines here. */
1363 * Wake the potentially-idle transmit channel unless errors are
1364 * pending (the ISR must sort them out first).
1366 intr_status = get_intr_status(dev);
1367 if ((intr_status & IntrTxErrSummary) == 0) {
1368 writew(CmdTxDemand | rp->chip_cmd, dev->base_addr + ChipCmd);
1372 if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
1373 netif_stop_queue(dev);
1375 dev->trans_start = jiffies;
1377 spin_unlock_irq(&rp->lock);
1380 printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
1381 dev->name, rp->cur_tx-1, entry);
1386 /* The interrupt handler does all of the Rx thread work and cleans up
1387 after the Tx thread. */
/*
 * rhine_interrupt - shared-IRQ handler.
 * Loops while the chip reports pending events: acknowledges the
 * status bits (IntrStatus2 holds the Tx-descriptor-race bit on newer
 * chips), dispatches Rx work, scavenges Tx on completion/error (after
 * waiting for the Tx engine to stop on errors), and routes abnormal
 * events to rhine_error().  Bails out after max_interrupt_work
 * iterations to avoid livelock.
 * NOTE(review): extract is missing braces, the 'ioaddr'/'intr_status'
 * /'handled'/'cnt' declarations and the rhine_rx()/rhine_tx() call
 * lines; code left byte-identical.
 */
1388 static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *rgs)
1390 struct net_device *dev = dev_instance;
1393 int boguscnt = max_interrupt_work;
1396 ioaddr = dev->base_addr;
1398 while ((intr_status = get_intr_status(dev))) {
1401 /* Acknowledge all of the current interrupt sources ASAP. */
1402 if (intr_status & IntrTxDescRace)
1403 writeb(0x08, ioaddr + IntrStatus2);
1404 writew(intr_status & 0xffff, ioaddr + IntrStatus);
1408 printk(KERN_DEBUG "%s: Interrupt, status %8.8x.\n",
1409 dev->name, intr_status);
/* Any Rx-related event: run the receive path. */
1411 if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
1412 IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf))
1415 if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
1416 if (intr_status & IntrTxErrSummary) {
1418 /* Avoid scavenging before Tx engine turned off */
1419 while ((readw(ioaddr+ChipCmd) & CmdTxOn) && --cnt)
1421 if (debug > 2 && !cnt)
1422 printk(KERN_WARNING "%s: "
1423 "rhine_interrupt() Tx engine"
1424 "still on.\n", dev->name);
1429 /* Abnormal error summary/uncommon events handlers. */
1430 if (intr_status & (IntrPCIErr | IntrLinkChange |
1431 IntrStatsMax | IntrTxError | IntrTxAborted |
1432 IntrTxUnderrun | IntrTxDescRace))
1433 rhine_error(dev, intr_status);
/* Bound the work done in one invocation. */
1435 if (--boguscnt < 0) {
1436 printk(KERN_WARNING "%s: Too much work at interrupt, "
1438 dev->name, intr_status);
1444 printk(KERN_DEBUG "%s: exiting interrupt, status=%8.8x.\n",
1445 dev->name, readw(ioaddr + IntrStatus));
1446 return IRQ_RETVAL(handled);
1449 /* This routine is logically part of the interrupt handler, but isolated
   for clarity and better register allocation (comment truncated in
   this extract). */
/*
 * rhine_tx - scavenge completed Tx descriptors.
 * Under rp->lock: walks dirty_tx..cur_tx, stopping at a descriptor the
 * chip still owns; accounts errors/collisions/bytes, keeps the skb and
 * re-arms the descriptor on FIFO underruns (retry), otherwise unmaps
 * the DMA buffer (tx_skbuff_dma==0 means a bounce buffer was used) and
 * frees the skb.  Wakes the queue once enough slots are free.
 * NOTE(review): extract is missing braces and several statement lines
 * (printk arguments, 'else' keywords); code left byte-identical.
 */
1451 static void rhine_tx(struct net_device *dev)
1453 struct rhine_private *rp = netdev_priv(dev);
1454 int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
1456 spin_lock(&rp->lock);
1458 /* find and cleanup dirty tx descriptors */
1459 while (rp->dirty_tx != rp->cur_tx) {
1460 txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
1462 printk(KERN_DEBUG " Tx scavenge %d status %8.8x.\n",
/* Chip still owns this descriptor: stop scavenging here. */
1464 if (txstatus & DescOwn)
1466 if (txstatus & 0x8000) {
1468 printk(KERN_DEBUG "%s: Transmit error, "
1469 "Tx status %8.8x.\n",
1470 dev->name, txstatus);
1471 rp->stats.tx_errors++;
1472 if (txstatus & 0x0400) rp->stats.tx_carrier_errors++;
1473 if (txstatus & 0x0200) rp->stats.tx_window_errors++;
1474 if (txstatus & 0x0100) rp->stats.tx_aborted_errors++;
1475 if (txstatus & 0x0080) rp->stats.tx_heartbeat_errors++;
1476 if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
1477 (txstatus & 0x0800) || (txstatus & 0x1000)) {
1478 rp->stats.tx_fifo_errors++;
1479 rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1480 break; /* Keep the skb - we try again */
1482 /* Transmitter restarted in 'abnormal' handler. */
/* Success path: collision counter location differs per chip rev. */
1484 if (rp->quirks & rqRhineI)
1485 rp->stats.collisions += (txstatus >> 3) & 0x0F;
1487 rp->stats.collisions += txstatus & 0x0F;
1489 printk(KERN_DEBUG "collisions: %1.1x:%1.1x\n",
1490 (txstatus >> 3) & 0xF,
1492 rp->stats.tx_bytes += rp->tx_skbuff[entry]->len;
1493 rp->stats.tx_packets++;
1495 /* Free the original skb. */
1496 if (rp->tx_skbuff_dma[entry]) {
1497 pci_unmap_single(rp->pdev,
1498 rp->tx_skbuff_dma[entry],
1499 rp->tx_skbuff[entry]->len,
1502 dev_kfree_skb_irq(rp->tx_skbuff[entry]);
1503 rp->tx_skbuff[entry] = NULL;
1504 entry = (++rp->dirty_tx) % TX_RING_SIZE;
/* Restart the queue once at least 4 slots are free again. */
1506 if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
1507 netif_wake_queue(dev);
1509 spin_unlock(&rp->lock);
1512 /* This routine is logically part of the interrupt handler, but isolated
1513 for clarity and better register allocation. */
/*
 * rhine_rx - process received frames and refill the Rx ring.
 * Walks descriptors the chip no longer owns: accounts error frames
 * (length/FIFO/frame/CRC), otherwise passes the packet up — copying
 * into a fresh small skb when below rx_copybreak (with DMA sync
 * around the copy), or unmapping and handing off the ring skb
 * directly.  Afterwards refills empty ring slots with newly mapped
 * skbs and pre-emptively restarts the Rx engine.
 * NOTE(review): extract is missing braces, several printk argument
 * lines, the netif_rx() call and the break on allocation failure
 * bodies; code left byte-identical.
 */
1514 static void rhine_rx(struct net_device *dev)
1516 struct rhine_private *rp = netdev_priv(dev);
1517 int entry = rp->cur_rx % RX_RING_SIZE;
1518 int boguscnt = rp->dirty_rx + RX_RING_SIZE - rp->cur_rx;
1521 printk(KERN_DEBUG "%s: rhine_rx(), entry %d status %8.8x.\n",
1523 le32_to_cpu(rp->rx_head_desc->rx_status));
1526 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1527 while (!(rp->rx_head_desc->rx_status & cpu_to_le32(DescOwn))) {
1528 struct rx_desc *desc = rp->rx_head_desc;
1529 u32 desc_status = le32_to_cpu(desc->rx_status);
1530 int data_size = desc_status >> 16;
1533 printk(KERN_DEBUG " rhine_rx() status is %8.8x.\n",
/* Anything other than a whole, error-free packet is accounted
   and dropped here. */
1537 if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
1538 if ((desc_status & RxWholePkt) != RxWholePkt) {
1539 printk(KERN_WARNING "%s: Oversized Ethernet "
1540 "frame spanned multiple buffers, entry "
1541 "%#x length %d status %8.8x!\n",
1542 dev->name, entry, data_size,
1544 printk(KERN_WARNING "%s: Oversized Ethernet "
1545 "frame %p vs %p.\n", dev->name,
1546 rp->rx_head_desc, &rp->rx_ring[entry]);
1547 rp->stats.rx_length_errors++;
1548 } else if (desc_status & RxErr) {
1549 /* There was a error. */
1551 printk(KERN_DEBUG " rhine_rx() Rx "
1552 "error was %8.8x.\n",
1554 rp->stats.rx_errors++;
1555 if (desc_status & 0x0030) rp->stats.rx_length_errors++;
1556 if (desc_status & 0x0048) rp->stats.rx_fifo_errors++;
1557 if (desc_status & 0x0004) rp->stats.rx_frame_errors++;
1558 if (desc_status & 0x0002) {
1559 /* this can also be updated outside the interrupt handler */
1560 spin_lock(&rp->lock);
1561 rp->stats.rx_crc_errors++;
1562 spin_unlock(&rp->lock);
1566 struct sk_buff *skb;
1567 /* Length should omit the CRC */
1568 int pkt_len = data_size - 4;
1570 /* Check if the packet is long enough to accept without
1571 copying to a minimally-sized skbuff. */
1572 if (pkt_len < rx_copybreak &&
1573 (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1575 skb_reserve(skb, 2); /* 16 byte align the IP header */
1576 pci_dma_sync_single_for_cpu(rp->pdev,
1577 rp->rx_skbuff_dma[entry],
1579 PCI_DMA_FROMDEVICE);
1581 /* *_IP_COPYSUM isn't defined anywhere and
1582 eth_copy_and_sum is memcpy for all archs so
1583 this is kind of pointless right now
1585 eth_copy_and_sum(skb,
1586 rp->rx_skbuff[entry]->tail,
1588 skb_put(skb, pkt_len);
1589 pci_dma_sync_single_for_device(rp->pdev,
1590 rp->rx_skbuff_dma[entry],
1592 PCI_DMA_FROMDEVICE);
/* Big packet: hand the ring skb straight up and unmap it. */
1594 skb = rp->rx_skbuff[entry];
1596 printk(KERN_ERR "%s: Inconsistent Rx "
1597 "descriptor chain.\n",
1601 rp->rx_skbuff[entry] = NULL;
1602 skb_put(skb, pkt_len);
1603 pci_unmap_single(rp->pdev,
1604 rp->rx_skbuff_dma[entry],
1606 PCI_DMA_FROMDEVICE);
1608 skb->protocol = eth_type_trans(skb, dev);
1610 dev->last_rx = jiffies;
1611 rp->stats.rx_bytes += pkt_len;
1612 rp->stats.rx_packets++;
1614 entry = (++rp->cur_rx) % RX_RING_SIZE;
1615 rp->rx_head_desc = &rp->rx_ring[entry];
1618 /* Refill the Rx ring buffers. */
1619 for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
1620 struct sk_buff *skb;
1621 entry = rp->dirty_rx % RX_RING_SIZE;
1622 if (rp->rx_skbuff[entry] == NULL) {
1623 skb = dev_alloc_skb(rp->rx_buf_sz);
1624 rp->rx_skbuff[entry] = skb;
1626 break; /* Better luck next round. */
1627 skb->dev = dev; /* Mark as being used by this device. */
1628 rp->rx_skbuff_dma[entry] =
1629 pci_map_single(rp->pdev, skb->tail,
1631 PCI_DMA_FROMDEVICE);
1632 rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
/* Hand the refilled descriptor back to the chip. */
1634 rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
1637 /* Pre-emptively restart Rx engine. */
1638 writew(readw(dev->base_addr + ChipCmd) | CmdRxOn | CmdRxDemand,
1639 dev->base_addr + ChipCmd);
1643 * Clears the "tally counters" for CRC errors and missed frames(?).
1644 * It has been reported that some chips need a write of 0 to clear
1645 * these, for others the counters are set to 1 when written to and
1646 * instead cleared when read. So we clear them both ways ...
/* NOTE(review): opening/closing braces not visible in this extract;
 * code left byte-identical.  Order matters: write 0 first, then read
 * both counters to cover read-to-clear chips. */
1648 static inline void clear_tally_counters(const long ioaddr)
1650 writel(0, ioaddr + RxMissed);
1651 readw(ioaddr + RxCRCErrs);
1652 readw(ioaddr + RxMissed);
/*
 * rhine_restart_tx - resume transmission after an error was handled.
 * If no new Tx error summary is pending, reloads TxRingPtr with the
 * first unscavenged descriptor ("we know better than the chip where
 * it should continue") and issues TxDemand; otherwise just logs —
 * the ISR will come back around.
 * NOTE(review): extract is missing the 'intr_status' declaration, the
 * CmdTxOn re-enable line and braces; code left byte-identical.
 */
1655 static void rhine_restart_tx(struct net_device *dev) {
1656 struct rhine_private *rp = netdev_priv(dev);
1657 long ioaddr = dev->base_addr;
1658 int entry = rp->dirty_tx % TX_RING_SIZE;
1662 * If new errors occured, we need to sort them out before doing Tx.
1663 * In that case the ISR will be back here RSN anyway.
1665 intr_status = get_intr_status(dev);
1667 if ((intr_status & IntrTxErrSummary) == 0) {
1669 /* We know better than the chip where it should continue. */
1670 writel(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
1671 ioaddr + TxRingPtr);
1673 writew(CmdTxDemand | rp->chip_cmd, ioaddr + ChipCmd);
1677 /* This should never happen */
1679 printk(KERN_WARNING "%s: rhine_restart_tx() "
1680 "Another error occured %8.8x.\n",
1681 dev->name, intr_status);
/*
 * rhine_error - handle abnormal/uncommon interrupt events.
 * Under rp->lock: on link change restarts autonegotiation (Rhine-I)
 * or re-checks duplex; on stats overflow latches the tally counters;
 * on Tx underrun / unspecified Tx error raises the Tx FIFO threshold
 * (up to 0xE0) and, for any Tx-stopping condition, restarts the
 * transmitter.  Unrecognized status bits are logged as "Something
 * Wicked".
 * NOTE(review): extract is missing braces, some printk argument lines
 * and parts of the final status masks; code left byte-identical.
 */
1686 static void rhine_error(struct net_device *dev, int intr_status)
1688 struct rhine_private *rp = netdev_priv(dev);
1689 long ioaddr = dev->base_addr;
1691 spin_lock(&rp->lock);
1693 if (intr_status & (IntrLinkChange)) {
1694 if (readb(ioaddr + MIIStatus) & 0x02) {
1695 /* Link failed, restart autonegotiation. */
1696 if (rp->quirks & rqRhineI)
1697 mdio_write(dev, rp->phys[0], MII_BMCR, 0x3300);
1699 rhine_check_duplex(dev);
1701 printk(KERN_ERR "%s: MII status changed: "
1702 "Autonegotiation advertising %4.4x partner "
1703 "%4.4x.\n", dev->name,
1704 mdio_read(dev, rp->phys[0], MII_ADVERTISE),
1705 mdio_read(dev, rp->phys[0], MII_LPA));
/* Hardware tally counters overflowed: fold them into stats. */
1707 if (intr_status & IntrStatsMax) {
1708 rp->stats.rx_crc_errors += readw(ioaddr + RxCRCErrs);
1709 rp->stats.rx_missed_errors += readw(ioaddr + RxMissed);
1710 clear_tally_counters(ioaddr);
1712 if (intr_status & IntrTxAborted) {
1714 printk(KERN_INFO "%s: Abort %8.8x, frame dropped.\n",
1715 dev->name, intr_status);
/* Underrun: bump the Tx FIFO threshold by 0x20, capped at 0xE0. */
1717 if (intr_status & IntrTxUnderrun) {
1718 if (rp->tx_thresh < 0xE0)
1719 writeb(rp->tx_thresh += 0x20, ioaddr + TxConfig);
1721 printk(KERN_INFO "%s: Transmitter underrun, Tx "
1722 "threshold now %2.2x.\n",
1723 dev->name, rp->tx_thresh);
1725 if (intr_status & IntrTxDescRace) {
1727 printk(KERN_INFO "%s: Tx descriptor write-back race.\n",
/* TxError without a more specific cause: treat like an underrun. */
1730 if ((intr_status & IntrTxError) &&
1731 (intr_status & (IntrTxAborted |
1732 IntrTxUnderrun | IntrTxDescRace)) == 0) {
1733 if (rp->tx_thresh < 0xE0) {
1734 writeb(rp->tx_thresh += 0x20, ioaddr + TxConfig);
1737 printk(KERN_INFO "%s: Unspecified error. Tx "
1738 "threshold now %2.2x.\n",
1739 dev->name, rp->tx_thresh);
1741 if (intr_status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace |
1743 rhine_restart_tx(dev);
1745 if (intr_status & ~(IntrLinkChange | IntrStatsMax | IntrTxUnderrun |
1746 IntrTxError | IntrTxAborted | IntrNormalSummary |
1749 printk(KERN_ERR "%s: Something Wicked happened! "
1750 "%8.8x.\n", dev->name, intr_status);
1753 spin_unlock(&rp->lock);
/*
 * rhine_get_stats - get_stats hook.
 * Folds the hardware tally counters (CRC errors, missed frames) into
 * the software stats under rp->lock, clears the counters, and returns
 * the stats structure.
 * NOTE(review): the 'return &rp->stats;' line and braces are not
 * visible in this extract; code left byte-identical.
 */
1758 struct rhine_private *rp = netdev_priv(dev);
1759 long ioaddr = dev->base_addr;
1760 unsigned long flags;
1762 spin_lock_irqsave(&rp->lock, flags);
1763 rp->stats.rx_crc_errors += readw(ioaddr + RxCRCErrs);
1764 rp->stats.rx_missed_errors += readw(ioaddr + RxMissed);
1765 clear_tally_counters(ioaddr);
1766 spin_unlock_irqrestore(&rp->lock, flags);
/*
 * rhine_set_rx_mode - program the receive filter.
 * Promiscuous mode opens the filter completely; too many multicast
 * entries (or IFF_ALLMULTI) accepts all multicasts; otherwise a
 * 64-bit CRC hash filter is built from the multicast list.  Finally
 * RxConfig is written with the Rx threshold plus the mode bits.
 * NOTE(review): extract is missing the lines that assign 'rx_mode'
 * in each branch, the 'i' declaration and braces; code left
 * byte-identical.
 */
1771 static void rhine_set_rx_mode(struct net_device *dev)
1773 struct rhine_private *rp = netdev_priv(dev);
1774 long ioaddr = dev->base_addr;
1775 u32 mc_filter[2]; /* Multicast hash filter */
1776 u8 rx_mode; /* Note: 0x02=accept runt, 0x01=accept errs */
1778 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1779 /* Unconditionally log net taps. */
1780 printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
1783 writel(0xffffffff, ioaddr + MulticastFilter0);
1784 writel(0xffffffff, ioaddr + MulticastFilter1);
1785 } else if ((dev->mc_count > multicast_filter_limit)
1786 || (dev->flags & IFF_ALLMULTI)) {
1787 /* Too many to match, or accept all multicasts. */
1788 writel(0xffffffff, ioaddr + MulticastFilter0);
1789 writel(0xffffffff, ioaddr + MulticastFilter1);
1792 struct dev_mc_list *mclist;
/* Build the 64-bit hash filter from the top 6 CRC bits of each
   multicast address. */
1794 memset(mc_filter, 0, sizeof(mc_filter));
1795 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1796 i++, mclist = mclist->next) {
1797 int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
1799 mc_filter[bit_nr >> 5] |= cpu_to_le32(1 << (bit_nr & 31));
1801 writel(mc_filter[0], ioaddr + MulticastFilter0);
1802 writel(mc_filter[1], ioaddr + MulticastFilter1);
1805 writeb(rp->rx_thresh | rx_mode, ioaddr + RxConfig);
/* ethtool get_drvinfo: report driver name, version and PCI bus id. */
1808 static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1810 struct rhine_private *rp = netdev_priv(dev);
1812 strcpy(info->driver, DRV_NAME);
1813 strcpy(info->version, DRV_VERSION);
1814 strcpy(info->bus_info, pci_name(rp->pdev));
/* ethtool get_settings: delegate to the generic MII helper under
 * rp->lock.  NOTE(review): the 'return rc;' line is not visible in
 * this extract; code left byte-identical. */
1817 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1819 struct rhine_private *rp = netdev_priv(dev);
1822 spin_lock_irq(&rp->lock);
1823 rc = mii_ethtool_gset(&rp->mii_if, cmd);
1824 spin_unlock_irq(&rp->lock);
/* ethtool set_settings: delegate to the generic MII helper under
 * rp->lock.  NOTE(review): the 'return rc;' line is not visible in
 * this extract; code left byte-identical. */
1829 static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1831 struct rhine_private *rp = netdev_priv(dev);
1834 spin_lock_irq(&rp->lock);
1835 rc = mii_ethtool_sset(&rp->mii_if, cmd);
1836 spin_unlock_irq(&rp->lock);
/* ethtool nway_reset: restart autonegotiation via the MII library. */
1841 static int netdev_nway_reset(struct net_device *dev)
1843 struct rhine_private *rp = netdev_priv(dev);
1845 return mii_nway_restart(&rp->mii_if);
/* ethtool get_link: report link state via the MII library. */
1848 static u32 netdev_get_link(struct net_device *dev)
1850 struct rhine_private *rp = netdev_priv(dev);
1852 return mii_link_ok(&rp->mii_if);
/* ethtool get_msglevel.  NOTE(review): the body (presumably returning
 * the module 'debug' level) is not visible in this extract. */
1855 static u32 netdev_get_msglevel(struct net_device *dev)
/* ethtool set_msglevel.  NOTE(review): the body (presumably storing
 * 'value' into the module 'debug' level) is not visible in this
 * extract. */
1860 static void netdev_set_msglevel(struct net_device *dev, u32 value)
/* ethtool operations table wired into the net_device at probe time.
 * NOTE(review): the closing '};' line is not visible in this extract. */
1865 static struct ethtool_ops netdev_ethtool_ops = {
1866 .get_drvinfo = netdev_get_drvinfo,
1867 .get_settings = netdev_get_settings,
1868 .set_settings = netdev_set_settings,
1869 .nway_reset = netdev_nway_reset,
1870 .get_link = netdev_get_link,
1871 .get_msglevel = netdev_get_msglevel,
1872 .set_msglevel = netdev_set_msglevel,
1873 .get_sg = ethtool_op_get_sg,
1874 .get_tx_csum = ethtool_op_get_tx_csum,
/* do_ioctl hook: forward MII ioctls to the generic handler under
 * rp->lock; requires the interface to be up.
 * NOTE(review): the -EINVAL (or similar) return after the
 * netif_running() check and the final 'return rc;' are not visible
 * in this extract; code left byte-identical. */
1877 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1879 struct rhine_private *rp = netdev_priv(dev);
1882 if (!netif_running(dev))
1885 spin_lock_irq(&rp->lock);
1886 rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
1887 spin_unlock_irq(&rp->lock);
/*
 * rhine_close - net_device stop() hook.
 * Kills the link-monitor timer, stops the queue, switches the MAC to
 * loopback (avoiding hardware races), masks all interrupts, stops the
 * Tx/Rx engines and releases the IRQ.
 * NOTE(review): the ring-freeing calls and the final 'return 0'
 * expected after free_irq() are not visible in this extract; code
 * left byte-identical.
 */
1892 static int rhine_close(struct net_device *dev)
1894 long ioaddr = dev->base_addr;
1895 struct rhine_private *rp = netdev_priv(dev);
/* Timer must be fully stopped before tearing down the device. */
1897 del_timer_sync(&rp->timer);
1899 spin_lock_irq(&rp->lock);
1901 netif_stop_queue(dev);
1904 printk(KERN_DEBUG "%s: Shutting down ethercard, "
1905 "status was %4.4x.\n",
1906 dev->name, readw(ioaddr + ChipCmd));
1908 /* Switch to loopback mode to avoid hardware races. */
1909 writeb(rp->tx_thresh | 0x02, ioaddr + TxConfig);
1911 /* Disable interrupts by clearing the interrupt mask. */
1912 writew(0x0000, ioaddr + IntrEnable);
1914 /* Stop the chip's Tx and Rx processes. */
1915 writew(CmdStop, ioaddr + ChipCmd);
1917 spin_unlock_irq(&rp->lock);
1919 free_irq(rp->pdev->irq, dev);
/*
 * rhine_remove_one - PCI remove() hook: tear down one device.
 * Unregisters the netdev, releases PCI regions, unmaps the I/O
 * memory, disables the PCI device and clears the drvdata pointer.
 * NOTE(review): a free_netdev()/kfree of 'dev' is not visible in this
 * extract — presumably among the missing lines; code left
 * byte-identical.
 */
1928 static void __devexit rhine_remove_one(struct pci_dev *pdev)
1930 struct net_device *dev = pci_get_drvdata(pdev);
1932 unregister_netdev(dev);
1934 pci_release_regions(pdev);
1937 iounmap((char *)(dev->base_addr));
1941 pci_disable_device(pdev);
1942 pci_set_drvdata(pdev, NULL);
/* PCI driver glue: device-id table plus probe/remove entry points.
 * NOTE(review): the closing '};' line is not visible in this extract. */
1946 static struct pci_driver rhine_driver = {
1947 .name = "via-rhine",
1948 .id_table = rhine_pci_tbl,
1949 .probe = rhine_init_one,
1950 .remove = __devexit_p(rhine_remove_one),
/* Module init: register the PCI driver (probing happens per device).
 * NOTE(review): the version printk referenced by the comment below is
 * not visible in this extract. */
1954 static int __init rhine_init(void)
1956 /* when a module, this is printed whether or not devices are found in probe */
1960 return pci_module_init(&rhine_driver);
/* Module exit: unregister the PCI driver (invokes remove per device). */
1964 static void __exit rhine_cleanup(void)
1966 pci_unregister_driver(&rhine_driver);
1970 module_init(rhine_init);
1971 module_exit(rhine_cleanup);