1 /* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
3 Written 1998-2001 by Donald Becker.
5 Current Maintainer: Roger Luethi <rl@hellgate.ch>
7 This software may be used and distributed according to the terms of
8 the GNU General Public License (GPL), incorporated herein by reference.
9 Drivers based on or derived from this code fall under the GPL and must
10 retain the authorship, copyright and license notice. This file is not
11 a complete program and may only be used when the entire operating
12 system is licensed under the GPL.
14 This driver is designed for the VIA VT86C100A Rhine-I.
15 It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
16 and management NIC 6105M).
18 The author may be reached as becker@scyld.com, or C/O
19 Scyld Computing Corporation
20 410 Severn Ave., Suite 210
24 This driver contains some changes from the original Donald Becker
25 version. He may or may not be interested in bug reports on this
26 code. You can find his versions at:
27 http://www.scyld.com/network/via-rhine.html
30 Linux kernel version history:
33 - Jeff Garzik: softnet 'n stuff
36 - Justin Guyett: softnet and locking fixes
37 - Jeff Garzik: use PCI interface
40 - Urban Widmark: minor cleanups, merges from Becker 1.03a/1.04 versions
43 - Urban Widmark: use PCI DMA interface (with thanks to the eepro100.c
44 code) update "Theory of Operation" with
45 softnet/locking changes
46 - Dave Miller: PCI DMA and endian fixups
47 - Jeff Garzik: MOD_xxx race fixes, updated PCI resource allocation
50 - Urban Widmark: fix gcc 2.95.2 problem and
51 remove writel's to fixed address 0x7c
54 - Urban Widmark: mdio locking, bounce buffer changes
55 merges from Beckers 1.05 version
56 added netif_running_on/off support
59 - Urban Widmark: merges from Beckers 1.08b version (VT6102 + mdio)
60 set netif_running_on/off on startup, del_timer_sync
63 - Manfred Spraul: added reset into tx_timeout
66 - Urban Widmark: merges from Beckers 1.10 version
67 (media selection + eeprom reload)
68 - David Vrabel: merges from D-Link "1.11" version
69 (disable WOL and PME on startup)
72 - Manfred Spraul: use "singlecopy" for unaligned buffers
73 don't allocate bounce buffers for !ReqTxAlign cards
76 - David Woodhouse: Set dev->base_addr before the first time we call
77 wait_for_reset(). It's a lot happier that way.
78 Free np->tx_bufs only if we actually allocated it.
81 - Martin Eriksson: Allow Memory-Mapped IO to be enabled.
85 - Replace some MII-related magic numbers with constants
88 - fixes comments for Rhine-III
89 - removes W_MAX_TIMEOUT (unused)
90 - adds HasDavicomPhy for Rhine-I (basis: linuxfet driver; my card
91 is R-I and has Davicom chip, flag is referenced in kernel driver)
92 - sends chip_id as a parameter to wait_for_reset since np is not
93 initialized on first call
94 - changes mmio "else if (chip_id==VT6102)" to "else" so it will work
95 for Rhine-III's (documentation says same bit is correct)
96 - transmit frame queue message is off by one - fixed
97 - adds IntrNormalSummary to "Something Wicked" exclusion list
98 so normal interrupts will not trigger the message (src: Donald Becker)
100 - show confused chip where to continue after Tx error
101 - location of collision counter is chip specific
102 - allow selecting backoff algorithm (module parameter)
105 - Use new MII lib helper generic_mii_ioctl
107 LK1.1.16 (Roger Luethi)
109 - Handle Tx buffer underrun
110 - Fix bugs in full duplex handling
111 - New reset code uses "force reset" cmd on Rhine-II
114 LK1.1.17 (Roger Luethi)
115 - Fix race in via_rhine_start_tx()
116 - On errors, wait for Tx engine to turn off before scavenging
117 - Handle Tx descriptor write-back race on Rhine-II
118 - Force flushing for PCI posted writes
119 - More reset code changes
121 LK1.1.18 (Roger Luethi)
122 - No filtering multicast in promisc mode (Edward Peng)
123 - Fix for Rhine-I Tx timeouts
125 LK1.1.19 (Roger Luethi)
126 - Increase Tx threshold for unspecified errors
130 #define DRV_NAME "via-rhine"
131 #define DRV_VERSION "1.1.20-2.6"
132 #define DRV_RELDATE "May-23-2004"
135 /* A few user-configurable values.
136 These may be modified when a driver module is loaded. */
138 static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
139 static int max_interrupt_work = 20;
141 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
142 Setting to > 1518 effectively disables this feature. */
143 static int rx_copybreak;
145 /* Select a backoff algorithm (Ethernet capture effect) */
148 /* Used to pass the media type, etc.
149 Both 'options[]' and 'full_duplex[]' should exist for driver
151 The media type is usually passed in 'options[]'.
152 The default is autonegotiation for speed and duplex.
153 This should rarely be overridden.
154 Use option values 0x10/0x20 for 10Mbps, 0x100,0x200 for 100Mbps.
155 Use option values 0x10 and 0x100 for forcing half duplex fixed speed.
156 Use option values 0x20 and 0x200 for forcing full duplex operation.
158 #define MAX_UNITS 8 /* More are supported, limit only on options */
159 static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
160 static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
162 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
163 The Rhine has a 64 element 8390-like hash table. */
164 static const int multicast_filter_limit = 32;
167 /* Operational parameters that are set at compile time. */
169 /* Keep the ring sizes a power of two for compile efficiency.
170 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
171 Making the Tx ring too large decreases the effectiveness of channel
172 bonding and packet priority.
173 There are no ill effects from too-large receive rings. */
174 #define TX_RING_SIZE 16
175 #define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
176 #define RX_RING_SIZE 16
179 /* Operational parameters that usually are not changed. */
181 /* Time in jiffies before concluding the transmitter is hung. */
182 #define TX_TIMEOUT (2*HZ)
184 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
186 #include <linux/module.h>
187 #include <linux/kernel.h>
188 #include <linux/string.h>
189 #include <linux/timer.h>
190 #include <linux/errno.h>
191 #include <linux/ioport.h>
192 #include <linux/slab.h>
193 #include <linux/interrupt.h>
194 #include <linux/pci.h>
195 #include <linux/netdevice.h>
196 #include <linux/etherdevice.h>
197 #include <linux/skbuff.h>
198 #include <linux/init.h>
199 #include <linux/delay.h>
200 #include <linux/mii.h>
201 #include <linux/ethtool.h>
202 #include <linux/crc32.h>
203 #include <asm/processor.h> /* Processor type for cache alignment. */
204 #include <asm/bitops.h>
207 #include <asm/uaccess.h>
209 /* These identify the driver base version and may not be removed. */
210 static char version[] __devinitdata =
211 KERN_INFO DRV_NAME ".c:v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n";
213 static char shortname[] = DRV_NAME;
216 /* This driver was written to use PCI memory space. Some early versions
217 of the Rhine may only work correctly with I/O space accesses. */
218 #ifdef CONFIG_VIA_RHINE_MMIO
235 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
236 MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
237 MODULE_LICENSE("GPL");
239 MODULE_PARM(max_interrupt_work, "i");
240 MODULE_PARM(debug, "i");
241 MODULE_PARM(rx_copybreak, "i");
242 MODULE_PARM(backoff, "i");
243 MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
244 MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
245 MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
246 MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
247 MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
248 MODULE_PARM_DESC(backoff, "VIA Rhine: Bits 0-3: backoff algorithm");
249 MODULE_PARM_DESC(options, "VIA Rhine: Bits 0-3: media type, bit 17: full duplex");
250 MODULE_PARM_DESC(full_duplex, "VIA Rhine full duplex setting(s) (1)");
255 I. Board Compatibility
257 This driver is designed for the VIA 86c100A Rhine-I PCI Fast Ethernet
260 II. Board-specific settings
262 Boards with this chip are functional only in a bus-master PCI slot.
264 Many operational settings are loaded from the EEPROM to the Config word at
265 offset 0x78. For most of these settings, this driver assumes that they are
267 If this driver is compiled to use PCI memory space operations the EEPROM
268 must be configured to enable memory ops.
270 III. Driver operation
274 This driver uses two statically allocated fixed-size descriptor lists
275 formed into rings by a branch from the final descriptor to the beginning of
276 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
278 IIIb/c. Transmit/Receive Structure
280 This driver attempts to use a zero-copy receive and transmit scheme.
282 Alas, all data buffers are required to start on a 32 bit boundary, so
283 the driver must often copy transmit packets into bounce buffers.
285 The driver allocates full frame size skbuffs for the Rx ring buffers at
286 open() time and passes the skb->data field to the chip as receive data
287 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
288 a fresh skbuff is allocated and the frame is copied to the new skbuff.
289 When the incoming frame is larger, the skbuff is passed directly up the
290 protocol stack. Buffers consumed this way are replaced by newly allocated
291 skbuffs in the last phase of rhine_rx().
293 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
294 using a full-sized skbuff for small frames vs. the copying costs of larger
295 frames. New boards are typically used in generously configured machines
296 and the underfilled buffers have negligible impact compared to the benefit of
297 a single allocation size, so the default value of zero results in never
298 copying packets. When copying is done, the cost is usually mitigated by using
299 a combined copy/checksum routine. Copying also preloads the cache, which is
300 most useful with small frames.
302 Since the VIA chips are only able to transfer data to buffers on 32 bit
303 boundaries, the IP header at offset 14 in an ethernet frame isn't
304 longword aligned for further processing. Copying these unaligned buffers
305 has the beneficial effect of 16-byte aligning the IP header.
307 IIId. Synchronization
309 The driver runs as two independent, single-threaded flows of control. One
310 is the send-packet routine, which enforces single-threaded use by the
311 dev->priv->lock spinlock. The other thread is the interrupt handler, which
312 is single threaded by the hardware and interrupt handling software.
314 The send packet thread has partial control over the Tx ring. It locks the
315 dev->priv->lock whenever it's queuing a Tx packet. If the next slot in the ring
316 is not available it stops the transmit queue by calling netif_stop_queue.
318 The interrupt handler has exclusive control over the Rx ring and records stats
319 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
320 empty by incrementing the dirty_tx mark. If at least half of the entries in
321 the Rx ring are available the transmit queue is woken up if it was stopped.
327 Preliminary VT86C100A manual from http://www.via.com.tw/
328 http://www.scyld.com/expert/100mbps.html
329 http://www.scyld.com/expert/NWay.html
330 ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
331 ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF
336 The VT86C100A manual is not reliable information.
337 The 3043 chip does not handle unaligned transmit or receive buffers, resulting
338 in significant performance degradation for bounce buffer copies on transmit
339 and unaligned IP headers on receive.
340 The chip does not pad to minimum transmit length.
345 /* This table drives the PCI probe routines. It's mostly boilerplate in all
346 of the drivers, and will likely be provided by some future kernel.
347 Note the matching code -- the first table entry matches all 56** cards but
348 second only the 1234 card.
/*
 * PCI revision codes that tell the Rhine family members apart.
 * NOTE(review): reconstructed around the entries visible in the corrupted
 * listing; only VT86C100A, VT6102, VT6105, VT6105_B0 and VT6105M are
 * referenced by the probe code below -- confirm the rest against the
 * VIA datasheets.
 */
enum rhine_revs {
	VT86C100A	= 0x00,
	VT6102		= 0x40,
	VT8231		= 0x50,	/* Integrated MAC */
	VT8233		= 0x60,	/* Integrated MAC */
	VT8235		= 0x74,	/* Integrated MAC */
	VT8237		= 0x78,	/* Integrated MAC */
	VT6105		= 0x80,
	VT6105_B0	= 0x83,
	VT6105L		= 0x8A,
	VT6105M		= 0x90,
};
/* Per-revision quirk flags, kept in rhine_private.quirks */
enum rhine_quirks {
	rqWOL		= 0x0001,	/* Wake-On-LAN support */
	rqForceReset	= 0x0002,	/* reset may need to be forced */
	rqDavicomPhy	= 0x0020,	/* Rhine-I with Davicom PHY */
	rq6patterns	= 0x0040,	/* 6 instead of 4 patterns for WOL */
	rqStatusWBRace	= 0x0080,	/* Tx Status Writeback Error possible */
	rqRhineI	= 0x0100,	/* See comment below */
};
376 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
377 * MMIO as well as for the collision counter and the Tx FIFO underflow
378 * indicator. In addition, Tx and Rx buffers need to be 4 byte aligned.
381 /* Beware of PCI posted writes */
382 #define IOSYNC do { readb(dev->base_addr + StationAddr); } while (0)
384 static struct pci_device_id rhine_pci_tbl[] =
386 {0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, /* VT86C100A */
387 {0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, /* VT6102 */
388 {0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, /* 6105{,L,LOM} */
389 {0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, /* VT6105M */
390 { } /* terminate list */
392 MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
/* Offsets to the device registers. */
enum register_offsets {
	StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
	IntrStatus=0x0C, IntrEnable=0x0E,
	MulticastFilter0=0x10, MulticastFilter1=0x14,
	RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E,
	MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
	ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
	RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
	StickyHW=0x83, IntrStatus2=0x84,
	WOLcrSet=0xA0, WOLcrClr=0xA4, WOLcrClr1=0xA6,
	WOLcgClr=0xA7,			/* used by rhine_power_init() */
	PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
};
/* Bits in ConfigD: selectable Ethernet backoff algorithms */
enum backoff_bits {
	BackOptional=0x01, BackModify=0x02,
	BackCaptureEffect=0x04, BackRandom=0x08
};
418 /* Registers we check that mmio and reg are the same. */
419 int mmio_verify_registers[] = {
420 RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
	IntrRxDone=0x0001, IntrRxErr=0x0004, IntrRxEmpty=0x0020,
	IntrTxDone=0x0002, IntrTxError=0x0008, IntrTxUnderrun=0x0210,
	IntrPCIErr=0x0040,		/* enabled in init_registers() */
	IntrStatsMax=0x0080, IntrRxEarly=0x0100,
	IntrRxOverflow=0x0400, IntrRxDropped=0x0800, IntrRxNoBuf=0x1000,
	IntrTxAborted=0x2000, IntrLinkChange=0x4000,
	IntrRxWakeUp=0x8000,
	IntrNormalSummary=0x0003, IntrAbnormalSummary=0xC260,
	IntrTxDescRace=0x080000,	/* mapped from IntrStatus2 */
	IntrTxErrSummary=0x082218,
};
439 /* The Rx and Tx buffer descriptors. */
442 u32 desc_length; /* Chain flag, Buffer/frame length */
448 u32 desc_length; /* Chain flag, Tx Config, Frame length */
453 /* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
454 #define TXDESC 0x00e08000
/* Bits in rx_desc.rx_status */
enum rx_status_bits {
	RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
};
/* Bits in *_desc.*_status */
enum desc_status_bits {
	DescOwn=0x80000000	/* descriptor owned by the NIC */
};
/* Bits in ChipCmd. */
enum chip_cmd_bits {
	CmdInit=0x0001, CmdStart=0x0002, CmdStop=0x0004, CmdRxOn=0x0008,
	CmdTxOn=0x0010, CmdTxDemand=0x0020, CmdRxDemand=0x0040,
	CmdEarlyRx=0x0100, CmdEarlyTx=0x0200, CmdFDuplex=0x0400,
	CmdNoTxPoll=0x0800, CmdReset=0x8000,
};
473 #define MAX_MII_CNT 4
474 struct rhine_private {
475 /* Descriptor rings */
476 struct rx_desc *rx_ring;
477 struct tx_desc *tx_ring;
478 dma_addr_t rx_ring_dma;
479 dma_addr_t tx_ring_dma;
481 /* The addresses of receive-in-place skbuffs. */
482 struct sk_buff *rx_skbuff[RX_RING_SIZE];
483 dma_addr_t rx_skbuff_dma[RX_RING_SIZE];
485 /* The saved address of a sent-in-place packet/buffer, for later free(). */
486 struct sk_buff *tx_skbuff[TX_RING_SIZE];
487 dma_addr_t tx_skbuff_dma[TX_RING_SIZE];
489 /* Tx bounce buffers */
490 unsigned char *tx_buf[TX_RING_SIZE];
491 unsigned char *tx_bufs;
492 dma_addr_t tx_bufs_dma;
494 struct pci_dev *pdev;
495 struct net_device_stats stats;
496 struct timer_list timer; /* Media monitoring timer. */
499 /* Frequently used values: keep some adjacent for cache effect. */
501 struct rx_desc *rx_head_desc;
502 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
503 unsigned int cur_tx, dirty_tx;
504 unsigned int rx_buf_sz; /* Based on MTU+slack. */
505 u16 chip_cmd; /* Current setting for ChipCmd */
507 /* These values are keep track of the transceiver/media in use. */
508 u8 tx_thresh, rx_thresh;
510 /* MII transceiver section. */
511 unsigned char phys[MAX_MII_CNT]; /* MII device addresses. */
512 unsigned int mii_cnt; /* number of MIIs found, but only the first one is used */
513 u16 mii_status; /* last read MII status */
514 struct mii_if_info mii_if;
517 static int mdio_read(struct net_device *dev, int phy_id, int location);
518 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
519 static int rhine_open(struct net_device *dev);
520 static void rhine_check_duplex(struct net_device *dev);
521 static void rhine_timer(unsigned long data);
522 static void rhine_tx_timeout(struct net_device *dev);
523 static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev);
524 static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
525 static void rhine_tx(struct net_device *dev);
526 static void rhine_rx(struct net_device *dev);
527 static void rhine_error(struct net_device *dev, int intr_status);
528 static void rhine_set_rx_mode(struct net_device *dev);
529 static struct net_device_stats *rhine_get_stats(struct net_device *dev);
530 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
531 static struct ethtool_ops netdev_ethtool_ops;
532 static int rhine_close(struct net_device *dev);
534 static inline u32 get_intr_status(struct net_device *dev)
536 long ioaddr = dev->base_addr;
537 struct rhine_private *rp = netdev_priv(dev);
540 intr_status = readw(ioaddr + IntrStatus);
541 /* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
542 if (rp->quirks & rqStatusWBRace)
543 intr_status |= readb(ioaddr + IntrStatus2) << 16;
548 * Get power related registers into sane state.
549 * Returns content of power-event (WOL) registers.
551 static void rhine_power_init(struct net_device *dev)
553 long ioaddr = dev->base_addr;
554 struct rhine_private *rp = netdev_priv(dev);
556 if (rp->quirks & rqWOL) {
557 /* Make sure chip is in power state D0 */
558 writeb(readb(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);
560 /* Disable "force PME-enable" */
561 writeb(0x80, ioaddr + WOLcgClr);
563 /* Clear power-event config bits (WOL) */
564 writeb(0xFF, ioaddr + WOLcrClr);
565 /* More recent cards can manage two additional patterns */
566 if (rp->quirks & rq6patterns)
567 writeb(0x03, ioaddr + WOLcrClr1);
569 /* Clear power-event status bits */
570 writeb(0xFF, ioaddr + PwrcsrClr);
571 if (rp->quirks & rq6patterns)
572 writeb(0x03, ioaddr + PwrcsrClr1);
576 static void wait_for_reset(struct net_device *dev, u32 quirks, char *name)
578 long ioaddr = dev->base_addr;
583 if (readw(ioaddr + ChipCmd) & CmdReset) {
584 printk(KERN_INFO "%s: Reset not complete yet. "
585 "Trying harder.\n", name);
587 /* Rhine-II needs to be forced sometimes */
588 if (quirks & rqForceReset)
589 writeb(0x40, ioaddr + MiscCmd);
591 /* VT86C100A may need long delay after reset (dlink) */
592 /* Seen on Rhine-II as well (rl) */
593 while ((readw(ioaddr + ChipCmd) & CmdReset) && --boguscnt)
599 printk(KERN_INFO "%s: Reset %s.\n", name,
600 boguscnt ? "succeeded" : "failed");
604 static void __devinit enable_mmio(long ioaddr, u32 quirks)
607 if (quirks & rqRhineI) {
608 /* More recent docs say that this bit is reserved ... */
609 n = inb(ioaddr + ConfigA) | 0x20;
610 outb(n, ioaddr + ConfigA);
612 n = inb(ioaddr + ConfigD) | 0x80;
613 outb(n, ioaddr + ConfigD);
618 static void __devinit reload_eeprom(long ioaddr)
621 outb(0x20, ioaddr + MACRegEEcsr);
622 /* Typically 2 cycles to reload. */
623 for (i = 0; i < 150; i++)
624 if (! (inb(ioaddr + MACRegEEcsr) & 0x20))
628 #ifdef CONFIG_NET_POLL_CONTROLLER
629 static void rhine_poll(struct net_device *dev)
631 disable_irq(dev->irq);
632 rhine_interrupt(dev->irq, (void *)dev, NULL);
633 enable_irq(dev->irq);
637 static int __devinit rhine_init_one(struct pci_dev *pdev,
638 const struct pci_device_id *ent)
640 struct net_device *dev;
641 struct rhine_private *rp;
645 static int card_idx = -1;
649 int phy, phy_idx = 0;
655 /* when built into the kernel, we only print version if device is found */
657 static int printed_version;
658 if (!printed_version++)
663 option = card_idx < MAX_UNITS ? options[card_idx] : 0;
664 pci_read_config_byte(pdev, PCI_REVISION_ID, &pci_rev);
667 if (pci_rev < VT6102) {
668 quirks = rqRhineI | rqDavicomPhy;
670 name = "VT86C100A Rhine";
673 quirks = rqWOL | rqForceReset;
674 if (pci_rev < VT6105) {
676 quirks |= rqStatusWBRace; /* Rhine-II exclusive */
680 if (pci_rev >= VT6105_B0)
681 quirks |= rq6patterns;
685 rc = pci_enable_device(pdev);
689 /* this should always be supported */
690 rc = pci_set_dma_mask(pdev, 0xffffffff);
692 printk(KERN_ERR "32-bit PCI DMA addresses not supported by "
698 if ((pci_resource_len(pdev, 0) < io_size) ||
699 (pci_resource_len(pdev, 1) < io_size)) {
701 printk(KERN_ERR "Insufficient PCI resources, aborting\n");
705 ioaddr = pci_resource_start(pdev, 0);
706 memaddr = pci_resource_start(pdev, 1);
708 pci_set_master(pdev);
710 dev = alloc_etherdev(sizeof(*rp));
713 printk(KERN_ERR "init_ethernet failed for card #%d\n",
717 SET_MODULE_OWNER(dev);
718 SET_NETDEV_DEV(dev, &pdev->dev);
720 rc = pci_request_regions(pdev, shortname);
722 goto err_out_free_netdev;
726 enable_mmio(ioaddr0, quirks);
728 ioaddr = (long) ioremap(memaddr, io_size);
731 printk(KERN_ERR "ioremap failed for device %s, region 0x%X "
732 "@ 0x%lX\n", pci_name(pdev), io_size, memaddr);
733 goto err_out_free_res;
736 /* Check that selected MMIO registers match the PIO ones */
738 while (mmio_verify_registers[i]) {
739 int reg = mmio_verify_registers[i++];
740 unsigned char a = inb(ioaddr0+reg);
741 unsigned char b = readb(ioaddr+reg);
744 printk(KERN_ERR "MMIO do not match PIO [%02x] "
745 "(%02x != %02x)\n", reg, a, b);
749 #endif /* USE_MMIO */
750 dev->base_addr = ioaddr;
751 rp = netdev_priv(dev);
754 rhine_power_init(dev);
756 /* Reset the chip to erase previous misconfiguration. */
757 writew(CmdReset, ioaddr + ChipCmd);
759 wait_for_reset(dev, quirks, shortname);
761 /* Reload the station address from the EEPROM. */
763 reload_eeprom(ioaddr0);
764 /* Reloading from eeprom overwrites cfgA-D, so we must re-enable MMIO.
765 If reload_eeprom() was done first this could be avoided, but it is
766 not known if that still works with the "win98-reboot" problem. */
767 enable_mmio(ioaddr0, quirks);
769 reload_eeprom(ioaddr);
772 for (i = 0; i < 6; i++)
773 dev->dev_addr[i] = readb(ioaddr + StationAddr + i);
775 if (!is_valid_ether_addr(dev->dev_addr)) {
777 printk(KERN_ERR "Invalid MAC address for card #%d\n", card_idx);
781 if (quirks & rqWOL) {
783 * for 3065D, EEPROM reloaded will cause bit 0 in MAC_REG_CFGA
784 * turned on. it makes MAC receive magic packet
785 * automatically. So, we turn it off. (D-Link)
787 writeb(readb(ioaddr + ConfigA) & 0xFE, ioaddr + ConfigA);
790 /* Select backoff algorithm */
792 writeb(readb(ioaddr + ConfigD) & (0xF0 | backoff),
795 dev->irq = pdev->irq;
797 spin_lock_init(&rp->lock);
799 rp->mii_if.dev = dev;
800 rp->mii_if.mdio_read = mdio_read;
801 rp->mii_if.mdio_write = mdio_write;
802 rp->mii_if.phy_id_mask = 0x1f;
803 rp->mii_if.reg_num_mask = 0x1f;
806 option = dev->mem_start;
808 /* The chip-specific entries in the device structure. */
809 dev->open = rhine_open;
810 dev->hard_start_xmit = rhine_start_tx;
811 dev->stop = rhine_close;
812 dev->get_stats = rhine_get_stats;
813 dev->set_multicast_list = rhine_set_rx_mode;
814 dev->do_ioctl = netdev_ioctl;
815 dev->ethtool_ops = &netdev_ethtool_ops;
816 dev->tx_timeout = rhine_tx_timeout;
817 dev->watchdog_timeo = TX_TIMEOUT;
818 #ifdef CONFIG_NET_POLL_CONTROLLER
819 dev->poll_controller = rhine_poll;
821 if (rp->quirks & rqRhineI)
822 dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
824 /* dev->name not defined before register_netdev()! */
825 rc = register_netdev(dev);
829 /* The lower four bits are the media type. */
832 rp->mii_if.full_duplex = 1;
834 if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
835 rp->mii_if.full_duplex = 1;
837 if (rp->mii_if.full_duplex) {
838 printk(KERN_INFO "%s: Set to forced full duplex, "
839 "autonegotiation disabled.\n", dev->name);
840 rp->mii_if.force_media = 1;
843 printk(KERN_INFO "%s: VIA %s at 0x%lx, ",
852 for (i = 0; i < 5; i++)
853 printk("%2.2x:", dev->dev_addr[i]);
854 printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], pdev->irq);
856 pci_set_drvdata(pdev, dev);
858 rp->phys[0] = 1; /* Standard for this chip. */
859 for (phy = 1; phy < 32 && phy_idx < MAX_MII_CNT; phy++) {
860 int mii_status = mdio_read(dev, phy, 1);
861 if (mii_status != 0xffff && mii_status != 0x0000) {
862 rp->phys[phy_idx++] = phy;
863 rp->mii_if.advertising = mdio_read(dev, phy, 4);
864 printk(KERN_INFO "%s: MII PHY found at address "
865 "%d, status 0x%4.4x advertising %4.4x "
866 "Link %4.4x.\n", dev->name, phy,
867 mii_status, rp->mii_if.advertising,
868 mdio_read(dev, phy, 5));
870 /* set IFF_RUNNING */
871 if (mii_status & BMSR_LSTATUS)
872 netif_carrier_on(dev);
874 netif_carrier_off(dev);
879 rp->mii_cnt = phy_idx;
880 rp->mii_if.phy_id = rp->phys[0];
882 /* Allow forcing the media type. */
885 rp->mii_if.full_duplex = 1;
886 if (option & 0x330) {
887 printk(KERN_INFO " Forcing %dMbs %s-duplex "
889 (option & 0x300 ? 100 : 10),
890 (option & 0x220 ? "full" : "half"));
892 mdio_write(dev, rp->phys[0], MII_BMCR,
893 ((option & 0x300) ? 0x2000 : 0) | /* 100mbps? */
894 ((option & 0x220) ? 0x0100 : 0)); /* Full duplex? */
902 iounmap((void *)ioaddr);
905 pci_release_regions(pdev);
912 static int alloc_ring(struct net_device* dev)
914 struct rhine_private *rp = netdev_priv(dev);
918 ring = pci_alloc_consistent(rp->pdev,
919 RX_RING_SIZE * sizeof(struct rx_desc) +
920 TX_RING_SIZE * sizeof(struct tx_desc),
923 printk(KERN_ERR "Could not allocate DMA memory.\n");
926 if (rp->quirks & rqRhineI) {
927 rp->tx_bufs = pci_alloc_consistent(rp->pdev,
928 PKT_BUF_SZ * TX_RING_SIZE,
930 if (rp->tx_bufs == NULL) {
931 pci_free_consistent(rp->pdev,
932 RX_RING_SIZE * sizeof(struct rx_desc) +
933 TX_RING_SIZE * sizeof(struct tx_desc),
940 rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
941 rp->rx_ring_dma = ring_dma;
942 rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
947 void free_ring(struct net_device* dev)
949 struct rhine_private *rp = netdev_priv(dev);
951 pci_free_consistent(rp->pdev,
952 RX_RING_SIZE * sizeof(struct rx_desc) +
953 TX_RING_SIZE * sizeof(struct tx_desc),
954 rp->rx_ring, rp->rx_ring_dma);
958 pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
959 rp->tx_bufs, rp->tx_bufs_dma);
965 static void alloc_rbufs(struct net_device *dev)
967 struct rhine_private *rp = netdev_priv(dev);
971 rp->dirty_rx = rp->cur_rx = 0;
973 rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
974 rp->rx_head_desc = &rp->rx_ring[0];
975 next = rp->rx_ring_dma;
977 /* Init the ring entries */
978 for (i = 0; i < RX_RING_SIZE; i++) {
979 rp->rx_ring[i].rx_status = 0;
980 rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
981 next += sizeof(struct rx_desc);
982 rp->rx_ring[i].next_desc = cpu_to_le32(next);
983 rp->rx_skbuff[i] = NULL;
985 /* Mark the last entry as wrapping the ring. */
986 rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);
988 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
989 for (i = 0; i < RX_RING_SIZE; i++) {
990 struct sk_buff *skb = dev_alloc_skb(rp->rx_buf_sz);
991 rp->rx_skbuff[i] = skb;
994 skb->dev = dev; /* Mark as being used by this device. */
996 rp->rx_skbuff_dma[i] =
997 pci_map_single(rp->pdev, skb->tail, rp->rx_buf_sz,
1000 rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
1001 rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
1003 rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1006 static void free_rbufs(struct net_device* dev)
1008 struct rhine_private *rp = netdev_priv(dev);
1011 /* Free all the skbuffs in the Rx queue. */
1012 for (i = 0; i < RX_RING_SIZE; i++) {
1013 rp->rx_ring[i].rx_status = 0;
1014 rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1015 if (rp->rx_skbuff[i]) {
1016 pci_unmap_single(rp->pdev,
1017 rp->rx_skbuff_dma[i],
1018 rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1019 dev_kfree_skb(rp->rx_skbuff[i]);
1021 rp->rx_skbuff[i] = NULL;
1025 static void alloc_tbufs(struct net_device* dev)
1027 struct rhine_private *rp = netdev_priv(dev);
1031 rp->dirty_tx = rp->cur_tx = 0;
1032 next = rp->tx_ring_dma;
1033 for (i = 0; i < TX_RING_SIZE; i++) {
1034 rp->tx_skbuff[i] = NULL;
1035 rp->tx_ring[i].tx_status = 0;
1036 rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1037 next += sizeof(struct tx_desc);
1038 rp->tx_ring[i].next_desc = cpu_to_le32(next);
1039 rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
1041 rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
1045 static void free_tbufs(struct net_device* dev)
1047 struct rhine_private *rp = netdev_priv(dev);
1050 for (i = 0; i < TX_RING_SIZE; i++) {
1051 rp->tx_ring[i].tx_status = 0;
1052 rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1053 rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1054 if (rp->tx_skbuff[i]) {
1055 if (rp->tx_skbuff_dma[i]) {
1056 pci_unmap_single(rp->pdev,
1057 rp->tx_skbuff_dma[i],
1058 rp->tx_skbuff[i]->len,
1061 dev_kfree_skb(rp->tx_skbuff[i]);
1063 rp->tx_skbuff[i] = NULL;
1064 rp->tx_buf[i] = NULL;
1068 static void init_registers(struct net_device *dev)
1070 struct rhine_private *rp = netdev_priv(dev);
1071 long ioaddr = dev->base_addr;
1074 for (i = 0; i < 6; i++)
1075 writeb(dev->dev_addr[i], ioaddr + StationAddr + i);
1077 /* Initialize other registers. */
1078 writew(0x0006, ioaddr + PCIBusConfig); /* Tune configuration??? */
1079 /* Configure initial FIFO thresholds. */
1080 writeb(0x20, ioaddr + TxConfig);
1081 rp->tx_thresh = 0x20;
1082 rp->rx_thresh = 0x60; /* Written in rhine_set_rx_mode(). */
1083 rp->mii_if.full_duplex = 0;
1085 writel(rp->rx_ring_dma, ioaddr + RxRingPtr);
1086 writel(rp->tx_ring_dma, ioaddr + TxRingPtr);
1088 rhine_set_rx_mode(dev);
1090 /* Enable interrupts by setting the interrupt mask. */
1091 writew(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
1092 IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
1093 IntrTxDone | IntrTxError | IntrTxUnderrun |
1094 IntrPCIErr | IntrStatsMax | IntrLinkChange,
1095 ioaddr + IntrEnable);
1097 rp->chip_cmd = CmdStart|CmdTxOn|CmdRxOn|CmdNoTxPoll;
1098 if (rp->mii_if.force_media)
1099 rp->chip_cmd |= CmdFDuplex;
1100 writew(rp->chip_cmd, ioaddr + ChipCmd);
1102 rhine_check_duplex(dev);
1104 /* The LED outputs of various MII xcvrs should be configured. */
1105 /* For NS or Mison phys, turn on bit 1 in register 0x17 */
1106 mdio_write(dev, rp->phys[0], 0x17, mdio_read(dev, rp->phys[0], 0x17) |
1110 /* Read and write over the MII Management Data I/O (MDIO) interface. */
1112 static int mdio_read(struct net_device *dev, int phy_id, int regnum)
1114 long ioaddr = dev->base_addr;
1115 int boguscnt = 1024;
1117 /* Wait for a previous command to complete. */
1118 while ((readb(ioaddr + MIICmd) & 0x60) && --boguscnt > 0)
1120 writeb(0x00, ioaddr + MIICmd);
1121 writeb(phy_id, ioaddr + MIIPhyAddr);
1122 writeb(regnum, ioaddr + MIIRegAddr);
1123 writeb(0x40, ioaddr + MIICmd); /* Trigger read */
1125 while ((readb(ioaddr + MIICmd) & 0x40) && --boguscnt > 0)
1127 return readw(ioaddr + MIIData);
/*
 * mdio_write - write @value to PHY register @regnum on PHY @phy_id.
 * For the primary PHY it first shadows duplex/forced-media state into
 * rp->mii_if so rhine_check_duplex() and the ethtool helpers stay in
 * sync with what the user programmed; then it issues the MDIO write.
 * NOTE(review): the switch on regnum and its closing braces are elided
 * in this excerpt.
 */
1130 static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
1132 struct rhine_private *rp = netdev_priv(dev);
1133 long ioaddr = dev->base_addr;
1134 int boguscnt = 1024;
/* Mirror writes to the primary PHY into our cached MII state. */
1136 if (phy_id == rp->phys[0]) {
1138 case MII_BMCR: /* Is user forcing speed/duplex? */
1139 if (value & 0x9000) /* Autonegotiation. */
1140 rp->mii_if.force_media = 0;
1142 rp->mii_if.full_duplex = (value & 0x0100) ? 1 : 0;
1145 rp->mii_if.advertising = value;
1150 /* Wait for a previous command to complete. */
1151 while ((readb(ioaddr + MIICmd) & 0x60) && --boguscnt > 0)
1153 writeb(0x00, ioaddr + MIICmd);
1154 writeb(phy_id, ioaddr + MIIPhyAddr);
1155 writeb(regnum, ioaddr + MIIRegAddr);
1156 writew(value, ioaddr + MIIData);
1157 writeb(0x20, ioaddr + MIICmd); /* Trigger write. */
/*
 * rhine_open - net_device open() hook.
 * Resets the chip, grabs the (shared) IRQ, allocates the Rx/Tx
 * descriptor rings, programs the registers, starts the Tx queue and
 * arms the periodic link-monitor timer (rhine_timer).
 * Returns 0 on success; error paths (irq/ring failures) are elided in
 * this excerpt.
 */
1161 static int rhine_open(struct net_device *dev)
1163 struct rhine_private *rp = netdev_priv(dev);
1164 long ioaddr = dev->base_addr;
1167 /* Reset the chip. */
1168 writew(CmdReset, ioaddr + ChipCmd);
/* Shared PCI interrupt line, so SA_SHIRQ. */
1170 i = request_irq(rp->pdev->irq, &rhine_interrupt, SA_SHIRQ, dev->name,
1176 printk(KERN_DEBUG "%s: rhine_open() irq %d.\n",
1177 dev->name, rp->pdev->irq);
/* DMA descriptor rings and bounce buffers. */
1179 i = alloc_ring(dev);
1184 wait_for_reset(dev, rp->quirks, dev->name);
1185 init_registers(dev);
1187 printk(KERN_DEBUG "%s: Done rhine_open(), status %4.4x "
1188 "MII status: %4.4x.\n",
1189 dev->name, readw(ioaddr + ChipCmd),
1190 mdio_read(dev, rp->phys[0], MII_BMSR));
1192 netif_start_queue(dev);
1194 /* Set the timer to check for link beat. */
1195 init_timer(&rp->timer);
1196 rp->timer.expires = jiffies + 2 * HZ/100;
1197 rp->timer.data = (unsigned long)dev;
1198 rp->timer.function = &rhine_timer; /* timer handler */
1199 add_timer(&rp->timer);
/*
 * rhine_check_duplex - sync the MAC's duplex setting with the result
 * of MII autonegotiation.  Reads the link partner ability register,
 * derives full/half duplex from the negotiated capabilities, and if it
 * changed, updates chip_cmd's CmdFDuplex bit and writes it to ChipCmd.
 * Bails out early when media is forced or no PHY responds (0xffff).
 */
1204 static void rhine_check_duplex(struct net_device *dev)
1206 struct rhine_private *rp = netdev_priv(dev);
1207 long ioaddr = dev->base_addr;
1208 int mii_lpa = mdio_read(dev, rp->phys[0], MII_LPA);
1209 int negotiated = mii_lpa & rp->mii_if.advertising;
/* Nothing to negotiate if user forced media or PHY is absent. */
1212 if (rp->mii_if.force_media || mii_lpa == 0xffff)
/* Full duplex if 100FD negotiated, or 10FD without any 100 mode. */
1214 duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
1215 if (rp->mii_if.full_duplex != duplex) {
1216 rp->mii_if.full_duplex = duplex;
1218 printk(KERN_INFO "%s: Setting %s-duplex based on "
1219 "MII #%d link partner capability of %4.4x.\n",
1220 dev->name, duplex ? "full" : "half",
1221 rp->phys[0], mii_lpa);
1223 rp->chip_cmd |= CmdFDuplex;
1225 rp->chip_cmd &= ~CmdFDuplex;
1226 writew(rp->chip_cmd, ioaddr + ChipCmd);
/*
 * rhine_timer - periodic (10 s) link monitor, runs in timer context.
 * Under rp->lock it re-checks duplex and propagates the MII
 * "link established" bit to the netdev carrier state, then re-arms
 * itself.  Data is the net_device pointer cast to unsigned long
 * (pre-timer_setup era kernel API).
 */
1231 static void rhine_timer(unsigned long data)
1233 struct net_device *dev = (struct net_device *)data;
1234 struct rhine_private *rp = netdev_priv(dev);
1235 long ioaddr = dev->base_addr;
1236 int next_tick = 10*HZ;
1240 printk(KERN_DEBUG "%s: VIA Rhine monitor tick, status %4.4x.\n",
1241 dev->name, readw(ioaddr + IntrStatus));
/* Lock against the interrupt handler while touching MII/chip_cmd. */
1244 spin_lock_irq (&rp->lock);
1246 rhine_check_duplex(dev);
1248 /* make IFF_RUNNING follow the MII status bit "Link established" */
1249 mii_status = mdio_read(dev, rp->phys[0], MII_BMSR);
1250 if ((mii_status & BMSR_LSTATUS) != (rp->mii_status & BMSR_LSTATUS)) {
1251 if (mii_status & BMSR_LSTATUS)
1252 netif_carrier_on(dev);
1254 netif_carrier_off(dev);
1256 rp->mii_status = mii_status;
1258 spin_unlock_irq(&rp->lock);
/* Re-arm for the next poll interval. */
1260 rp->timer.expires = jiffies + next_tick;
1261 add_timer(&rp->timer);
/*
 * rhine_tx_timeout - watchdog hook invoked when the Tx queue stalls.
 * Performs a full recovery: logs chip/PHY status, masks our IRQ line
 * (so the handler cannot race the reinit), resets the chip, rebuilds
 * the descriptor rings (elided here), reinitializes registers, and
 * finally wakes the queue.  Counts the event as a tx_error.
 */
1265 static void rhine_tx_timeout(struct net_device *dev)
1267 struct rhine_private *rp = netdev_priv(dev);
1268 long ioaddr = dev->base_addr;
1270 printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
1271 "%4.4x, resetting...\n",
1272 dev->name, readw(ioaddr + IntrStatus),
1273 mdio_read(dev, rp->phys[0], MII_BMSR));
1275 /* protect against concurrent rx interrupts */
1276 disable_irq(rp->pdev->irq);
1278 spin_lock(&rp->lock);
1280 /* Reset the chip. */
1281 writew(CmdReset, ioaddr + ChipCmd);
1283 /* clear all descriptors */
1289 /* Reinitialize the hardware. */
1290 wait_for_reset(dev, rp->quirks, dev->name);
1291 init_registers(dev);
1293 spin_unlock(&rp->lock);
1294 enable_irq(rp->pdev->irq);
/* Pretend a fresh transmission just started so the watchdog rearms. */
1296 dev->trans_start = jiffies;
1297 rp->stats.tx_errors++;
1298 netif_wake_queue(dev);
/*
 * rhine_start_tx - hard_start_xmit hook: queue one skb for DMA.
 * Pads short frames to ETH_ZLEN, then either copies the packet into a
 * pre-allocated aligned bounce buffer (Rhine-I cannot DMA unaligned,
 * fragmented or checksum-offload skbs) or pci_map_single()s the skb
 * data directly.  Ownership of the descriptor is handed to the chip
 * last, under rp->lock, and the Tx engine is poked unless a Tx error
 * is pending (the ISR must clean that up first).  Stops the queue when
 * the ring fills to TX_QUEUE_LEN.
 */
1301 static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
1303 struct rhine_private *rp = netdev_priv(dev);
1307 /* Caution: the write order is important here, set the field
1308 with the "ownership" bits last. */
1310 /* Calculate the next Tx descriptor entry. */
1311 entry = rp->cur_tx % TX_RING_SIZE;
/* Hardware requires minimum-length frames; pad (may reallocate skb). */
1313 if (skb->len < ETH_ZLEN) {
1314 skb = skb_padto(skb, ETH_ZLEN);
1319 rp->tx_skbuff[entry] = skb;
/* Rhine-I quirk: fall back to the aligned bounce buffer. */
1321 if ((rp->quirks & rqRhineI) &&
1322 (((long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_HW)) {
1323 /* Must use alignment buffer. */
1324 if (skb->len > PKT_BUF_SZ) {
1325 /* packet too long, drop it */
1327 rp->tx_skbuff[entry] = NULL;
1328 rp->stats.tx_dropped++;
1331 skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
/* dma handle 0 marks "bounce buffer used, nothing to unmap". */
1332 rp->tx_skbuff_dma[entry] = 0;
1333 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
1334 (rp->tx_buf[entry] -
1337 rp->tx_skbuff_dma[entry] =
1338 pci_map_single(rp->pdev, skb->data, skb->len,
1340 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
1343 rp->tx_ring[entry].desc_length =
1344 cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
1347 spin_lock_irq(&rp->lock);
/* Hand the descriptor to the NIC only after addr/length are set. */
1349 rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1354 /* Non-x86 Todo: explicitly flush cache lines here. */
1357 * Wake the potentially-idle transmit channel unless errors are
1358 * pending (the ISR must sort them out first).
1360 intr_status = get_intr_status(dev);
1361 if ((intr_status & IntrTxErrSummary) == 0) {
1362 writew(CmdTxDemand | rp->chip_cmd, dev->base_addr + ChipCmd);
1366 if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
1367 netif_stop_queue(dev);
1369 dev->trans_start = jiffies;
1371 spin_unlock_irq(&rp->lock);
1374 printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
1375 dev->name, rp->cur_tx-1, entry);
1380 /* The interrupt handler does all of the Rx thread work and cleans up
1381 after the Tx thread. */
/*
 * rhine_interrupt - shared IRQ handler.
 * Loops while the chip reports pending interrupt causes, acking each
 * batch first, then dispatching: Rx causes -> rhine_rx() (call site
 * elided), Tx causes -> rhine_tx() after waiting for the Tx engine to
 * stop on error, and all abnormal causes -> rhine_error().  boguscnt
 * bounds the loop so a storming device cannot livelock the CPU.
 */
1382 static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *rgs)
1384 struct net_device *dev = dev_instance;
1387 int boguscnt = max_interrupt_work;
1390 ioaddr = dev->base_addr;
1392 while ((intr_status = get_intr_status(dev))) {
1395 /* Acknowledge all of the current interrupt sources ASAP. */
1396 if (intr_status & IntrTxDescRace)
1397 writeb(0x08, ioaddr + IntrStatus2);
1398 writew(intr_status & 0xffff, ioaddr + IntrStatus);
1402 printk(KERN_DEBUG "%s: Interrupt, status %8.8x.\n",
1403 dev->name, intr_status);
1405 if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
1406 IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf))
1409 if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
1410 if (intr_status & IntrTxErrSummary) {
1412 /* Avoid scavenging before Tx engine turned off */
1413 while ((readw(ioaddr+ChipCmd) & CmdTxOn) && --cnt)
1415 if (debug > 2 && !cnt)
1416 printk(KERN_WARNING "%s: "
1417 "rhine_interrupt() Tx engine"
1418 "still on.\n", dev->name);
1423 /* Abnormal error summary/uncommon events handlers. */
1424 if (intr_status & (IntrPCIErr | IntrLinkChange |
1425 IntrStatsMax | IntrTxError | IntrTxAborted |
1426 IntrTxUnderrun | IntrTxDescRace))
1427 rhine_error(dev, intr_status);
/* Give up after max_interrupt_work iterations to avoid livelock. */
1429 if (--boguscnt < 0) {
1430 printk(KERN_WARNING "%s: Too much work at interrupt, "
1432 dev->name, intr_status);
1438 printk(KERN_DEBUG "%s: exiting interrupt, status=%8.8x.\n",
1439 dev->name, readw(ioaddr + IntrStatus));
1440 return IRQ_RETVAL(handled);
1443 /* This routine is logically part of the interrupt handler, but isolated
/*
 * rhine_tx - reclaim completed Tx descriptors (interrupt context).
 * Walks dirty_tx..cur_tx under rp->lock: stops at the first descriptor
 * still owned by the chip; on error status updates the per-cause error
 * counters (on FIFO errors the descriptor is handed back to the chip
 * and the skb kept for retry); on success accounts bytes/packets,
 * unmaps the DMA buffer (unless the bounce buffer was used, marked by
 * a 0 dma handle) and frees the skb.  Wakes the queue when enough ring
 * slots have drained.
 */
1445 static void rhine_tx(struct net_device *dev)
1447 struct rhine_private *rp = netdev_priv(dev);
1448 int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
1450 spin_lock(&rp->lock);
1452 /* find and cleanup dirty tx descriptors */
1453 while (rp->dirty_tx != rp->cur_tx) {
1454 txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
1456 printk(KERN_DEBUG " Tx scavenge %d status %8.8x.\n",
/* Chip still owns this descriptor -> nothing more to reclaim. */
1458 if (txstatus & DescOwn)
1460 if (txstatus & 0x8000) {
1462 printk(KERN_DEBUG "%s: Transmit error, "
1463 "Tx status %8.8x.\n",
1464 dev->name, txstatus);
1465 rp->stats.tx_errors++;
/* Per-bit error causes — bit meanings per VIA datasheet. */
1466 if (txstatus & 0x0400) rp->stats.tx_carrier_errors++;
1467 if (txstatus & 0x0200) rp->stats.tx_window_errors++;
1468 if (txstatus & 0x0100) rp->stats.tx_aborted_errors++;
1469 if (txstatus & 0x0080) rp->stats.tx_heartbeat_errors++;
1470 if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
1471 (txstatus & 0x0800) || (txstatus & 0x1000)) {
1472 rp->stats.tx_fifo_errors++;
/* Underrun: give the descriptor back and retry this skb. */
1473 rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1474 break; /* Keep the skb - we try again */
1476 /* Transmitter restarted in 'abnormal' handler. */
/* Rhine-I reports the collision count in a different bit field. */
1478 if (rp->quirks & rqRhineI)
1479 rp->stats.collisions += (txstatus >> 3) & 0x0F;
1481 rp->stats.collisions += txstatus & 0x0F;
1483 printk(KERN_DEBUG "collisions: %1.1x:%1.1x\n",
1484 (txstatus >> 3) & 0xF,
1486 rp->stats.tx_bytes += rp->tx_skbuff[entry]->len;
1487 rp->stats.tx_packets++;
1489 /* Free the original skb. */
1490 if (rp->tx_skbuff_dma[entry]) {
1491 pci_unmap_single(rp->pdev,
1492 rp->tx_skbuff_dma[entry],
1493 rp->tx_skbuff[entry]->len,
1496 dev_kfree_skb_irq(rp->tx_skbuff[entry]);
1497 rp->tx_skbuff[entry] = NULL;
1498 entry = (++rp->dirty_tx) % TX_RING_SIZE;
1500 if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
1501 netif_wake_queue(dev);
1503 spin_unlock(&rp->lock);
1506 /* This routine is logically part of the interrupt handler, but isolated
1507 for clarity and better register allocation. */
/*
 * rhine_rx - receive processing (interrupt context).
 * First loop: consume every descriptor the chip has given back.
 * Errored/multi-buffer frames bump the appropriate error counters
 * (rx_crc_errors is taken under rp->lock because rhine_error() also
 * updates it); good frames are either copied into a fresh small skb
 * (length < rx_copybreak, keeps the big ring buffer in place) or the
 * ring skb itself is unmapped and passed up.  Second loop: refill any
 * empty ring slots with newly allocated skbs and return ownership to
 * the chip.  Finally the Rx engine is kicked in case it stalled.
 */
1508 static void rhine_rx(struct net_device *dev)
1510 struct rhine_private *rp = netdev_priv(dev);
1511 int entry = rp->cur_rx % RX_RING_SIZE;
1512 int boguscnt = rp->dirty_rx + RX_RING_SIZE - rp->cur_rx;
1515 printk(KERN_DEBUG "%s: rhine_rx(), entry %d status %8.8x.\n",
1517 le32_to_cpu(rp->rx_head_desc->rx_status));
1520 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1521 while (!(rp->rx_head_desc->rx_status & cpu_to_le32(DescOwn))) {
1522 struct rx_desc *desc = rp->rx_head_desc;
1523 u32 desc_status = le32_to_cpu(desc->rx_status);
/* Frame length lives in the high 16 bits of the status word. */
1524 int data_size = desc_status >> 16;
1527 printk(KERN_DEBUG " rhine_rx() status is %8.8x.\n",
/* Anything other than a whole, error-free packet is accounted here. */
1531 if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
1532 if ((desc_status & RxWholePkt) != RxWholePkt) {
1533 printk(KERN_WARNING "%s: Oversized Ethernet "
1534 "frame spanned multiple buffers, entry "
1535 "%#x length %d status %8.8x!\n",
1536 dev->name, entry, data_size,
1538 printk(KERN_WARNING "%s: Oversized Ethernet "
1539 "frame %p vs %p.\n", dev->name,
1540 rp->rx_head_desc, &rp->rx_ring[entry]);
1541 rp->stats.rx_length_errors++;
1542 } else if (desc_status & RxErr) {
1543 /* There was a error. */
1545 printk(KERN_DEBUG " rhine_rx() Rx "
1546 "error was %8.8x.\n",
1548 rp->stats.rx_errors++;
/* Per-bit Rx error causes — bit meanings per VIA datasheet. */
1549 if (desc_status & 0x0030) rp->stats.rx_length_errors++;
1550 if (desc_status & 0x0048) rp->stats.rx_fifo_errors++;
1551 if (desc_status & 0x0004) rp->stats.rx_frame_errors++;
1552 if (desc_status & 0x0002) {
1553 /* this can also be updated outside the interrupt handler */
1554 spin_lock(&rp->lock);
1555 rp->stats.rx_crc_errors++;
1556 spin_unlock(&rp->lock);
1560 struct sk_buff *skb;
1561 /* Length should omit the CRC */
1562 int pkt_len = data_size - 4;
1564 /* Check if the packet is long enough to accept without
1565 copying to a minimally-sized skbuff. */
1566 if (pkt_len < rx_copybreak &&
1567 (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1569 skb_reserve(skb, 2); /* 16 byte align the IP header */
/* Sync for CPU access before copying out of the DMA buffer... */
1570 pci_dma_sync_single_for_cpu(rp->pdev,
1571 rp->rx_skbuff_dma[entry],
1573 PCI_DMA_FROMDEVICE);
1575 /* *_IP_COPYSUM isn't defined anywhere and
1576 eth_copy_and_sum is memcpy for all archs so
1577 this is kind of pointless right now
1579 eth_copy_and_sum(skb,
1580 rp->rx_skbuff[entry]->tail,
1582 skb_put(skb, pkt_len);
/* ...and give the buffer back to the device afterwards. */
1583 pci_dma_sync_single_for_device(rp->pdev,
1584 rp->rx_skbuff_dma[entry],
1586 PCI_DMA_FROMDEVICE);
/* Large frame: hand the ring skb itself upstream. */
1588 skb = rp->rx_skbuff[entry];
1590 printk(KERN_ERR "%s: Inconsistent Rx "
1591 "descriptor chain.\n",
1595 rp->rx_skbuff[entry] = NULL;
1596 skb_put(skb, pkt_len);
1597 pci_unmap_single(rp->pdev,
1598 rp->rx_skbuff_dma[entry],
1600 PCI_DMA_FROMDEVICE);
1602 skb->protocol = eth_type_trans(skb, dev);
1604 dev->last_rx = jiffies;
1605 rp->stats.rx_bytes += pkt_len;
1606 rp->stats.rx_packets++;
1608 entry = (++rp->cur_rx) % RX_RING_SIZE;
1609 rp->rx_head_desc = &rp->rx_ring[entry];
1612 /* Refill the Rx ring buffers. */
1613 for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
1614 struct sk_buff *skb;
1615 entry = rp->dirty_rx % RX_RING_SIZE;
1616 if (rp->rx_skbuff[entry] == NULL) {
1617 skb = dev_alloc_skb(rp->rx_buf_sz);
1618 rp->rx_skbuff[entry] = skb;
1620 break; /* Better luck next round. */
1621 skb->dev = dev; /* Mark as being used by this device. */
1622 rp->rx_skbuff_dma[entry] =
1623 pci_map_single(rp->pdev, skb->tail,
1625 PCI_DMA_FROMDEVICE);
1626 rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
/* Ownership back to the chip only after addr is valid. */
1628 rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
1631 /* Pre-emptively restart Rx engine. */
1632 writew(readw(dev->base_addr + ChipCmd) | CmdRxOn | CmdRxDemand,
1633 dev->base_addr + ChipCmd);
/*
 * clear_tally_counters - zero the hardware CRC/missed-frame counters.
 * Some chip revisions clear on write-0, others clear on read, so both
 * are done (see the note below from the original author).
 */
1637 * Clears the "tally counters" for CRC errors and missed frames(?).
1638 * It has been reported that some chips need a write of 0 to clear
1639 * these, for others the counters are set to 1 when written to and
1640 * instead cleared when read. So we clear them both ways ...
1642 static inline void clear_tally_counters(const long ioaddr)
1644 writel(0, ioaddr + RxMissed);
1645 readw(ioaddr + RxCRCErrs);
1646 readw(ioaddr + RxMissed);
/*
 * rhine_restart_tx - restart the Tx engine after an abnormal stop.
 * Called from the error handler.  If no new Tx error summary is
 * pending, points the chip's Tx ring pointer at the first unreclaimed
 * descriptor (we know better than the chip where to resume) and issues
 * a Tx demand; otherwise just logs and lets the next ISR pass retry.
 */
1649 static void rhine_restart_tx(struct net_device *dev) {
1650 struct rhine_private *rp = netdev_priv(dev);
1651 long ioaddr = dev->base_addr;
1652 int entry = rp->dirty_tx % TX_RING_SIZE;
1656 * If new errors occured, we need to sort them out before doing Tx.
1657 * In that case the ISR will be back here RSN anyway.
1659 intr_status = get_intr_status(dev);
1661 if ((intr_status & IntrTxErrSummary) == 0) {
1663 /* We know better than the chip where it should continue. */
1664 writel(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
1665 ioaddr + TxRingPtr);
1667 writew(CmdTxDemand | rp->chip_cmd, ioaddr + ChipCmd);
1671 /* This should never happen */
1673 printk(KERN_WARNING "%s: rhine_restart_tx() "
1674 "Another error occured %8.8x.\n",
1675 dev->name, intr_status);
/*
 * rhine_error - handle "abnormal" interrupt causes, under rp->lock.
 * Link changes: restart autonegotiation (Rhine-I quirk) or re-check
 * duplex.  Stats overflow: fold the hardware tally counters into
 * rp->stats.  Tx underrun / unspecified Tx error: raise the Tx FIFO
 * threshold (up to 0xE0) so the DMA engine buffers more before
 * transmitting.  Any Tx abort/underrun/race additionally restarts the
 * Tx engine.  Unrecognized bits are logged as "Something Wicked".
 */
1680 static void rhine_error(struct net_device *dev, int intr_status)
1682 struct rhine_private *rp = netdev_priv(dev);
1683 long ioaddr = dev->base_addr;
1685 spin_lock(&rp->lock);
1687 if (intr_status & (IntrLinkChange)) {
/* MIIStatus bit 0x02 presumably means "link down" — TODO confirm
 * against the VIA datasheet. */
1688 if (readb(ioaddr + MIIStatus) & 0x02) {
1689 /* Link failed, restart autonegotiation. */
1690 if (rp->quirks & rqRhineI)
1691 mdio_write(dev, rp->phys[0], MII_BMCR, 0x3300);
1693 rhine_check_duplex(dev);
1695 printk(KERN_ERR "%s: MII status changed: "
1696 "Autonegotiation advertising %4.4x partner "
1697 "%4.4x.\n", dev->name,
1698 mdio_read(dev, rp->phys[0], MII_ADVERTISE),
1699 mdio_read(dev, rp->phys[0], MII_LPA));
1701 if (intr_status & IntrStatsMax) {
1702 rp->stats.rx_crc_errors += readw(ioaddr + RxCRCErrs);
1703 rp->stats.rx_missed_errors += readw(ioaddr + RxMissed);
1704 clear_tally_counters(ioaddr);
1706 if (intr_status & IntrTxAborted) {
1708 printk(KERN_INFO "%s: Abort %8.8x, frame dropped.\n",
1709 dev->name, intr_status);
1711 if (intr_status & IntrTxUnderrun) {
/* Bump Tx FIFO threshold in 0x20 steps, capped at 0xE0. */
1712 if (rp->tx_thresh < 0xE0)
1713 writeb(rp->tx_thresh += 0x20, ioaddr + TxConfig);
1715 printk(KERN_INFO "%s: Transmitter underrun, Tx "
1716 "threshold now %2.2x.\n",
1717 dev->name, rp->tx_thresh);
1719 if (intr_status & IntrTxDescRace) {
1721 printk(KERN_INFO "%s: Tx descriptor write-back race.\n",
/* TxError with no specific cause bit set: same threshold remedy. */
1724 if ((intr_status & IntrTxError) &&
1725 (intr_status & (IntrTxAborted |
1726 IntrTxUnderrun | IntrTxDescRace)) == 0) {
1727 if (rp->tx_thresh < 0xE0) {
1728 writeb(rp->tx_thresh += 0x20, ioaddr + TxConfig);
1731 printk(KERN_INFO "%s: Unspecified error. Tx "
1732 "threshold now %2.2x.\n",
1733 dev->name, rp->tx_thresh);
1735 if (intr_status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace |
1737 rhine_restart_tx(dev);
1739 if (intr_status & ~(IntrLinkChange | IntrStatsMax | IntrTxUnderrun |
1740 IntrTxError | IntrTxAborted | IntrNormalSummary |
1743 printk(KERN_ERR "%s: Something Wicked happened! "
1744 "%8.8x.\n", dev->name, intr_status);
1747 spin_unlock(&rp->lock);
/*
 * rhine_get_stats - get_stats hook: fold the live hardware tally
 * counters into the software stats (under the lock, IRQ-safe, since
 * rhine_error() updates the same fields from interrupt context) and
 * return the accumulated structure (return elided in this excerpt).
 */
1750 static struct net_device_stats *rhine_get_stats(struct net_device *dev)
1752 struct rhine_private *rp = netdev_priv(dev);
1753 long ioaddr = dev->base_addr;
1754 unsigned long flags;
1756 spin_lock_irqsave(&rp->lock, flags);
1757 rp->stats.rx_crc_errors += readw(ioaddr + RxCRCErrs);
1758 rp->stats.rx_missed_errors += readw(ioaddr + RxMissed);
1759 clear_tally_counters(ioaddr);
1760 spin_unlock_irqrestore(&rp->lock, flags);
/*
 * rhine_set_rx_mode - program the receive filter.
 * Promiscuous: accept everything (hash filter all-ones).  Too many
 * multicast groups or IFF_ALLMULTI: accept all multicast.  Otherwise
 * build a 64-bit CRC-based multicast hash from the device's mc_list.
 * Finally writes RxConfig with the cached Rx FIFO threshold OR'd with
 * the chosen rx_mode bits (rx_mode assignments are elided here).
 */
1765 static void rhine_set_rx_mode(struct net_device *dev)
1767 struct rhine_private *rp = netdev_priv(dev);
1768 long ioaddr = dev->base_addr;
1769 u32 mc_filter[2]; /* Multicast hash filter */
1770 u8 rx_mode; /* Note: 0x02=accept runt, 0x01=accept errs */
1772 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1773 /* Unconditionally log net taps. */
1774 printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
1777 writel(0xffffffff, ioaddr + MulticastFilter0);
1778 writel(0xffffffff, ioaddr + MulticastFilter1);
1779 } else if ((dev->mc_count > multicast_filter_limit)
1780 || (dev->flags & IFF_ALLMULTI)) {
1781 /* Too many to match, or accept all multicasts. */
1782 writel(0xffffffff, ioaddr + MulticastFilter0);
1783 writel(0xffffffff, ioaddr + MulticastFilter1);
1786 struct dev_mc_list *mclist;
1788 memset(mc_filter, 0, sizeof(mc_filter));
1789 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1790 i++, mclist = mclist->next) {
/* Top 6 bits of the Ethernet CRC select one of 64 hash bits. */
1791 int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
1793 mc_filter[bit_nr >> 5] |= cpu_to_le32(1 << (bit_nr & 31));
1795 writel(mc_filter[0], ioaddr + MulticastFilter0);
1796 writel(mc_filter[1], ioaddr + MulticastFilter1);
1799 writeb(rp->rx_thresh | rx_mode, ioaddr + RxConfig);
/*
 * netdev_get_drvinfo - ethtool GDRVINFO: report driver name, version
 * and PCI bus id.  NOTE(review): unbounded strcpy into the fixed-size
 * ethtool_drvinfo fields is the style of this kernel era; modern code
 * would use strlcpy/strscpy.
 */
1802 static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1804 struct rhine_private *rp = netdev_priv(dev);
1806 strcpy(info->driver, DRV_NAME);
1807 strcpy(info->version, DRV_VERSION);
1808 strcpy(info->bus_info, pci_name(rp->pdev));
/*
 * netdev_get_settings - ethtool GSET: delegate to the generic MII
 * helper, serialized against the ISR/timer with rp->lock.
 */
1811 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1813 struct rhine_private *rp = netdev_priv(dev);
1816 spin_lock_irq(&rp->lock);
1817 rc = mii_ethtool_gset(&rp->mii_if, cmd);
1818 spin_unlock_irq(&rp->lock);
/*
 * netdev_set_settings - ethtool SSET: delegate to the generic MII
 * helper under rp->lock; mdio_write() keeps rp->mii_if in sync.
 */
1823 static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1825 struct rhine_private *rp = netdev_priv(dev);
1828 spin_lock_irq(&rp->lock);
1829 rc = mii_ethtool_sset(&rp->mii_if, cmd);
1830 spin_unlock_irq(&rp->lock);
/* ethtool NWAY_RST: restart MII autonegotiation via the mii library. */
1835 static int netdev_nway_reset(struct net_device *dev)
1837 struct rhine_private *rp = netdev_priv(dev);
1839 return mii_nway_restart(&rp->mii_if);
/* ethtool GLINK: report link-up state via the mii library. */
1842 static u32 netdev_get_link(struct net_device *dev)
1844 struct rhine_private *rp = netdev_priv(dev);
1846 return mii_link_ok(&rp->mii_if);
/* ethtool GMSGLVL: body elided in this excerpt; presumably returns the
 * module-level 'debug' value — TODO confirm. */
1849 static u32 netdev_get_msglevel(struct net_device *dev)
/* ethtool SMSGLVL: body elided in this excerpt; presumably stores
 * 'value' into the module-level 'debug' — TODO confirm. */
1854 static void netdev_set_msglevel(struct net_device *dev, u32 value)
/* ethtool operations table wired into the net_device at probe time. */
1859 static struct ethtool_ops netdev_ethtool_ops = {
1860 .get_drvinfo = netdev_get_drvinfo,
1861 .get_settings = netdev_get_settings,
1862 .set_settings = netdev_set_settings,
1863 .nway_reset = netdev_nway_reset,
1864 .get_link = netdev_get_link,
1865 .get_msglevel = netdev_get_msglevel,
1866 .set_msglevel = netdev_set_msglevel,
1867 .get_sg = ethtool_op_get_sg,
1868 .get_tx_csum = ethtool_op_get_tx_csum,
/*
 * netdev_ioctl - SIOCGMIIxxx/SIOCSMIIxxx handler: rejects calls while
 * the interface is down (error return elided), then delegates to the
 * generic MII ioctl under rp->lock.
 */
1871 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1873 struct rhine_private *rp = netdev_priv(dev);
1876 if (!netif_running(dev))
1879 spin_lock_irq(&rp->lock);
1880 rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
1881 spin_unlock_irq(&rp->lock);
/*
 * rhine_close - net_device stop() hook.
 * Kills the link-monitor timer, and under the lock: stops the queue,
 * switches the transmitter to loopback (avoids hardware races while
 * shutting down), masks all interrupts and stops the Tx/Rx engines.
 * Then releases the IRQ; ring teardown is elided in this excerpt.
 */
1886 static int rhine_close(struct net_device *dev)
1888 long ioaddr = dev->base_addr;
1889 struct rhine_private *rp = netdev_priv(dev);
1891 del_timer_sync(&rp->timer);
1893 spin_lock_irq(&rp->lock);
1895 netif_stop_queue(dev);
1898 printk(KERN_DEBUG "%s: Shutting down ethercard, "
1899 "status was %4.4x.\n",
1900 dev->name, readw(ioaddr + ChipCmd));
1902 /* Switch to loopback mode to avoid hardware races. */
1903 writeb(rp->tx_thresh | 0x02, ioaddr + TxConfig);
1905 /* Disable interrupts by clearing the interrupt mask. */
1906 writew(0x0000, ioaddr + IntrEnable);
1908 /* Stop the chip's Tx and Rx processes. */
1909 writew(CmdStop, ioaddr + ChipCmd);
1911 spin_unlock_irq(&rp->lock);
1913 free_irq(rp->pdev->irq, dev);
/*
 * rhine_remove_one - PCI remove() hook: unregister the netdev, release
 * the PCI regions, unmap the (possibly memory-mapped) register window,
 * disable the device and clear the drvdata pointer.  free_netdev is
 * elided in this excerpt.
 */
1922 static void __devexit rhine_remove_one(struct pci_dev *pdev)
1924 struct net_device *dev = pci_get_drvdata(pdev);
1926 unregister_netdev(dev);
1928 pci_release_regions(pdev);
1931 iounmap((char *)(dev->base_addr));
1935 pci_disable_device(pdev);
1936 pci_set_drvdata(pdev, NULL);
/* PCI driver glue: device-id table plus probe/remove entry points. */
1940 static struct pci_driver rhine_driver = {
1941 .name = "via-rhine",
1942 .id_table = rhine_pci_tbl,
1943 .probe = rhine_init_one,
1944 .remove = __devexit_p(rhine_remove_one),
/* Module init: register the PCI driver (probe fires per device). */
1948 static int __init rhine_init(void)
1950 /* when a module, this is printed whether or not devices are found in probe */
1954 return pci_module_init(&rhine_driver);
/* Module exit: unregister the PCI driver (removes all bound devices). */
1958 static void __exit rhine_cleanup(void)
1960 pci_unregister_driver(&rhine_driver);
1964 module_init(rhine_init);
1965 module_exit(rhine_cleanup);