1 /* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
3 Written 1998-2001 by Donald Becker.
5 Current Maintainer: Roger Luethi <rl@hellgate.ch>
7 This software may be used and distributed according to the terms of
8 the GNU General Public License (GPL), incorporated herein by reference.
9 Drivers based on or derived from this code fall under the GPL and must
10 retain the authorship, copyright and license notice. This file is not
11 a complete program and may only be used when the entire operating
12 system is licensed under the GPL.
14 This driver is designed for the VIA VT86C100A Rhine-I.
15 It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
16 and management NIC 6105M).
18 The author may be reached as becker@scyld.com, or C/O
19 Scyld Computing Corporation
20 410 Severn Ave., Suite 210
24 This driver contains some changes from the original Donald Becker
25 version. He may or may not be interested in bug reports on this
26 code. You can find his versions at:
27 http://www.scyld.com/network/via-rhine.html
30 Linux kernel version history:
33 - Jeff Garzik: softnet 'n stuff
36 - Justin Guyett: softnet and locking fixes
37 - Jeff Garzik: use PCI interface
40 - Urban Widmark: minor cleanups, merges from Becker 1.03a/1.04 versions
43 - Urban Widmark: use PCI DMA interface (with thanks to the eepro100.c
44 code) update "Theory of Operation" with
45 softnet/locking changes
46 - Dave Miller: PCI DMA and endian fixups
47 - Jeff Garzik: MOD_xxx race fixes, updated PCI resource allocation
50 - Urban Widmark: fix gcc 2.95.2 problem and
51 remove writel's to fixed address 0x7c
54 - Urban Widmark: mdio locking, bounce buffer changes
55 merges from Beckers 1.05 version
56 added netif_running_on/off support
59 - Urban Widmark: merges from Beckers 1.08b version (VT6102 + mdio)
60 set netif_running_on/off on startup, del_timer_sync
63 - Manfred Spraul: added reset into tx_timeout
66 - Urban Widmark: merges from Beckers 1.10 version
67 (media selection + eeprom reload)
68 - David Vrabel: merges from D-Link "1.11" version
69 (disable WOL and PME on startup)
72 - Manfred Spraul: use "singlecopy" for unaligned buffers
73 don't allocate bounce buffers for !ReqTxAlign cards
76 - David Woodhouse: Set dev->base_addr before the first time we call
77 wait_for_reset(). It's a lot happier that way.
78 Free np->tx_bufs only if we actually allocated it.
81 - Martin Eriksson: Allow Memory-Mapped IO to be enabled.
85 - Replace some MII-related magic numbers with constants
88 - fixes comments for Rhine-III
89 - removes W_MAX_TIMEOUT (unused)
90 - adds HasDavicomPhy for Rhine-I (basis: linuxfet driver; my card
91 is R-I and has Davicom chip, flag is referenced in kernel driver)
92 - sends chip_id as a parameter to wait_for_reset since np is not
93 initialized on first call
94 - changes mmio "else if (chip_id==VT6102)" to "else" so it will work
95 for Rhine-III's (documentation says same bit is correct)
96 - transmit frame queue message is off by one - fixed
97 - adds IntrNormalSummary to "Something Wicked" exclusion list
98 so normal interrupts will not trigger the message (src: Donald Becker)
100 - show confused chip where to continue after Tx error
101 - location of collision counter is chip specific
102 - allow selecting backoff algorithm (module parameter)
105 - Use new MII lib helper generic_mii_ioctl
107 LK1.1.16 (Roger Luethi)
109 - Handle Tx buffer underrun
110 - Fix bugs in full duplex handling
111 - New reset code uses "force reset" cmd on Rhine-II
114 LK1.1.17 (Roger Luethi)
115 - Fix race in via_rhine_start_tx()
116 - On errors, wait for Tx engine to turn off before scavenging
117 - Handle Tx descriptor write-back race on Rhine-II
118 - Force flushing for PCI posted writes
119 - More reset code changes
121 LK1.1.18 (Roger Luethi)
122 - No filtering multicast in promisc mode (Edward Peng)
123 - Fix for Rhine-I Tx timeouts
125 LK1.1.19 (Roger Luethi)
126 - Increase Tx threshold for unspecified errors
130 #define DRV_NAME "via-rhine"
131 #define DRV_VERSION "1.1.19-2.5"
132 #define DRV_RELDATE "July-12-2003"
135 /* A few user-configurable values.
136 These may be modified when a driver module is loaded. */
138 static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
139 static int max_interrupt_work = 20;
141 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
142 Setting to > 1518 effectively disables this feature. */
143 static int rx_copybreak;
145 /* Select a backoff algorithm (Ethernet capture effect) */
148 /* Used to pass the media type, etc.
149 Both 'options[]' and 'full_duplex[]' should exist for driver
151 The media type is usually passed in 'options[]'.
152 The default is autonegotiation for speed and duplex.
153 This should rarely be overridden.
154 Use option values 0x10/0x20 for 10Mbps, 0x100,0x200 for 100Mbps.
155 Use option values 0x10 and 0x100 for forcing half duplex fixed speed.
156 Use option values 0x20 and 0x200 for forcing full duplex operation.
158 #define MAX_UNITS 8 /* More are supported, limit only on options */
159 static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
160 static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
162 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
163 The Rhine has a 64 element 8390-like hash table. */
164 static const int multicast_filter_limit = 32;
167 /* Operational parameters that are set at compile time. */
169 /* Keep the ring sizes a power of two for compile efficiency.
170 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
171 Making the Tx ring too large decreases the effectiveness of channel
172 bonding and packet priority.
173 There are no ill effects from too-large receive rings. */
174 #define TX_RING_SIZE 16
175 #define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
176 #define RX_RING_SIZE 16
179 /* Operational parameters that usually are not changed. */
181 /* Time in jiffies before concluding the transmitter is hung. */
182 #define TX_TIMEOUT (2*HZ)
184 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
186 #if !defined(__OPTIMIZE__) || !defined(__KERNEL__)
187 #warning You must compile this file with the correct options!
188 #warning See the last lines of the source file.
189 #error You must compile this driver with "-O".
192 #include <linux/module.h>
193 #include <linux/kernel.h>
194 #include <linux/string.h>
195 #include <linux/timer.h>
196 #include <linux/errno.h>
197 #include <linux/ioport.h>
198 #include <linux/slab.h>
199 #include <linux/interrupt.h>
200 #include <linux/pci.h>
201 #include <linux/netdevice.h>
202 #include <linux/etherdevice.h>
203 #include <linux/skbuff.h>
204 #include <linux/init.h>
205 #include <linux/delay.h>
206 #include <linux/mii.h>
207 #include <linux/ethtool.h>
208 #include <linux/crc32.h>
209 #include <asm/processor.h> /* Processor type for cache alignment. */
210 #include <asm/bitops.h>
213 #include <asm/uaccess.h>
215 /* These identify the driver base version and may not be removed. */
216 static char version[] __devinitdata =
217 KERN_INFO DRV_NAME ".c:v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n"
218 KERN_INFO " http://www.scyld.com/network/via-rhine.html\n";
220 static char shortname[] = DRV_NAME;
223 /* This driver was written to use PCI memory space, however most versions
224 of the Rhine only work correctly with I/O space accesses. */
225 #ifdef CONFIG_VIA_RHINE_MMIO
243 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
244 MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
245 MODULE_LICENSE("GPL");
247 MODULE_PARM(max_interrupt_work, "i");
248 MODULE_PARM(debug, "i");
249 MODULE_PARM(rx_copybreak, "i");
250 MODULE_PARM(backoff, "i");
251 MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
252 MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
253 MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
254 MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
255 MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
256 MODULE_PARM_DESC(backoff, "VIA Rhine: Bits 0-3: backoff algorithm");
257 MODULE_PARM_DESC(options, "VIA Rhine: Bits 0-3: media type, bit 17: full duplex");
258 MODULE_PARM_DESC(full_duplex, "VIA Rhine full duplex setting(s) (1)");
263 I. Board Compatibility
265 This driver is designed for the VIA 86c100A Rhine-I PCI Fast Ethernet
268 II. Board-specific settings
270 Boards with this chip are functional only in a bus-master PCI slot.
272 Many operational settings are loaded from the EEPROM to the Config word at
273 offset 0x78. For most of these settings, this driver assumes that they are
275 If this driver is compiled to use PCI memory space operations the EEPROM
276 must be configured to enable memory ops.
278 III. Driver operation
282 This driver uses two statically allocated fixed-size descriptor lists
283 formed into rings by a branch from the final descriptor to the beginning of
284 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
286 IIIb/c. Transmit/Receive Structure
288 This driver attempts to use a zero-copy receive and transmit scheme.
290 Alas, all data buffers are required to start on a 32 bit boundary, so
291 the driver must often copy transmit packets into bounce buffers.
293 The driver allocates full frame size skbuffs for the Rx ring buffers at
294 open() time and passes the skb->data field to the chip as receive data
295 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
296 a fresh skbuff is allocated and the frame is copied to the new skbuff.
297 When the incoming frame is larger, the skbuff is passed directly up the
298 protocol stack. Buffers consumed this way are replaced by newly allocated
299 skbuffs in the last phase of via_rhine_rx().
301 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
302 using a full-sized skbuff for small frames vs. the copying costs of larger
303 frames. New boards are typically used in generously configured machines
304 and the underfilled buffers have negligible impact compared to the benefit of
305 a single allocation size, so the default value of zero results in never
306 copying packets. When copying is done, the cost is usually mitigated by using
307 a combined copy/checksum routine. Copying also preloads the cache, which is
308 most useful with small frames.
310 Since the VIA chips are only able to transfer data to buffers on 32 bit
311 boundaries, the IP header at offset 14 in an ethernet frame isn't
312 longword aligned for further processing. Copying these unaligned buffers
313 has the beneficial effect of 16-byte aligning the IP header.
315 IIId. Synchronization
317 The driver runs as two independent, single-threaded flows of control. One
318 is the send-packet routine, which enforces single-threaded use by the
319 dev->priv->lock spinlock. The other thread is the interrupt handler, which
320 is single threaded by the hardware and interrupt handling software.
322 The send packet thread has partial control over the Tx ring. It locks the
323 dev->priv->lock whenever it's queuing a Tx packet. If the next slot in the ring
324 is not available it stops the transmit queue by calling netif_stop_queue.
326 The interrupt handler has exclusive control over the Rx ring and records stats
327 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
328 empty by incrementing the dirty_tx mark. If at least half of the entries in
329 the Rx ring are available the transmit queue is woken up if it was stopped.
335 Preliminary VT86C100A manual from http://www.via.com.tw/
336 http://www.scyld.com/expert/100mbps.html
337 http://www.scyld.com/expert/NWay.html
338 ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
339 ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF
344 The VT86C100A manual is not reliable information.
345 The 3043 chip does not handle unaligned transmit or receive buffers, resulting
346 in significant performance degradation for bounce buffer copies on transmit
347 and unaligned IP headers on receive.
348 The chip does not pad to minimum transmit length.
353 /* This table drives the PCI probe routines. It's mostly boilerplate in all
354 of the drivers, and will likely be provided by some future kernel.
355 Note the matching code -- the first table entry matches all 56** cards but
356 second only the 1234 card.
360 PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
361 PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3,
364 enum via_rhine_chips {
371 struct via_rhine_chip_info {
379 enum chip_capability_flags {
380 CanHaveMII=1, HasESIPhy=2, HasDavicomPhy=4,
381 ReqTxAlign=0x10, HasWOL=0x20, };
384 #define RHINE_IOTYPE (PCI_USES_MEM | PCI_USES_MASTER | PCI_ADDR1)
386 #define RHINE_IOTYPE (PCI_USES_IO | PCI_USES_MASTER | PCI_ADDR0)
388 /* Beware of PCI posted writes */
389 #define IOSYNC do { readb(dev->base_addr + StationAddr); } while (0)
391 /* directly indexed by enum via_rhine_chips, above */
392 static struct via_rhine_chip_info via_rhine_chip_info[] __devinitdata =
394 { "VIA VT86C100A Rhine", RHINE_IOTYPE, 128,
395 CanHaveMII | ReqTxAlign | HasDavicomPhy },
396 { "VIA VT6102 Rhine-II", RHINE_IOTYPE, 256,
397 CanHaveMII | HasWOL },
398 { "VIA VT6105 Rhine-III", RHINE_IOTYPE, 256,
399 CanHaveMII | HasWOL },
400 { "VIA VT6105M Rhine-III", RHINE_IOTYPE, 256,
401 CanHaveMII | HasWOL },
404 static struct pci_device_id via_rhine_pci_tbl[] =
406 {0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VT86C100A},
407 {0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VT6102},
408 {0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VT6105}, /* 6105{,L,LOM} */
409 {0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VT6105M},
410 {0,} /* terminate list */
412 MODULE_DEVICE_TABLE(pci, via_rhine_pci_tbl);
415 /* Offsets to the device registers. */
416 enum register_offsets {
417 StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
418 IntrStatus=0x0C, IntrEnable=0x0E,
419 MulticastFilter0=0x10, MulticastFilter1=0x14,
420 RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
421 MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E,
422 MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
423 ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
424 RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
425 StickyHW=0x83, IntrStatus2=0x84, WOLcrClr=0xA4, WOLcgClr=0xA7,
429 /* Bits in ConfigD */
431 BackOptional=0x01, BackModify=0x02,
432 BackCaptureEffect=0x04, BackRandom=0x08
436 /* Registers we check that mmio and reg are the same. */
437 int mmio_verify_registers[] = {
438 RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
443 /* Bits in the interrupt status/mask registers. */
444 enum intr_status_bits {
445 IntrRxDone=0x0001, IntrRxErr=0x0004, IntrRxEmpty=0x0020,
446 IntrTxDone=0x0002, IntrTxError=0x0008, IntrTxUnderrun=0x0210,
448 IntrStatsMax=0x0080, IntrRxEarly=0x0100,
449 IntrRxOverflow=0x0400, IntrRxDropped=0x0800, IntrRxNoBuf=0x1000,
450 IntrTxAborted=0x2000, IntrLinkChange=0x4000,
452 IntrNormalSummary=0x0003, IntrAbnormalSummary=0xC260,
453 IntrTxDescRace=0x080000, /* mapped from IntrStatus2 */
454 IntrTxErrSummary=0x082218,
457 /* The Rx and Tx buffer descriptors. */
460 u32 desc_length; /* Chain flag, Buffer/frame length */
466 u32 desc_length; /* Chain flag, Tx Config, Frame length */
471 /* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
472 #define TXDESC 0x00e08000
474 enum rx_status_bits {
475 RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
478 /* Bits in *_desc.*_status */
479 enum desc_status_bits {
483 /* Bits in ChipCmd. */
485 CmdInit=0x0001, CmdStart=0x0002, CmdStop=0x0004, CmdRxOn=0x0008,
486 CmdTxOn=0x0010, CmdTxDemand=0x0020, CmdRxDemand=0x0040,
487 CmdEarlyRx=0x0100, CmdEarlyTx=0x0200, CmdFDuplex=0x0400,
488 CmdNoTxPoll=0x0800, CmdReset=0x8000,
491 #define MAX_MII_CNT 4
/* Per-device driver state, hung off dev->priv.  Holds the DMA descriptor
   rings and their bus addresses, the skbuff bookkeeping arrays that shadow
   each ring slot, the Tx bounce buffers used by ReqTxAlign chips (Rhine-I
   requires 32-bit-aligned Tx buffers, see "Theory of Operation" above),
   and cached MII/media state. */
492 struct netdev_private {
493 /* Descriptor rings */
494 struct rx_desc *rx_ring;
495 struct tx_desc *tx_ring;
496 dma_addr_t rx_ring_dma;
497 dma_addr_t tx_ring_dma;
499 /* The addresses of receive-in-place skbuffs. */
500 struct sk_buff *rx_skbuff[RX_RING_SIZE];
501 dma_addr_t rx_skbuff_dma[RX_RING_SIZE];
503 /* The saved address of a sent-in-place packet/buffer, for later free(). */
504 struct sk_buff *tx_skbuff[TX_RING_SIZE];
505 dma_addr_t tx_skbuff_dma[TX_RING_SIZE];
507 /* Tx bounce buffers (one PKT_BUF_SZ slot per ring entry; only
   allocated for ReqTxAlign chips, see alloc_ring()) */
508 unsigned char *tx_buf[TX_RING_SIZE];
509 unsigned char *tx_bufs;
510 dma_addr_t tx_bufs_dma;
512 struct pci_dev *pdev;
513 struct net_device_stats stats;
514 struct timer_list timer; /* Media monitoring timer. */
517 /* Frequently used values: keep some adjacent for cache effect. */
518 int chip_id, drv_flags;
519 struct rx_desc *rx_head_desc;
520 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
521 unsigned int cur_tx, dirty_tx;
522 unsigned int rx_buf_sz; /* Based on MTU+slack. */
523 u16 chip_cmd; /* Current setting for ChipCmd */
525 /* These values keep track of the transceiver/media in use. */
526 unsigned int default_port:4; /* Last dev->if_port value. */
527 u8 tx_thresh, rx_thresh;
529 /* MII transceiver section. */
530 unsigned char phys[MAX_MII_CNT]; /* MII device addresses. */
531 unsigned int mii_cnt; /* number of MIIs found, but only the first one is used */
532 u16 mii_status; /* last read MII status */
533 struct mii_if_info mii_if;
536 static int mdio_read(struct net_device *dev, int phy_id, int location);
537 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
538 static int via_rhine_open(struct net_device *dev);
539 static void via_rhine_check_duplex(struct net_device *dev);
540 static void via_rhine_timer(unsigned long data);
541 static void via_rhine_tx_timeout(struct net_device *dev);
542 static int via_rhine_start_tx(struct sk_buff *skb, struct net_device *dev);
543 static irqreturn_t via_rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
544 static void via_rhine_tx(struct net_device *dev);
545 static void via_rhine_rx(struct net_device *dev);
546 static void via_rhine_error(struct net_device *dev, int intr_status);
547 static void via_rhine_set_rx_mode(struct net_device *dev);
548 static struct net_device_stats *via_rhine_get_stats(struct net_device *dev);
549 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
550 static struct ethtool_ops netdev_ethtool_ops;
551 static int via_rhine_close(struct net_device *dev);
/* Read the chip interrupt status as a single 32-bit value.  On the VT6102
   (Rhine-II) the extra IntrStatus2 byte is folded into bits 16-23, which is
   where the IntrTxDescRace=0x080000 bit (see intr_status_bits) comes from. */
553 static inline u32 get_intr_status(struct net_device *dev)
555 long ioaddr = dev->base_addr;
556 struct netdev_private *np = dev->priv;
559 intr_status = readw(ioaddr + IntrStatus);
560 /* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
561 if (np->chip_id == VT6102)
562 intr_status |= readb(ioaddr + IntrStatus2) << 16;
/* Poll until the CmdReset bit self-clears, forcing the reset on Rhine-II if
   the chip is slow.  chip_id is passed explicitly (rather than read from
   dev->priv) because np is not yet initialized on the first call from
   via_rhine_init_one() — see the LK1.1.14 changelog entry above. */
566 static void wait_for_reset(struct net_device *dev, int chip_id, char *name)
568 long ioaddr = dev->base_addr;
573 if (readw(ioaddr + ChipCmd) & CmdReset) {
574 printk(KERN_INFO "%s: Reset not complete yet. "
575 "Trying harder.\n", name);
577 /* Rhine-II needs to be forced sometimes */
578 if (chip_id == VT6102)
579 writeb(0x40, ioaddr + MiscCmd);
581 /* VT86C100A may need long delay after reset (dlink) */
582 /* Seen on Rhine-II as well (rl) */
583 while ((readw(ioaddr + ChipCmd) & CmdReset) && --boguscnt)
589 printk(KERN_INFO "%s: Reset %s.\n", name,
590 boguscnt ? "succeeded" : "failed");
/* Turn on memory-mapped I/O via port I/O writes to the config registers
   (PIO must be used here because MMIO is not enabled yet).  Rhine-I uses
   bit 0x20 of ConfigA; later chips use bit 0x80 of ConfigD.  Must be
   re-done after reload_eeprom(), which overwrites ConfigA-D. */
594 static void __devinit enable_mmio(long ioaddr, int chip_id)
597 if (chip_id == VT86C100A) {
598 /* More recent docs say that this bit is reserved ... */
599 n = inb(ioaddr + ConfigA) | 0x20;
600 outb(n, ioaddr + ConfigA);
602 n = inb(ioaddr + ConfigD) | 0x80;
603 outb(n, ioaddr + ConfigD);
/* Trigger an EEPROM auto-reload (bit 0x20 in MACRegEEcsr) and poll until
   the chip clears the bit.  NOTE: reloading overwrites ConfigA-D, so the
   caller must call enable_mmio() again afterwards when MMIO is in use
   (see the comment at the call site in via_rhine_init_one()). */
608 static void __devinit reload_eeprom(long ioaddr)
611 outb(0x20, ioaddr + MACRegEEcsr);
612 /* Typically 2 cycles to reload. */
613 for (i = 0; i < 150; i++)
614 if (! (inb(ioaddr + MACRegEEcsr) & 0x20))
618 #ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook: run the interrupt handler with the device IRQ masked so
   callers (e.g. netconsole) can poll the NIC from contexts where normal
   interrupt delivery is unavailable. */
619 static void via_rhine_poll(struct net_device *dev)
621 disable_irq(dev->irq);
622 via_rhine_interrupt(dev->irq, (void *)dev, NULL);
623 enable_irq(dev->irq);
627 static int __devinit via_rhine_init_one (struct pci_dev *pdev,
628 const struct pci_device_id *ent)
630 struct net_device *dev;
631 struct netdev_private *np;
633 int chip_id = (int) ent->driver_data;
634 static int card_idx = -1;
643 /* when built into the kernel, we only print version if device is found */
645 static int printed_version;
646 if (!printed_version++)
651 option = card_idx < MAX_UNITS ? options[card_idx] : 0;
652 io_size = via_rhine_chip_info[chip_id].io_size;
653 pci_flags = via_rhine_chip_info[chip_id].pci_flags;
655 if (pci_enable_device (pdev))
658 /* this should always be supported */
659 if (pci_set_dma_mask(pdev, 0xffffffff)) {
660 printk(KERN_ERR "32-bit PCI DMA addresses not supported by the card!?\n");
665 if ((pci_resource_len (pdev, 0) < io_size) ||
666 (pci_resource_len (pdev, 1) < io_size)) {
667 printk (KERN_ERR "Insufficient PCI resources, aborting\n");
671 ioaddr = pci_resource_start (pdev, 0);
672 memaddr = pci_resource_start (pdev, 1);
674 if (pci_flags & PCI_USES_MASTER)
675 pci_set_master (pdev);
677 dev = alloc_etherdev(sizeof(*np));
679 printk (KERN_ERR "init_ethernet failed for card #%d\n", card_idx);
682 SET_MODULE_OWNER(dev);
683 SET_NETDEV_DEV(dev, &pdev->dev);
685 if (pci_request_regions(pdev, shortname))
686 goto err_out_free_netdev;
690 enable_mmio(ioaddr0, chip_id);
692 ioaddr = (long) ioremap (memaddr, io_size);
694 printk (KERN_ERR "ioremap failed for device %s, region 0x%X @ 0x%lX\n",
695 pci_name(pdev), io_size, memaddr);
696 goto err_out_free_res;
699 /* Check that selected MMIO registers match the PIO ones */
701 while (mmio_verify_registers[i]) {
702 int reg = mmio_verify_registers[i++];
703 unsigned char a = inb(ioaddr0+reg);
704 unsigned char b = readb(ioaddr+reg);
706 printk (KERN_ERR "MMIO do not match PIO [%02x] (%02x != %02x)\n",
713 /* D-Link provided reset code (with comment additions) */
714 if (via_rhine_chip_info[chip_id].drv_flags & HasWOL) {
715 unsigned char byOrgValue;
717 /* clear sticky bit before reset & read ethernet address */
718 byOrgValue = readb(ioaddr + StickyHW);
719 byOrgValue = byOrgValue & 0xFC;
720 writeb(byOrgValue, ioaddr + StickyHW);
722 /* (bits written are cleared?) */
723 /* disable force PME-enable */
724 writeb(0x80, ioaddr + WOLcgClr);
725 /* disable power-event config bit */
726 writeb(0xFF, ioaddr + WOLcrClr);
727 /* clear power status (undocumented in vt6102 docs?) */
728 writeb(0xFF, ioaddr + PwrcsrClr);
731 /* Reset the chip to erase previous misconfiguration. */
732 writew(CmdReset, ioaddr + ChipCmd);
734 dev->base_addr = ioaddr;
735 wait_for_reset(dev, chip_id, shortname);
737 /* Reload the station address from the EEPROM. */
739 reload_eeprom(ioaddr);
741 reload_eeprom(ioaddr0);
742 /* Reloading from eeprom overwrites cfgA-D, so we must re-enable MMIO.
743 If reload_eeprom() was done first this could be avoided, but it is
744 not known if that still works with the "win98-reboot" problem. */
745 enable_mmio(ioaddr0, chip_id);
748 for (i = 0; i < 6; i++)
749 dev->dev_addr[i] = readb(ioaddr + StationAddr + i);
751 if (!is_valid_ether_addr(dev->dev_addr)) {
752 printk(KERN_ERR "Invalid MAC address for card #%d\n", card_idx);
756 if (chip_id == VT6102) {
758 * for 3065D, EEPROM reloaded will cause bit 0 in MAC_REG_CFGA
759 * turned on. it makes MAC receive magic packet
760 * automatically. So, we turn it off. (D-Link)
762 writeb(readb(ioaddr + ConfigA) & 0xFE, ioaddr + ConfigA);
765 /* Select backoff algorithm */
767 writeb(readb(ioaddr + ConfigD) & (0xF0 | backoff),
770 dev->irq = pdev->irq;
773 spin_lock_init (&np->lock);
774 np->chip_id = chip_id;
775 np->drv_flags = via_rhine_chip_info[chip_id].drv_flags;
777 np->mii_if.dev = dev;
778 np->mii_if.mdio_read = mdio_read;
779 np->mii_if.mdio_write = mdio_write;
780 np->mii_if.phy_id_mask = 0x1f;
781 np->mii_if.reg_num_mask = 0x1f;
784 option = dev->mem_start;
786 /* The chip-specific entries in the device structure. */
787 dev->open = via_rhine_open;
788 dev->hard_start_xmit = via_rhine_start_tx;
789 dev->stop = via_rhine_close;
790 dev->get_stats = via_rhine_get_stats;
791 dev->set_multicast_list = via_rhine_set_rx_mode;
792 dev->do_ioctl = netdev_ioctl;
793 dev->ethtool_ops = &netdev_ethtool_ops;
794 dev->tx_timeout = via_rhine_tx_timeout;
795 dev->watchdog_timeo = TX_TIMEOUT;
796 #ifdef CONFIG_NET_POLL_CONTROLLER
797 dev->poll_controller = via_rhine_poll;
799 if (np->drv_flags & ReqTxAlign)
800 dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
802 /* dev->name not defined before register_netdev()! */
803 i = register_netdev(dev);
807 /* The lower four bits are the media type. */
810 np->mii_if.full_duplex = 1;
811 np->default_port = option & 15;
813 if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
814 np->mii_if.full_duplex = 1;
816 if (np->mii_if.full_duplex) {
817 printk(KERN_INFO "%s: Set to forced full duplex, autonegotiation"
818 " disabled.\n", dev->name);
819 np->mii_if.force_media = 1;
822 printk(KERN_INFO "%s: %s at 0x%lx, ",
823 dev->name, via_rhine_chip_info[chip_id].name,
824 (pci_flags & PCI_USES_IO) ? ioaddr : memaddr);
826 for (i = 0; i < 5; i++)
827 printk("%2.2x:", dev->dev_addr[i]);
828 printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], pdev->irq);
830 pci_set_drvdata(pdev, dev);
832 if (np->drv_flags & CanHaveMII) {
833 int phy, phy_idx = 0;
834 np->phys[0] = 1; /* Standard for this chip. */
835 for (phy = 1; phy < 32 && phy_idx < MAX_MII_CNT; phy++) {
836 int mii_status = mdio_read(dev, phy, 1);
837 if (mii_status != 0xffff && mii_status != 0x0000) {
838 np->phys[phy_idx++] = phy;
839 np->mii_if.advertising = mdio_read(dev, phy, 4);
840 printk(KERN_INFO "%s: MII PHY found at address %d, status "
841 "0x%4.4x advertising %4.4x Link %4.4x.\n",
842 dev->name, phy, mii_status, np->mii_if.advertising,
843 mdio_read(dev, phy, 5));
845 /* set IFF_RUNNING */
846 if (mii_status & BMSR_LSTATUS)
847 netif_carrier_on(dev);
849 netif_carrier_off(dev);
854 np->mii_cnt = phy_idx;
855 np->mii_if.phy_id = np->phys[0];
858 /* Allow forcing the media type. */
861 np->mii_if.full_duplex = 1;
862 np->default_port = option & 0x3ff;
863 if (np->default_port & 0x330) {
864 /* FIXME: shouldn't someone check this variable? */
865 /* np->medialock = 1; */
866 printk(KERN_INFO " Forcing %dMbs %s-duplex operation.\n",
867 (option & 0x300 ? 100 : 10),
868 (option & 0x220 ? "full" : "half"));
870 mdio_write(dev, np->phys[0], MII_BMCR,
871 ((option & 0x300) ? 0x2000 : 0) | /* 100mbps? */
872 ((option & 0x220) ? 0x0100 : 0)); /* Full duplex? */
880 iounmap((void *)ioaddr);
883 pci_release_regions(pdev);
/* Allocate one coherent DMA region holding both the Rx and Tx descriptor
   rings (Rx first, Tx immediately after), plus — only for ReqTxAlign chips
   (Rhine-I needs 32-bit-aligned Tx data) — a PKT_BUF_SZ*TX_RING_SIZE bounce
   buffer area.  On bounce-buffer allocation failure the ring region is
   freed again so no memory leaks.  Returns 0 on success (error-return
   lines are elided in this view — presumably nonzero; verify in full file). */
890 static int alloc_ring(struct net_device* dev)
892 struct netdev_private *np = dev->priv;
896 ring = pci_alloc_consistent(np->pdev,
897 RX_RING_SIZE * sizeof(struct rx_desc) +
898 TX_RING_SIZE * sizeof(struct tx_desc),
901 printk(KERN_ERR "Could not allocate DMA memory.\n");
904 if (np->drv_flags & ReqTxAlign) {
905 np->tx_bufs = pci_alloc_consistent(np->pdev, PKT_BUF_SZ * TX_RING_SIZE,
907 if (np->tx_bufs == NULL) {
908 pci_free_consistent(np->pdev,
909 RX_RING_SIZE * sizeof(struct rx_desc) +
910 TX_RING_SIZE * sizeof(struct tx_desc),
917 np->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
918 np->rx_ring_dma = ring_dma;
919 np->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
/* Release the coherent DMA memory obtained by alloc_ring(): the combined
   Rx+Tx descriptor region and, when present, the Tx bounce buffers. */
924 void free_ring(struct net_device* dev)
926 struct netdev_private *np = dev->priv;
928 pci_free_consistent(np->pdev,
929 RX_RING_SIZE * sizeof(struct rx_desc) +
930 TX_RING_SIZE * sizeof(struct tx_desc),
931 np->rx_ring, np->rx_ring_dma);
935 pci_free_consistent(np->pdev, PKT_BUF_SZ * TX_RING_SIZE,
936 np->tx_bufs, np->tx_bufs_dma);
/* Initialize the Rx descriptor ring and populate it with freshly allocated
   skbuffs mapped for DMA.  Descriptors are chained via next_desc, with the
   last entry pointing back to the first (hardware follows the chain).
   Allocation failure mid-loop is tolerated: remaining slots simply stay
   empty and dirty_rx records how many buffers are outstanding. */
942 static void alloc_rbufs(struct net_device *dev)
944 struct netdev_private *np = dev->priv;
948 np->dirty_rx = np->cur_rx = 0;
950 np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
951 np->rx_head_desc = &np->rx_ring[0];
952 next = np->rx_ring_dma;
954 /* Init the ring entries */
955 for (i = 0; i < RX_RING_SIZE; i++) {
956 np->rx_ring[i].rx_status = 0;
957 np->rx_ring[i].desc_length = cpu_to_le32(np->rx_buf_sz);
958 next += sizeof(struct rx_desc);
959 np->rx_ring[i].next_desc = cpu_to_le32(next);
960 np->rx_skbuff[i] = 0;
962 /* Mark the last entry as wrapping the ring. */
963 np->rx_ring[i-1].next_desc = cpu_to_le32(np->rx_ring_dma);
965 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
966 for (i = 0; i < RX_RING_SIZE; i++) {
967 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
968 np->rx_skbuff[i] = skb;
971 skb->dev = dev; /* Mark as being used by this device. */
973 np->rx_skbuff_dma[i] =
974 pci_map_single(np->pdev, skb->tail, np->rx_buf_sz,
977 np->rx_ring[i].addr = cpu_to_le32(np->rx_skbuff_dma[i]);
978 np->rx_ring[i].rx_status = cpu_to_le32(DescOwn); /* hand slot to the chip */
980 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
/* Tear down the Rx ring: unmap and free every attached skbuff and poison
   each descriptor's buffer address so stale DMA is easy to spot. */
983 static void free_rbufs(struct net_device* dev)
985 struct netdev_private *np = dev->priv;
988 /* Free all the skbuffs in the Rx queue. */
989 for (i = 0; i < RX_RING_SIZE; i++) {
990 np->rx_ring[i].rx_status = 0;
991 np->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
992 if (np->rx_skbuff[i]) {
993 pci_unmap_single(np->pdev,
994 np->rx_skbuff_dma[i],
995 np->rx_buf_sz, PCI_DMA_FROMDEVICE);
996 dev_kfree_skb(np->rx_skbuff[i]);
998 np->rx_skbuff[i] = 0;
/* Initialize the Tx descriptor ring: clear status, preset desc_length to
   TXDESC, chain the descriptors into a ring (last entry wraps to first),
   and point each tx_buf[] slot at its PKT_BUF_SZ slice of the bounce
   buffer area (only meaningful when tx_bufs was allocated, i.e. for
   ReqTxAlign chips). */
1002 static void alloc_tbufs(struct net_device* dev)
1004 struct netdev_private *np = dev->priv;
1008 np->dirty_tx = np->cur_tx = 0;
1009 next = np->tx_ring_dma;
1010 for (i = 0; i < TX_RING_SIZE; i++) {
1011 np->tx_skbuff[i] = 0;
1012 np->tx_ring[i].tx_status = 0;
1013 np->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1014 next += sizeof(struct tx_desc);
1015 np->tx_ring[i].next_desc = cpu_to_le32(next);
1016 np->tx_buf[i] = &np->tx_bufs[i * PKT_BUF_SZ];
1018 np->tx_ring[i-1].next_desc = cpu_to_le32(np->tx_ring_dma);
/* Tear down the Tx ring: unmap any still-mapped packet (tx_skbuff_dma[i]
   is zero when the packet went through a bounce buffer instead of being
   mapped directly), free the skbuff, and poison the descriptor address. */
1022 static void free_tbufs(struct net_device* dev)
1024 struct netdev_private *np = dev->priv;
1027 for (i = 0; i < TX_RING_SIZE; i++) {
1028 np->tx_ring[i].tx_status = 0;
1029 np->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1030 np->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1031 if (np->tx_skbuff[i]) {
1032 if (np->tx_skbuff_dma[i]) {
1033 pci_unmap_single(np->pdev,
1034 np->tx_skbuff_dma[i],
1035 np->tx_skbuff[i]->len, PCI_DMA_TODEVICE);
1037 dev_kfree_skb(np->tx_skbuff[i]);
1039 np->tx_skbuff[i] = 0;
/* Program the chip for operation: station address, PCI bus config, FIFO
   thresholds, ring base pointers, Rx mode, interrupt mask, and the ChipCmd
   start bits; finally configure the PHY LED outputs.  Called from open
   and reset paths after wait_for_reset(). */
1044 static void init_registers(struct net_device *dev)
1046 struct netdev_private *np = dev->priv;
1047 long ioaddr = dev->base_addr;
1050 for (i = 0; i < 6; i++)
1051 writeb(dev->dev_addr[i], ioaddr + StationAddr + i);
1053 /* Initialize other registers. */
1054 writew(0x0006, ioaddr + PCIBusConfig); /* Tune configuration??? */
1055 /* Configure initial FIFO thresholds. */
1056 writeb(0x20, ioaddr + TxConfig);
1057 np->tx_thresh = 0x20;
1058 np->rx_thresh = 0x60; /* Written in via_rhine_set_rx_mode(). */
1059 np->mii_if.full_duplex = 0;
1061 if (dev->if_port == 0)
1062 dev->if_port = np->default_port;
1064 writel(np->rx_ring_dma, ioaddr + RxRingPtr);
1065 writel(np->tx_ring_dma, ioaddr + TxRingPtr);
1067 via_rhine_set_rx_mode(dev);
1069 /* Enable interrupts by setting the interrupt mask. */
1070 writew(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
1071 IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
1072 IntrTxDone | IntrTxError | IntrTxUnderrun |
1073 IntrPCIErr | IntrStatsMax | IntrLinkChange,
1074 ioaddr + IntrEnable);
1076 np->chip_cmd = CmdStart|CmdTxOn|CmdRxOn|CmdNoTxPoll;
1077 if (np->mii_if.force_media)
1078 np->chip_cmd |= CmdFDuplex;
1079 writew(np->chip_cmd, ioaddr + ChipCmd);
1081 via_rhine_check_duplex(dev);
1083 /* The LED outputs of various MII xcvrs should be configured. */
1084 /* For NS or Mison phys, turn on bit 1 in register 0x17 */
1085 /* For ESI phys, turn on bit 7 in register 0x17. */
/* NOTE: ?: binds looser than |, so the previous, unparenthesized form
   "reg | flag ? 0x0080 : 0x0001" evaluated as "(reg | flag) ? ...",
   discarding the register value just read and writing only a single bit.
   Parenthesize the conditional so the chosen bit is OR-ed into reg 0x17. */
1086 mdio_write(dev, np->phys[0], 0x17, mdio_read(dev, np->phys[0], 0x17) |
1087 ((np->drv_flags & HasESIPhy) ? 0x0080 : 0x0001));
/* Read and write over the MII Management Data I/O (MDIO) interface. */

/* Read 16-bit PHY register 'regnum' of PHY 'phy_id' through the chip's
 * MDIO window. Polls MIICmd until any previous command completes, bounded
 * by 'boguscnt' so a dead PHY cannot hang the CPU, then programs the
 * address registers, triggers the read and returns the data register. */
static int mdio_read(struct net_device *dev, int phy_id, int regnum)
	long ioaddr = dev->base_addr;
	int boguscnt = 1024;	/* upper bound on busy-wait iterations */

	/* Wait for a previous command to complete. */
	while ((readb(ioaddr + MIICmd) & 0x60) && --boguscnt > 0)
	writeb(0x00, ioaddr + MIICmd);
	writeb(phy_id, ioaddr + MIIPhyAddr)/* select PHY */;
	writeb(regnum, ioaddr + MIIRegAddr);
	writeb(0x40, ioaddr + MIICmd);	/* Trigger read */
	/* Wait for the read to finish: command bit 0x40 self-clears. */
	while ((readb(ioaddr + MIICmd) & 0x40) && --boguscnt > 0)
	return readw(ioaddr + MIIData);
/* Write 'value' to PHY register 'regnum' of PHY 'phy_id'. For the primary
 * PHY (np->phys[0]) user-initiated control/advertising writes are mirrored
 * into np->mii_if so the driver's duplex bookkeeping stays in sync. */
static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
	struct netdev_private *np = dev->priv;
	long ioaddr = dev->base_addr;
	int boguscnt = 1024;	/* busy-wait bound, same scheme as mdio_read() */

	if (phy_id == np->phys[0]) {
	case MII_BMCR:		/* Is user forcing speed/duplex? */
		if (value & 0x9000)	/* Autonegotiation. */
			np->mii_if.force_media = 0;
		np->mii_if.full_duplex = (value & 0x0100) ? 1 : 0;
		/* presumably the MII_ADVERTISE case of the switch — elided here */
		np->mii_if.advertising = value;

	/* Wait for a previous command to complete. */
	while ((readb(ioaddr + MIICmd) & 0x60) && --boguscnt > 0)
	writeb(0x00, ioaddr + MIICmd);
	writeb(phy_id, ioaddr + MIIPhyAddr);
	writeb(regnum, ioaddr + MIIRegAddr);
	writew(value, ioaddr + MIIData);	/* data must be loaded before the trigger */
	writeb(0x20, ioaddr + MIICmd);	/* Trigger write. */
/* dev->open handler: reset the chip, install the (shared) interrupt
 * handler, allocate descriptor rings, program the hardware registers and
 * start the periodic link-watch timer. */
static int via_rhine_open(struct net_device *dev)
	struct netdev_private *np = dev->priv;
	long ioaddr = dev->base_addr;

	/* Reset the chip. */
	writew(CmdReset, ioaddr + ChipCmd);

	/* SA_SHIRQ: the PCI interrupt line may be shared with other devices. */
	i = request_irq(np->pdev->irq, &via_rhine_interrupt, SA_SHIRQ, dev->name, dev);

	printk(KERN_DEBUG "%s: via_rhine_open() irq %d.\n",
	       dev->name, np->pdev->irq);

	i = alloc_ring(dev);	/* allocate Rx/Tx descriptor rings (DMA memory) */

	wait_for_reset(dev, np->chip_id, dev->name);
	init_registers(dev);

	printk(KERN_DEBUG "%s: Done via_rhine_open(), status %4.4x "
	       "MII status: %4.4x.\n",
	       dev->name, readw(ioaddr + ChipCmd),
	       mdio_read(dev, np->phys[0], MII_BMSR));

	netif_start_queue(dev);

	/* Set the timer to check for link beat. */
	init_timer(&np->timer);
	np->timer.expires = jiffies + 2 * HZ/100;	/* first tick after ~20ms */
	np->timer.data = (unsigned long)dev;
	np->timer.function = &via_rhine_timer;	/* timer handler */
	add_timer(&np->timer);
/* Read the MII link-partner ability register and switch the chip between
 * full and half duplex to match the negotiated result. Does nothing when
 * the user forced the media type, or when the PHY reads back all-ones
 * (typically an absent or unpowered PHY). */
static void via_rhine_check_duplex(struct net_device *dev)
	struct netdev_private *np = dev->priv;
	long ioaddr = dev->base_addr;
	int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
	int negotiated = mii_lpa & np->mii_if.advertising;	/* what both sides agreed on */

	if (np->mii_if.force_media || mii_lpa == 0xffff)
	/* Full duplex iff 100baseTx-FD (0x0100) was negotiated, or the only
	 * 10/100 ability bits left set are 10baseT-FD (0x01C0 field == 0x0040). */
	duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
	if (np->mii_if.full_duplex != duplex) {
		np->mii_if.full_duplex = duplex;
		printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
		       " partner capability of %4.4x.\n", dev->name,
		       duplex ? "full" : "half", np->phys[0], mii_lpa);
		np->chip_cmd |= CmdFDuplex;
		np->chip_cmd &= ~CmdFDuplex;
		/* latch the new duplex setting into the chip command register */
		writew(np->chip_cmd, ioaddr + ChipCmd);
/* Periodic (10 s) link watchdog: re-check duplex and make the carrier
 * state follow the PHY's "link established" status bit. Re-arms itself;
 * kernel timers are one-shot. */
static void via_rhine_timer(unsigned long data)
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = dev->priv;
	long ioaddr = dev->base_addr;
	int next_tick = 10*HZ;

	printk(KERN_DEBUG "%s: VIA Rhine monitor tick, status %4.4x.\n",
	       dev->name, readw(ioaddr + IntrStatus));

	/* serialize with other holders of np->lock (Tx scavenge, error path) */
	spin_lock_irq (&np->lock);

	via_rhine_check_duplex(dev);

	/* make IFF_RUNNING follow the MII status bit "Link established" */
	mii_status = mdio_read(dev, np->phys[0], MII_BMSR);
	if ( (mii_status & BMSR_LSTATUS) != (np->mii_status & BMSR_LSTATUS) ) {
		if (mii_status & BMSR_LSTATUS)
			netif_carrier_on(dev);
			netif_carrier_off(dev);
	np->mii_status = mii_status;	/* remembered for edge detection next tick */

	spin_unlock_irq (&np->lock);

	np->timer.expires = jiffies + next_tick;	/* re-arm the one-shot timer */
	add_timer(&np->timer);
/* Transmit watchdog (dev->tx_timeout): the transmitter wedged, so log the
 * chip and PHY status, reset the chip, reinitialize descriptors/registers
 * and restart the queue. */
static void via_rhine_tx_timeout (struct net_device *dev)
	struct netdev_private *np = dev->priv;
	long ioaddr = dev->base_addr;

	printk (KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
		"%4.4x, resetting...\n",
		dev->name, readw (ioaddr + IntrStatus),
		mdio_read (dev, np->phys[0], MII_BMSR));

	/* protect against concurrent rx interrupts */
	disable_irq(np->pdev->irq);

	spin_lock(&np->lock);

	/* Reset the chip. */
	writew(CmdReset, ioaddr + ChipCmd);

	/* clear all descriptors */

	/* Reinitialize the hardware. */
	wait_for_reset(dev, np->chip_id, dev->name);
	init_registers(dev);

	spin_unlock(&np->lock);
	enable_irq(np->pdev->irq);

	dev->trans_start = jiffies;	/* avoid an immediate watchdog re-trigger */
	np->stats.tx_errors++;
	netif_wake_queue(dev);
/* dev->hard_start_xmit: queue one skb on the Tx descriptor ring. Rhine-I
 * chips (ReqTxAlign) cannot DMA from unaligned/fragmented/checksum-pending
 * buffers, so those packets are copied into a driver-owned bounce buffer;
 * everything else is mapped for DMA in place. */
static int via_rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
	struct netdev_private *np = dev->priv;

	/* Caution: the write order is important here, set the field
	   with the "ownership" bits last. */

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;

	if (skb->len < ETH_ZLEN) {
		/* pad runt frames to the Ethernet minimum */
		skb = skb_padto(skb, ETH_ZLEN);
		/* NOTE(review): skb_padto() returns NULL on allocation failure
		 * (having freed the skb); the NULL check is presumably on an
		 * elided line — verify it exists. */

	np->tx_skbuff[entry] = skb;

	if ((np->drv_flags & ReqTxAlign) &&
		(((long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_HW)
		/* Must use alignment buffer. */
		if (skb->len > PKT_BUF_SZ) {
			/* packet too long, drop it */
			np->tx_skbuff[entry] = NULL;
			np->stats.tx_dropped++;
		/* copy (and fold the checksum) into the pre-mapped bounce buffer */
		skb_copy_and_csum_dev(skb, np->tx_buf[entry]);
		np->tx_skbuff_dma[entry] = 0;	/* 0 = no per-skb mapping to unmap later */
		np->tx_ring[entry].addr = cpu_to_le32(np->tx_bufs_dma +
						      (np->tx_buf[entry] - np->tx_bufs));
		/* normal path: DMA-map the skb data directly */
		np->tx_skbuff_dma[entry] =
			pci_map_single(np->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
		np->tx_ring[entry].addr = cpu_to_le32(np->tx_skbuff_dma[entry]);

	np->tx_ring[entry].desc_length =
		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));

	spin_lock_irq (&np->lock);

	/* hand the descriptor to the hardware — must be the last field written */
	np->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);

	/* Non-x86 Todo: explicitly flush cache lines here. */

	 * Wake the potentially-idle transmit channel unless errors are
	 * pending (the ISR must sort them out first).
	intr_status = get_intr_status(dev);
	if ((intr_status & IntrTxErrSummary) == 0) {
		writew(CmdTxDemand | np->chip_cmd, dev->base_addr + ChipCmd);

	/* ring is full once cur_tx runs TX_QUEUE_LEN ahead of dirty_tx */
	if (np->cur_tx == np->dirty_tx + TX_QUEUE_LEN)
		netif_stop_queue(dev);

	dev->trans_start = jiffies;

	spin_unlock_irq (&np->lock);

	printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
	       dev->name, np->cur_tx-1, entry);
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
/* Loops acknowledging and dispatching interrupt sources until the status
 * register reads clear, bounded by max_interrupt_work to avoid livelock
 * under a screaming interrupt. */
static irqreturn_t via_rhine_interrupt(int irq, void *dev_instance, struct pt_regs *rgs)
	struct net_device *dev = dev_instance;
	int boguscnt = max_interrupt_work;

	ioaddr = dev->base_addr;

	while ((intr_status = get_intr_status(dev))) {
		/* Acknowledge all of the current interrupt sources ASAP. */
		if (intr_status & IntrTxDescRace)
			writeb(0x08, ioaddr + IntrStatus2);	/* race flag lives in a second status reg */
		writew(intr_status & 0xffff, ioaddr + IntrStatus);

		printk(KERN_DEBUG "%s: Interrupt, status %8.8x.\n",
		       dev->name, intr_status);

		/* any Rx-related source: run the receive path */
		if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
				   IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf))

		if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
			if (intr_status & IntrTxErrSummary) {
				/* Avoid scavenging before Tx engine turned off */
				while ((readw(ioaddr+ChipCmd) & CmdTxOn) && --cnt)
				if (debug > 2 && !cnt)
					printk(KERN_WARNING "%s: via_rhine_interrupt() "
					       "Tx engine still on.\n",

		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (IntrPCIErr | IntrLinkChange |
				   IntrStatsMax | IntrTxError | IntrTxAborted |
				   IntrTxUnderrun | IntrTxDescRace))
			via_rhine_error(dev, intr_status);

		if (--boguscnt < 0) {
			/* bail out rather than service a stuck interrupt forever */
			printk(KERN_WARNING "%s: Too much work at interrupt, "
			       dev->name, intr_status);

	printk(KERN_DEBUG "%s: exiting interrupt, status=%8.8x.\n",
	       dev->name, readw(ioaddr + IntrStatus));
	return IRQ_RETVAL(handled);
/* This routine is logically part of the interrupt handler, but isolated
/* Scavenge completed Tx descriptors: account errors/collisions, unmap DMA
 * buffers, free the skbs, and wake the queue once enough ring slots have
 * been reclaimed. Runs under np->lock. */
static void via_rhine_tx(struct net_device *dev)
	struct netdev_private *np = dev->priv;
	int txstatus = 0, entry = np->dirty_tx % TX_RING_SIZE;

	spin_lock (&np->lock);

	/* find and cleanup dirty tx descriptors */
	while (np->dirty_tx != np->cur_tx) {
		txstatus = le32_to_cpu(np->tx_ring[entry].tx_status);
		printk(KERN_DEBUG " Tx scavenge %d status %8.8x.\n",
		if (txstatus & DescOwn)		/* still owned by the chip: stop scavenging */
		if (txstatus & 0x8000) {	/* transmit error summary bit */
			printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
			       dev->name, txstatus);
			np->stats.tx_errors++;
			if (txstatus & 0x0400) np->stats.tx_carrier_errors++;
			if (txstatus & 0x0200) np->stats.tx_window_errors++;
			if (txstatus & 0x0100) np->stats.tx_aborted_errors++;
			if (txstatus & 0x0080) np->stats.tx_heartbeat_errors++;
			if (((np->chip_id == VT86C100A) && txstatus & 0x0002) ||
			    (txstatus & 0x0800) || (txstatus & 0x1000)) {
				np->stats.tx_fifo_errors++;
				/* give the descriptor back to the chip for a retry */
				np->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
				break; /* Keep the skb - we try again */
			/* Transmitter restarted in 'abnormal' handler. */
			/* collision count lives in different bits on the VT86C100A */
			if (np->chip_id == VT86C100A)
				np->stats.collisions += (txstatus >> 3) & 0x0F;
				np->stats.collisions += txstatus & 0x0F;
			printk(KERN_DEBUG "collisions: %1.1x:%1.1x\n",
			       (txstatus >> 3) & 0xF,
			np->stats.tx_bytes += np->tx_skbuff[entry]->len;
			np->stats.tx_packets++;
		/* Free the original skb. */
		if (np->tx_skbuff_dma[entry]) {		/* 0 means the bounce buffer was used */
			pci_unmap_single(np->pdev,
					 np->tx_skbuff_dma[entry],
					 np->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
		dev_kfree_skb_irq(np->tx_skbuff[entry]);	/* irq-safe free */
		np->tx_skbuff[entry] = NULL;
		entry = (++np->dirty_tx) % TX_RING_SIZE;
	/* restart the queue once there is comfortable headroom in the ring */
	if ((np->cur_tx - np->dirty_tx) < TX_QUEUE_LEN - 4)
		netif_wake_queue (dev);

	spin_unlock (&np->lock);
/* This routine is logically part of the interrupt handler, but isolated
   for clarity and better register allocation. */
/* Drain completed Rx descriptors: hand good packets to the network stack
 * (copying small ones into a fresh skb, detaching the ring skb for large
 * ones), account errors, then refill any emptied ring slots and kick the
 * Rx engine. */
static void via_rhine_rx(struct net_device *dev)
	struct netdev_private *np = dev->priv;
	int entry = np->cur_rx % RX_RING_SIZE;
	/* at most one full ring's worth of packets per call */
	int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;

	printk(KERN_DEBUG "%s: via_rhine_rx(), entry %d status %8.8x.\n",
	       dev->name, entry, le32_to_cpu(np->rx_head_desc->rx_status));

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while ( ! (np->rx_head_desc->rx_status & cpu_to_le32(DescOwn))) {
		struct rx_desc *desc = np->rx_head_desc;
		u32 desc_status = le32_to_cpu(desc->rx_status);
		int data_size = desc_status >> 16;	/* frame length in the high half */

		printk(KERN_DEBUG " via_rhine_rx() status is %8.8x.\n",

		if ( (desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
			if ((desc_status & RxWholePkt) != RxWholePkt) {
				/* frame did not fit one buffer — we never chain buffers */
				printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
				       "multiple buffers, entry %#x length %d status %8.8x!\n",
				       dev->name, entry, data_size, desc_status);
				printk(KERN_WARNING "%s: Oversized Ethernet frame %p vs %p.\n",
				       dev->name, np->rx_head_desc, &np->rx_ring[entry]);
				np->stats.rx_length_errors++;
			} else if (desc_status & RxErr) {
				/* There was a error. */
				printk(KERN_DEBUG " via_rhine_rx() Rx error was %8.8x.\n",
				np->stats.rx_errors++;
				if (desc_status & 0x0030) np->stats.rx_length_errors++;
				if (desc_status & 0x0048) np->stats.rx_fifo_errors++;
				if (desc_status & 0x0004) np->stats.rx_frame_errors++;
				if (desc_status & 0x0002) {
					/* this can also be updated outside the interrupt handler */
					spin_lock (&np->lock);
					np->stats.rx_crc_errors++;
					spin_unlock (&np->lock);

			struct sk_buff *skb;
			/* Length should omit the CRC */
			int pkt_len = data_size - 4;

			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
				(skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				/* give the CPU coherent access to the DMA buffer */
				pci_dma_sync_single_for_cpu(np->pdev, np->rx_skbuff_dma[entry],
							    np->rx_buf_sz, PCI_DMA_FROMDEVICE);

				/* *_IP_COPYSUM isn't defined anywhere and eth_copy_and_sum
				   is memcpy for all archs so this is kind of pointless right
#if HAS_IP_COPYSUM			/* Call copy + cksum if available. */
				eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
				skb_put(skb, pkt_len);
				memcpy(skb_put(skb, pkt_len), np->rx_skbuff[entry]->tail,
				/* return the buffer to the device */
				pci_dma_sync_single_for_device(np->pdev, np->rx_skbuff_dma[entry],
							       np->rx_buf_sz, PCI_DMA_FROMDEVICE);
				/* large packet: detach the ring skb and pass it up as-is */
				skb = np->rx_skbuff[entry];
				printk(KERN_ERR "%s: Inconsistent Rx descriptor chain.\n",
				np->rx_skbuff[entry] = NULL;
				skb_put(skb, pkt_len);
				pci_unmap_single(np->pdev, np->rx_skbuff_dma[entry],
						 np->rx_buf_sz, PCI_DMA_FROMDEVICE);
			skb->protocol = eth_type_trans(skb, dev);
			dev->last_rx = jiffies;
			np->stats.rx_bytes += pkt_len;
			np->stats.rx_packets++;
		entry = (++np->cur_rx) % RX_RING_SIZE;
		np->rx_head_desc = &np->rx_ring[entry];

	/* Refill the Rx ring buffers. */
	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
		struct sk_buff *skb;
		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(np->rx_buf_sz);
			np->rx_skbuff[entry] = skb;
				break;			/* Better luck next round. */
			skb->dev = dev;			/* Mark as being used by this device. */
			np->rx_skbuff_dma[entry] =
				pci_map_single(np->pdev, skb->tail, np->rx_buf_sz,
					       PCI_DMA_FROMDEVICE);
			np->rx_ring[entry].addr = cpu_to_le32(np->rx_skbuff_dma[entry]);
		/* hand the refilled slot back to the hardware */
		np->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);

	/* Pre-emptively restart Rx engine. */
	writew(readw(dev->base_addr + ChipCmd) | CmdRxOn | CmdRxDemand,
	       dev->base_addr + ChipCmd);
/* Clears the "tally counters" for CRC errors and missed frames(?).
   It has been reported that some chips need a write of 0 to clear
   these, for others the counters are set to 1 when written to and
   instead cleared when read. So we clear them both ways ... */
static inline void clear_tally_counters(const long ioaddr)
	writel(0, ioaddr + RxMissed);	/* write-to-clear variant */
	readw(ioaddr + RxCRCErrs);	/* read-to-clear variant */
	readw(ioaddr + RxMissed);
/* Restart the Tx engine after an error, pointing the chip at the first
 * unscavenged descriptor. Skipped if a new Tx error summary is already
 * pending — the ISR will handle it and come back here. */
static void via_rhine_restart_tx(struct net_device *dev) {
	struct netdev_private *np = dev->priv;
	long ioaddr = dev->base_addr;
	int entry = np->dirty_tx % TX_RING_SIZE;	/* oldest un-freed slot */

	 * If new errors occured, we need to sort them out before doing Tx.
	 * In that case the ISR will be back here RSN anyway.
	intr_status = get_intr_status(dev);

	if ((intr_status & IntrTxErrSummary) == 0) {

		/* We know better than the chip where it should continue. */
		writel(np->tx_ring_dma + entry * sizeof(struct tx_desc),
		       ioaddr + TxRingPtr);

		writew(CmdTxDemand | np->chip_cmd, ioaddr + ChipCmd);
		/* This should never happen */
		printk(KERN_WARNING "%s: via_rhine_restart_tx() "
		       "Another error occured %8.8x.\n",
		       dev->name, intr_status);
1643 static void via_rhine_error(struct net_device *dev, int intr_status)
1645 struct netdev_private *np = dev->priv;
1646 long ioaddr = dev->base_addr;
1648 spin_lock (&np->lock);
1650 if (intr_status & (IntrLinkChange)) {
1651 if (readb(ioaddr + MIIStatus) & 0x02) {
1652 /* Link failed, restart autonegotiation. */
1653 if (np->drv_flags & HasDavicomPhy)
1654 mdio_write(dev, np->phys[0], MII_BMCR, 0x3300);
1656 via_rhine_check_duplex(dev);
1658 printk(KERN_ERR "%s: MII status changed: Autonegotiation "
1659 "advertising %4.4x partner %4.4x.\n", dev->name,
1660 mdio_read(dev, np->phys[0], MII_ADVERTISE),
1661 mdio_read(dev, np->phys[0], MII_LPA));
1663 if (intr_status & IntrStatsMax) {
1664 np->stats.rx_crc_errors += readw(ioaddr + RxCRCErrs);
1665 np->stats.rx_missed_errors += readw(ioaddr + RxMissed);
1666 clear_tally_counters(ioaddr);
1668 if (intr_status & IntrTxAborted) {
1670 printk(KERN_INFO "%s: Abort %8.8x, frame dropped.\n",
1671 dev->name, intr_status);
1673 if (intr_status & IntrTxUnderrun) {
1674 if (np->tx_thresh < 0xE0)
1675 writeb(np->tx_thresh += 0x20, ioaddr + TxConfig);
1677 printk(KERN_INFO "%s: Transmitter underrun, Tx "
1678 "threshold now %2.2x.\n",
1679 dev->name, np->tx_thresh);
1681 if (intr_status & IntrTxDescRace) {
1683 printk(KERN_INFO "%s: Tx descriptor write-back race.\n",
1686 if ((intr_status & IntrTxError) && ~( IntrTxAborted | IntrTxUnderrun |
1688 if (np->tx_thresh < 0xE0) {
1689 writeb(np->tx_thresh += 0x20, ioaddr + TxConfig);
1692 printk(KERN_INFO "%s: Unspecified error. Tx "
1693 "threshold now %2.2x.\n",
1694 dev->name, np->tx_thresh);
1696 if (intr_status & ( IntrTxAborted | IntrTxUnderrun | IntrTxDescRace |
1698 via_rhine_restart_tx(dev);
1700 if (intr_status & ~( IntrLinkChange | IntrStatsMax | IntrTxUnderrun |
1701 IntrTxError | IntrTxAborted | IntrNormalSummary |
1704 printk(KERN_ERR "%s: Something Wicked happened! %8.8x.\n",
1705 dev->name, intr_status);
1708 spin_unlock (&np->lock);
/* dev->get_stats: fold the hardware tally counters into the software
 * statistics (clearing them so they are not counted twice) and return the
 * accumulated stats. Lock protects against the same fold in the ISR path. */
static struct net_device_stats *via_rhine_get_stats(struct net_device *dev)
	struct netdev_private *np = dev->priv;
	long ioaddr = dev->base_addr;
	unsigned long flags;

	spin_lock_irqsave(&np->lock, flags);
	np->stats.rx_crc_errors += readw(ioaddr + RxCRCErrs);
	np->stats.rx_missed_errors += readw(ioaddr + RxMissed);
	clear_tally_counters(ioaddr);
	spin_unlock_irqrestore(&np->lock, flags);
/* dev->set_multicast_list: program the receive filter — promiscuous,
 * all-multicast, or a 64-bit CRC hash of the subscribed multicast
 * addresses — then write the Rx threshold + mode byte. */
static void via_rhine_set_rx_mode(struct net_device *dev)
	struct netdev_private *np = dev->priv;
	long ioaddr = dev->base_addr;
	u32 mc_filter[2];	/* Multicast hash filter */
	u8 rx_mode;		/* Note: 0x02=accept runt, 0x01=accept errs */
	/* NOTE(review): rx_mode is presumably assigned in each branch on
	 * elided lines — verify before relying on the value written below. */

	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		/* Unconditionally log net taps. */
		printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
		writel(0xffffffff, ioaddr + MulticastFilter0);	/* accept every hash bucket */
		writel(0xffffffff, ioaddr + MulticastFilter1);
	} else if ((dev->mc_count > multicast_filter_limit)
		   || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		writel(0xffffffff, ioaddr + MulticastFilter0);
		writel(0xffffffff, ioaddr + MulticastFilter1);
		struct dev_mc_list *mclist;

		memset(mc_filter, 0, sizeof(mc_filter));
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			/* top 6 bits of the Ethernet CRC select one of 64 hash bits */
			int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;

			mc_filter[bit_nr >> 5] |= cpu_to_le32(1 << (bit_nr & 31));
		writel(mc_filter[0], ioaddr + MulticastFilter0);
		writel(mc_filter[1], ioaddr + MulticastFilter1);
	/* Rx threshold (high bits) and accept-mode flags (low bits) share a register */
	writeb(np->rx_thresh | rx_mode, ioaddr + RxConfig);
1762 static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1764 struct netdev_private *np = dev->priv;
1766 strcpy (info->driver, DRV_NAME);
1767 strcpy (info->version, DRV_VERSION);
1768 strcpy (info->bus_info, pci_name(np->pdev));
/* ethtool get_settings: delegate to the generic MII helper under the
 * device lock; rejected when the chip has no MII transceiver. */
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
	struct netdev_private *np = dev->priv;

	if (!(np->drv_flags & CanHaveMII))

	spin_lock_irq(&np->lock);
	rc = mii_ethtool_gset(&np->mii_if, cmd);
	spin_unlock_irq(&np->lock);
/* ethtool set_settings: delegate to the generic MII helper under the
 * device lock; rejected when the chip has no MII transceiver. */
static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
	struct netdev_private *np = dev->priv;

	if (!(np->drv_flags & CanHaveMII))

	spin_lock_irq(&np->lock);
	rc = mii_ethtool_sset(&np->mii_if, cmd);
	spin_unlock_irq(&np->lock);
/* ethtool nway_reset: restart MII autonegotiation via the generic helper;
 * rejected when the chip has no MII transceiver. */
static int netdev_nway_reset(struct net_device *dev)
	struct netdev_private *np = dev->priv;

	if (!(np->drv_flags & CanHaveMII))

	return mii_nway_restart(&np->mii_if);
/* ethtool get_link: report MII link state; 0 ("no link") when the chip
 * has no MII transceiver. */
static u32 netdev_get_link(struct net_device *dev)
	struct netdev_private *np = dev->priv;

	if (!(np->drv_flags & CanHaveMII))
		return 0;	/* -EINVAL */

	return mii_link_ok(&np->mii_if);
/* ethtool msglevel accessors. Bodies are elided here; presumably the
 * getter returns the module-wide 'debug' level and the setter stores
 * 'value' into it — verify against the full source. */
static u32 netdev_get_msglevel(struct net_device *dev)

static void netdev_set_msglevel(struct net_device *dev, u32 value)
/* ethtool entry points, hooked up via dev->ethtool_ops at probe time. */
static struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo = netdev_get_drvinfo,
	.get_settings = netdev_get_settings,
	.set_settings = netdev_set_settings,
	.nway_reset = netdev_nway_reset,
	.get_link = netdev_get_link,
	.get_msglevel = netdev_get_msglevel,
	.set_msglevel = netdev_set_msglevel,
	.get_sg = ethtool_op_get_sg,		/* generic helpers for SG/csum queries */
	.get_tx_csum = ethtool_op_get_tx_csum,
/* dev->do_ioctl: service MII ioctls (SIOCGMIIPHY etc.) through the generic
 * MII helper, under the device lock; only valid while the interface is up. */
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
	struct netdev_private *np = dev->priv;
	struct mii_ioctl_data *data = (struct mii_ioctl_data *) & rq->ifr_data;

	if (!netif_running(dev))

	spin_lock_irq(&np->lock);
	rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL);
	spin_unlock_irq(&np->lock);
/* dev->stop handler: cancel the link timer, quiesce the hardware (mask
 * interrupts, stop Tx/Rx) and release the IRQ. */
static int via_rhine_close(struct net_device *dev)
	long ioaddr = dev->base_addr;
	struct netdev_private *np = dev->priv;

	del_timer_sync(&np->timer);	/* waits for a running timer handler to finish */

	spin_lock_irq(&np->lock);

	netif_stop_queue(dev);

	printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
	       dev->name, readw(ioaddr + ChipCmd));

	/* Switch to loopback mode to avoid hardware races. */
	writeb(np->tx_thresh | 0x02, ioaddr + TxConfig);

	/* Disable interrupts by clearing the interrupt mask. */
	writew(0x0000, ioaddr + IntrEnable);

	/* Stop the chip's Tx and Rx processes. */
	writew(CmdStop, ioaddr + ChipCmd);

	spin_unlock_irq(&np->lock);

	free_irq(np->pdev->irq, dev);
/* PCI remove callback: unregister the net device, release the PCI regions
 * and mapping, and clear the driver data. __devexit: discarded when the
 * kernel cannot hot-remove devices. */
static void __devexit via_rhine_remove_one (struct pci_dev *pdev)
	struct net_device *dev = pci_get_drvdata(pdev);

	unregister_netdev(dev);

	pci_release_regions(pdev);

	iounmap((char *)(dev->base_addr));	/* undo the MMIO mapping made at probe */

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
/* PCI driver glue: binds via_rhine_init_one / via_rhine_remove_one to the
 * device IDs in via_rhine_pci_tbl. */
static struct pci_driver via_rhine_driver = {
	.name = "via-rhine",
	.id_table = via_rhine_pci_tbl,
	.probe = via_rhine_init_one,
	.remove = __devexit_p(via_rhine_remove_one),	/* NULL when __devexit is discarded */
/* Module entry point: register the PCI driver. */
static int __init via_rhine_init (void)
	/* when a module, this is printed whether or not devices are found in probe */
	return pci_module_init (&via_rhine_driver);
/* Module exit point: unregister the PCI driver (detaches all devices). */
static void __exit via_rhine_cleanup (void)
	pci_unregister_driver (&via_rhine_driver);
/* Hook the init/exit functions into the module loader. */
module_init(via_rhine_init);
module_exit(via_rhine_cleanup);
1942 * compile-command: "gcc -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c via-rhine.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"