1 /* winbond-840.c: A Linux PCI network adapter device driver. */
3 Written 1998-2001 by Donald Becker.
5 This software may be used and distributed according to the terms of
6 the GNU General Public License (GPL), incorporated herein by reference.
7 Drivers based on or derived from this code fall under the GPL and must
8 retain the authorship, copyright and license notice. This file is not
9 a complete program and may only be used when the entire operating
10 system is licensed under the GPL.
12 The author may be reached as becker@scyld.com, or C/O
13 Scyld Computing Corporation
14 410 Severn Ave., Suite 210
17 Support and updates available at
18 http://www.scyld.com/network/drivers.html
20 Do not remove the copyright information.
21 Do not change the version information unless an improvement has been made.
22 Merely removing my name, as Compex has done in the past, does not count
28 * spin lock update, memory barriers, new style dma mappings
29 limit each tx buffer to < 1024 bytes
30 remove DescIntr from Rx descriptors (that's a Tx flag)
31 remove next pointer from Tx descriptors
32 synchronize tx_q_bytes
33 software reset in tx_timeout
34 Copyright (C) 2000 Manfred Spraul
37 support for big endian descriptors
38 Copyright (C) 2001 Manfred Spraul
39 * ethtool support (jgarzik)
40 * Replace some MII-related magic numbers with constants (jgarzik)
43 * enable pci_power_off
47 #define DRV_NAME "winbond-840"
48 #define DRV_VERSION "1.01-d"
49 #define DRV_RELDATE "Nov-17-2001"
52 /* Automatically extracted configuration info:
53 probe-func: winbond840_probe
54 config-in: tristate 'Winbond W89c840 Ethernet support' CONFIG_WINBOND_840
56 c-help-name: Winbond W89c840 PCI Ethernet support
57 c-help-symbol: CONFIG_WINBOND_840
58 c-help: This driver is for the Winbond W89c840 chip. It also works with
59 c-help: the TX9882 chip on the Compex RL100-ATX board.
60 c-help: More specific information and updates are available from
61 c-help: http://www.scyld.com/network/drivers.html
64 /* The user-configurable values.
65 These may be modified when a driver module is loaded.*/
67 static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
68 static int max_interrupt_work = 20;
69 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
70 The '840 uses a 64 element hash table based on the Ethernet CRC. */
71 static int multicast_filter_limit = 32;
73 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
74 Setting to > 1518 effectively disables this feature. */
75 static int rx_copybreak;
77 /* Used to pass the media type, etc.
78 Both 'options[]' and 'full_duplex[]' should exist for driver
80 The media type is usually passed in 'options[]'.
82 #define MAX_UNITS 8 /* More are supported, limit only on options */
83 static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
84 static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
86 /* Operational parameters that are set at compile time. */
88 /* Keep the ring sizes a power of two for compile efficiency.
89 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
90 Making the Tx ring too large decreases the effectiveness of channel
91 bonding and packet priority.
92 There are no ill effects from too-large receive rings. */
93 #define TX_RING_SIZE 16
94 #define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
95 #define TX_QUEUE_LEN_RESTART 5
96 #define RX_RING_SIZE 32
98 #define TX_BUFLIMIT (1024-128)
100 /* The presumed FIFO size for working around the Tx-FIFO-overflow bug.
101 To avoid overflowing we don't queue again until we have room for a
104 #define TX_FIFO_SIZE (2048)
105 #define TX_BUG_FIFO_LIMIT (TX_FIFO_SIZE-1514-16)
108 /* Operational parameters that usually are not changed. */
109 /* Time in jiffies before concluding the transmitter is hung. */
110 #define TX_TIMEOUT (2*HZ)
112 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
114 /* Include files, designed to support most kernel versions 2.0.0 and later. */
115 #include <linux/module.h>
116 #include <linux/kernel.h>
117 #include <linux/string.h>
118 #include <linux/timer.h>
119 #include <linux/errno.h>
120 #include <linux/ioport.h>
121 #include <linux/slab.h>
122 #include <linux/interrupt.h>
123 #include <linux/pci.h>
124 #include <linux/netdevice.h>
125 #include <linux/etherdevice.h>
126 #include <linux/skbuff.h>
127 #include <linux/init.h>
128 #include <linux/delay.h>
129 #include <linux/ethtool.h>
130 #include <linux/mii.h>
131 #include <linux/rtnetlink.h>
132 #include <linux/crc32.h>
133 #include <asm/uaccess.h>
134 #include <asm/processor.h> /* Processor type for cache alignment. */
135 #include <asm/bitops.h>
139 /* These identify the driver base version and may not be removed. */
140 static char version[] __devinitdata =
141 KERN_INFO DRV_NAME ".c:v" DRV_VERSION " (2.4 port) " DRV_RELDATE " Donald Becker <becker@scyld.com>\n"
142 KERN_INFO " http://www.scyld.com/network/drivers.html\n";
144 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
145 MODULE_DESCRIPTION("Winbond W89c840 Ethernet driver");
146 MODULE_LICENSE("GPL");
147 MODULE_VERSION(DRV_VERSION);
149 MODULE_PARM(max_interrupt_work, "i");
150 MODULE_PARM(debug, "i");
151 MODULE_PARM(rx_copybreak, "i");
152 MODULE_PARM(multicast_filter_limit, "i");
153 MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
154 MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
155 MODULE_PARM_DESC(max_interrupt_work, "winbond-840 maximum events handled per interrupt");
156 MODULE_PARM_DESC(debug, "winbond-840 debug level (0-6)");
157 MODULE_PARM_DESC(rx_copybreak, "winbond-840 copy breakpoint for copy-only-tiny-frames");
158 MODULE_PARM_DESC(multicast_filter_limit, "winbond-840 maximum number of filtered multicast addresses");
159 MODULE_PARM_DESC(options, "winbond-840: Bits 0-3: media type, bit 17: full duplex");
160 MODULE_PARM_DESC(full_duplex, "winbond-840 full duplex setting(s) (1)");
165 I. Board Compatibility
167 This driver is for the Winbond w89c840 chip.
169 II. Board-specific settings
173 III. Driver operation
175 This chip is very similar to the Digital 21*4* "Tulip" family. The first
176 twelve registers and the descriptor format are nearly identical. Read a
177 Tulip manual for operational details.
179 A significant difference is that the multicast filter and station address are
180 stored in registers rather than loaded through a pseudo-transmit packet.
182 Unlike the Tulip, transmit buffers are limited to 1KB. To transmit a
183 full-sized packet we must use both data buffers in a descriptor. Thus the
184 driver uses ring mode where descriptors are implicitly sequential in memory,
185 rather than using the second descriptor address as a chain pointer to
186 subsequent descriptors.
190 If you are going to almost clone a Tulip, why not go all the way and avoid
191 the need for a new driver?
195 http://www.scyld.com/expert/100mbps.html
196 http://www.scyld.com/expert/NWay.html
197 http://www.winbond.com.tw/
201 A horrible bug exists in the transmit FIFO. Apparently the chip doesn't
202 correctly detect a full FIFO, and queuing more than 2048 bytes may result in
203 silent data corruption.
205 Test with 'ping -s 10000' on a fast computer.
214 enum pci_id_flags_bits {
215 /* Set PCI command register bits before calling probe1(). */
216 PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
217 /* Read and map the single following PCI BAR. */
218 PCI_ADDR0=0<<4, PCI_ADDR1=1<<4, PCI_ADDR2=2<<4, PCI_ADDR3=3<<4,
219 PCI_ADDR_64BITS=0x100, PCI_NO_ACPI_WAKE=0x200, PCI_NO_MIN_LATENCY=0x400,
221 enum chip_capability_flags {
222 CanHaveMII=1, HasBrokenTx=2, AlwaysFDX=4, FDXOnNoMII=8,};
224 #define W840_FLAGS (PCI_USES_IO | PCI_ADDR0 | PCI_USES_MASTER)
226 #define W840_FLAGS (PCI_USES_MEM | PCI_ADDR1 | PCI_USES_MASTER)
229 static struct pci_device_id w840_pci_tbl[] = {
230 { 0x1050, 0x0840, PCI_ANY_ID, 0x8153, 0, 0, 0 },
231 { 0x1050, 0x0840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
232 { 0x11f6, 0x2011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
235 MODULE_DEVICE_TABLE(pci, w840_pci_tbl);
240 int pci, pci_mask, subsystem, subsystem_mask;
241 int revision, revision_mask; /* Only 8 bits. */
243 enum pci_id_flags_bits pci_flags;
244 int io_size; /* Needed for I/O region check or ioremap(). */
245 int drv_flags; /* Driver use, intended as capability flags. */
247 static struct pci_id_info pci_id_tbl[] = {
248 {"Winbond W89c840", /* Sometime a Level-One switch card. */
249 { 0x08401050, 0xffffffff, 0x81530000, 0xffff0000 },
250 W840_FLAGS, 128, CanHaveMII | HasBrokenTx | FDXOnNoMII},
251 {"Winbond W89c840", { 0x08401050, 0xffffffff, },
252 W840_FLAGS, 128, CanHaveMII | HasBrokenTx},
253 {"Compex RL100-ATX", { 0x201111F6, 0xffffffff,},
254 W840_FLAGS, 128, CanHaveMII | HasBrokenTx},
255 {NULL,}, /* 0 terminated list. */
258 /* This driver was written to use PCI memory space, however some x86 systems
259 work only with I/O space accesses. Pass -DUSE_IO_OPS to use PCI I/O space
260 accesses instead of memory space. */
277 /* Offsets to the Command and Status Registers, "CSRs".
278 While similar to the Tulip, these registers are longword aligned.
279 Note: It's not useful to define symbolic names for every register bit in
280 the device. The name can only partially document the semantics and make
281 the driver longer and more difficult to read.
284 PCIBusCfg=0x00, TxStartDemand=0x04, RxStartDemand=0x08,
285 RxRingPtr=0x0C, TxRingPtr=0x10,
286 IntrStatus=0x14, NetworkConfig=0x18, IntrEnable=0x1C,
287 RxMissed=0x20, EECtrl=0x24, MIICtrl=0x24, BootRom=0x28, GPTimer=0x2C,
288 CurRxDescAddr=0x30, CurRxBufAddr=0x34, /* Debug use */
289 MulticastFilter0=0x38, MulticastFilter1=0x3C, StationAddr=0x40,
290 CurTxDescAddr=0x4C, CurTxBufAddr=0x50,
293 /* Bits in the interrupt status/enable registers. */
294 /* The bits in the Intr Status/Enable registers, mostly interrupt sources. */
295 enum intr_status_bits {
296 NormalIntr=0x10000, AbnormalIntr=0x8000,
297 IntrPCIErr=0x2000, TimerInt=0x800,
298 IntrRxDied=0x100, RxNoBuf=0x80, IntrRxDone=0x40,
299 TxFIFOUnderflow=0x20, RxErrIntr=0x10,
300 TxIdle=0x04, IntrTxStopped=0x02, IntrTxDone=0x01,
303 /* Bits in the NetworkConfig register. */
305 AcceptErr=0x80, AcceptRunt=0x40,
306 AcceptBroadcast=0x20, AcceptMulticast=0x10,
307 AcceptAllPhys=0x08, AcceptMyPhys=0x02,
311 MDIO_ShiftClk=0x10000, MDIO_DataIn=0x80000, MDIO_DataOut=0x20000,
312 MDIO_EnbOutput=0x40000, MDIO_EnbIn = 0x00000,
315 /* The Tulip Rx and Tx buffer descriptors. */
316 struct w840_rx_desc {
323 struct w840_tx_desc {
326 u32 buffer1, buffer2;
329 /* Bits in network_desc.status */
330 enum desc_status_bits {
331 DescOwn=0x80000000, DescEndRing=0x02000000, DescUseLink=0x01000000,
332 DescWholePkt=0x60000000, DescStartPkt=0x20000000, DescEndPkt=0x40000000,
336 #define MII_CNT 1 /* winbond only supports one MII */
337 struct netdev_private {
338 struct w840_rx_desc *rx_ring;
339 dma_addr_t rx_addr[RX_RING_SIZE];
340 struct w840_tx_desc *tx_ring;
341 dma_addr_t tx_addr[TX_RING_SIZE];
342 dma_addr_t ring_dma_addr;
343 /* The addresses of receive-in-place skbuffs. */
344 struct sk_buff* rx_skbuff[RX_RING_SIZE];
345 /* The saved address of a sent-in-place packet/buffer, for later free(). */
346 struct sk_buff* tx_skbuff[TX_RING_SIZE];
347 struct net_device_stats stats;
348 struct timer_list timer; /* Media monitoring timer. */
349 /* Frequently used values: keep some adjacent for cache effect. */
351 int chip_id, drv_flags;
352 struct pci_dev *pci_dev;
354 struct w840_rx_desc *rx_head_desc;
355 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
356 unsigned int rx_buf_sz; /* Based on MTU+slack. */
357 unsigned int cur_tx, dirty_tx;
358 unsigned int tx_q_bytes;
359 unsigned int tx_full; /* The Tx queue is full. */
360 /* MII transceiver section. */
361 int mii_cnt; /* MII device addresses. */
362 unsigned char phys[MII_CNT]; /* MII device addresses, but only the first is used */
364 struct mii_if_info mii_if;
367 static int eeprom_read(long ioaddr, int location);
368 static int mdio_read(struct net_device *dev, int phy_id, int location);
369 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
370 static int netdev_open(struct net_device *dev);
371 static int update_link(struct net_device *dev);
372 static void netdev_timer(unsigned long data);
373 static void init_rxtx_rings(struct net_device *dev);
374 static void free_rxtx_rings(struct netdev_private *np);
375 static void init_registers(struct net_device *dev);
376 static void tx_timeout(struct net_device *dev);
377 static int alloc_ringdesc(struct net_device *dev);
378 static void free_ringdesc(struct netdev_private *np);
379 static int start_tx(struct sk_buff *skb, struct net_device *dev);
380 static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
381 static void netdev_error(struct net_device *dev, int intr_status);
382 static int netdev_rx(struct net_device *dev);
383 static u32 __set_rx_mode(struct net_device *dev);
384 static void set_rx_mode(struct net_device *dev);
385 static struct net_device_stats *get_stats(struct net_device *dev);
386 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
387 static struct ethtool_ops netdev_ethtool_ops;
388 static int netdev_close(struct net_device *dev);
392 static int __devinit w840_probe1 (struct pci_dev *pdev,
393 const struct pci_device_id *ent)
395 struct net_device *dev;
396 struct netdev_private *np;
398 int chip_idx = ent->driver_data;
400 int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
403 i = pci_enable_device(pdev);
406 pci_set_master(pdev);
410 if (pci_set_dma_mask(pdev,0xFFFFffff)) {
411 printk(KERN_WARNING "Winbond-840: Device %s disabled due to DMA limitations.\n",
415 dev = alloc_etherdev(sizeof(*np));
418 SET_MODULE_OWNER(dev);
419 SET_NETDEV_DEV(dev, &pdev->dev);
421 if (pci_request_regions(pdev, DRV_NAME))
425 ioaddr = pci_resource_start(pdev, 0);
427 ioaddr = pci_resource_start(pdev, 1);
428 ioaddr = (long) ioremap (ioaddr, pci_id_tbl[chip_idx].io_size);
430 goto err_out_free_res;
433 for (i = 0; i < 3; i++)
434 ((u16 *)dev->dev_addr)[i] = le16_to_cpu(eeprom_read(ioaddr, i));
436 /* Reset the chip to erase previous misconfiguration.
437 No hold time required! */
438 writel(0x00000001, ioaddr + PCIBusCfg);
440 dev->base_addr = ioaddr;
445 np->chip_id = chip_idx;
446 np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
447 spin_lock_init(&np->lock);
448 np->mii_if.dev = dev;
449 np->mii_if.mdio_read = mdio_read;
450 np->mii_if.mdio_write = mdio_write;
452 pci_set_drvdata(pdev, dev);
455 option = dev->mem_start;
457 /* The lower four bits are the media type. */
460 np->mii_if.full_duplex = 1;
462 printk(KERN_INFO "%s: ignoring user supplied media type %d",
463 dev->name, option & 15);
465 if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0)
466 np->mii_if.full_duplex = 1;
468 if (np->mii_if.full_duplex)
469 np->mii_if.force_media = 1;
471 /* The chip-specific entries in the device structure. */
472 dev->open = &netdev_open;
473 dev->hard_start_xmit = &start_tx;
474 dev->stop = &netdev_close;
475 dev->get_stats = &get_stats;
476 dev->set_multicast_list = &set_rx_mode;
477 dev->do_ioctl = &netdev_ioctl;
478 dev->ethtool_ops = &netdev_ethtool_ops;
479 dev->tx_timeout = &tx_timeout;
480 dev->watchdog_timeo = TX_TIMEOUT;
482 i = register_netdev(dev);
484 goto err_out_cleardev;
486 printk(KERN_INFO "%s: %s at 0x%lx, ",
487 dev->name, pci_id_tbl[chip_idx].name, ioaddr);
488 for (i = 0; i < 5; i++)
489 printk("%2.2x:", dev->dev_addr[i]);
490 printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
492 if (np->drv_flags & CanHaveMII) {
493 int phy, phy_idx = 0;
494 for (phy = 1; phy < 32 && phy_idx < MII_CNT; phy++) {
495 int mii_status = mdio_read(dev, phy, MII_BMSR);
496 if (mii_status != 0xffff && mii_status != 0x0000) {
497 np->phys[phy_idx++] = phy;
498 np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
499 np->mii = (mdio_read(dev, phy, MII_PHYSID1) << 16)+
500 mdio_read(dev, phy, MII_PHYSID2);
501 printk(KERN_INFO "%s: MII PHY %8.8xh found at address %d, status "
502 "0x%4.4x advertising %4.4x.\n",
503 dev->name, np->mii, phy, mii_status, np->mii_if.advertising);
506 np->mii_cnt = phy_idx;
507 np->mii_if.phy_id = np->phys[0];
509 printk(KERN_WARNING "%s: MII PHY not found -- this device may "
510 "not operate correctly.\n", dev->name);
518 pci_set_drvdata(pdev, NULL);
520 iounmap((void *)ioaddr);
523 pci_release_regions(pdev);
530 /* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. These are
531 often serial bit streams generated by the host processor.
532 The example below is for the common 93c46 EEPROM, 64 16 bit words. */
534 /* Delay between EEPROM clock transitions.
535 No extra delay is needed with 33Mhz PCI, but future 66Mhz access may need
536 a delay. Note that pre-2.0.34 kernels had a cache-alignment bug that
537 made udelay() unreliable.
538 The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is
541 #define eeprom_delay(ee_addr) readl(ee_addr)
543 enum EEPROM_Ctrl_Bits {
544 EE_ShiftClk=0x02, EE_Write0=0x801, EE_Write1=0x805,
545 EE_ChipSelect=0x801, EE_DataIn=0x08,
548 /* The EEPROM commands include the always-set leading bit. */
550 EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
553 static int eeprom_read(long addr, int location)
557 long ee_addr = addr + EECtrl;
558 int read_cmd = location | EE_ReadCmd;
559 writel(EE_ChipSelect, ee_addr);
561 /* Shift the read command bits out. */
562 for (i = 10; i >= 0; i--) {
563 short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
564 writel(dataval, ee_addr);
565 eeprom_delay(ee_addr);
566 writel(dataval | EE_ShiftClk, ee_addr);
567 eeprom_delay(ee_addr);
569 writel(EE_ChipSelect, ee_addr);
570 eeprom_delay(ee_addr);
572 for (i = 16; i > 0; i--) {
573 writel(EE_ChipSelect | EE_ShiftClk, ee_addr);
574 eeprom_delay(ee_addr);
575 retval = (retval << 1) | ((readl(ee_addr) & EE_DataIn) ? 1 : 0);
576 writel(EE_ChipSelect, ee_addr);
577 eeprom_delay(ee_addr);
580 /* Terminate the EEPROM access. */
585 /* MII transceiver control section.
586 Read and write the MII registers using software-generated serial
587 MDIO protocol. See the MII specifications or DP83840A data sheet
590 The maximum data clock rate is 2.5 Mhz. The minimum timing is usually
591 met by back-to-back 33Mhz PCI cycles. */
592 #define mdio_delay(mdio_addr) readl(mdio_addr)
594 /* Set iff a MII transceiver on any interface requires mdio preamble.
595 This is only set with older transceivers, so the extra
596 code size of a per-interface flag is not worthwhile. */
597 static char mii_preamble_required = 1;
599 #define MDIO_WRITE0 (MDIO_EnbOutput)
600 #define MDIO_WRITE1 (MDIO_DataOut | MDIO_EnbOutput)
602 /* Generate the preamble required for initial synchronization and
603 a few older transceivers. */
604 static void mdio_sync(long mdio_addr)
608 /* Establish sync by sending at least 32 logic ones. */
609 while (--bits >= 0) {
610 writel(MDIO_WRITE1, mdio_addr);
611 mdio_delay(mdio_addr);
612 writel(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
613 mdio_delay(mdio_addr);
617 static int mdio_read(struct net_device *dev, int phy_id, int location)
619 long mdio_addr = dev->base_addr + MIICtrl;
620 int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
623 if (mii_preamble_required)
624 mdio_sync(mdio_addr);
626 /* Shift the read command bits out. */
627 for (i = 15; i >= 0; i--) {
628 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
630 writel(dataval, mdio_addr);
631 mdio_delay(mdio_addr);
632 writel(dataval | MDIO_ShiftClk, mdio_addr);
633 mdio_delay(mdio_addr);
635 /* Read the two transition, 16 data, and wire-idle bits. */
636 for (i = 20; i > 0; i--) {
637 writel(MDIO_EnbIn, mdio_addr);
638 mdio_delay(mdio_addr);
639 retval = (retval << 1) | ((readl(mdio_addr) & MDIO_DataIn) ? 1 : 0);
640 writel(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
641 mdio_delay(mdio_addr);
643 return (retval>>1) & 0xffff;
646 static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
648 struct netdev_private *np = dev->priv;
649 long mdio_addr = dev->base_addr + MIICtrl;
650 int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
653 if (location == 4 && phy_id == np->phys[0])
654 np->mii_if.advertising = value;
656 if (mii_preamble_required)
657 mdio_sync(mdio_addr);
659 /* Shift the command bits out. */
660 for (i = 31; i >= 0; i--) {
661 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
663 writel(dataval, mdio_addr);
664 mdio_delay(mdio_addr);
665 writel(dataval | MDIO_ShiftClk, mdio_addr);
666 mdio_delay(mdio_addr);
668 /* Clear out extra bits. */
669 for (i = 2; i > 0; i--) {
670 writel(MDIO_EnbIn, mdio_addr);
671 mdio_delay(mdio_addr);
672 writel(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
673 mdio_delay(mdio_addr);
679 static int netdev_open(struct net_device *dev)
681 struct netdev_private *np = dev->priv;
682 long ioaddr = dev->base_addr;
685 writel(0x00000001, ioaddr + PCIBusCfg); /* Reset */
687 netif_device_detach(dev);
688 i = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
693 printk(KERN_DEBUG "%s: w89c840_open() irq %d.\n",
694 dev->name, dev->irq);
696 if((i=alloc_ringdesc(dev)))
699 spin_lock_irq(&np->lock);
700 netif_device_attach(dev);
702 spin_unlock_irq(&np->lock);
704 netif_start_queue(dev);
706 printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name);
708 /* Set the timer to check for link beat. */
709 init_timer(&np->timer);
710 np->timer.expires = jiffies + 1*HZ;
711 np->timer.data = (unsigned long)dev;
712 np->timer.function = &netdev_timer; /* timer handler */
713 add_timer(&np->timer);
716 netif_device_attach(dev);
720 #define MII_DAVICOM_DM9101 0x0181b800
722 static int update_link(struct net_device *dev)
724 struct netdev_private *np = dev->priv;
725 int duplex, fasteth, result, mii_reg;
728 mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);
730 if (mii_reg == 0xffff)
732 /* reread: the link status bit is sticky */
733 mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);
734 if (!(mii_reg & 0x4)) {
735 if (netif_carrier_ok(dev)) {
737 printk(KERN_INFO "%s: MII #%d reports no link. Disabling watchdog.\n",
738 dev->name, np->phys[0]);
739 netif_carrier_off(dev);
743 if (!netif_carrier_ok(dev)) {
745 printk(KERN_INFO "%s: MII #%d link is back. Enabling watchdog.\n",
746 dev->name, np->phys[0]);
747 netif_carrier_on(dev);
750 if ((np->mii & ~0xf) == MII_DAVICOM_DM9101) {
751 /* If the link partner doesn't support autonegotiation
752 * the MII detects it's abilities with the "parallel detection".
753 * Some MIIs update the LPA register to the result of the parallel
754 * detection, some don't.
755 * The Davicom PHY [at least 0181b800] doesn't.
756 * Instead bit 9 and 13 of the BMCR are updated to the result
757 * of the negotiation..
759 mii_reg = mdio_read(dev, np->phys[0], MII_BMCR);
760 duplex = mii_reg & BMCR_FULLDPLX;
761 fasteth = mii_reg & BMCR_SPEED100;
764 mii_reg = mdio_read(dev, np->phys[0], MII_LPA);
765 negotiated = mii_reg & np->mii_if.advertising;
767 duplex = (negotiated & LPA_100FULL) || ((negotiated & 0x02C0) == LPA_10FULL);
768 fasteth = negotiated & 0x380;
770 duplex |= np->mii_if.force_media;
771 /* remove fastether and fullduplex */
772 result = np->csr6 & ~0x20000200;
776 result |= 0x20000000;
777 if (result != np->csr6 && debug)
778 printk(KERN_INFO "%s: Setting %dMBit-%s-duplex based on MII#%d\n",
779 dev->name, fasteth ? 100 : 10,
780 duplex ? "full" : "half", np->phys[0]);
784 #define RXTX_TIMEOUT 2000
785 static inline void update_csr6(struct net_device *dev, int new)
787 struct netdev_private *np = dev->priv;
788 long ioaddr = dev->base_addr;
789 int limit = RXTX_TIMEOUT;
791 if (!netif_device_present(dev))
795 /* stop both Tx and Rx processes */
796 writel(np->csr6 & ~0x2002, ioaddr + NetworkConfig);
797 /* wait until they have really stopped */
799 int csr5 = readl(ioaddr + IntrStatus);
802 t = (csr5 >> 17) & 0x07;
805 t = (csr5 >> 20) & 0x07;
812 printk(KERN_INFO "%s: couldn't stop rxtx, IntrStatus %xh.\n",
819 /* and restart them with the new configuration */
820 writel(np->csr6, ioaddr + NetworkConfig);
822 np->mii_if.full_duplex = 1;
825 static void netdev_timer(unsigned long data)
827 struct net_device *dev = (struct net_device *)data;
828 struct netdev_private *np = dev->priv;
829 long ioaddr = dev->base_addr;
832 printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x "
834 dev->name, (int)readl(ioaddr + IntrStatus),
835 (int)readl(ioaddr + NetworkConfig));
836 spin_lock_irq(&np->lock);
837 update_csr6(dev, update_link(dev));
838 spin_unlock_irq(&np->lock);
839 np->timer.expires = jiffies + 10*HZ;
840 add_timer(&np->timer);
843 static void init_rxtx_rings(struct net_device *dev)
845 struct netdev_private *np = dev->priv;
848 np->rx_head_desc = &np->rx_ring[0];
849 np->tx_ring = (struct w840_tx_desc*)&np->rx_ring[RX_RING_SIZE];
851 /* Initial all Rx descriptors. */
852 for (i = 0; i < RX_RING_SIZE; i++) {
853 np->rx_ring[i].length = np->rx_buf_sz;
854 np->rx_ring[i].status = 0;
855 np->rx_skbuff[i] = NULL;
857 /* Mark the last entry as wrapping the ring. */
858 np->rx_ring[i-1].length |= DescEndRing;
860 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
861 for (i = 0; i < RX_RING_SIZE; i++) {
862 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
863 np->rx_skbuff[i] = skb;
866 skb->dev = dev; /* Mark as being used by this device. */
867 np->rx_addr[i] = pci_map_single(np->pci_dev,skb->tail,
868 skb->len,PCI_DMA_FROMDEVICE);
870 np->rx_ring[i].buffer1 = np->rx_addr[i];
871 np->rx_ring[i].status = DescOwn;
875 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
877 /* Initialize the Tx descriptors */
878 for (i = 0; i < TX_RING_SIZE; i++) {
879 np->tx_skbuff[i] = NULL;
880 np->tx_ring[i].status = 0;
883 np->tx_q_bytes = np->dirty_tx = np->cur_tx = 0;
885 writel(np->ring_dma_addr, dev->base_addr + RxRingPtr);
886 writel(np->ring_dma_addr+sizeof(struct w840_rx_desc)*RX_RING_SIZE,
887 dev->base_addr + TxRingPtr);
891 static void free_rxtx_rings(struct netdev_private* np)
894 /* Free all the skbuffs in the Rx queue. */
895 for (i = 0; i < RX_RING_SIZE; i++) {
896 np->rx_ring[i].status = 0;
897 if (np->rx_skbuff[i]) {
898 pci_unmap_single(np->pci_dev,
900 np->rx_skbuff[i]->len,
902 dev_kfree_skb(np->rx_skbuff[i]);
904 np->rx_skbuff[i] = NULL;
906 for (i = 0; i < TX_RING_SIZE; i++) {
907 if (np->tx_skbuff[i]) {
908 pci_unmap_single(np->pci_dev,
910 np->tx_skbuff[i]->len,
912 dev_kfree_skb(np->tx_skbuff[i]);
914 np->tx_skbuff[i] = NULL;
918 static void init_registers(struct net_device *dev)
920 struct netdev_private *np = dev->priv;
921 long ioaddr = dev->base_addr;
924 for (i = 0; i < 6; i++)
925 writeb(dev->dev_addr[i], ioaddr + StationAddr + i);
927 /* Initialize other registers. */
929 i = (1<<20); /* Big-endian descriptors */
933 i |= (0x04<<2); /* skip length 4 u32 */
934 i |= 0x02; /* give Rx priority */
936 /* Configure the PCI bus bursts and FIFO thresholds.
937 486: Set 8 longword cache alignment, 8 longword burst.
938 586: Set 16 longword cache alignment, no burst limit.
939 Cache alignment bits 15:14 Burst length 13:8
940 0000 <not allowed> 0000 align to cache 0800 8 longwords
941 4000 8 longwords 0100 1 longword 1000 16 longwords
942 8000 16 longwords 0200 2 longwords 2000 32 longwords
943 C000 32 longwords 0400 4 longwords */
945 #if defined (__i386__) && !defined(MODULE)
946 /* When not a module we can work around broken '486 PCI boards. */
947 if (boot_cpu_data.x86 <= 4) {
949 printk(KERN_INFO "%s: This is a 386/486 PCI system, setting cache "
950 "alignment to 8 longwords.\n", dev->name);
954 #elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
956 #elif defined(__sparc__)
959 #warning Processor architecture undefined
962 writel(i, ioaddr + PCIBusCfg);
965 /* 128 byte Tx threshold;
966 Transmit on; Receive on; */
967 update_csr6(dev, 0x00022002 | update_link(dev) | __set_rx_mode(dev));
969 /* Clear and Enable interrupts by setting the interrupt mask. */
970 writel(0x1A0F5, ioaddr + IntrStatus);
971 writel(0x1A0F5, ioaddr + IntrEnable);
973 writel(0, ioaddr + RxStartDemand);
976 static void tx_timeout(struct net_device *dev)
978 struct netdev_private *np = dev->priv;
979 long ioaddr = dev->base_addr;
981 printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
982 " resetting...\n", dev->name, (int)readl(ioaddr + IntrStatus));
986 printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
987 for (i = 0; i < RX_RING_SIZE; i++)
988 printk(" %8.8x", (unsigned int)np->rx_ring[i].status);
989 printk("\n"KERN_DEBUG" Tx ring %p: ", np->tx_ring);
990 for (i = 0; i < TX_RING_SIZE; i++)
991 printk(" %8.8x", np->tx_ring[i].status);
994 printk(KERN_DEBUG "Tx cur %d Tx dirty %d Tx Full %d, q bytes %d.\n",
995 np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes);
996 printk(KERN_DEBUG "Tx Descriptor addr %xh.\n",readl(ioaddr+0x4C));
998 disable_irq(dev->irq);
999 spin_lock_irq(&np->lock);
1001 * Under high load dirty_tx and the internal tx descriptor pointer
1002 * come out of sync, thus perform a software reset and reinitialize
1006 writel(1, dev->base_addr+PCIBusCfg);
1009 free_rxtx_rings(np);
1010 init_rxtx_rings(dev);
1011 init_registers(dev);
1012 spin_unlock_irq(&np->lock);
1013 enable_irq(dev->irq);
1015 netif_wake_queue(dev);
1016 dev->trans_start = jiffies;
1017 np->stats.tx_errors++;
1021 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1022 static int alloc_ringdesc(struct net_device *dev)
1024 struct netdev_private *np = dev->priv;
1026 np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1028 np->rx_ring = pci_alloc_consistent(np->pci_dev,
1029 sizeof(struct w840_rx_desc)*RX_RING_SIZE +
1030 sizeof(struct w840_tx_desc)*TX_RING_SIZE,
1031 &np->ring_dma_addr);
1034 init_rxtx_rings(dev);
1038 static void free_ringdesc(struct netdev_private *np)
1040 pci_free_consistent(np->pci_dev,
1041 sizeof(struct w840_rx_desc)*RX_RING_SIZE +
1042 sizeof(struct w840_tx_desc)*TX_RING_SIZE,
1043 np->rx_ring, np->ring_dma_addr);
/* start_tx(): hard_start_xmit hook -- place one skb on the Tx ring, hand
 * the descriptor to the NIC and kick the Tx engine.
 * NOTE(review): this extract is line-sampled; local declarations, braces
 * and the return path between the numbered lines are not visible here. */
1047 static int start_tx(struct sk_buff *skb, struct net_device *dev)
1049 struct netdev_private *np = dev->priv;
1052 /* Caution: the write order is important here, set the field
1053 with the "ownership" bits last. */
1055 /* Calculate the next Tx descriptor entry. */
1056 entry = np->cur_tx % TX_RING_SIZE;
/* DMA-map the whole packet once; the address is remembered so
 * netdev_tx_done() can unmap it after completion. */
1058 np->tx_addr[entry] = pci_map_single(np->pci_dev,
1059 skb->data,skb->len, PCI_DMA_TODEVICE);
1060 np->tx_skbuff[entry] = skb;
1062 np->tx_ring[entry].buffer1 = np->tx_addr[entry];
1063 if (skb->len < TX_BUFLIMIT) {
1064 np->tx_ring[entry].length = DescWholePkt | skb->len;
/* Frame exceeds the per-buffer limit: split it across buffer1/buffer2;
 * the second fragment's length is encoded shifted left by 11 bits. */
1066 int len = skb->len - TX_BUFLIMIT;
1068 np->tx_ring[entry].buffer2 = np->tx_addr[entry]+TX_BUFLIMIT;
1069 np->tx_ring[entry].length = DescWholePkt | (len << 11) | TX_BUFLIMIT;
/* The last ring slot must carry the end-of-ring marker so the NIC wraps. */
1071 if(entry == TX_RING_SIZE-1)
1072 np->tx_ring[entry].length |= DescEndRing;
1074 /* Now acquire the irq spinlock.
1075 * The difficult race is the ordering between
1076 * increasing np->cur_tx and setting DescOwn:
1077 * - if np->cur_tx is increased first the interrupt
1078 * handler could consider the packet as transmitted
1079 * since DescOwn is cleared.
1080 * - If DescOwn is set first the NIC could report the
1081 * packet as sent, but the interrupt handler would ignore it
1082 * since the np->cur_tx was not yet increased.
1084 spin_lock_irq(&np->lock);
1087 wmb(); /* flush length, buffer1, buffer2 */
1088 np->tx_ring[entry].status = DescOwn;
1089 wmb(); /* flush status and kick the hardware */
1090 writel(0, dev->base_addr + TxStartDemand);
1091 np->tx_q_bytes += skb->len;
1092 /* Work around horrible bug in the chip by marking the queue as full
1093 when we do not have FIFO room for a maximum sized packet. */
1094 if (np->cur_tx - np->dirty_tx > TX_QUEUE_LEN ||
1095 ((np->drv_flags & HasBrokenTx) && np->tx_q_bytes > TX_BUG_FIFO_LIMIT)) {
1096 netif_stop_queue(dev);
1100 spin_unlock_irq(&np->lock);
1102 dev->trans_start = jiffies;
1105 printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
1106 dev->name, np->cur_tx, entry);
/* netdev_tx_done(): reap completed Tx descriptors, update statistics,
 * unmap and free the skbs, and wake the queue when room reappears.
 * Called from the interrupt handler with np->lock held.
 * NOTE(review): line-sampled extract; some braces/else lines are missing. */
1111 static void netdev_tx_done(struct net_device *dev)
1113 struct netdev_private *np = dev->priv;
/* Walk every descriptor between dirty_tx and cur_tx. */
1114 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1115 int entry = np->dirty_tx % TX_RING_SIZE;
1116 int tx_status = np->tx_ring[entry].status;
1120 if (tx_status & 0x8000) { /* There was an error, log it. */
1121 #ifndef final_version
1123 printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
1124 dev->name, tx_status);
/* Decode the individual error bits into net_device stats counters. */
1126 np->stats.tx_errors++;
1127 if (tx_status & 0x0104) np->stats.tx_aborted_errors++;
1128 if (tx_status & 0x0C80) np->stats.tx_carrier_errors++;
1129 if (tx_status & 0x0200) np->stats.tx_window_errors++;
1130 if (tx_status & 0x0002) np->stats.tx_fifo_errors++;
/* Heartbeat errors only matter at half duplex. */
1131 if ((tx_status & 0x0080) && np->mii_if.full_duplex == 0)
1132 np->stats.tx_heartbeat_errors++;
1134 #ifndef final_version
1136 printk(KERN_DEBUG "%s: Transmit slot %d ok, Tx status %8.8x.\n",
1137 dev->name, entry, tx_status);
/* Successful transmit: status bits 6..3 hold the collision count. */
1139 np->stats.tx_bytes += np->tx_skbuff[entry]->len;
1140 np->stats.collisions += (tx_status >> 3) & 15;
1141 np->stats.tx_packets++;
1143 /* Free the original skb. */
1144 pci_unmap_single(np->pci_dev,np->tx_addr[entry],
1145 np->tx_skbuff[entry]->len,
1147 np->tx_q_bytes -= np->tx_skbuff[entry]->len;
1148 dev_kfree_skb_irq(np->tx_skbuff[entry]);
1149 np->tx_skbuff[entry] = NULL;
/* Restart the queue only when both ring slots and FIFO bytes are
 * available again (mirrors the stop conditions in start_tx). */
1152 np->cur_tx - np->dirty_tx < TX_QUEUE_LEN_RESTART &&
1153 np->tx_q_bytes < TX_BUG_FIFO_LIMIT) {
1154 /* The ring is no longer full, clear tbusy. */
1157 netif_wake_queue(dev);
1161 /* The interrupt handler does all of the Rx thread work and cleans up
1162 after the Tx thread. */
1163 static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
1165 struct net_device *dev = (struct net_device *)dev_instance;
1166 struct netdev_private *np = dev->priv;
1167 long ioaddr = dev->base_addr;
1168 int work_limit = max_interrupt_work;
/* The device may be detached (e.g. suspend); never touch registers then. */
1171 if (!netif_device_present(dev))
1174 u32 intr_status = readl(ioaddr + IntrStatus);
1176 /* Acknowledge all of the current interrupt sources ASAP. */
1177 writel(intr_status & 0x001ffff, ioaddr + IntrStatus);
1180 printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
1181 dev->name, intr_status);
/* Neither summary bit set: the interrupt was not for us. */
1183 if ((intr_status & (NormalIntr|AbnormalIntr)) == 0)
1188 if (intr_status & (IntrRxDone | RxNoBuf))
/* Rx stalled for lack of buffers: poke the Rx engine to restart it. */
1190 if (intr_status & RxNoBuf)
1191 writel(0, ioaddr + RxStartDemand);
/* Reap finished Tx descriptors under the device lock. */
1193 if (intr_status & (TxIdle | IntrTxDone) &&
1194 np->cur_tx != np->dirty_tx) {
1195 spin_lock(&np->lock);
1196 netdev_tx_done(dev);
1197 spin_unlock(&np->lock);
1200 /* Abnormal error summary/uncommon events handlers. */
1201 if (intr_status & (AbnormalIntr | TxFIFOUnderflow | IntrPCIErr |
1202 TimerInt | IntrTxStopped))
1203 netdev_error(dev, intr_status);
/* Interrupt-storm protection: mask everything except the GP timer and
 * let the timer interrupt re-enable the rest shortly afterwards. */
1205 if (--work_limit < 0) {
1206 printk(KERN_WARNING "%s: Too much work at interrupt, "
1207 "status=0x%4.4x.\n", dev->name, intr_status);
1208 /* Set the timer to re-enable the other interrupts after
1210 spin_lock(&np->lock);
1211 if (netif_device_present(dev)) {
1212 writel(AbnormalIntr | TimerInt, ioaddr + IntrEnable);
1213 writel(10, ioaddr + GPTimer);
1215 spin_unlock(&np->lock);
1221 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1222 dev->name, (int)readl(ioaddr + IntrStatus));
1223 return IRQ_RETVAL(handled);
1226 /* This routine is logically part of the interrupt handler, but separated
1227 for clarity and better register allocation. */
1228 static int netdev_rx(struct net_device *dev)
1230 struct netdev_private *np = dev->priv;
1231 int entry = np->cur_rx % RX_RING_SIZE;
/* Never examine more descriptors than the driver currently owns. */
1232 int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
1235 printk(KERN_DEBUG " In netdev_rx(), entry %d status %4.4x.\n",
1236 entry, np->rx_ring[entry].status);
1239 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1240 while (--work_limit >= 0) {
1241 struct w840_rx_desc *desc = np->rx_head_desc;
1242 s32 status = desc->status;
1245 printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
/* Anything other than a clean single-buffer packet is handled here. */
1249 if ((status & 0x38008300) != 0x0300) {
1250 if ((status & 0x38000300) != 0x0300) {
1251 /* Ignore earlier buffers. */
1252 if ((status & 0xffff) != 0x7fff) {
1253 printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
1254 "multiple buffers, entry %#x status %4.4x!\n",
1255 dev->name, np->cur_rx, status);
1256 np->stats.rx_length_errors++;
1258 } else if (status & 0x8000) {
1259 /* There was a fatal error. */
1261 printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
/* Count the specific error classes reported in the status word. */
1263 np->stats.rx_errors++; /* end of a packet.*/
1264 if (status & 0x0890) np->stats.rx_length_errors++;
1265 if (status & 0x004C) np->stats.rx_frame_errors++;
1266 if (status & 0x0002) np->stats.rx_crc_errors++;
1269 struct sk_buff *skb;
1270 /* Omit the four octet CRC from the length. */
1271 int pkt_len = ((status >> 16) & 0x7ff) - 4;
1273 #ifndef final_version
1275 printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
1276 " status %x.\n", pkt_len, status);
1278 /* Check if the packet is long enough to accept without copying
1279 to a minimally-sized skbuff. */
1280 if (pkt_len < rx_copybreak
1281 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1283 skb_reserve(skb, 2); /* 16 byte align the IP header */
/* Small packet: sync the DMA buffer, copy into the fresh skb, then
 * hand the ring buffer back to the device still mapped. */
1284 pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry],
1285 np->rx_skbuff[entry]->len,
1286 PCI_DMA_FROMDEVICE);
1287 eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
1288 skb_put(skb, pkt_len);
1289 pci_dma_sync_single_for_device(np->pci_dev,np->rx_addr[entry],
1290 np->rx_skbuff[entry]->len,
1291 PCI_DMA_FROMDEVICE);
/* Large packet: unmap and pass the ring skb itself up the stack;
 * the emptied slot is refilled in the loop below. */
1293 pci_unmap_single(np->pci_dev,np->rx_addr[entry],
1294 np->rx_skbuff[entry]->len,
1295 PCI_DMA_FROMDEVICE);
1296 skb_put(skb = np->rx_skbuff[entry], pkt_len);
1297 np->rx_skbuff[entry] = NULL;
1299 #ifndef final_version /* Remove after testing. */
1300 /* You will want this info for the initial debug. */
1302 printk(KERN_DEBUG " Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:"
1303 "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x "
1305 skb->data[0], skb->data[1], skb->data[2], skb->data[3],
1306 skb->data[4], skb->data[5], skb->data[6], skb->data[7],
1307 skb->data[8], skb->data[9], skb->data[10],
1308 skb->data[11], skb->data[12], skb->data[13],
1309 skb->data[14], skb->data[15], skb->data[16],
1312 skb->protocol = eth_type_trans(skb, dev);
1314 dev->last_rx = jiffies;
1315 np->stats.rx_packets++;
1316 np->stats.rx_bytes += pkt_len;
1318 entry = (++np->cur_rx) % RX_RING_SIZE;
1319 np->rx_head_desc = &np->rx_ring[entry];
1322 /* Refill the Rx ring buffers. */
1323 for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
1324 struct sk_buff *skb;
1325 entry = np->dirty_rx % RX_RING_SIZE;
1326 if (np->rx_skbuff[entry] == NULL) {
1327 skb = dev_alloc_skb(np->rx_buf_sz);
1328 np->rx_skbuff[entry] = skb;
/* Allocation failure is tolerated; retry on the next interrupt. */
1330 break; /* Better luck next round. */
1331 skb->dev = dev; /* Mark as being used by this device. */
1332 np->rx_addr[entry] = pci_map_single(np->pci_dev,
1334 skb->len, PCI_DMA_FROMDEVICE);
1335 np->rx_ring[entry].buffer1 = np->rx_addr[entry];
/* Give the descriptor back to the NIC last. */
1338 np->rx_ring[entry].status = DescOwn;
/* netdev_error(): handle the abnormal-interrupt summary events --
 * Tx FIFO underflow, missed Rx frames, and the re-enable GP timer.
 * NOTE(review): line-sampled extract; some statements are not visible. */
1344 static void netdev_error(struct net_device *dev, int intr_status)
1346 long ioaddr = dev->base_addr;
1347 struct netdev_private *np = dev->priv;
1350 printk(KERN_DEBUG "%s: Abnormal event, %8.8x.\n",
1351 dev->name, intr_status);
/* All-ones status: the chip is presumably gone (unplug/power-off) --
 * bail out rather than poke dead registers. */
1352 if (intr_status == 0xffffffff)
1354 spin_lock(&np->lock);
1355 if (intr_status & TxFIFOUnderflow) {
1357 /* Bump up the Tx threshold */
1359 /* This causes lots of dropped packets,
1360 * and under high load even tx_timeouts
/* Raise the Tx threshold field (csr6 bits 20..14) by one step. */
1362 new = np->csr6 + 0x4000;
1364 new = (np->csr6 >> 14)&0x7f;
1368 new = 127; /* load full packet before starting */
1369 new = (np->csr6 & ~(0x7F << 14)) | (new<<14);
1371 printk(KERN_DEBUG "%s: Tx underflow, new csr6 %8.8x.\n",
1373 update_csr6(dev, new);
1375 if (intr_status & IntrRxDied) { /* Missed a Rx frame. */
1376 np->stats.rx_errors++;
1378 if (intr_status & TimerInt) {
1379 /* Re-enable other interrupts. */
1380 if (netif_device_present(dev))
1381 writel(0x1A0F5, ioaddr + IntrEnable);
/* Fold the hardware missed-frame counter into stats and restart Rx. */
1383 np->stats.rx_missed_errors += readl(ioaddr + RxMissed) & 0xffff;
1384 writel(0, ioaddr + RxStartDemand);
1385 spin_unlock(&np->lock);
/* get_stats(): return accumulated net_device_stats, first folding in the
 * hardware missed-frame counter (only read while the device is up and
 * present, to avoid touching absent hardware). */
1388 static struct net_device_stats *get_stats(struct net_device *dev)
1390 long ioaddr = dev->base_addr;
1391 struct netdev_private *np = dev->priv;
1393 /* Only the count of silently dropped frames must be read from the chip. */
1394 spin_lock_irq(&np->lock);
1395 if (netif_running(dev) && netif_device_present(dev))
1396 np->stats.rx_missed_errors += readl(ioaddr + RxMissed) & 0xffff;
1397 spin_unlock_irq(&np->lock);
/* __set_rx_mode(): program the two multicast hash filter registers and
 * compute the rx_mode bits for csr6; the caller applies them. */
1403 static u32 __set_rx_mode(struct net_device *dev)
1405 long ioaddr = dev->base_addr;
1406 u32 mc_filter[2]; /* Multicast hash filter */
1409 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1410 /* Unconditionally log net taps. */
1411 printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
1412 memset(mc_filter, 0xff, sizeof(mc_filter));
1413 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAllPhys
1415 } else if ((dev->mc_count > multicast_filter_limit)
1416 || (dev->flags & IFF_ALLMULTI)) {
1417 /* Too many to match, or accept all multicasts. */
1418 memset(mc_filter, 0xff, sizeof(mc_filter));
1419 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
/* Build the 64-bit hash table from the device's multicast list. */
1421 struct dev_mc_list *mclist;
1423 memset(mc_filter, 0, sizeof(mc_filter));
1424 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1425 i++, mclist = mclist->next) {
/* Filter bit index = top 6 bits of the Ethernet CRC, inverted. */
1426 int filterbit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F;
1428 mc_filter[filterbit >> 5] |= cpu_to_le32(1 << (filterbit & 31));
1430 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1432 writel(mc_filter[0], ioaddr + MulticastFilter0);
1433 writel(mc_filter[1], ioaddr + MulticastFilter1);
/* set_rx_mode(): set_multicast_list hook -- reprogram the hash filter and
 * fold the new rx_mode bits into csr6 under the device lock. */
1437 static void set_rx_mode(struct net_device *dev)
1439 struct netdev_private *np = dev->priv;
1440 u32 rx_mode = __set_rx_mode(dev);
1441 spin_lock_irq(&np->lock);
/* Clear the old accept-mode bits before OR-ing in the new ones. */
1442 update_csr6(dev, (np->csr6 & ~0x00F8) | rx_mode);
1443 spin_unlock_irq(&np->lock);
/* ethtool get_drvinfo: report driver name, version and PCI bus location. */
1446 static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1448 struct netdev_private *np = dev->priv;
1450 strcpy (info->driver, DRV_NAME);
1451 strcpy (info->version, DRV_VERSION);
1452 strcpy (info->bus_info, pci_name(np->pci_dev));
/* ethtool get_settings: read link settings via the generic MII layer,
 * serialized against other MII access by the device lock. */
1455 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1457 struct netdev_private *np = dev->priv;
1460 spin_lock_irq(&np->lock);
1461 rc = mii_ethtool_gset(&np->mii_if, cmd);
1462 spin_unlock_irq(&np->lock);
/* ethtool set_settings: write link settings via the generic MII layer,
 * serialized against other MII access by the device lock. */
1467 static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1469 struct netdev_private *np = dev->priv;
1472 spin_lock_irq(&np->lock);
1473 rc = mii_ethtool_sset(&np->mii_if, cmd);
1474 spin_unlock_irq(&np->lock);
/* ethtool nway_reset: restart MII autonegotiation. */
1479 static int netdev_nway_reset(struct net_device *dev)
1481 struct netdev_private *np = dev->priv;
1482 return mii_nway_restart(&np->mii_if);
/* ethtool get_link: report link-up status from the MII layer. */
1485 static u32 netdev_get_link(struct net_device *dev)
1487 struct netdev_private *np = dev->priv;
1488 return mii_link_ok(&np->mii_if);
/* ethtool message-level accessors.
 * NOTE(review): the function bodies are not visible in this extract;
 * presumably they read/write a driver debug level -- confirm against
 * the full file. */
1491 static u32 netdev_get_msglevel(struct net_device *dev)
1496 static void netdev_set_msglevel(struct net_device *dev, u32 value)
/* ethtool operations supported by this driver. */
1501 static struct ethtool_ops netdev_ethtool_ops = {
1502 .get_drvinfo = netdev_get_drvinfo,
1503 .get_settings = netdev_get_settings,
1504 .set_settings = netdev_set_settings,
1505 .nway_reset = netdev_nway_reset,
1506 .get_link = netdev_get_link,
1507 .get_msglevel = netdev_get_msglevel,
1508 .set_msglevel = netdev_set_msglevel,
1509 .get_sg = ethtool_op_get_sg,
1510 .get_tx_csum = ethtool_op_get_tx_csum,
/* netdev_ioctl(): MII ioctl handler (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG).
 * NOTE(review): line-sampled extract; the switch header, break statements
 * and return paths are not visible here. */
1513 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1515 struct mii_ioctl_data *data = if_mii(rq);
1516 struct netdev_private *np = netdev_priv(dev);
1519 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
/* NOTE(review): uses a dev->priv cast here but netdev_priv() above --
 * inconsistent style; presumably both reach the same private struct. */
1520 data->phy_id = ((struct netdev_private *)dev->priv)->phys[0] & 0x1f;
1523 case SIOCGMIIREG: /* Read MII PHY register. */
1524 spin_lock_irq(&np->lock);
1525 data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
1526 spin_unlock_irq(&np->lock);
1529 case SIOCSMIIREG: /* Write MII PHY register. */
/* PHY writes require CAP_NET_ADMIN. */
1530 if (!capable(CAP_NET_ADMIN))
1532 spin_lock_irq(&np->lock);
1533 mdio_write(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
1534 spin_unlock_irq(&np->lock);
/* netdev_close(): dev->stop hook -- quiesce the chip, free the irq,
 * harvest final statistics, stop the timer and release the rings.
 * NOTE(review): line-sampled extract; some statements are not visible. */
1541 static int netdev_close(struct net_device *dev)
1543 long ioaddr = dev->base_addr;
1544 struct netdev_private *np = dev->priv;
1546 netif_stop_queue(dev);
1549 printk(KERN_DEBUG "%s: Shutting down ethercard, status was %8.8x "
1550 "Config %8.8x.\n", dev->name, (int)readl(ioaddr + IntrStatus),
1551 (int)readl(ioaddr + NetworkConfig));
1552 printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
1553 dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
1556 /* Stop the chip's Tx and Rx processes. */
1557 spin_lock_irq(&np->lock);
/* Detach and disable under the lock so a detached device cannot keep
 * raising interrupts. */
1558 netif_device_detach(dev);
1559 update_csr6(dev, 0)
1560 writel(0x0000, ioaddr + IntrEnable);
1561 spin_unlock_irq(&np->lock);
1563 free_irq(dev->irq, dev);
1565 netif_device_attach(dev);
/* Read the final missed-frame count unless the chip has disappeared. */
1567 if (readl(ioaddr + NetworkConfig) != 0xffffffff)
1568 np->stats.rx_missed_errors += readl(ioaddr + RxMissed) & 0xffff;
/* Debug-only descriptor ring dump (closed by the #endif below). */
1574 printk(KERN_DEBUG" Tx ring at %8.8x:\n",
1576 for (i = 0; i < TX_RING_SIZE; i++)
1577 printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x.\n",
1578 i, np->tx_ring[i].length,
1579 np->tx_ring[i].status, np->tx_ring[i].buffer1);
1580 printk("\n"KERN_DEBUG " Rx ring %8.8x:\n",
1582 for (i = 0; i < RX_RING_SIZE; i++) {
1583 printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
1584 i, np->rx_ring[i].length,
1585 np->rx_ring[i].status, np->rx_ring[i].buffer1);
1588 #endif /* __i386__ debugging only */
1590 del_timer_sync(&np->timer);
1592 free_rxtx_rings(np);
/* w840_remove1(): PCI removal callback -- unregister the netdev and
 * release the PCI regions and I/O mapping claimed at probe time. */
1598 static void __devexit w840_remove1 (struct pci_dev *pdev)
1600 struct net_device *dev = pci_get_drvdata(pdev);
1603 unregister_netdev(dev);
1604 pci_release_regions(pdev);
1606 iounmap((char *)(dev->base_addr));
1611 pci_set_drvdata(pdev, NULL);
1617 * suspend/resume synchronization:
1618 * - open, close, do_ioctl:
1619 * rtnl_lock, & netif_device_detach after the rtnl_unlock.
1621 * spin_lock_irq(np->lock), doesn't touch hw if not present
1622 * - hard_start_xmit:
1623 * netif_stop_queue + spin_unlock_wait(&dev->xmit_lock);
1625 * netif_device_detach + spin_unlock_wait(&dev->xmit_lock);
1626 * - set_multicast_list
1627 * netif_device_detach + spin_unlock_wait(&dev->xmit_lock);
1628 * - interrupt handler
1629 * doesn't touch hw if not present, synchronize_irq waits for
1630 * running instances of the interrupt handler.
1632 * Disabling hw requires clearing csr6 & IntrEnable.
1633 * update_csr6 & all functions that write IntrEnable check netif_device_present
1634 * before setting any bits.
1636 * Detach must occur under spin_unlock_irq(), interrupts from a detached
1637 * device would cause an irq storm.
/* w840_suspend(): PCI suspend callback.  If the interface is running,
 * stop the media timer, detach and disable the chip under the lock,
 * wait out in-flight transmit/irq paths, harvest final stats and free
 * the descriptor rings.  See the synchronization notes above.
 * NOTE(review): line-sampled extract; some statements are not visible. */
1639 static int w840_suspend (struct pci_dev *pdev, u32 state)
1641 struct net_device *dev = pci_get_drvdata (pdev);
1642 struct netdev_private *np = dev->priv;
1643 long ioaddr = dev->base_addr;
1646 if (netif_running (dev)) {
1647 del_timer_sync(&np->timer);
1649 spin_lock_irq(&np->lock);
1650 netif_device_detach(dev);
1651 update_csr6(dev, 0);
1652 writel(0, ioaddr + IntrEnable);
1653 netif_stop_queue(dev);
1654 spin_unlock_irq(&np->lock);
/* Wait until any concurrent hard_start_xmit and irq handler finish. */
1656 spin_unlock_wait(&dev->xmit_lock);
1657 synchronize_irq(dev->irq);
1659 np->stats.rx_missed_errors += readl(ioaddr + RxMissed) & 0xffff;
1661 /* no more hardware accesses behind this line. */
/* Sanity checks: the chip must be fully disabled at this point. */
1663 if (np->csr6) BUG();
1664 if (readl(ioaddr + IntrEnable)) BUG();
1666 /* pci_power_off(pdev, -1); */
1668 free_rxtx_rings(np);
/* Interface was down: just mark the device absent. */
1670 netif_device_detach(dev);
/* w840_resume(): PCI resume callback -- the reverse of w840_suspend.
 * Re-enables the PCI device, resets the chip, rebuilds the rings and
 * reprograms the registers before re-attaching the interface. */
1676 static int w840_resume (struct pci_dev *pdev)
1678 struct net_device *dev = pci_get_drvdata (pdev);
1679 struct netdev_private *np = dev->priv;
1682 if (netif_device_present(dev))
1683 goto out; /* device not suspended */
1684 if (netif_running(dev)) {
1685 pci_enable_device(pdev);
1686 /* pci_power_on(pdev); */
1688 spin_lock_irq(&np->lock);
/* Write then read back PCIBusCfg -- presumably a chip soft reset with a
 * read flush of the posted write; confirm against the chip datasheet. */
1689 writel(1, dev->base_addr+PCIBusCfg);
1690 readl(dev->base_addr+PCIBusCfg);
1692 netif_device_attach(dev);
1693 init_rxtx_rings(dev);
1694 init_registers(dev);
1695 spin_unlock_irq(&np->lock);
1697 netif_wake_queue(dev);
/* Restart the periodic media timer. */
1699 mod_timer(&np->timer, jiffies + 1*HZ);
/* Interface was down: just mark the device present again. */
1701 netif_device_attach(dev);
/* PCI driver glue binding probe/remove/suspend/resume to the PCI core. */
1709 static struct pci_driver w840_driver = {
1711 .id_table = w840_pci_tbl,
1712 .probe = w840_probe1,
1713 .remove = __devexit_p(w840_remove1),
1715 .suspend = w840_suspend,
1716 .resume = w840_resume,
/* Module entry point: register the PCI driver with the PCI core. */
1720 static int __init w840_init(void)
1723 return pci_module_init(&w840_driver);
/* Module exit point: unregister the PCI driver. */
1726 static void __exit w840_exit(void)
1728 pci_unregister_driver(&w840_driver);
/* Register the module init/exit handlers. */
1731 module_init(w840_init);
1732 module_exit(w840_exit);