1 /* winbond-840.c: A Linux PCI network adapter device driver. */
3 Written 1998-2001 by Donald Becker.
5 This software may be used and distributed according to the terms of
6 the GNU General Public License (GPL), incorporated herein by reference.
7 Drivers based on or derived from this code fall under the GPL and must
8 retain the authorship, copyright and license notice. This file is not
9 a complete program and may only be used when the entire operating
10 system is licensed under the GPL.
12 The author may be reached as becker@scyld.com, or C/O
13 Scyld Computing Corporation
14 410 Severn Ave., Suite 210
17 Support and updates available at
18 http://www.scyld.com/network/drivers.html
20 Do not remove the copyright information.
21 Do not change the version information unless an improvement has been made.
22 Merely removing my name, as Compex has done in the past, does not count
28 * spin lock update, memory barriers, new style dma mappings
29 limit each tx buffer to < 1024 bytes
30 remove DescIntr from Rx descriptors (that's a Tx flag)
31 remove next pointer from Tx descriptors
32 synchronize tx_q_bytes
33 software reset in tx_timeout
34 Copyright (C) 2000 Manfred Spraul
37 support for big endian descriptors
38 Copyright (C) 2001 Manfred Spraul
39 * ethtool support (jgarzik)
40 * Replace some MII-related magic numbers with constants (jgarzik)
43 * enable pci_power_off
47 #define DRV_NAME "winbond-840"
48 #define DRV_VERSION "1.01-d"
49 #define DRV_RELDATE "Nov-17-2001"
52 /* Automatically extracted configuration info:
53 probe-func: winbond840_probe
54 config-in: tristate 'Winbond W89c840 Ethernet support' CONFIG_WINBOND_840
56 c-help-name: Winbond W89c840 PCI Ethernet support
57 c-help-symbol: CONFIG_WINBOND_840
58 c-help: This driver is for the Winbond W89c840 chip. It also works with
59 c-help: the TX9882 chip on the Compex RL100-ATX board.
60 c-help: More specific information and updates are available from
61 c-help: http://www.scyld.com/network/drivers.html
64 /* The user-configurable values.
65 These may be modified when a driver module is loaded.*/
67 static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
68 static int max_interrupt_work = 20;
69 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
70 The '840 uses a 64 element hash table based on the Ethernet CRC. */
71 static int multicast_filter_limit = 32;
73 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
74 Setting to > 1518 effectively disables this feature. */
75 static int rx_copybreak;
77 /* Used to pass the media type, etc.
78 Both 'options[]' and 'full_duplex[]' should exist for driver
80 The media type is usually passed in 'options[]'.
82 #define MAX_UNITS 8 /* More are supported, limit only on options */
83 static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
84 static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
86 /* Operational parameters that are set at compile time. */
88 /* Keep the ring sizes a power of two for compile efficiency.
89 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
90 Making the Tx ring too large decreases the effectiveness of channel
91 bonding and packet priority.
92 There are no ill effects from too-large receive rings. */
93 #define TX_RING_SIZE 16
94 #define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
95 #define TX_QUEUE_LEN_RESTART 5
96 #define RX_RING_SIZE 32
98 #define TX_BUFLIMIT (1024-128)
100 /* The presumed FIFO size for working around the Tx-FIFO-overflow bug.
101 To avoid overflowing we don't queue again until we have room for a
104 #define TX_FIFO_SIZE (2048)
105 #define TX_BUG_FIFO_LIMIT (TX_FIFO_SIZE-1514-16)
108 /* Operational parameters that usually are not changed. */
109 /* Time in jiffies before concluding the transmitter is hung. */
110 #define TX_TIMEOUT (2*HZ)
112 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
117 #if !defined(__OPTIMIZE__)
118 #warning You must compile this file with the correct options!
119 #warning See the last lines of the source file.
120 #error You must compile this driver with "-O".
123 /* Include files, designed to support most kernel versions 2.0.0 and later. */
124 #include <linux/module.h>
125 #include <linux/kernel.h>
126 #include <linux/string.h>
127 #include <linux/timer.h>
128 #include <linux/errno.h>
129 #include <linux/ioport.h>
130 #include <linux/slab.h>
131 #include <linux/interrupt.h>
132 #include <linux/pci.h>
133 #include <linux/netdevice.h>
134 #include <linux/etherdevice.h>
135 #include <linux/skbuff.h>
136 #include <linux/init.h>
137 #include <linux/delay.h>
138 #include <linux/ethtool.h>
139 #include <linux/mii.h>
140 #include <linux/rtnetlink.h>
141 #include <linux/crc32.h>
142 #include <asm/uaccess.h>
143 #include <asm/processor.h> /* Processor type for cache alignment. */
144 #include <asm/bitops.h>
148 /* These identify the driver base version and may not be removed. */
149 static char version[] __devinitdata =
150 KERN_INFO DRV_NAME ".c:v" DRV_VERSION " (2.4 port) " DRV_RELDATE " Donald Becker <becker@scyld.com>\n"
151 KERN_INFO " http://www.scyld.com/network/drivers.html\n";
153 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
154 MODULE_DESCRIPTION("Winbond W89c840 Ethernet driver");
155 MODULE_LICENSE("GPL");
157 MODULE_PARM(max_interrupt_work, "i");
158 MODULE_PARM(debug, "i");
159 MODULE_PARM(rx_copybreak, "i");
160 MODULE_PARM(multicast_filter_limit, "i");
161 MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
162 MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
163 MODULE_PARM_DESC(max_interrupt_work, "winbond-840 maximum events handled per interrupt");
164 MODULE_PARM_DESC(debug, "winbond-840 debug level (0-6)");
165 MODULE_PARM_DESC(rx_copybreak, "winbond-840 copy breakpoint for copy-only-tiny-frames");
166 MODULE_PARM_DESC(multicast_filter_limit, "winbond-840 maximum number of filtered multicast addresses");
167 MODULE_PARM_DESC(options, "winbond-840: Bits 0-3: media type, bit 17: full duplex");
168 MODULE_PARM_DESC(full_duplex, "winbond-840 full duplex setting(s) (1)");
173 I. Board Compatibility
175 This driver is for the Winbond w89c840 chip.
177 II. Board-specific settings
181 III. Driver operation
183 This chip is very similar to the Digital 21*4* "Tulip" family. The first
184 twelve registers and the descriptor format are nearly identical. Read a
185 Tulip manual for operational details.
187 A significant difference is that the multicast filter and station address are
188 stored in registers rather than loaded through a pseudo-transmit packet.
190 Unlike the Tulip, transmit buffers are limited to 1KB. To transmit a
191 full-sized packet we must use both data buffers in a descriptor. Thus the
192 driver uses ring mode where descriptors are implicitly sequential in memory,
193 rather than using the second descriptor address as a chain pointer to
194 subsequent descriptors.
198 If you are going to almost clone a Tulip, why not go all the way and avoid
199 the need for a new driver?
203 http://www.scyld.com/expert/100mbps.html
204 http://www.scyld.com/expert/NWay.html
205 http://www.winbond.com.tw/
209 A horrible bug exists in the transmit FIFO. Apparently the chip doesn't
210 correctly detect a full FIFO, and queuing more than 2048 bytes may result in
211 silent data corruption.
213 Test with 'ping -s 10000' on a fast computer.
222 enum pci_id_flags_bits {
223 /* Set PCI command register bits before calling probe1(). */
224 PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
225 /* Read and map the single following PCI BAR. */
226 PCI_ADDR0=0<<4, PCI_ADDR1=1<<4, PCI_ADDR2=2<<4, PCI_ADDR3=3<<4,
227 PCI_ADDR_64BITS=0x100, PCI_NO_ACPI_WAKE=0x200, PCI_NO_MIN_LATENCY=0x400,
229 enum chip_capability_flags {
230 CanHaveMII=1, HasBrokenTx=2, AlwaysFDX=4, FDXOnNoMII=8,};
232 #define W840_FLAGS (PCI_USES_IO | PCI_ADDR0 | PCI_USES_MASTER)
234 #define W840_FLAGS (PCI_USES_MEM | PCI_ADDR1 | PCI_USES_MASTER)
237 static struct pci_device_id w840_pci_tbl[] = {
238 { 0x1050, 0x0840, PCI_ANY_ID, 0x8153, 0, 0, 0 },
239 { 0x1050, 0x0840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
240 { 0x11f6, 0x2011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
243 MODULE_DEVICE_TABLE(pci, w840_pci_tbl);
248 int pci, pci_mask, subsystem, subsystem_mask;
249 int revision, revision_mask; /* Only 8 bits. */
251 enum pci_id_flags_bits pci_flags;
252 int io_size; /* Needed for I/O region check or ioremap(). */
253 int drv_flags; /* Driver use, intended as capability flags. */
255 static struct pci_id_info pci_id_tbl[] = {
256 {"Winbond W89c840", /* Sometime a Level-One switch card. */
257 { 0x08401050, 0xffffffff, 0x81530000, 0xffff0000 },
258 W840_FLAGS, 128, CanHaveMII | HasBrokenTx | FDXOnNoMII},
259 {"Winbond W89c840", { 0x08401050, 0xffffffff, },
260 W840_FLAGS, 128, CanHaveMII | HasBrokenTx},
261 {"Compex RL100-ATX", { 0x201111F6, 0xffffffff,},
262 W840_FLAGS, 128, CanHaveMII | HasBrokenTx},
263 {0,}, /* 0 terminated list. */
266 /* This driver was written to use PCI memory space, however some x86 systems
267 work only with I/O space accesses. Pass -DUSE_IO_OPS to use PCI I/O space
268 accesses instead of memory space. */
285 /* Offsets to the Command and Status Registers, "CSRs".
286 While similar to the Tulip, these registers are longword aligned.
287 Note: It's not useful to define symbolic names for every register bit in
288 the device. The name can only partially document the semantics and make
289 the driver longer and more difficult to read.
292 PCIBusCfg=0x00, TxStartDemand=0x04, RxStartDemand=0x08,
293 RxRingPtr=0x0C, TxRingPtr=0x10,
294 IntrStatus=0x14, NetworkConfig=0x18, IntrEnable=0x1C,
295 RxMissed=0x20, EECtrl=0x24, MIICtrl=0x24, BootRom=0x28, GPTimer=0x2C,
296 CurRxDescAddr=0x30, CurRxBufAddr=0x34, /* Debug use */
297 MulticastFilter0=0x38, MulticastFilter1=0x3C, StationAddr=0x40,
298 CurTxDescAddr=0x4C, CurTxBufAddr=0x50,
301 /* Bits in the interrupt status/enable registers. */
302 /* The bits in the Intr Status/Enable registers, mostly interrupt sources. */
303 enum intr_status_bits {
304 NormalIntr=0x10000, AbnormalIntr=0x8000,
305 IntrPCIErr=0x2000, TimerInt=0x800,
306 IntrRxDied=0x100, RxNoBuf=0x80, IntrRxDone=0x40,
307 TxFIFOUnderflow=0x20, RxErrIntr=0x10,
308 TxIdle=0x04, IntrTxStopped=0x02, IntrTxDone=0x01,
311 /* Bits in the NetworkConfig register. */
313 AcceptErr=0x80, AcceptRunt=0x40,
314 AcceptBroadcast=0x20, AcceptMulticast=0x10,
315 AcceptAllPhys=0x08, AcceptMyPhys=0x02,
319 MDIO_ShiftClk=0x10000, MDIO_DataIn=0x80000, MDIO_DataOut=0x20000,
320 MDIO_EnbOutput=0x40000, MDIO_EnbIn = 0x00000,
323 /* The Tulip Rx and Tx buffer descriptors. */
324 struct w840_rx_desc {
331 struct w840_tx_desc {
334 u32 buffer1, buffer2;
337 /* Bits in network_desc.status */
338 enum desc_status_bits {
339 DescOwn=0x80000000, DescEndRing=0x02000000, DescUseLink=0x01000000,
340 DescWholePkt=0x60000000, DescStartPkt=0x20000000, DescEndPkt=0x40000000,
344 #define MII_CNT 1 /* winbond only supports one MII */
345 struct netdev_private {
346 struct w840_rx_desc *rx_ring;
347 dma_addr_t rx_addr[RX_RING_SIZE];
348 struct w840_tx_desc *tx_ring;
349 dma_addr_t tx_addr[TX_RING_SIZE];
350 dma_addr_t ring_dma_addr;
351 /* The addresses of receive-in-place skbuffs. */
352 struct sk_buff* rx_skbuff[RX_RING_SIZE];
353 /* The saved address of a sent-in-place packet/buffer, for later free(). */
354 struct sk_buff* tx_skbuff[TX_RING_SIZE];
355 struct net_device_stats stats;
356 struct timer_list timer; /* Media monitoring timer. */
357 /* Frequently used values: keep some adjacent for cache effect. */
359 int chip_id, drv_flags;
360 struct pci_dev *pci_dev;
362 struct w840_rx_desc *rx_head_desc;
363 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
364 unsigned int rx_buf_sz; /* Based on MTU+slack. */
365 unsigned int cur_tx, dirty_tx;
366 unsigned int tx_q_bytes;
367 unsigned int tx_full; /* The Tx queue is full. */
368 /* MII transceiver section. */
369 int mii_cnt; /* MII device addresses. */
370 unsigned char phys[MII_CNT]; /* MII device addresses, but only the first is used */
372 struct mii_if_info mii_if;
375 static int eeprom_read(long ioaddr, int location);
376 static int mdio_read(struct net_device *dev, int phy_id, int location);
377 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
378 static int netdev_open(struct net_device *dev);
379 static int update_link(struct net_device *dev);
380 static void netdev_timer(unsigned long data);
381 static void init_rxtx_rings(struct net_device *dev);
382 static void free_rxtx_rings(struct netdev_private *np);
383 static void init_registers(struct net_device *dev);
384 static void tx_timeout(struct net_device *dev);
385 static int alloc_ringdesc(struct net_device *dev);
386 static void free_ringdesc(struct netdev_private *np);
387 static int start_tx(struct sk_buff *skb, struct net_device *dev);
388 static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
389 static void netdev_error(struct net_device *dev, int intr_status);
390 static int netdev_rx(struct net_device *dev);
391 static u32 __set_rx_mode(struct net_device *dev);
392 static void set_rx_mode(struct net_device *dev);
393 static struct net_device_stats *get_stats(struct net_device *dev);
394 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
395 static struct ethtool_ops netdev_ethtool_ops;
396 static int netdev_close(struct net_device *dev);
/*
 * Probe/attach one W89c840 PCI device: enable the device, map its
 * registers, read the station address from the EEPROM, reset the chip,
 * fill in the net_device method pointers and scan for an MII PHY.
 * Returns 0 on success, negative errno on failure.
 * NOTE(review): several intermediate lines are elided in this listing,
 * so the err_out_* unwind paths are only partially visible.
 */
400 static int __devinit w840_probe1 (struct pci_dev *pdev,
401 const struct pci_device_id *ent)
403 struct net_device *dev;
404 struct netdev_private *np;
406 int chip_idx = ent->driver_data;
408 int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
411 i = pci_enable_device(pdev);
414 pci_set_master(pdev);
/* The chip can only DMA within the 32-bit address space. */
418 if (pci_set_dma_mask(pdev,0xFFFFffff)) {
419 printk(KERN_WARNING "Winbond-840: Device %s disabled due to DMA limitations.\n",
423 dev = alloc_etherdev(sizeof(*np));
426 SET_MODULE_OWNER(dev);
427 SET_NETDEV_DEV(dev, &pdev->dev);
429 if (pci_request_regions(pdev, DRV_NAME))
433 ioaddr = pci_resource_start(pdev, 0);
435 ioaddr = pci_resource_start(pdev, 1);
436 ioaddr = (long) ioremap (ioaddr, pci_id_tbl[chip_idx].io_size);
438 goto err_out_free_res;
/* Station address lives in the first three 16-bit EEPROM words. */
441 for (i = 0; i < 3; i++)
442 ((u16 *)dev->dev_addr)[i] = le16_to_cpu(eeprom_read(ioaddr, i));
444 /* Reset the chip to erase previous misconfiguration.
445 No hold time required! */
446 writel(0x00000001, ioaddr + PCIBusCfg);
448 dev->base_addr = ioaddr;
453 np->chip_id = chip_idx;
454 np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
455 spin_lock_init(&np->lock);
456 np->mii_if.dev = dev;
457 np->mii_if.mdio_read = mdio_read;
458 np->mii_if.mdio_write = mdio_write;
460 pci_set_drvdata(pdev, dev);
463 option = dev->mem_start;
465 /* The lower four bits are the media type. */
468 np->mii_if.full_duplex = 1;
470 printk(KERN_INFO "%s: ignoring user supplied media type %d",
471 dev->name, option & 15);
473 if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0)
474 np->mii_if.full_duplex = 1;
/* A user-forced duplex setting disables media autoselection. */
476 if (np->mii_if.full_duplex)
477 np->mii_if.force_media = 1;
479 /* The chip-specific entries in the device structure. */
480 dev->open = &netdev_open;
481 dev->hard_start_xmit = &start_tx;
482 dev->stop = &netdev_close;
483 dev->get_stats = &get_stats;
484 dev->set_multicast_list = &set_rx_mode;
485 dev->do_ioctl = &netdev_ioctl;
486 dev->ethtool_ops = &netdev_ethtool_ops;
487 dev->tx_timeout = &tx_timeout;
488 dev->watchdog_timeo = TX_TIMEOUT;
490 i = register_netdev(dev);
492 goto err_out_cleardev;
494 printk(KERN_INFO "%s: %s at 0x%lx, ",
495 dev->name, pci_id_tbl[chip_idx].name, ioaddr);
496 for (i = 0; i < 5; i++)
497 printk("%2.2x:", dev->dev_addr[i]);
498 printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
/* Scan MII addresses 1..31 for the (single) supported transceiver. */
500 if (np->drv_flags & CanHaveMII) {
501 int phy, phy_idx = 0;
502 for (phy = 1; phy < 32 && phy_idx < MII_CNT; phy++) {
503 int mii_status = mdio_read(dev, phy, MII_BMSR);
504 if (mii_status != 0xffff && mii_status != 0x0000) {
505 np->phys[phy_idx++] = phy;
506 np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
507 np->mii = (mdio_read(dev, phy, MII_PHYSID1) << 16)+
508 mdio_read(dev, phy, MII_PHYSID2);
509 printk(KERN_INFO "%s: MII PHY %8.8xh found at address %d, status "
510 "0x%4.4x advertising %4.4x.\n",
511 dev->name, np->mii, phy, mii_status, np->mii_if.advertising);
514 np->mii_cnt = phy_idx;
515 np->mii_if.phy_id = np->phys[0];
517 printk(KERN_WARNING "%s: MII PHY not found -- this device may "
518 "not operate correctly.\n", dev->name);
/* Failure unwind: clear drvdata, unmap registers, release regions. */
526 pci_set_drvdata(pdev, NULL);
528 iounmap((void *)ioaddr);
531 pci_release_regions(pdev);
538 /* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. These are
539 often serial bit streams generated by the host processor.
540 The example below is for the common 93c46 EEPROM, 64 16 bit words. */
542 /* Delay between EEPROM clock transitions.
543 No extra delay is needed with 33Mhz PCI, but future 66Mhz access may need
544 a delay. Note that pre-2.0.34 kernels had a cache-alignment bug that
545 made udelay() unreliable.
546 The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is
549 #define eeprom_delay(ee_addr) readl(ee_addr)
551 enum EEPROM_Ctrl_Bits {
552 EE_ShiftClk=0x02, EE_Write0=0x801, EE_Write1=0x805,
553 EE_ChipSelect=0x801, EE_DataIn=0x08,
556 /* The EEPROM commands include the always-set leading bit. */
558 EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
/*
 * Bit-bang one 16-bit word at 'location' out of the serial
 * (93c46-style) EEPROM.  Clock transitions are paced by dummy
 * register reads (eeprom_delay).
 */
561 static int eeprom_read(long addr, int location)
565 long ee_addr = addr + EECtrl;
566 int read_cmd = location | EE_ReadCmd;
567 writel(EE_ChipSelect, ee_addr);
569 /* Shift the read command bits out. */
570 for (i = 10; i >= 0; i--) {
571 short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
572 writel(dataval, ee_addr);
573 eeprom_delay(ee_addr);
574 writel(dataval | EE_ShiftClk, ee_addr);
575 eeprom_delay(ee_addr);
577 writel(EE_ChipSelect, ee_addr);
578 eeprom_delay(ee_addr);
/* Clock the 16 data bits in, most significant bit first. */
580 for (i = 16; i > 0; i--) {
581 writel(EE_ChipSelect | EE_ShiftClk, ee_addr);
582 eeprom_delay(ee_addr);
583 retval = (retval << 1) | ((readl(ee_addr) & EE_DataIn) ? 1 : 0);
584 writel(EE_ChipSelect, ee_addr);
585 eeprom_delay(ee_addr);
588 /* Terminate the EEPROM access. */
593 /* MII transceiver control section.
594 Read and write the MII registers using software-generated serial
595 MDIO protocol. See the MII specifications or DP83840A data sheet
598 The maximum data clock rate is 2.5 Mhz. The minimum timing is usually
599 met by back-to-back 33Mhz PCI cycles. */
600 #define mdio_delay(mdio_addr) readl(mdio_addr)
602 /* Set iff a MII transceiver on any interface requires mdio preamble.
603 This is only set with older transceivers, so the extra
604 code size of a per-interface flag is not worthwhile. */
605 static char mii_preamble_required = 1;
607 #define MDIO_WRITE0 (MDIO_EnbOutput)
608 #define MDIO_WRITE1 (MDIO_DataOut | MDIO_EnbOutput)
610 /* Generate the preamble required for initial synchronization and
611 a few older transceivers. */
/* Resynchronize the MII management interface by clocking out ones. */
612 static void mdio_sync(long mdio_addr)
616 /* Establish sync by sending at least 32 logic ones. */
617 while (--bits >= 0) {
618 writel(MDIO_WRITE1, mdio_addr);
619 mdio_delay(mdio_addr);
620 writel(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
621 mdio_delay(mdio_addr);
/*
 * Read one MII register via software-generated MDIO frames on the
 * MIICtrl register.  Returns the 16-bit register value.
 */
625 static int mdio_read(struct net_device *dev, int phy_id, int location)
627 long mdio_addr = dev->base_addr + MIICtrl;
628 int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
631 if (mii_preamble_required)
632 mdio_sync(mdio_addr);
634 /* Shift the read command bits out. */
635 for (i = 15; i >= 0; i--) {
636 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
638 writel(dataval, mdio_addr);
639 mdio_delay(mdio_addr);
640 writel(dataval | MDIO_ShiftClk, mdio_addr);
641 mdio_delay(mdio_addr);
643 /* Read the two transition, 16 data, and wire-idle bits. */
644 for (i = 20; i > 0; i--) {
645 writel(MDIO_EnbIn, mdio_addr);
646 mdio_delay(mdio_addr);
647 retval = (retval << 1) | ((readl(mdio_addr) & MDIO_DataIn) ? 1 : 0);
648 writel(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
649 mdio_delay(mdio_addr);
/* Discard the trailing idle bit and keep the 16 data bits. */
651 return (retval>>1) & 0xffff;
/*
 * Write 'value' into one MII register via software MDIO.  A write to
 * the advertising register (reg 4) on the primary PHY is mirrored in
 * np->mii_if.advertising so later link updates use the new value.
 */
654 static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
656 struct netdev_private *np = dev->priv;
657 long mdio_addr = dev->base_addr + MIICtrl;
658 int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
661 if (location == 4 && phy_id == np->phys[0])
662 np->mii_if.advertising = value;
664 if (mii_preamble_required)
665 mdio_sync(mdio_addr);
667 /* Shift the command bits out. */
668 for (i = 31; i >= 0; i--) {
669 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
671 writel(dataval, mdio_addr);
672 mdio_delay(mdio_addr);
673 writel(dataval | MDIO_ShiftClk, mdio_addr);
674 mdio_delay(mdio_addr);
676 /* Clear out extra bits. */
677 for (i = 2; i > 0; i--) {
678 writel(MDIO_EnbIn, mdio_addr);
679 mdio_delay(mdio_addr);
680 writel(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
681 mdio_delay(mdio_addr);
/*
 * Open the interface: reset the chip, grab the (shared) IRQ, allocate
 * the descriptor rings, program the registers and arm the 1s link
 * timer.  Returns 0 on success or a negative errno from the elided
 * error paths.
 */
687 static int netdev_open(struct net_device *dev)
689 struct netdev_private *np = dev->priv;
690 long ioaddr = dev->base_addr;
693 writel(0x00000001, ioaddr + PCIBusCfg); /* Reset */
/* Detach while setting up so the watchdog/timer stay quiet. */
695 netif_device_detach(dev);
696 i = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
701 printk(KERN_DEBUG "%s: w89c840_open() irq %d.\n",
702 dev->name, dev->irq);
704 if((i=alloc_ringdesc(dev)))
707 spin_lock_irq(&np->lock);
708 netif_device_attach(dev);
710 spin_unlock_irq(&np->lock);
712 netif_start_queue(dev);
714 printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name);
716 /* Set the timer to check for link beat. */
717 init_timer(&np->timer);
718 np->timer.expires = jiffies + 1*HZ;
719 np->timer.data = (unsigned long)dev;
720 np->timer.function = &netdev_timer; /* timer handler */
721 add_timer(&np->timer);
/* Error path: re-attach the device before bailing out. */
724 netif_device_attach(dev);
728 #define MII_DAVICOM_DM9101 0x0181b800
/*
 * Re-read the MII link status and compute the CSR6 speed/duplex bits.
 * Returns np->csr6 with the 100MBit and full-duplex bits updated; the
 * caller feeds the result to update_csr6().
 */
730 static int update_link(struct net_device *dev)
732 struct netdev_private *np = dev->priv;
733 int duplex, fasteth, result, mii_reg;
736 mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);
/* 0xffff means the PHY did not answer; keep the previous settings. */
738 if (mii_reg == 0xffff)
740 /* reread: the link status bit is sticky */
741 mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);
742 if (!(mii_reg & 0x4)) {
743 if (netif_carrier_ok(dev)) {
745 printk(KERN_INFO "%s: MII #%d reports no link. Disabling watchdog.\n",
746 dev->name, np->phys[0]);
747 netif_carrier_off(dev);
751 if (!netif_carrier_ok(dev)) {
753 printk(KERN_INFO "%s: MII #%d link is back. Enabling watchdog.\n",
754 dev->name, np->phys[0]);
755 netif_carrier_on(dev);
758 if ((np->mii & ~0xf) == MII_DAVICOM_DM9101) {
759 /* If the link partner doesn't support autonegotiation
760 * the MII detects its abilities with the "parallel detection".
761 * Some MIIs update the LPA register to the result of the parallel
762 * detection, some don't.
763 * The Davicom PHY [at least 0181b800] doesn't.
764 * Instead bit 9 and 13 of the BMCR are updated to the result
765 * of the negotiation..
767 mii_reg = mdio_read(dev, np->phys[0], MII_BMCR);
768 duplex = mii_reg & BMCR_FULLDPLX;
769 fasteth = mii_reg & BMCR_SPEED100;
/* Normal case: intersect our advertisement with the partner's LPA. */
772 mii_reg = mdio_read(dev, np->phys[0], MII_LPA);
773 negotiated = mii_reg & np->mii_if.advertising;
775 duplex = (negotiated & LPA_100FULL) || ((negotiated & 0x02C0) == LPA_10FULL);
776 fasteth = negotiated & 0x380;
778 duplex |= np->mii_if.force_media;
779 /* remove fastether and fullduplex */
780 result = np->csr6 & ~0x20000200;
784 result |= 0x20000000;
785 if (result != np->csr6 && debug)
786 printk(KERN_INFO "%s: Setting %dMBit-%s-duplex based on MII#%d\n",
787 dev->name, fasteth ? 100 : 10,
788 duplex ? "full" : "half", np->phys[0]);
792 #define RXTX_TIMEOUT 2000
/*
 * Safely change NetworkConfig (CSR6): stop the Rx and Tx processes,
 * busy-wait (bounded by RXTX_TIMEOUT) until the chip reports both
 * idle, then restart with the new configuration.
 */
793 static inline void update_csr6(struct net_device *dev, int new)
795 struct netdev_private *np = dev->priv;
796 long ioaddr = dev->base_addr;
797 int limit = RXTX_TIMEOUT;
799 if (!netif_device_present(dev))
803 /* stop both Tx and Rx processes */
804 writel(np->csr6 & ~0x2002, ioaddr + NetworkConfig);
805 /* wait until they have really stopped */
807 int csr5 = readl(ioaddr + IntrStatus);
/* Extract the Rx and Tx process state fields from the status word. */
810 t = (csr5 >> 17) & 0x07;
813 t = (csr5 >> 20) & 0x07;
820 printk(KERN_INFO "%s: couldn't stop rxtx, IntrStatus %xh.\n",
827 /* and restart them with the new configuration */
828 writel(np->csr6, ioaddr + NetworkConfig);
830 np->mii_if.full_duplex = 1;
/*
 * Periodic media watchdog (rearms itself every 10 s): log chip status
 * when verbose and refresh the link/duplex settings under np->lock.
 */
833 static void netdev_timer(unsigned long data)
835 struct net_device *dev = (struct net_device *)data;
836 struct netdev_private *np = dev->priv;
837 long ioaddr = dev->base_addr;
840 printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x "
842 dev->name, (int)readl(ioaddr + IntrStatus),
843 (int)readl(ioaddr + NetworkConfig));
844 spin_lock_irq(&np->lock);
845 update_csr6(dev, update_link(dev));
846 spin_unlock_irq(&np->lock);
847 np->timer.expires = jiffies + 10*HZ;
848 add_timer(&np->timer);
/*
 * (Re)initialize both descriptor rings and program their base
 * addresses into the chip.  The Tx ring is located directly after the
 * Rx ring inside the single coherent area from alloc_ringdesc().
 */
851 static void init_rxtx_rings(struct net_device *dev)
853 struct netdev_private *np = dev->priv;
856 np->rx_head_desc = &np->rx_ring[0];
857 np->tx_ring = (struct w840_tx_desc*)&np->rx_ring[RX_RING_SIZE];
859 /* Initialize all Rx descriptors. */
860 for (i = 0; i < RX_RING_SIZE; i++) {
861 np->rx_ring[i].length = np->rx_buf_sz;
862 np->rx_ring[i].status = 0;
863 np->rx_skbuff[i] = 0;
865 /* Mark the last entry as wrapping the ring. */
866 np->rx_ring[i-1].length |= DescEndRing;
868 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
869 for (i = 0; i < RX_RING_SIZE; i++) {
870 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
871 np->rx_skbuff[i] = skb;
874 skb->dev = dev; /* Mark as being used by this device. */
875 np->rx_addr[i] = pci_map_single(np->pci_dev,skb->tail,
876 skb->len,PCI_DMA_FROMDEVICE);
878 np->rx_ring[i].buffer1 = np->rx_addr[i];
/* Hand the descriptor to the hardware. */
879 np->rx_ring[i].status = DescOwn;
883 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
885 /* Initialize the Tx descriptors */
886 for (i = 0; i < TX_RING_SIZE; i++) {
887 np->tx_skbuff[i] = 0;
888 np->tx_ring[i].status = 0;
891 np->tx_q_bytes = np->dirty_tx = np->cur_tx = 0;
/* Tell the chip where both rings live. */
893 writel(np->ring_dma_addr, dev->base_addr + RxRingPtr);
894 writel(np->ring_dma_addr+sizeof(struct w840_rx_desc)*RX_RING_SIZE,
895 dev->base_addr + TxRingPtr);
/*
 * Unmap and free every skbuff still held by the Rx and Tx rings.
 * Used on close and on the tx_timeout reset path, with the device
 * quiesced.
 */
899 static void free_rxtx_rings(struct netdev_private* np)
902 /* Free all the skbuffs in the Rx queue. */
903 for (i = 0; i < RX_RING_SIZE; i++) {
904 np->rx_ring[i].status = 0;
905 if (np->rx_skbuff[i]) {
906 pci_unmap_single(np->pci_dev,
908 np->rx_skbuff[i]->len,
910 dev_kfree_skb(np->rx_skbuff[i]);
912 np->rx_skbuff[i] = 0;
/* Likewise for any packets still queued for transmit. */
914 for (i = 0; i < TX_RING_SIZE; i++) {
915 if (np->tx_skbuff[i]) {
916 pci_unmap_single(np->pci_dev,
918 np->tx_skbuff[i]->len,
920 dev_kfree_skb(np->tx_skbuff[i]);
922 np->tx_skbuff[i] = 0;
/*
 * Program the station address, the architecture-dependent PCI
 * bus/burst configuration, the Rx/Tx mode (via update_csr6) and the
 * interrupt mask, then kick the receiver.  Rings must already be set
 * up (init_rxtx_rings).
 */
926 static void init_registers(struct net_device *dev)
928 struct netdev_private *np = dev->priv;
929 long ioaddr = dev->base_addr;
932 for (i = 0; i < 6; i++)
933 writeb(dev->dev_addr[i], ioaddr + StationAddr + i);
935 /* Initialize other registers. */
937 i = (1<<20); /* Big-endian descriptors */
941 i |= (0x04<<2); /* skip length 4 u32 */
942 i |= 0x02; /* give Rx priority */
944 /* Configure the PCI bus bursts and FIFO thresholds.
945 486: Set 8 longword cache alignment, 8 longword burst.
946 586: Set 16 longword cache alignment, no burst limit.
947 Cache alignment bits 15:14 Burst length 13:8
948 0000 <not allowed> 0000 align to cache 0800 8 longwords
949 4000 8 longwords 0100 1 longword 1000 16 longwords
950 8000 16 longwords 0200 2 longwords 2000 32 longwords
951 C000 32 longwords 0400 4 longwords */
953 #if defined (__i386__) && !defined(MODULE)
954 /* When not a module we can work around broken '486 PCI boards. */
955 if (boot_cpu_data.x86 <= 4) {
957 printk(KERN_INFO "%s: This is a 386/486 PCI system, setting cache "
958 "alignment to 8 longwords.\n", dev->name);
962 #elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
964 #elif defined(__sparc__)
967 #warning Processor architecture undefined
970 writel(i, ioaddr + PCIBusCfg);
973 /* 128 byte Tx threshold;
974 Transmit on; Receive on; */
975 update_csr6(dev, 0x00022002 | update_link(dev) | __set_rx_mode(dev));
977 /* Clear and Enable interrupts by setting the interrupt mask. */
978 writel(0x1A0F5, ioaddr + IntrStatus);
979 writel(0x1A0F5, ioaddr + IntrEnable);
/* Start the receive engine polling its ring. */
981 writel(0, ioaddr + RxStartDemand);
/*
 * Watchdog callback: the transmitter hung.  Dump ring state when
 * debugging, then perform a full software reset and ring/register
 * reinitialization (under high load dirty_tx and the chip's internal
 * Tx pointer can fall out of sync) and restart the queue.
 */
984 static void tx_timeout(struct net_device *dev)
986 struct netdev_private *np = dev->priv;
987 long ioaddr = dev->base_addr;
989 printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
990 " resetting...\n", dev->name, (int)readl(ioaddr + IntrStatus));
994 printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
995 for (i = 0; i < RX_RING_SIZE; i++)
996 printk(" %8.8x", (unsigned int)np->rx_ring[i].status);
997 printk("\n"KERN_DEBUG" Tx ring %p: ", np->tx_ring);
998 for (i = 0; i < TX_RING_SIZE; i++)
999 printk(" %8.8x", np->tx_ring[i].status);
1002 printk(KERN_DEBUG "Tx cur %d Tx dirty %d Tx Full %d, q bytes %d.\n",
1003 np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes);
1004 printk(KERN_DEBUG "Tx Descriptor addr %xh.\n",readl(ioaddr+0x4C));
/* Block the interrupt handler while we tear everything down. */
1006 disable_irq(dev->irq);
1007 spin_lock_irq(&np->lock);
1009 * Under high load dirty_tx and the internal tx descriptor pointer
1010 * come out of sync, thus perform a software reset and reinitialize
1014 writel(1, dev->base_addr+PCIBusCfg);
1017 free_rxtx_rings(np);
1018 init_rxtx_rings(dev);
1019 init_registers(dev);
1020 spin_unlock_irq(&np->lock);
1021 enable_irq(dev->irq);
1023 netif_wake_queue(dev);
1024 dev->trans_start = jiffies;
1025 np->stats.tx_errors++;
1029 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
/*
 * Allocate one coherent DMA area holding both descriptor rings and
 * populate it via init_rxtx_rings().  Returns 0 on success; the
 * allocation-failure return is on a line elided from this listing.
 */
1030 static int alloc_ringdesc(struct net_device *dev)
1032 struct netdev_private *np = dev->priv;
/* 32 bytes of slack beyond the MTU for oversized MTUs. */
1034 np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1036 np->rx_ring = pci_alloc_consistent(np->pci_dev,
1037 sizeof(struct w840_rx_desc)*RX_RING_SIZE +
1038 sizeof(struct w840_tx_desc)*TX_RING_SIZE,
1039 &np->ring_dma_addr);
1042 init_rxtx_rings(dev);
/* Release the coherent DMA area allocated by alloc_ringdesc(). */
1046 static void free_ringdesc(struct netdev_private *np)
1048 pci_free_consistent(np->pci_dev,
1049 sizeof(struct w840_rx_desc)*RX_RING_SIZE +
1050 sizeof(struct w840_tx_desc)*TX_RING_SIZE,
1051 np->rx_ring, np->ring_dma_addr);
/*
 * hard_start_xmit: map the skb for DMA, fill the next Tx descriptor
 * (packets larger than TX_BUFLIMIT are split across both chip buffers
 * since each buffer is < 1KB), hand it to the hardware and account
 * queued bytes.  Stops the queue when the ring, or the buggy Tx FIFO
 * (HasBrokenTx workaround), is full.
 */
1055 static int start_tx(struct sk_buff *skb, struct net_device *dev)
1057 struct netdev_private *np = dev->priv;
1060 /* Caution: the write order is important here, set the field
1061 with the "ownership" bits last. */
1063 /* Calculate the next Tx descriptor entry. */
1064 entry = np->cur_tx % TX_RING_SIZE;
1066 np->tx_addr[entry] = pci_map_single(np->pci_dev,
1067 skb->data,skb->len, PCI_DMA_TODEVICE);
1068 np->tx_skbuff[entry] = skb;
1070 np->tx_ring[entry].buffer1 = np->tx_addr[entry];
1071 if (skb->len < TX_BUFLIMIT) {
1072 np->tx_ring[entry].length = DescWholePkt | skb->len;
/* Too big for one buffer: put the remainder in buffer2. */
1074 int len = skb->len - TX_BUFLIMIT;
1076 np->tx_ring[entry].buffer2 = np->tx_addr[entry]+TX_BUFLIMIT;
1077 np->tx_ring[entry].length = DescWholePkt | (len << 11) | TX_BUFLIMIT;
1079 if(entry == TX_RING_SIZE-1)
1080 np->tx_ring[entry].length |= DescEndRing;
1082 /* Now acquire the irq spinlock.
1083 * The difficult race is the ordering between
1084 * increasing np->cur_tx and setting DescOwn:
1085 * - if np->cur_tx is increased first the interrupt
1086 * handler could consider the packet as transmitted
1087 * since DescOwn is cleared.
1088 * - If DescOwn is set first the NIC could report the
1089 * packet as sent, but the interrupt handler would ignore it
1090 * since the np->cur_tx was not yet increased.
1092 spin_lock_irq(&np->lock);
1095 wmb(); /* flush length, buffer1, buffer2 */
1096 np->tx_ring[entry].status = DescOwn;
1097 wmb(); /* flush status and kick the hardware */
1098 writel(0, dev->base_addr + TxStartDemand);
1099 np->tx_q_bytes += skb->len;
1100 /* Work around horrible bug in the chip by marking the queue as full
1101 when we do not have FIFO room for a maximum sized packet. */
1102 if (np->cur_tx - np->dirty_tx > TX_QUEUE_LEN ||
1103 ((np->drv_flags & HasBrokenTx) && np->tx_q_bytes > TX_BUG_FIFO_LIMIT)) {
1104 netif_stop_queue(dev);
1108 spin_unlock_irq(&np->lock);
1110 dev->trans_start = jiffies;
1113 printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
1114 dev->name, np->cur_tx, entry);
/* Reclaim completed Tx descriptors (called from the interrupt handler
 * with np->lock held): account errors/stats, unmap the DMA buffer, free
 * the skb, and wake the queue once there is room again. */
1119 static void netdev_tx_done(struct net_device *dev)
1121 struct netdev_private *np = dev->priv;
/* Walk every descriptor the hardware may have finished with. */
1122 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1123 int entry = np->dirty_tx % TX_RING_SIZE;
1124 int tx_status = np->tx_ring[entry].status;
1128 if (tx_status & 0x8000) { /* There was an error, log it. */
1129 #ifndef final_version
1131 printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
1132 dev->name, tx_status);
1134 np->stats.tx_errors++;
/* Decode the chip's per-descriptor error bits into net_device_stats. */
1135 if (tx_status & 0x0104) np->stats.tx_aborted_errors++;
1136 if (tx_status & 0x0C80) np->stats.tx_carrier_errors++;
1137 if (tx_status & 0x0200) np->stats.tx_window_errors++;
1138 if (tx_status & 0x0002) np->stats.tx_fifo_errors++;
1139 if ((tx_status & 0x0080) && np->mii_if.full_duplex == 0)
1140 np->stats.tx_heartbeat_errors++;
1142 #ifndef final_version
1144 printk(KERN_DEBUG "%s: Transmit slot %d ok, Tx status %8.8x.\n",
1145 dev->name, entry, tx_status);
1147 np->stats.tx_bytes += np->tx_skbuff[entry]->len;
/* Collision count lives in bits 6:3 of the status word. */
1148 np->stats.collisions += (tx_status >> 3) & 15;
1149 np->stats.tx_packets++;
1151 /* Free the original skb. */
1152 pci_unmap_single(np->pci_dev,np->tx_addr[entry],
1153 np->tx_skbuff[entry]->len,
1155 np->tx_q_bytes -= np->tx_skbuff[entry]->len;
1156 dev_kfree_skb_irq(np->tx_skbuff[entry]);
1157 np->tx_skbuff[entry] = 0;
/* Restart the queue only when both the ring and the FIFO-bug byte
   budget have enough headroom (mirrors the stop test in start_tx). */
1160 np->cur_tx - np->dirty_tx < TX_QUEUE_LEN_RESTART &&
1161 np->tx_q_bytes < TX_BUG_FIFO_LIMIT) {
1162 /* The ring is no longer full, clear tbusy. */
1165 netif_wake_queue(dev);
1169 /* The interrupt handler does all of the Rx thread work and cleans up
1170 after the Tx thread. */
1171 static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
1173 struct net_device *dev = (struct net_device *)dev_instance;
1174 struct netdev_private *np = dev->priv;
1175 long ioaddr = dev->base_addr;
/* Bound the amount of work done per interrupt to avoid livelock. */
1176 int work_limit = max_interrupt_work;
/* Ignore interrupts from a detached (e.g. suspended) device. */
1179 if (!netif_device_present(dev))
1182 u32 intr_status = readl(ioaddr + IntrStatus);
1184 /* Acknowledge all of the current interrupt sources ASAP. */
1185 writel(intr_status & 0x001ffff, ioaddr + IntrStatus);
1188 printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
1189 dev->name, intr_status);
/* Nothing for us: neither normal nor abnormal summary bit set. */
1191 if ((intr_status & (NormalIntr|AbnormalIntr)) == 0)
1196 if (intr_status & (IntrRxDone | RxNoBuf))
/* Rx ran out of buffers: kick the receiver after refilling. */
1198 if (intr_status & RxNoBuf)
1199 writel(0, ioaddr + RxStartDemand);
/* Reap finished Tx descriptors under the lock shared with start_tx. */
1201 if (intr_status & (TxIdle | IntrTxDone) &&
1202 np->cur_tx != np->dirty_tx) {
1203 spin_lock(&np->lock);
1204 netdev_tx_done(dev);
1205 spin_unlock(&np->lock);
1208 /* Abnormal error summary/uncommon events handlers. */
1209 if (intr_status & (AbnormalIntr | TxFIFOUnderflow | IntrPCIErr |
1210 TimerInt | IntrTxStopped))
1211 netdev_error(dev, intr_status);
1213 if (--work_limit < 0) {
1214 printk(KERN_WARNING "%s: Too much work at interrupt, "
1215 "status=0x%4.4x.\n", dev->name, intr_status);
1216 /* Set the timer to re-enable the other interrupts after
1218 spin_lock(&np->lock);
1219 if (netif_device_present(dev)) {
/* Mask everything except the GP timer; the timer interrupt will
   restore the full mask later (see netdev_error TimerInt path). */
1220 writel(AbnormalIntr | TimerInt, ioaddr + IntrEnable);
1221 writel(10, ioaddr + GPTimer);
1223 spin_unlock(&np->lock);
1229 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1230 dev->name, (int)readl(ioaddr + IntrStatus));
1231 return IRQ_RETVAL(handled);
1234 /* This routine is logically part of the interrupt handler, but separated
1235 for clarity and better register allocation. */
1236 static int netdev_rx(struct net_device *dev)
1238 struct netdev_private *np = dev->priv;
1239 int entry = np->cur_rx % RX_RING_SIZE;
/* Never process more descriptors than are currently outstanding. */
1240 int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
1243 printk(KERN_DEBUG " In netdev_rx(), entry %d status %4.4x.\n",
1244 entry, np->rx_ring[entry].status);
1247 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1248 while (--work_limit >= 0) {
1249 struct w840_rx_desc *desc = np->rx_head_desc;
1250 s32 status = desc->status;
1253 printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
/* Anything other than a clean single-descriptor packet lands here. */
1257 if ((status & 0x38008300) != 0x0300) {
1258 if ((status & 0x38000300) != 0x0300) {
1259 /* Ignore earlier buffers. */
1260 if ((status & 0xffff) != 0x7fff) {
1261 printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
1262 "multiple buffers, entry %#x status %4.4x!\n",
1263 dev->name, np->cur_rx, status);
1264 np->stats.rx_length_errors++;
1266 } else if (status & 0x8000) {
1267 /* There was a fatal error. */
1269 printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
1271 np->stats.rx_errors++; /* end of a packet.*/
/* Decode the chip's Rx error bits into net_device_stats. */
1272 if (status & 0x0890) np->stats.rx_length_errors++;
1273 if (status & 0x004C) np->stats.rx_frame_errors++;
1274 if (status & 0x0002) np->stats.rx_crc_errors++;
1277 struct sk_buff *skb;
1278 /* Omit the four octet CRC from the length. */
1279 int pkt_len = ((status >> 16) & 0x7ff) - 4;
1281 #ifndef final_version
1283 printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
1284 " status %x.\n", pkt_len, status);
1286 /* Check if the packet is long enough to accept without copying
1287 to a minimally-sized skbuff. */
1288 if (pkt_len < rx_copybreak
1289 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
/* Copybreak path: copy into a fresh small skb and leave the ring
   buffer in place for reuse; only sync the DMA mapping. */
1291 skb_reserve(skb, 2); /* 16 byte align the IP header */
1292 pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry],
1293 np->rx_skbuff[entry]->len,
1294 PCI_DMA_FROMDEVICE);
1295 /* Call copy + cksum if available. */
1297 eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
1298 skb_put(skb, pkt_len);
1300 memcpy(skb_put(skb, pkt_len), np->rx_skbuff[entry]->tail,
1303 pci_dma_sync_single_for_device(np->pci_dev,np->rx_addr[entry],
1304 np->rx_skbuff[entry]->len,
1305 PCI_DMA_FROMDEVICE);
/* Large packet: hand the ring skb itself up the stack; the slot
   will be refilled in the loop below. */
1307 pci_unmap_single(np->pci_dev,np->rx_addr[entry],
1308 np->rx_skbuff[entry]->len,
1309 PCI_DMA_FROMDEVICE);
1310 skb_put(skb = np->rx_skbuff[entry], pkt_len);
1311 np->rx_skbuff[entry] = NULL;
1313 #ifndef final_version /* Remove after testing. */
1314 /* You will want this info for the initial debug. */
1316 printk(KERN_DEBUG " Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:"
1317 "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x "
1319 skb->data[0], skb->data[1], skb->data[2], skb->data[3],
1320 skb->data[4], skb->data[5], skb->data[6], skb->data[7],
1321 skb->data[8], skb->data[9], skb->data[10],
1322 skb->data[11], skb->data[12], skb->data[13],
1323 skb->data[14], skb->data[15], skb->data[16],
1326 skb->protocol = eth_type_trans(skb, dev);
1328 dev->last_rx = jiffies;
1329 np->stats.rx_packets++;
1330 np->stats.rx_bytes += pkt_len;
1332 entry = (++np->cur_rx) % RX_RING_SIZE;
1333 np->rx_head_desc = &np->rx_ring[entry];
1336 /* Refill the Rx ring buffers. */
1337 for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
1338 struct sk_buff *skb;
1339 entry = np->dirty_rx % RX_RING_SIZE;
1340 if (np->rx_skbuff[entry] == NULL) {
1341 skb = dev_alloc_skb(np->rx_buf_sz);
1342 np->rx_skbuff[entry] = skb;
/* Allocation failed: leave the slot empty and retry on the next
   interrupt. */
1344 break; /* Better luck next round. */
1345 skb->dev = dev; /* Mark as being used by this device. */
1346 np->rx_addr[entry] = pci_map_single(np->pci_dev,
1348 skb->len, PCI_DMA_FROMDEVICE);
1349 np->rx_ring[entry].buffer1 = np->rx_addr[entry];
/* Hand the refilled descriptor back to the hardware. */
1352 np->rx_ring[entry].status = DescOwn;
/* Handle the abnormal-interrupt summary: Tx FIFO underflow (bump the Tx
 * start threshold stored in csr6 bits 20:14), missed Rx frames, and the
 * GP-timer interrupt used to re-enable interrupts after an overload. */
1358 static void netdev_error(struct net_device *dev, int intr_status)
1360 long ioaddr = dev->base_addr;
1362 struct netdev_private *np = dev->priv;
1364 printk(KERN_DEBUG "%s: Abnormal event, %8.8x.\n",
1365 dev->name, intr_status);
/* All-ones status means the chip has vanished (e.g. hot unplug). */
1366 if (intr_status == 0xffffffff)
1368 spin_lock(&np->lock);
1369 if (intr_status & TxFIFOUnderflow) {
1371 /* Bump up the Tx threshold */
1373 /* This causes lots of dropped packets,
1374 * and under high load even tx_timeouts
1376 new = np->csr6 + 0x4000;
1378 new = (np->csr6 >> 14)&0x7f;
1382 new = 127; /* load full packet before starting */
1383 new = (np->csr6 & ~(0x7F << 14)) | (new<<14);
1385 printk(KERN_DEBUG "%s: Tx underflow, new csr6 %8.8x.\n",
1387 update_csr6(dev, new);
1389 if (intr_status & IntrRxDied) { /* Missed a Rx frame. */
1390 np->stats.rx_errors++;
1392 if (intr_status & TimerInt) {
1393 /* Re-enable other interrupts. */
1394 if (netif_device_present(dev))
1395 writel(0x1A0F5, ioaddr + IntrEnable);
/* Fold the chip's missed-frame counter into our stats and restart Rx. */
1397 np->stats.rx_missed_errors += readl(ioaddr + RxMissed) & 0xffff;
1398 writel(0, ioaddr + RxStartDemand);
1399 spin_unlock(&np->lock);
/* Return the device statistics, first folding in the chip's hardware
 * missed-frame counter (only touched while the device is running and
 * present; guarded by np->lock). */
1402 static struct net_device_stats *get_stats(struct net_device *dev)
1404 long ioaddr = dev->base_addr;
1405 struct netdev_private *np = dev->priv;
1407 /* The chip only needs to report frames it silently dropped. */
1408 spin_lock_irq(&np->lock);
1409 if (netif_running(dev) && netif_device_present(dev))
1410 np->stats.rx_missed_errors += readl(ioaddr + RxMissed) & 0xffff;
1411 spin_unlock_irq(&np->lock);
/* Program the chip's multicast hash filter registers from the device's
 * flags and multicast list, and return the rx_mode bits to be merged
 * into csr6 by the caller. Does not take np->lock itself. */
1417 static u32 __set_rx_mode(struct net_device *dev)
1419 long ioaddr = dev->base_addr;
1420 u32 mc_filter[2]; /* Multicast hash filter */
1423 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1424 /* Unconditionally log net taps. */
1425 printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
1426 memset(mc_filter, 0xff, sizeof(mc_filter));
1427 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAllPhys
1429 } else if ((dev->mc_count > multicast_filter_limit)
1430 || (dev->flags & IFF_ALLMULTI)) {
1431 /* Too many to match, or accept all multicasts. */
1432 memset(mc_filter, 0xff, sizeof(mc_filter));
1433 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
/* Normal case: set one hash-filter bit per multicast address, using
   the top 6 bits of the Ethernet CRC as the bit index. */
1435 struct dev_mc_list *mclist;
1437 memset(mc_filter, 0, sizeof(mc_filter));
1438 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1439 i++, mclist = mclist->next) {
1440 int filterbit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F;
1442 mc_filter[filterbit >> 5] |= cpu_to_le32(1 << (filterbit & 31));
1444 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1446 writel(mc_filter[0], ioaddr + MulticastFilter0);
1447 writel(mc_filter[1], ioaddr + MulticastFilter1);
/* net_device set_multicast_list hook: compute the new rx_mode bits and
 * merge them into csr6 (bits 0x00F8) under the irq lock. */
1451 static void set_rx_mode(struct net_device *dev)
1453 struct netdev_private *np = dev->priv;
1454 u32 rx_mode = __set_rx_mode(dev);
1455 spin_lock_irq(&np->lock);
1456 update_csr6(dev, (np->csr6 & ~0x00F8) | rx_mode);
1457 spin_unlock_irq(&np->lock);
/* ethtool get_drvinfo: report driver name, version and PCI bus location. */
1460 static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1462 struct netdev_private *np = dev->priv;
1464 strcpy (info->driver, DRV_NAME);
1465 strcpy (info->version, DRV_VERSION);
1466 strcpy (info->bus_info, pci_name(np->pci_dev));
/* ethtool get_settings: read link settings via the generic MII helper,
 * serialized against other MII access by np->lock. */
1469 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1471 struct netdev_private *np = dev->priv;
1474 spin_lock_irq(&np->lock);
1475 rc = mii_ethtool_gset(&np->mii_if, cmd);
1476 spin_unlock_irq(&np->lock);
/* ethtool set_settings: write link settings via the generic MII helper,
 * serialized against other MII access by np->lock. */
1481 static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1483 struct netdev_private *np = dev->priv;
1486 spin_lock_irq(&np->lock);
1487 rc = mii_ethtool_sset(&np->mii_if, cmd);
1488 spin_unlock_irq(&np->lock);
/* ethtool nway_reset: restart MII autonegotiation. */
1493 static int netdev_nway_reset(struct net_device *dev)
1495 struct netdev_private *np = dev->priv;
1496 return mii_nway_restart(&np->mii_if);
/* ethtool get_link: report link-up status from the MII PHY. */
1499 static u32 netdev_get_link(struct net_device *dev)
1501 struct netdev_private *np = dev->priv;
1502 return mii_link_ok(&np->mii_if);
/* ethtool message-level accessors.
 * NOTE(review): the function bodies are not visible in this extract;
 * presumably they read/write the driver's debug level — confirm against
 * the full file. */
1505 static u32 netdev_get_msglevel(struct net_device *dev)
1510 static void netdev_set_msglevel(struct net_device *dev, u32 value)
/* ethtool operations table wired into the net_device; sg and tx-csum
 * report-only ops come from the generic ethtool helpers. */
1515 static struct ethtool_ops netdev_ethtool_ops = {
1516 .get_drvinfo = netdev_get_drvinfo,
1517 .get_settings = netdev_get_settings,
1518 .set_settings = netdev_set_settings,
1519 .nway_reset = netdev_nway_reset,
1520 .get_link = netdev_get_link,
1521 .get_msglevel = netdev_get_msglevel,
1522 .set_msglevel = netdev_set_msglevel,
1523 .get_sg = ethtool_op_get_sg,
1524 .get_tx_csum = ethtool_op_get_tx_csum,
/* SIOCxMIIxxx ioctl handler: read/write PHY registers through
 * mdio_read/mdio_write under np->lock; writes require CAP_NET_ADMIN. */
1527 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1529 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&rq->ifr_data;
1530 struct netdev_private *np = dev->priv;
1533 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
1534 data->phy_id = ((struct netdev_private *)dev->priv)->phys[0] & 0x1f;
1537 case SIOCGMIIREG: /* Read MII PHY register. */
1538 spin_lock_irq(&np->lock);
1539 data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
1540 spin_unlock_irq(&np->lock);
1543 case SIOCSMIIREG: /* Write MII PHY register. */
1544 if (!capable(CAP_NET_ADMIN))
1546 spin_lock_irq(&np->lock);
1547 mdio_write(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
1548 spin_unlock_irq(&np->lock);
/* net_device stop hook: quiesce the chip (csr6 = 0, interrupts masked),
 * release the irq, harvest the missed-frame counter, dump ring state
 * when debugging, then stop the timer and free the rings. */
1555 static int netdev_close(struct net_device *dev)
1557 long ioaddr = dev->base_addr;
1558 struct netdev_private *np = dev->priv;
1560 netif_stop_queue(dev);
1563 printk(KERN_DEBUG "%s: Shutting down ethercard, status was %8.8x "
1564 "Config %8.8x.\n", dev->name, (int)readl(ioaddr + IntrStatus),
1565 (int)readl(ioaddr + NetworkConfig));
1566 printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
1567 dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
1570 /* Stop the chip's Tx and Rx processes. */
/* Detach under the irq lock so the interrupt handler stops touching
   the hardware before we disable it (see suspend comment below). */
1571 spin_lock_irq(&np->lock);
1572 netif_device_detach(dev);
1573 update_csr6(dev, 0);
1574 writel(0x0000, ioaddr + IntrEnable);
1575 spin_unlock_irq(&np->lock);
1577 free_irq(dev->irq, dev);
1579 netif_device_attach(dev);
/* Skip the register read if the chip is gone (all-ones readback). */
1581 if (readl(ioaddr + NetworkConfig) != 0xffffffff)
1582 np->stats.rx_missed_errors += readl(ioaddr + RxMissed) & 0xffff;
1588 printk(KERN_DEBUG" Tx ring at %8.8x:\n",
1590 for (i = 0; i < TX_RING_SIZE; i++)
1591 printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x.\n",
1592 i, np->tx_ring[i].length,
1593 np->tx_ring[i].status, np->tx_ring[i].buffer1);
1594 printk("\n"KERN_DEBUG " Rx ring %8.8x:\n",
1596 for (i = 0; i < RX_RING_SIZE; i++) {
1597 printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
1598 i, np->rx_ring[i].length,
1599 np->rx_ring[i].status, np->rx_ring[i].buffer1);
1602 #endif /* __i386__ debugging only */
1604 del_timer_sync(&np->timer);
1606 free_rxtx_rings(np);
/* PCI removal hook: unregister the netdev, release PCI regions, unmap
 * the register window and clear the driver data pointer. */
1612 static void __devexit w840_remove1 (struct pci_dev *pdev)
1614 struct net_device *dev = pci_get_drvdata(pdev);
1617 unregister_netdev(dev);
1618 pci_release_regions(pdev);
1620 iounmap((char *)(dev->base_addr));
1625 pci_set_drvdata(pdev, NULL);
1631 * suspend/resume synchronization:
1632 * - open, close, do_ioctl:
1633 * rtnl_lock, & netif_device_detach after the rtnl_unlock.
1635 * spin_lock_irq(np->lock), doesn't touch hw if not present
1636 * - hard_start_xmit:
1637 * netif_stop_queue + spin_unlock_wait(&dev->xmit_lock);
1639 * netif_device_detach + spin_unlock_wait(&dev->xmit_lock);
1640 * - set_multicast_list
1641 * netif_device_detach + spin_unlock_wait(&dev->xmit_lock);
1642 * - interrupt handler
1643 * doesn't touch hw if not present, synchronize_irq waits for
1644 * running instances of the interrupt handler.
1646 * Disabling hw requires clearing csr6 & IntrEnable.
1647 * update_csr6 & all functions that write IntrEnable check netif_device_present
1648 * before setting any bits.
1650 * Detach must occur under spin_unlock_irq(), interrupts from a detached
1651 * device would cause an irq storm.
/* PCI suspend hook: if the interface is up, stop the timer, disable the
 * chip and detach the device under the irq lock, then wait out any
 * in-flight xmit/interrupt before the final hardware access. The rings
 * are freed; w840_resume() rebuilds them. */
1653 static int w840_suspend (struct pci_dev *pdev, u32 state)
1655 struct net_device *dev = pci_get_drvdata (pdev);
1656 struct netdev_private *np = dev->priv;
1657 long ioaddr = dev->base_addr;
1660 if (netif_running (dev)) {
1661 del_timer_sync(&np->timer);
1663 spin_lock_irq(&np->lock);
1664 netif_device_detach(dev);
1665 update_csr6(dev, 0);
1666 writel(0, ioaddr + IntrEnable);
1667 netif_stop_queue(dev);
1668 spin_unlock_irq(&np->lock);
/* Let any concurrent hard_start_xmit and interrupt handler drain. */
1670 spin_unlock_wait(&dev->xmit_lock);
1671 synchronize_irq(dev->irq);
1673 np->stats.rx_missed_errors += readl(ioaddr + RxMissed) & 0xffff;
1675 /* no more hardware accesses behind this line. */
/* Sanity checks: the chip must be fully disabled at this point. */
1677 if (np->csr6) BUG();
1678 if (readl(ioaddr + IntrEnable)) BUG();
1680 /* pci_power_off(pdev, -1); */
1682 free_rxtx_rings(np);
/* Interface was down: just mark the device absent. */
1684 netif_device_detach(dev);
/* PCI resume hook: undo w840_suspend(). If the interface was running,
 * re-enable the PCI device, soft-reset the chip via PCIBusCfg, rebuild
 * the rings, reprogram the registers and restart the queue and timer. */
1690 static int w840_resume (struct pci_dev *pdev)
1692 struct net_device *dev = pci_get_drvdata (pdev);
1693 struct netdev_private *np = dev->priv;
1696 if (netif_device_present(dev))
1697 goto out; /* device not suspended */
1698 if (netif_running(dev)) {
1699 pci_enable_device(pdev);
1700 /* pci_power_on(pdev); */
1702 spin_lock_irq(&np->lock);
/* Software reset; the readback flushes the posted write. */
1703 writel(1, dev->base_addr+PCIBusCfg);
1704 readl(dev->base_addr+PCIBusCfg);
1706 netif_device_attach(dev);
1707 init_rxtx_rings(dev);
1708 init_registers(dev);
1709 spin_unlock_irq(&np->lock);
1711 netif_wake_queue(dev);
1713 mod_timer(&np->timer, jiffies + 1*HZ);
/* Interface was down: just mark the device present again. */
1715 netif_device_attach(dev);
/* PCI driver registration table: probe/remove plus power management. */
1723 static struct pci_driver w840_driver = {
1725 .id_table = w840_pci_tbl,
1726 .probe = w840_probe1,
1727 .remove = __devexit_p(w840_remove1),
1729 .suspend = w840_suspend,
1730 .resume = w840_resume,
/* Module entry point: register the PCI driver. */
1734 static int __init w840_init(void)
1736 /* when a module, this is printed whether or not devices are found in probe */
1740 return pci_module_init(&w840_driver);
/* Module exit point: unregister the PCI driver. */
1743 static void __exit w840_exit(void)
1745 pci_unregister_driver(&w840_driver);
1748 module_init(w840_init);
1749 module_exit(w840_exit);