1 /* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
3 Written 1999-2000 by Donald Becker.
5 This software may be used and distributed according to the terms of
6 the GNU General Public License (GPL), incorporated herein by reference.
7 Drivers based on or derived from this code fall under the GPL and must
8 retain the authorship, copyright and license notice. This file is not
9 a complete program and may only be used when the entire operating
10 system is licensed under the GPL.
12 The author may be reached as becker@scyld.com, or C/O
13 Scyld Computing Corporation
14 410 Severn Ave., Suite 210
17 Support and updates available at
18 http://www.scyld.com/network/sundance.html
21 Version LK1.01a (jgarzik):
22 - Replace some MII-related magic numbers with constants
24 Version LK1.02 (D-Link):
25 - Add new board to PCI ID list
28 Version LK1.03 (D-Link):
29 - New Rx scheme, reduce Rx congestion
30 - Option to disable flow control
32 Version LK1.04 (D-Link):
34 - More support for ethtool.
37 - Remove unused/constant members from struct pci_id_info
38 (which then allows removal of 'drv_flags' from private struct)
40 - If no phy is found, fail to load that board (jgarzik)
41 - Always start phy id scan at id 1 to avoid problems (Donald Becker)
42 - Autodetect where mii_preamble_required is needed,
43 default to not needed. (Donald Becker)
46 - Remove mii_preamble_required module parameter (Donald Becker)
47 - Add per-interface mii_preamble_required (setting is autodetected)
49 - Remove unnecessary cast from void pointer (jgarzik)
50 - Re-align comments in private struct (jgarzik)
52 Version LK1.04c (jgarzik):
53 - Support bitmapped message levels (NETIF_MSG_xxx), and the
54 two ethtool ioctls that get/set them
55 - Don't hand-code MII ethtool support, use standard API/lib
58 - Merge from Donald Becker's sundance.c: (Jason Lunz)
59 * proper support for variably-sized MTUs
60 * default to PIO, to fix chip bugs
61 - Add missing unregister_netdev (Jason Lunz)
62 - Add CONFIG_SUNDANCE_MMIO config option (jgarzik)
63 - Better rx buf size calculation (Donald Becker)
65 Version LK1.05 (D-Link):
66 - Fix DFE-580TX packet drop issue (for DL10050C)
69 Version LK1.06 (D-Link):
70 - Fix crash while unloading driver
72 Version LK1.06b (D-Link):
73 - New tx scheme, adaptive tx_coalesce
75 Version LK1.07 (D-Link):
76 - Fix tx bugs in big-endian machines
77 - Remove unused max_interrupt_work module parameter, the new
78 NAPI-like rx scheme doesn't need it.
79 - Remove redundant get_stats() call in intr_handler(); those
80 I/O accesses could affect performance on ARM-based systems
81 - Add Linux software VLAN support
83 Version LK1.08 (Philippe De Muyter phdm@macqel.be):
84 - Fix bug of custom mac address
85 (StationAddr register only accept word write)
87 Version LK1.09 (D-Link):
88 - Fix the flowctrl bug.
89 - Set Pause bit in MII ANAR if flow control enabled.
91 Version LK1.09a (ICPlus):
92 - Add the delay time in reading the contents of EEPROM
94 Version LK1.10 (Philippe De Muyter phdm@macqel.be):
95 - Make 'unblock interface after Tx underrun' work
99 #define DRV_NAME "sundance"
100 #define DRV_VERSION "1.01+LK1.10"
101 #define DRV_RELDATE "28-Oct-2005"
104 /* The user-configurable values.
105 These may be modified when a driver module is loaded.*/
106 static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
107 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
108 Typical is a 64 element hash table based on the Ethernet CRC. */
109 static const int multicast_filter_limit = 32;
111 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
112 Setting to > 1518 effectively disables this feature.
113 This chip can receive into offset buffers, so the Alpha does not
114 need a copy-align. */
115 static int rx_copybreak;
116 static int flowctrl=1;
118 /* media[] specifies the media type the NIC operates at.
119 autosense Autosensing active media.
120 10mbps_hd 10Mbps half duplex.
121 10mbps_fd 10Mbps full duplex.
122 100mbps_hd 100Mbps half duplex.
123 100mbps_fd 100Mbps full duplex.
124 0 Autosensing active media.
125 1 10Mbps half duplex.
126 2 10Mbps full duplex.
127 3 100Mbps half duplex.
128 4 100Mbps full duplex.
131 static char *media[MAX_UNITS];
134 /* Operational parameters that are set at compile time. */
136 /* Keep the ring sizes a power of two for compile efficiency.
137 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
138 Making the Tx ring too large decreases the effectiveness of channel
139 bonding and packet priority, and more than 128 requires modifying the
141 Large receive rings merely waste memory. */
142 #define TX_RING_SIZE 32
143 #define TX_QUEUE_LEN (TX_RING_SIZE - 1) /* Limit ring entries actually used. */
144 #define RX_RING_SIZE 64
146 #define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct netdev_desc)
147 #define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct netdev_desc)
149 /* Operational parameters that usually are not changed. */
150 /* Time in jiffies before concluding the transmitter is hung. */
151 #define TX_TIMEOUT (4*HZ)
152 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
154 /* Include files, designed to support most kernel versions 2.0.0 and later. */
155 #include <linux/module.h>
156 #include <linux/kernel.h>
157 #include <linux/string.h>
158 #include <linux/timer.h>
159 #include <linux/errno.h>
160 #include <linux/ioport.h>
161 #include <linux/slab.h>
162 #include <linux/interrupt.h>
163 #include <linux/pci.h>
164 #include <linux/netdevice.h>
165 #include <linux/etherdevice.h>
166 #include <linux/skbuff.h>
167 #include <linux/init.h>
168 #include <linux/bitops.h>
169 #include <asm/uaccess.h>
170 #include <asm/processor.h> /* Processor type for cache alignment. */
172 #include <linux/delay.h>
173 #include <linux/spinlock.h>
174 #ifndef _COMPAT_WITH_OLD_KERNEL
175 #include <linux/crc32.h>
176 #include <linux/ethtool.h>
177 #include <linux/mii.h>
185 /* These identify the driver base version and may not be removed. */
186 static char version[] __devinitdata =
187 KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n"
188 KERN_INFO " http://www.scyld.com/network/sundance.html\n";
190 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
191 MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
192 MODULE_LICENSE("GPL");
194 module_param(debug, int, 0);
195 module_param(rx_copybreak, int, 0);
196 module_param_array(media, charp, NULL, 0);
197 module_param(flowctrl, int, 0);
198 MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
199 MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
200 MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
205 I. Board Compatibility
207 This driver is designed for the Sundance Technologies "Alta" ST201 chip.
209 II. Board-specific settings
211 III. Driver operation
215 This driver uses two statically allocated fixed-size descriptor lists
216 formed into rings by a branch from the final descriptor to the beginning of
217 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
218 Some chips explicitly use only 2^N sized rings, while others use a
219 'next descriptor' pointer that the driver forms into rings.
221 IIIb/c. Transmit/Receive Structure
223 This driver uses a zero-copy receive and transmit scheme.
224 The driver allocates full frame size skbuffs for the Rx ring buffers at
225 open() time and passes the skb->data field to the chip as receive data
226 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
227 a fresh skbuff is allocated and the frame is copied to the new skbuff.
228 When the incoming frame is larger, the skbuff is passed directly up the
229 protocol stack. Buffers consumed this way are replaced by newly allocated
230 skbuffs in a later phase of receives.
232 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
233 using a full-sized skbuff for small frames vs. the copying costs of larger
234 frames. New boards are typically used in generously configured machines
235 and the underfilled buffers have negligible impact compared to the benefit of
236 a single allocation size, so the default value of zero results in never
237 copying packets. When copying is done, the cost is usually mitigated by using
238 a combined copy/checksum routine. Copying also preloads the cache, which is
239 most useful with small frames.
241 A subtle aspect of the operation is that the IP header at offset 14 in an
242 ethernet frame isn't longword aligned for further processing.
243 Unaligned buffers are permitted by the Sundance hardware, so
244 frames are received into the skbuff at an offset of "+2", 16-byte aligning
247 IIId. Synchronization
249 The driver runs as two independent, single-threaded flows of control. One
250 is the send-packet routine, which enforces single-threaded use by the
251 dev->tbusy flag. The other thread is the interrupt handler, which is single
252 threaded by the hardware and interrupt handling software.
254 The send packet thread has partial control over the Tx ring and 'dev->tbusy'
255 flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
256 queue slot is empty, it clears the tbusy flag when finished otherwise it sets
257 the 'lp->tx_full' flag.
259 The interrupt handler has exclusive control over the Rx ring and records stats
260 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
261 empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
262 clears both the tx_full and tbusy flags.
268 The Sundance ST201 datasheet, preliminary version.
269 The Kendin KS8723 datasheet, preliminary version.
270 The ICplus IP100 datasheet, preliminary version.
271 http://www.scyld.com/expert/100mbps.html
272 http://www.scyld.com/expert/NWay.html
278 /* Work-around for Kendin chip bugs. */
279 #ifndef CONFIG_SUNDANCE_MMIO
283 static struct pci_device_id sundance_pci_tbl[] = {
284 {0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0},
285 {0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1},
286 {0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2},
287 {0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3},
288 {0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
289 {0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5},
290 {0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6},
293 MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
302 static const struct pci_id_info pci_id_tbl[] = {
303 {"D-Link DFE-550TX FAST Ethernet Adapter"},
304 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
305 {"D-Link DFE-580TX 4 port Server Adapter"},
306 {"D-Link DFE-530TXS FAST Ethernet Adapter"},
307 {"D-Link DL10050-based FAST Ethernet Adapter"},
308 {"IC Plus IP100 Fast Ethernet Adapter"},
309 {"IC Plus IP100A Fast Ethernet Adapter" },
310 {NULL,}, /* 0 terminated list. */
313 /* This driver was written to use PCI memory space, however x86-oriented
314 hardware often uses I/O space accesses. */
316 /* Offsets to the device registers.
317 Unlike software-only systems, device drivers interact with complex hardware.
318 It's not useful to define symbolic names for every register bit in the
319 device. The name can only partially document the semantics and make
320 the driver longer and more difficult to read.
321 In general, only the important configuration values or bits changed
322 multiple times should be defined symbolically.
327 TxDMABurstThresh = 0x08,
328 TxDMAUrgentThresh = 0x09,
329 TxDMAPollPeriod = 0x0a,
334 RxDMABurstThresh = 0x14,
335 RxDMAUrgentThresh = 0x15,
336 RxDMAPollPeriod = 0x16,
341 TxStartThresh = 0x3c,
342 RxEarlyThresh = 0x3e,
357 MulticastFilter0 = 0x60,
358 MulticastFilter1 = 0x64,
365 StatsCarrierError = 0x74,
366 StatsLateColl = 0x75,
367 StatsMultiColl = 0x76,
371 StatsTxXSDefer = 0x7a,
377 /* Aliased and bogus values! */
380 enum ASICCtrl_HiWord_bit {
381 GlobalReset = 0x0001,
386 NetworkReset = 0x0020,
391 /* Bits in the interrupt status/mask registers. */
392 enum intr_status_bits {
393 IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
394 IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
396 StatsMax=0x0080, LinkChange=0x0100,
397 IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
400 /* Bits in the RxMode register. */
402 AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
403 AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
405 /* Bits in MACCtrl. */
406 enum mac_ctrl0_bits {
407 EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
408 EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
410 enum mac_ctrl1_bits {
411 StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080,
412 TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
413 RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
416 /* The Rx and Tx buffer descriptors. */
417 /* Note that using only 32 bit fields simplifies conversion to big-endian
422 struct desc_frag { u32 addr, length; } frag[1];
425 /* Bits in netdev_desc.status */
426 enum desc_status_bits {
428 DescEndPacket=0x4000,
432 DescIntrOnDMADone=0x80000000,
433 DisableAlign = 0x00000001,
436 #define PRIV_ALIGN 15 /* Required alignment mask */
437 /* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
438 within the structure. */
440 struct netdev_private {
441 /* Descriptor rings first for alignment. */
442 struct netdev_desc *rx_ring;
443 struct netdev_desc *tx_ring;
444 struct sk_buff* rx_skbuff[RX_RING_SIZE];
445 struct sk_buff* tx_skbuff[TX_RING_SIZE];
446 dma_addr_t tx_ring_dma;
447 dma_addr_t rx_ring_dma;
448 struct net_device_stats stats;
449 struct timer_list timer; /* Media monitoring timer. */
450 /* Frequently used values: keep some adjacent for cache effect. */
452 spinlock_t rx_lock; /* Group with Tx control cache line. */
455 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
456 unsigned int rx_buf_sz; /* Based on MTU+slack. */
457 struct netdev_desc *last_tx; /* Last Tx descriptor used. */
458 unsigned int cur_tx, dirty_tx;
459 /* These values are keep track of the transceiver/media in use. */
460 unsigned int flowctrl:1;
461 unsigned int default_port:4; /* Last dev->if_port value. */
462 unsigned int an_enable:1;
464 struct tasklet_struct rx_tasklet;
465 struct tasklet_struct tx_tasklet;
468 /* Multicast and receive mode. */
469 spinlock_t mcastlock; /* SMP lock multicast updates. */
471 /* MII transceiver section. */
472 struct mii_if_info mii_if;
473 int mii_preamble_required;
474 unsigned char phys[MII_CNT]; /* MII device addresses, only first one used. */
475 struct pci_dev *pci_dev;
477 unsigned char pci_rev_id;
480 /* The station address location in the EEPROM. */
481 #define EEPROM_SA_OFFSET 0x10
482 #define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
483 IntrDrvRqst | IntrTxDone | StatsMax | \
486 static int change_mtu(struct net_device *dev, int new_mtu);
487 static int eeprom_read(void __iomem *ioaddr, int location);
488 static int mdio_read(struct net_device *dev, int phy_id, int location);
489 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
490 static int netdev_open(struct net_device *dev);
491 static void check_duplex(struct net_device *dev);
492 static void netdev_timer(unsigned long data);
493 static void tx_timeout(struct net_device *dev);
494 static void init_ring(struct net_device *dev);
495 static int start_tx(struct sk_buff *skb, struct net_device *dev);
496 static int reset_tx (struct net_device *dev);
497 static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
498 static void rx_poll(unsigned long data);
499 static void tx_poll(unsigned long data);
500 static void refill_rx (struct net_device *dev);
501 static void netdev_error(struct net_device *dev, int intr_status);
502 static void netdev_error(struct net_device *dev, int intr_status);
503 static void set_rx_mode(struct net_device *dev);
504 static int __set_mac_addr(struct net_device *dev);
505 static struct net_device_stats *get_stats(struct net_device *dev);
506 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
507 static int netdev_close(struct net_device *dev);
508 static struct ethtool_ops ethtool_ops;
510 static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
512 struct netdev_private *np = netdev_priv(dev);
513 void __iomem *ioaddr = np->base + ASICCtrl;
516 /* ST201 documentation states ASICCtrl is a 32bit register */
517 iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
518 /* ST201 documentation states reset can take up to 1 ms */
520 while (ioread32 (ioaddr) & (ResetBusy << 16)) {
521 if (--countdown == 0) {
522 printk(KERN_WARNING "%s : reset not completed !!\n", dev->name);
529 static int __devinit sundance_probe1 (struct pci_dev *pdev,
530 const struct pci_device_id *ent)
532 struct net_device *dev;
533 struct netdev_private *np;
535 int chip_idx = ent->driver_data;
538 void __iomem *ioaddr;
547 int phy, phy_idx = 0;
550 /* when built into the kernel, we only print version if device is found */
552 static int printed_version;
553 if (!printed_version++)
557 if (pci_enable_device(pdev))
559 pci_set_master(pdev);
563 dev = alloc_etherdev(sizeof(*np));
566 SET_MODULE_OWNER(dev);
567 SET_NETDEV_DEV(dev, &pdev->dev);
569 if (pci_request_regions(pdev, DRV_NAME))
572 ioaddr = pci_iomap(pdev, bar, netdev_io_size);
576 for (i = 0; i < 3; i++)
577 ((u16 *)dev->dev_addr)[i] =
578 le16_to_cpu(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
579 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
581 dev->base_addr = (unsigned long)ioaddr;
584 np = netdev_priv(dev);
587 np->chip_id = chip_idx;
588 np->msg_enable = (1 << debug) - 1;
589 spin_lock_init(&np->lock);
590 tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
591 tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);
593 ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
595 goto err_out_cleardev;
596 np->tx_ring = (struct netdev_desc *)ring_space;
597 np->tx_ring_dma = ring_dma;
599 ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
601 goto err_out_unmap_tx;
602 np->rx_ring = (struct netdev_desc *)ring_space;
603 np->rx_ring_dma = ring_dma;
605 np->mii_if.dev = dev;
606 np->mii_if.mdio_read = mdio_read;
607 np->mii_if.mdio_write = mdio_write;
608 np->mii_if.phy_id_mask = 0x1f;
609 np->mii_if.reg_num_mask = 0x1f;
611 /* The chip-specific entries in the device structure. */
612 dev->open = &netdev_open;
613 dev->hard_start_xmit = &start_tx;
614 dev->stop = &netdev_close;
615 dev->get_stats = &get_stats;
616 dev->set_multicast_list = &set_rx_mode;
617 dev->do_ioctl = &netdev_ioctl;
618 SET_ETHTOOL_OPS(dev, ðtool_ops);
619 dev->tx_timeout = &tx_timeout;
620 dev->watchdog_timeo = TX_TIMEOUT;
621 dev->change_mtu = &change_mtu;
622 pci_set_drvdata(pdev, dev);
624 pci_read_config_byte(pdev, PCI_REVISION_ID, &np->pci_rev_id);
626 i = register_netdev(dev);
628 goto err_out_unmap_rx;
630 printk(KERN_INFO "%s: %s at %p, ",
631 dev->name, pci_id_tbl[chip_idx].name, ioaddr);
632 for (i = 0; i < 5; i++)
633 printk("%2.2x:", dev->dev_addr[i]);
634 printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
636 np->phys[0] = 1; /* Default setting */
637 np->mii_preamble_required++;
639 * It seems some phys doesn't deal well with address 0 being accessed
640 * first, so leave address zero to the end of the loop (32 & 31).
642 for (phy = 1; phy <= 32 && phy_idx < MII_CNT; phy++) {
643 int phyx = phy & 0x1f;
644 int mii_status = mdio_read(dev, phyx, MII_BMSR);
645 if (mii_status != 0xffff && mii_status != 0x0000) {
646 np->phys[phy_idx++] = phyx;
647 np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
648 if ((mii_status & 0x0040) == 0)
649 np->mii_preamble_required++;
650 printk(KERN_INFO "%s: MII PHY found at address %d, status "
651 "0x%4.4x advertising %4.4x.\n",
652 dev->name, phyx, mii_status, np->mii_if.advertising);
655 np->mii_preamble_required--;
658 printk(KERN_INFO "%s: No MII transceiver found, aborting. ASIC status %x\n",
659 dev->name, ioread32(ioaddr + ASICCtrl));
660 goto err_out_unregister;
663 np->mii_if.phy_id = np->phys[0];
665 /* Parse override configuration */
667 if (card_idx < MAX_UNITS) {
668 if (media[card_idx] != NULL) {
670 if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
671 strcmp (media[card_idx], "4") == 0) {
673 np->mii_if.full_duplex = 1;
674 } else if (strcmp (media[card_idx], "100mbps_hd") == 0
675 || strcmp (media[card_idx], "3") == 0) {
677 np->mii_if.full_duplex = 0;
678 } else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
679 strcmp (media[card_idx], "2") == 0) {
681 np->mii_if.full_duplex = 1;
682 } else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
683 strcmp (media[card_idx], "1") == 0) {
685 np->mii_if.full_duplex = 0;
695 if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
696 /* Default 100Mbps Full */
699 np->mii_if.full_duplex = 1;
704 mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
706 /* If flow control enabled, we need to advertise it.*/
708 mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
709 mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
710 /* Force media type */
711 if (!np->an_enable) {
713 mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
714 mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
715 mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
716 printk (KERN_INFO "Override speed=%d, %s duplex\n",
717 np->speed, np->mii_if.full_duplex ? "Full" : "Half");
721 /* Perhaps move the reset here? */
722 /* Reset the chip to erase previous misconfiguration. */
723 if (netif_msg_hw(np))
724 printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
725 iowrite16(0x00ff, ioaddr + ASICCtrl + 2);
726 if (netif_msg_hw(np))
727 printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));
733 unregister_netdev(dev);
735 pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
737 pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
739 pci_set_drvdata(pdev, NULL);
740 pci_iounmap(pdev, ioaddr);
742 pci_release_regions(pdev);
748 static int change_mtu(struct net_device *dev, int new_mtu)
750 if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */
752 if (netif_running(dev))
758 #define eeprom_delay(ee_addr) ioread32(ee_addr)
759 /* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
760 static int __devinit eeprom_read(void __iomem *ioaddr, int location)
762 int boguscnt = 10000; /* Typical 1900 ticks. */
763 iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
765 eeprom_delay(ioaddr + EECtrl);
766 if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
767 return ioread16(ioaddr + EEData);
769 } while (--boguscnt > 0);
773 /* MII transceiver control section.
774 Read and write the MII registers using software-generated serial
775 MDIO protocol. See the MII specifications or DP83840A data sheet
778 The maximum data clock rate is 2.5 Mhz. The minimum timing is usually
779 met by back-to-back 33Mhz PCI cycles. */
780 #define mdio_delay() ioread8(mdio_addr)
783 MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
785 #define MDIO_EnbIn (0)
786 #define MDIO_WRITE0 (MDIO_EnbOutput)
787 #define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
789 /* Generate the preamble required for initial synchronization and
790 a few older transceivers. */
791 static void mdio_sync(void __iomem *mdio_addr)
795 /* Establish sync by sending at least 32 logic ones. */
796 while (--bits >= 0) {
797 iowrite8(MDIO_WRITE1, mdio_addr);
799 iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
804 static int mdio_read(struct net_device *dev, int phy_id, int location)
806 struct netdev_private *np = netdev_priv(dev);
807 void __iomem *mdio_addr = np->base + MIICtrl;
808 int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
811 if (np->mii_preamble_required)
812 mdio_sync(mdio_addr);
814 /* Shift the read command bits out. */
815 for (i = 15; i >= 0; i--) {
816 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
818 iowrite8(dataval, mdio_addr);
820 iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
823 /* Read the two transition, 16 data, and wire-idle bits. */
824 for (i = 19; i > 0; i--) {
825 iowrite8(MDIO_EnbIn, mdio_addr);
827 retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
828 iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
831 return (retval>>1) & 0xffff;
834 static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
836 struct netdev_private *np = netdev_priv(dev);
837 void __iomem *mdio_addr = np->base + MIICtrl;
838 int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
841 if (np->mii_preamble_required)
842 mdio_sync(mdio_addr);
844 /* Shift the command bits out. */
845 for (i = 31; i >= 0; i--) {
846 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
848 iowrite8(dataval, mdio_addr);
850 iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
853 /* Clear out extra bits. */
854 for (i = 2; i > 0; i--) {
855 iowrite8(MDIO_EnbIn, mdio_addr);
857 iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
863 static int netdev_open(struct net_device *dev)
865 struct netdev_private *np = netdev_priv(dev);
866 void __iomem *ioaddr = np->base;
869 /* Do we need to reset the chip??? */
871 i = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
875 if (netif_msg_ifup(np))
876 printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
877 dev->name, dev->irq);
880 iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
881 /* The Tx list pointer is written as packets are queued. */
883 /* Initialize other registers. */
885 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
886 iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
888 iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
891 iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);
893 /* Configure the PCI bus bursts and FIFO thresholds. */
895 if (dev->if_port == 0)
896 dev->if_port = np->default_port;
898 spin_lock_init(&np->mcastlock);
901 iowrite16(0, ioaddr + IntrEnable);
902 iowrite16(0, ioaddr + DownCounter);
903 /* Set the chip to poll every N*320nsec. */
904 iowrite8(100, ioaddr + RxDMAPollPeriod);
905 iowrite8(127, ioaddr + TxDMAPollPeriod);
906 /* Fix DFE-580TX packet drop issue */
907 if (np->pci_rev_id >= 0x14)
908 iowrite8(0x01, ioaddr + DebugCtrl1);
909 netif_start_queue(dev);
911 iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
913 if (netif_msg_ifup(np))
914 printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
915 "MAC Control %x, %4.4x %4.4x.\n",
916 dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
917 ioread32(ioaddr + MACCtrl0),
918 ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));
920 /* Set the timer to check for link beat. */
921 init_timer(&np->timer);
922 np->timer.expires = jiffies + 3*HZ;
923 np->timer.data = (unsigned long)dev;
924 np->timer.function = &netdev_timer; /* timer handler */
925 add_timer(&np->timer);
927 /* Enable interrupts by setting the interrupt mask. */
928 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
933 static void check_duplex(struct net_device *dev)
935 struct netdev_private *np = netdev_priv(dev);
936 void __iomem *ioaddr = np->base;
937 int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
938 int negotiated = mii_lpa & np->mii_if.advertising;
942 if (!np->an_enable || mii_lpa == 0xffff) {
943 if (np->mii_if.full_duplex)
944 iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
949 /* Autonegotiation */
950 duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
951 if (np->mii_if.full_duplex != duplex) {
952 np->mii_if.full_duplex = duplex;
953 if (netif_msg_link(np))
954 printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
955 "negotiated capability %4.4x.\n", dev->name,
956 duplex ? "full" : "half", np->phys[0], negotiated);
957 iowrite16(ioread16(ioaddr + MACCtrl0) | duplex ? 0x20 : 0, ioaddr + MACCtrl0);
961 static void netdev_timer(unsigned long data)
963 struct net_device *dev = (struct net_device *)data;
964 struct netdev_private *np = netdev_priv(dev);
965 void __iomem *ioaddr = np->base;
966 int next_tick = 10*HZ;
968 if (netif_msg_timer(np)) {
969 printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
971 dev->name, ioread16(ioaddr + IntrEnable),
972 ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
975 np->timer.expires = jiffies + next_tick;
976 add_timer(&np->timer);
979 static void tx_timeout(struct net_device *dev)
981 struct netdev_private *np = netdev_priv(dev);
982 void __iomem *ioaddr = np->base;
985 netif_stop_queue(dev);
986 tasklet_disable(&np->tx_tasklet);
987 iowrite16(0, ioaddr + IntrEnable);
988 printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
990 " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
991 ioread8(ioaddr + TxFrameId));
995 for (i=0; i<TX_RING_SIZE; i++) {
996 printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
997 (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
998 le32_to_cpu(np->tx_ring[i].next_desc),
999 le32_to_cpu(np->tx_ring[i].status),
1000 (le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
1001 le32_to_cpu(np->tx_ring[i].frag[0].addr),
1002 le32_to_cpu(np->tx_ring[i].frag[0].length));
1004 printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
1005 ioread32(np->base + TxListPtr),
1006 netif_queue_stopped(dev));
1007 printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
1008 np->cur_tx, np->cur_tx % TX_RING_SIZE,
1009 np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
1010 printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
1011 printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
1013 spin_lock_irqsave(&np->lock, flag);
1015 /* Stop and restart the chip's Tx processes . */
1017 spin_unlock_irqrestore(&np->lock, flag);
1021 dev->trans_start = jiffies;
1022 np->stats.tx_errors++;
1023 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1024 netif_wake_queue(dev);
1026 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
1027 tasklet_enable(&np->tx_tasklet);
1031 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1032 static void init_ring(struct net_device *dev)
1034 struct netdev_private *np = netdev_priv(dev);
1037 np->cur_rx = np->cur_tx = 0;
1038 np->dirty_rx = np->dirty_tx = 0;
1041 np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);
1043 /* Initialize all Rx descriptors. */
1044 for (i = 0; i < RX_RING_SIZE; i++) {
1045 np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
1046 ((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
1047 np->rx_ring[i].status = 0;
1048 np->rx_ring[i].frag[0].length = 0;
1049 np->rx_skbuff[i] = NULL;
1052 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
1053 for (i = 0; i < RX_RING_SIZE; i++) {
1054 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
1055 np->rx_skbuff[i] = skb;
1058 skb->dev = dev; /* Mark as being used by this device. */
1059 skb_reserve(skb, 2); /* 16 byte align the IP header. */
1060 np->rx_ring[i].frag[0].addr = cpu_to_le32(
1061 pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz,
1062 PCI_DMA_FROMDEVICE));
1063 np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
1065 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1067 for (i = 0; i < TX_RING_SIZE; i++) {
1068 np->tx_skbuff[i] = NULL;
1069 np->tx_ring[i].status = 0;
/* Tasklet body (np->tx_tasklet): link newly queued Tx descriptors into
   the hardware's descriptor chain and kick the DMA engine if idle. */
1074 static void tx_poll (unsigned long data)
1076 struct net_device *dev = (struct net_device *)data;
1077 struct netdev_private *np = netdev_priv(dev);
1078 unsigned head = np->cur_task % TX_RING_SIZE;
1079 struct netdev_desc *txdesc =
1080 &np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
1082 /* Chain the next pointer */
/* Walk every descriptor queued by start_tx() since the last run
   (cur_task trails cur_tx) and append it to the chain. */
1083 for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
1084 int entry = np->cur_task % TX_RING_SIZE;
1085 txdesc = &np->tx_ring[entry];
1087 np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
1088 entry*sizeof(struct netdev_desc));
1090 np->last_tx = txdesc;
1092 /* Indicate the latest descriptor of tx ring */
1093 txdesc->status |= cpu_to_le32(DescIntrOnTx);
/* If the chip's Tx list pointer is empty (DMA stopped), restart it at
   the first descriptor we just chained. */
1095 if (ioread32 (np->base + TxListPtr) == 0)
1096 iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
1097 np->base + TxListPtr);
/* hard_start_xmit: place skb into the next Tx ring slot and schedule
   tx_poll() to hand the descriptor chain to the hardware. */
1102 start_tx (struct sk_buff *skb, struct net_device *dev)
1104 struct netdev_private *np = netdev_priv(dev);
1105 struct netdev_desc *txdesc;
1108 /* Calculate the next Tx descriptor entry. */
1109 entry = np->cur_tx % TX_RING_SIZE;
1110 np->tx_skbuff[entry] = skb;
1111 txdesc = &np->tx_ring[entry];
/* Descriptor is end-of-chain (next_desc = 0) until tx_poll() links it;
   (entry << 2) stores the frame id the ISR later matches against the
   hardware's reported frame id. */
1113 txdesc->next_desc = 0;
1114 txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
1115 txdesc->frag[0].addr = cpu_to_le32 (pci_map_single (np->pci_dev, skb->data,
1118 txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
1120 /* Increment cur_tx before tasklet_schedule() */
1123 /* Schedule a tx_poll() task */
1124 tasklet_schedule(&np->tx_tasklet);
1126 /* On some architectures: explicitly flush cache lines here. */
/* Flow control: keep the queue open while there is room, otherwise
   stop it (reopened by the interrupt handler as slots complete).
   NOTE(review): the branch body between the condition and
   netif_stop_queue() is elided in this listing (1128 -> 1131). */
1127 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1
1128 && !netif_queue_stopped(dev)) {
1131 netif_stop_queue (dev);
1133 dev->trans_start = jiffies;
1134 if (netif_msg_tx_queued(np)) {
1136 "%s: Transmit frame #%d queued in slot %d.\n",
1137 dev->name, np->cur_tx, entry);
1142 /* Reset hardware tx and free all of tx buffers */
1144 reset_tx (struct net_device *dev)
1146 struct netdev_private *np = netdev_priv(dev);
1147 void __iomem *ioaddr = np->base;
1148 struct sk_buff *skb;
/* May run in interrupt context; selects dev_kfree_skb_irq vs
   dev_kfree_skb below accordingly. */
1150 int irq = in_interrupt();
1152 /* Reset tx logic, TxListPtr will be cleaned */
1153 iowrite16 (TxDisable, ioaddr + MACCtrl1);
1154 iowrite16 (TxReset | DMAReset | FIFOReset | NetworkReset,
1155 ioaddr + ASICCtrl + 2);
/* Poll (up to 50 iterations) for the chip's reset-busy bit to clear. */
1156 for (i=50; i > 0; i--) {
1157 if ((ioread16(ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
1161 /* free all tx skbuff */
/* Every pending Tx buffer is unmapped, freed, and counted as dropped. */
1162 for (i = 0; i < TX_RING_SIZE; i++) {
1163 skb = np->tx_skbuff[i];
1165 pci_unmap_single(np->pci_dev,
1166 np->tx_ring[i].frag[0].addr, skb->len,
1169 dev_kfree_skb_irq (skb);
1171 dev_kfree_skb (skb);
1172 np->tx_skbuff[i] = NULL;
1173 np->stats.tx_dropped++;
1176 np->cur_tx = np->dirty_tx = 0;
/* Re-enable the MAC's Tx/Rx/stats engines after the reset. */
1178 iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
1182 /* The interrupt handler cleans up after the Tx thread,
1183 and schedule a Rx thread work */
1184 static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
1186 struct net_device *dev = (struct net_device *)dev_instance;
1187 struct netdev_private *np = netdev_priv(dev);
1188 void __iomem *ioaddr = np->base;
/* Read and acknowledge all pending interrupt sources in one shot. */
1196 int intr_status = ioread16(ioaddr + IntrStatus);
1197 iowrite16(intr_status, ioaddr + IntrStatus);
1199 if (netif_msg_intr(np))
1200 printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
1201 dev->name, intr_status);
/* Not one of ours; exit (the break/return is elided in this listing). */
1203 if (!(intr_status & DEFAULT_INTR))
/* Rx path: mask Rx interrupts and let the rx_poll tasklet drain the
   ring with a fresh budget (NAPI-like software mitigation). */
1208 if (intr_status & (IntrRxDMADone)) {
1209 iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
1210 ioaddr + IntrEnable);
1212 np->budget = RX_BUDGET;
1213 tasklet_schedule(&np->rx_tasklet);
1215 if (intr_status & (IntrTxDone | IntrDrvRqst)) {
1216 tx_status = ioread16 (ioaddr + TxStatus);
/* Drain up to 32 queued Tx status reports; bit 0x80 = report valid. */
1217 for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
1218 if (netif_msg_tx_done(np))
1220 ("%s: Transmit status is %2.2x.\n",
1221 dev->name, tx_status);
/* 0x1e masks the error bits decoded individually below. */
1222 if (tx_status & 0x1e) {
1223 if (netif_msg_tx_err(np))
1224 printk("%s: Transmit error status %4.4x.\n",
1225 dev->name, tx_status);
1226 np->stats.tx_errors++;
1227 if (tx_status & 0x10)
1228 np->stats.tx_fifo_errors++;
1229 if (tx_status & 0x08)
1230 np->stats.collisions++;
1231 if (tx_status & 0x04)
1232 np->stats.tx_fifo_errors++;
1233 if (tx_status & 0x02)
1234 np->stats.tx_window_errors++;
1236 ** This reset has been verified on
1237 ** DFE-580TX boards ! phdm@macqel.be.
1239 if (tx_status & 0x10) { /* TxUnderrun */
1240 unsigned short txthreshold;
/* Preserve the Tx start threshold across the reset, then restore it. */
1242 txthreshold = ioread16 (ioaddr + TxStartThresh);
1243 /* Restart Tx FIFO and transmitter */
1244 sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
1245 iowrite16 (txthreshold, ioaddr + TxStartThresh);
1246 /* No need to reset the Tx pointer here */
1248 /* Restart the Tx. */
1249 iowrite16 (TxEnable, ioaddr + MACCtrl1);
1251 /* Yup, this is a documentation bug. It cost me *hours*. */
1252 iowrite16 (0, ioaddr + TxStatus);
1254 iowrite32(5000, ioaddr + DownCounter);
/* Re-read the status register for the next loop iteration. */
1257 tx_status = ioread16 (ioaddr + TxStatus);
/* Last-completed frame id: newer silicon reports it in the high byte
   of TxStatus, older parts via the separate TxFrameId register. */
1259 hw_frame_id = (tx_status >> 8) & 0xff;
1261 hw_frame_id = ioread8(ioaddr + TxFrameId);
/* Rev >= 0x14: reap completed descriptors by matching the software
   frame id (stored at status bits 2..9 by start_tx) against hw_frame_id. */
1264 if (np->pci_rev_id >= 0x14) {
1265 spin_lock(&np->lock);
1266 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1267 int entry = np->dirty_tx % TX_RING_SIZE;
1268 struct sk_buff *skb;
1270 sw_frame_id = (le32_to_cpu(
1271 np->tx_ring[entry].status) >> 2) & 0xff;
1272 if (sw_frame_id == hw_frame_id &&
1273 !(le32_to_cpu(np->tx_ring[entry].status)
1276 if (sw_frame_id == (hw_frame_id + 1) %
1279 skb = np->tx_skbuff[entry];
1280 /* Free the original skb. */
1281 pci_unmap_single(np->pci_dev,
1282 np->tx_ring[entry].frag[0].addr,
1283 skb->len, PCI_DMA_TODEVICE);
1284 dev_kfree_skb_irq (np->tx_skbuff[entry]);
1285 np->tx_skbuff[entry] = NULL;
1286 np->tx_ring[entry].frag[0].addr = 0;
1287 np->tx_ring[entry].frag[0].length = 0;
1289 spin_unlock(&np->lock);
/* Older revisions: reap every descriptor the chip no longer owns. */
1291 spin_lock(&np->lock);
1292 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1293 int entry = np->dirty_tx % TX_RING_SIZE;
1294 struct sk_buff *skb;
1295 if (!(le32_to_cpu(np->tx_ring[entry].status)
1298 skb = np->tx_skbuff[entry];
1299 /* Free the original skb. */
1300 pci_unmap_single(np->pci_dev,
1301 np->tx_ring[entry].frag[0].addr,
1302 skb->len, PCI_DMA_TODEVICE);
1303 dev_kfree_skb_irq (np->tx_skbuff[entry]);
1304 np->tx_skbuff[entry] = NULL;
1305 np->tx_ring[entry].frag[0].addr = 0;
1306 np->tx_ring[entry].frag[0].length = 0;
1308 spin_unlock(&np->lock);
/* Reopen the transmit queue once enough ring slots have drained. */
1311 if (netif_queue_stopped(dev) &&
1312 np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1313 /* The ring is no longer full, clear busy flag. */
1314 netif_wake_queue (dev);
1316 /* Abnormal error summary/uncommon events handlers. */
1317 if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
1318 netdev_error(dev, intr_status);
1320 if (netif_msg_intr(np))
1321 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1322 dev->name, ioread16(ioaddr + IntrStatus));
1323 return IRQ_RETVAL(handled);
/* Rx tasklet: walk the Rx ring within np->budget, deliver completed
   frames to the stack, copying small packets (< rx_copybreak) into
   freshly allocated skbs to conserve ring buffers. */
1326 static void rx_poll(unsigned long data)
1328 struct net_device *dev = (struct net_device *)data;
1329 struct netdev_private *np = netdev_priv(dev);
1330 int entry = np->cur_rx % RX_RING_SIZE;
1331 int boguscnt = np->budget;
1332 void __iomem *ioaddr = np->base;
1335 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1337 struct netdev_desc *desc = &(np->rx_ring[entry]);
1338 u32 frame_status = le32_to_cpu(desc->status);
/* Budget exhausted: bail out and reschedule (see tail of function). */
1341 if (--boguscnt < 0) {
/* DescOwn clear means the chip has not completed this descriptor. */
1344 if (!(frame_status & DescOwn))
1346 pkt_len = frame_status & 0x1fff; /* Chip omits the CRC. */
1347 if (netif_msg_rx_status(np))
1348 printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
/* 0x001f4000 masks the per-frame receive error bits decoded below. */
1350 if (frame_status & 0x001f4000) {
1351 /* There was a error. */
1352 if (netif_msg_rx_err(np))
1353 printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
1355 np->stats.rx_errors++;
1356 if (frame_status & 0x00100000) np->stats.rx_length_errors++;
1357 if (frame_status & 0x00010000) np->stats.rx_fifo_errors++;
1358 if (frame_status & 0x00060000) np->stats.rx_frame_errors++;
1359 if (frame_status & 0x00080000) np->stats.rx_crc_errors++;
1360 if (frame_status & 0x00100000) {
1361 printk(KERN_WARNING "%s: Oversized Ethernet frame,"
1363 dev->name, frame_status);
1366 struct sk_buff *skb;
1367 #ifndef final_version
1368 if (netif_msg_rx_status(np))
1369 printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
1370 ", bogus_cnt %d.\n",
1373 /* Check if the packet is long enough to accept without copying
1374 to a minimally-sized skbuff. */
1375 if (pkt_len < rx_copybreak
1376 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1378 skb_reserve(skb, 2); /* 16 byte align the IP header */
/* Sync for CPU, copy out of the DMA buffer, then hand the buffer
   back to the device -- the ring skb stays in place. */
1379 pci_dma_sync_single_for_cpu(np->pci_dev,
1382 PCI_DMA_FROMDEVICE);
1384 eth_copy_and_sum(skb, np->rx_skbuff[entry]->data, pkt_len, 0);
1385 pci_dma_sync_single_for_device(np->pci_dev,
1388 PCI_DMA_FROMDEVICE);
1389 skb_put(skb, pkt_len);
/* Large packet: unmap and pass the ring's own skb up; refill_rx()
   will allocate a replacement for this slot. */
1391 pci_unmap_single(np->pci_dev,
1394 PCI_DMA_FROMDEVICE);
1395 skb_put(skb = np->rx_skbuff[entry], pkt_len);
1396 np->rx_skbuff[entry] = NULL;
1398 skb->protocol = eth_type_trans(skb, dev);
1399 /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
1401 dev->last_rx = jiffies;
1403 entry = (entry + 1) % RX_RING_SIZE;
/* Finished within budget: account and re-enable Rx interrupts. */
1408 np->budget -= received;
1409 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
/* Budget exhausted: top up the budget and reschedule ourselves. */
1417 np->budget -= received;
1418 if (np->budget <= 0)
1419 np->budget = RX_BUDGET;
1420 tasklet_schedule(&np->rx_tasklet);
/* Replenish Rx ring slots whose skbs were consumed by rx_poll():
   allocate, map, and re-arm each empty descriptor. */
1424 static void refill_rx (struct net_device *dev)
1426 struct netdev_private *np = netdev_priv(dev);
1430 /* Refill the Rx ring buffers. */
/* Iterate over the gap between cur_rx and dirty_rx (modulo ring size). */
1431 for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
1432 np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
1433 struct sk_buff *skb;
1434 entry = np->dirty_rx % RX_RING_SIZE;
1435 if (np->rx_skbuff[entry] == NULL) {
1436 skb = dev_alloc_skb(np->rx_buf_sz);
1437 np->rx_skbuff[entry] = skb;
/* Allocation failed (NULL check elided in listing, 1437 -> 1439). */
1439 break; /* Better luck next round. */
1440 skb->dev = dev; /* Mark as being used by this device. */
1441 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1442 np->rx_ring[entry].frag[0].addr = cpu_to_le32(
1443 pci_map_single(np->pci_dev, skb->data,
1444 np->rx_buf_sz, PCI_DMA_FROMDEVICE));
1446 /* Perhaps we need not reset this field. */
/* Clearing status hands the descriptor back to the hardware's view. */
1447 np->rx_ring[entry].frag[0].length =
1448 cpu_to_le32(np->rx_buf_sz | LastFrag);
1449 np->rx_ring[entry].status = 0;
/* Handle uncommon interrupt sources reported by intr_handler():
   link changes, statistics-counter overflow, and PCI bus errors. */
1454 static void netdev_error(struct net_device *dev, int intr_status)
1456 struct netdev_private *np = netdev_priv(dev);
1457 void __iomem *ioaddr = np->base;
1458 u16 mii_ctl, mii_advertise, mii_lpa;
1461 if (intr_status & LinkChange) {
/* Autonegotiation enabled: AND our advertisement with the link
   partner's abilities to find the negotiated speed/duplex. */
1462 if (np->an_enable) {
1463 mii_advertise = mdio_read (dev, np->phys[0], MII_ADVERTISE);
1464 mii_lpa= mdio_read (dev, np->phys[0], MII_LPA);
1465 mii_advertise &= mii_lpa;
1466 printk (KERN_INFO "%s: Link changed: ", dev->name);
1467 if (mii_advertise & ADVERTISE_100FULL) {
1469 printk ("100Mbps, full duplex\n");
1470 } else if (mii_advertise & ADVERTISE_100HALF) {
1472 printk ("100Mbps, half duplex\n");
1473 } else if (mii_advertise & ADVERTISE_10FULL) {
1475 printk ("10Mbps, full duplex\n");
1476 } else if (mii_advertise & ADVERTISE_10HALF) {
1478 printk ("10Mbps, half duplex\n");
/* Forced mode: read speed/duplex straight from the BMCR bits. */
1483 mii_ctl = mdio_read (dev, np->phys[0], MII_BMCR);
1484 speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
1486 printk (KERN_INFO "%s: Link changed: %dMbps ,",
1488 printk ("%s duplex.\n", (mii_ctl & BMCR_FULLDPLX) ?
/* Enable hardware flow control only on full-duplex links; bit 0x0200
   in MulticastFilter1 selects pause-frame reception on this chip. */
1492 if (np->flowctrl && np->mii_if.full_duplex) {
1493 iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
1494 ioaddr + MulticastFilter1+2);
1495 iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
/* Statistics counters near overflow: harvested via get_stats(). */
1499 if (intr_status & StatsMax) {
1502 if (intr_status & IntrPCIErr) {
1503 printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1504 dev->name, intr_status);
1505 /* We must do a global reset of DMA to continue. */
/* Fold the chip's hardware statistics counters into np->stats and
   return the structure to the stack. */
1509 static struct net_device_stats *get_stats(struct net_device *dev)
1511 struct netdev_private *np = netdev_priv(dev);
1512 void __iomem *ioaddr = np->base;
1515 /* We should lock this segment of code for SMP eventually, although
1516 the vulnerability window is very small and statistics are
1518 /* The chip only need report frame silently dropped. */
1519 np->stats.rx_missed_errors += ioread8(ioaddr + RxMissed);
1520 np->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
1521 np->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
1522 np->stats.collisions += ioread8(ioaddr + StatsLateColl);
1523 np->stats.collisions += ioread8(ioaddr + StatsMultiColl);
1524 np->stats.collisions += ioread8(ioaddr + StatsOneColl);
1525 np->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
/* Read (and discard) the remaining counters -- presumably reading
   clears them so they do not overflow; confirm against the datasheet. */
1526 ioread8(ioaddr + StatsTxDefer);
1527 for (i = StatsTxDefer; i <= StatsMcastRx; i++)
1528 ioread8(ioaddr + i);
/* Octet counts are split across low/high 16-bit registers. */
1529 np->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
1530 np->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
1531 np->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
1532 np->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;
/* Program the chip's Rx filter from dev->flags and the multicast list:
   promiscuous, all-multicast, hashed multicast, or unicast-only. */
1537 static void set_rx_mode(struct net_device *dev)
1539 struct netdev_private *np = netdev_priv(dev);
1540 void __iomem *ioaddr = np->base;
1541 u16 mc_filter[4]; /* Multicast hash filter */
1545 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1546 /* Unconditionally log net taps. */
1547 printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
1548 memset(mc_filter, 0xff, sizeof(mc_filter));
1549 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
1550 } else if ((dev->mc_count > multicast_filter_limit)
1551 || (dev->flags & IFF_ALLMULTI)) {
1552 /* Too many to match, or accept all multicasts. */
1553 memset(mc_filter, 0xff, sizeof(mc_filter));
1554 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1555 } else if (dev->mc_count) {
1556 struct dev_mc_list *mclist;
/* Build a 64-bit hash filter: the low 6 bits of each address's CRC
   (taken MSB-first) select one bit position in mc_filter[]. */
1560 memset (mc_filter, 0, sizeof (mc_filter));
1561 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1562 i++, mclist = mclist->next) {
1563 crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
1564 for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
1565 if (crc & 0x80000000) index |= 1 << bit;
1566 mc_filter[index/16] |= (1 << (index % 16));
1568 rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
/* No multicast addresses at all: unicast + broadcast only. */
1570 iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
/* Flow-control enable bit lives in the last filter word on this chip. */
1573 if (np->mii_if.full_duplex && np->flowctrl)
1574 mc_filter[3] |= 0x0200;
1576 for (i = 0; i < 4; i++)
1577 iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
1578 iowrite8(rx_mode, ioaddr + RxMode);
/* Write dev->dev_addr into the chip's StationAddr registers,
   16 bits at a time as little-endian byte pairs. */
1581 static int __set_mac_addr(struct net_device *dev)
1583 struct netdev_private *np = netdev_priv(dev);
1586 addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
1587 iowrite16(addr16, np->base + StationAddr);
1588 addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
1589 iowrite16(addr16, np->base + StationAddr+2);
1590 addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
1591 iowrite16(addr16, np->base + StationAddr+4);
/* ethtool 'begin' hook: reject ethtool operations while the interface
   is down. */
1595 static int check_if_running(struct net_device *dev)
1597 if (!netif_running(dev))
/* ethtool: report driver name, version and PCI bus location. */
1602 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1604 struct netdev_private *np = netdev_priv(dev);
1605 strcpy(info->driver, DRV_NAME);
1606 strcpy(info->version, DRV_VERSION);
1607 strcpy(info->bus_info, pci_name(np->pci_dev));
/* ethtool: read link settings via the generic MII helper, under the
   device lock. */
1610 static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1612 struct netdev_private *np = netdev_priv(dev);
1613 spin_lock_irq(&np->lock);
1614 mii_ethtool_gset(&np->mii_if, ecmd);
1615 spin_unlock_irq(&np->lock);
/* ethtool: apply link settings via the generic MII helper, under the
   device lock; returns the helper's result. */
1619 static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1621 struct netdev_private *np = netdev_priv(dev);
1623 spin_lock_irq(&np->lock);
1624 res = mii_ethtool_sset(&np->mii_if, ecmd);
1625 spin_unlock_irq(&np->lock);
/* ethtool: restart autonegotiation on the PHY. */
1629 static int nway_reset(struct net_device *dev)
1631 struct netdev_private *np = netdev_priv(dev);
1632 return mii_nway_restart(&np->mii_if);
/* ethtool: report current link state from the MII status register. */
1635 static u32 get_link(struct net_device *dev)
1637 struct netdev_private *np = netdev_priv(dev);
1638 return mii_link_ok(&np->mii_if);
/* ethtool: return the driver's NETIF_MSG_* debug bitmap. */
1641 static u32 get_msglevel(struct net_device *dev)
1643 struct netdev_private *np = netdev_priv(dev);
1644 return np->msg_enable;
/* ethtool: set the driver's NETIF_MSG_* debug bitmap. */
1647 static void set_msglevel(struct net_device *dev, u32 val)
1649 struct netdev_private *np = netdev_priv(dev);
1650 np->msg_enable = val;
/* ethtool method table; the 'begin' hook gates every operation on the
   interface being up. */
1653 static struct ethtool_ops ethtool_ops = {
1654 .begin = check_if_running,
1655 .get_drvinfo = get_drvinfo,
1656 .get_settings = get_settings,
1657 .set_settings = set_settings,
1658 .nway_reset = nway_reset,
1659 .get_link = get_link,
1660 .get_msglevel = get_msglevel,
1661 .set_msglevel = set_msglevel,
1662 .get_perm_addr = ethtool_op_get_perm_addr,
/* ioctl entry point.  MII requests go through generic_mii_ioctl();
   SIOCDEVPRIVATE dumps Tx/Rx ring and queue state for debugging. */
1665 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1667 struct netdev_private *np = netdev_priv(dev);
1668 void __iomem *ioaddr = np->base;
1672 if (!netif_running(dev))
1675 spin_lock_irq(&np->lock);
1676 rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
1677 spin_unlock_irq(&np->lock);
1679 case SIOCDEVPRIVATE:
/* Debug dump: one line per Tx descriptor, then queue pointers. */
1680 for (i=0; i<TX_RING_SIZE; i++) {
1681 printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
1682 (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
1683 le32_to_cpu(np->tx_ring[i].next_desc),
1684 le32_to_cpu(np->tx_ring[i].status),
1685 (le32_to_cpu(np->tx_ring[i].status) >> 2)
1687 le32_to_cpu(np->tx_ring[i].frag[0].addr),
1688 le32_to_cpu(np->tx_ring[i].frag[0].length));
1690 printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
1691 ioread32(np->base + TxListPtr),
1692 netif_queue_stopped(dev));
1693 printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
1694 np->cur_tx, np->cur_tx % TX_RING_SIZE,
1695 np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
1696 printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
1697 printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
1698 printk(KERN_DEBUG "TxStatus=%04x\n", ioread16(ioaddr + TxStatus));
/* Stop the interface: disable interrupts and DMA, kill the tasklets
   and timer, free the IRQ, then release all Rx/Tx ring buffers. */
1706 static int netdev_close(struct net_device *dev)
1708 struct netdev_private *np = netdev_priv(dev);
1709 void __iomem *ioaddr = np->base;
1710 struct sk_buff *skb;
1713 netif_stop_queue(dev);
1715 if (netif_msg_ifdown(np)) {
1716 printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
1717 "Rx %4.4x Int %2.2x.\n",
1718 dev->name, ioread8(ioaddr + TxStatus),
1719 ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
1720 printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
1721 dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
1724 /* Disable interrupts by clearing the interrupt mask. */
1725 iowrite16(0x0000, ioaddr + IntrEnable);
1727 /* Stop the chip's Tx and Rx processes. */
1728 iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
1730 /* Wait and kill tasklet */
1731 tasklet_kill(&np->rx_tasklet);
1732 tasklet_kill(&np->tx_tasklet);
/* Optional ring dump -- compiled in for i386 debug builds only,
   per the #endif below. */
1735 if (netif_msg_hw(np)) {
1736 printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n",
1737 (int)(np->tx_ring_dma));
1738 for (i = 0; i < TX_RING_SIZE; i++)
1739 printk(" #%d desc. %4.4x %8.8x %8.8x.\n",
1740 i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
1741 np->tx_ring[i].frag[0].length);
1742 printk("\n"KERN_DEBUG " Rx ring %8.8x:\n",
1743 (int)(np->rx_ring_dma));
1744 for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
1745 printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
1746 i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
1747 np->rx_ring[i].frag[0].length);
1750 #endif /* __i386__ debugging only */
1752 free_irq(dev->irq, dev);
1754 del_timer_sync(&np->timer);
1756 /* Free all the skbuffs in the Rx queue. */
1757 for (i = 0; i < RX_RING_SIZE; i++) {
1758 np->rx_ring[i].status = 0;
1759 np->rx_ring[i].frag[0].addr = 0xBADF00D0; /* An invalid address. */
1760 skb = np->rx_skbuff[i];
1762 pci_unmap_single(np->pci_dev,
1763 np->rx_ring[i].frag[0].addr, np->rx_buf_sz,
1764 PCI_DMA_FROMDEVICE);
1766 np->rx_skbuff[i] = NULL;
/* Free any Tx skbs still pending on the ring. */
1769 for (i = 0; i < TX_RING_SIZE; i++) {
1770 skb = np->tx_skbuff[i];
1772 pci_unmap_single(np->pci_dev,
1773 np->tx_ring[i].frag[0].addr, skb->len,
1776 np->tx_skbuff[i] = NULL;
/* PCI remove: tear down in reverse order of probe -- unregister the
   netdev, free the DMA descriptor rings, unmap the BAR, release the
   PCI regions, and clear the driver data pointer. */
1783 static void __devexit sundance_remove1 (struct pci_dev *pdev)
1785 struct net_device *dev = pci_get_drvdata(pdev);
1788 struct netdev_private *np = netdev_priv(dev);
1790 unregister_netdev(dev);
1791 pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring,
1793 pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
1795 pci_iounmap(pdev, np->base);
1796 pci_release_regions(pdev);
1798 pci_set_drvdata(pdev, NULL);
/* PCI driver glue: probe/remove callbacks and supported-device table. */
1802 static struct pci_driver sundance_driver = {
1804 .id_table = sundance_pci_tbl,
1805 .probe = sundance_probe1,
1806 .remove = __devexit_p(sundance_remove1),
/* Module entry point: register the PCI driver with the core. */
1809 static int __init sundance_init(void)
1811 /* when a module, this is printed whether or not devices are found in probe */
1815 return pci_module_init(&sundance_driver);
/* Module exit point: unregister the PCI driver. */
1818 static void __exit sundance_exit(void)
1820 pci_unregister_driver(&sundance_driver);
1823 module_init(sundance_init);
1824 module_exit(sundance_exit);