1 /* epic100.c: A SMC 83c170 EPIC/100 Fast Ethernet driver for Linux. */
3 Written/copyright 1997-2001 by Donald Becker.
5 This software may be used and distributed according to the terms of
6 the GNU General Public License (GPL), incorporated herein by reference.
7 Drivers based on or derived from this code fall under the GPL and must
8 retain the authorship, copyright and license notice. This file is not
9 a complete program and may only be used when the entire operating
10 system is licensed under the GPL.
12 This driver is for the SMC83c170/175 "EPIC" series, as used on the
13 SMC EtherPower II 9432 PCI adapter, and several CardBus cards.
15 The author may be reached as becker@scyld.com, or C/O
16 Scyld Computing Corporation
17 410 Severn Ave., Suite 210
20 Information and updates available at
21 http://www.scyld.com/network/epic100.html
23 ---------------------------------------------------------------------
25 Linux kernel-specific changes:
28 * Merge becker version 1.09 (4/08/2000)
31 * Major bugfix to 1.09 driver (Francis Romieu)
34 * Merge becker test version 1.09 (5/29/2000)
37 * Fix locking (jgarzik)
38 * Limit 83c175 probe to ethernet-class PCI devices (rgooch)
41 * Merge becker version 1.11
42 * Move pci_enable_device before any PCI BAR len checks
48 * ethtool driver info support (jgarzik)
51 * ethtool media get/set support (jgarzik)
54 * revert MII transceiver init change (jgarzik)
57 * implement ETHTOOL_[GS]SET, _NWAY_RST, _[GS]MSGLVL, _GLINK (jgarzik)
58 * replace some MII-related magic numbers with constants
61 * fix power-up sequence
64 * revert version 1.1.12, power-up sequence "fix"
66 LK1.1.14 (Krzysztof Halasa):
67 * fix spurious bad initializations
68 * pound phy a la SMSC's app note on the subject
71 * fix power up/down for ethtool that broke in 1.11
75 #define DRV_NAME "epic100"
76 #define DRV_VERSION "1.11+LK1.1.14+AC1.1.14"
77 #define DRV_RELDATE "June 2, 2004"
79 /* The user-configurable values.
80 These may be modified when a driver module is loaded.*/
82 static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
83 /* Maximum events (Rx packets, etc.) to handle at each interrupt. */
84 static int max_interrupt_work = 32;
86 /* Used to pass the full-duplex flag, etc. */
87 #define MAX_UNITS 8 /* More are supported, limit only on options */
88 static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
89 static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
91 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
92 Setting to > 1518 effectively disables this feature. */
93 static int rx_copybreak;
95 /* Operational parameters that are set at compile time. */
97 /* Keep the ring sizes a power of two for operational efficiency.
98 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
99 Making the Tx ring too large decreases the effectiveness of channel
100 bonding and packet priority.
101 There are no ill effects from too-large receive rings. */
102 #define TX_RING_SIZE 16
103 #define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
104 #define RX_RING_SIZE 32
105 #define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct epic_tx_desc)
106 #define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct epic_rx_desc)
108 /* Operational parameters that usually are not changed. */
109 /* Time in jiffies before concluding the transmitter is hung. */
110 #define TX_TIMEOUT (2*HZ)
112 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
114 /* Bytes transferred to chip before transmission starts. */
115 /* Initial threshold, increased on underflow, rounded down to 4 byte units. */
116 #define TX_FIFO_THRESH 256
117 #define RX_FIFO_THRESH 1 /* 0-3, 0==32, 64,96, or 3==128 bytes */
119 #if !defined(__OPTIMIZE__)
120 #warning You must compile this file with the correct options!
121 #warning See the last lines of the source file.
122 #error You must compile this driver with "-O".
125 #include <linux/config.h>
126 #include <linux/module.h>
127 #include <linux/kernel.h>
128 #include <linux/string.h>
129 #include <linux/timer.h>
130 #include <linux/errno.h>
131 #include <linux/ioport.h>
132 #include <linux/slab.h>
133 #include <linux/interrupt.h>
134 #include <linux/pci.h>
135 #include <linux/delay.h>
136 #include <linux/netdevice.h>
137 #include <linux/etherdevice.h>
138 #include <linux/skbuff.h>
139 #include <linux/init.h>
140 #include <linux/spinlock.h>
141 #include <linux/ethtool.h>
142 #include <linux/mii.h>
143 #include <linux/crc32.h>
144 #include <asm/bitops.h>
146 #include <asm/uaccess.h>
148 /* These identify the driver base version and may not be removed. */
149 static char version[] __devinitdata =
150 DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>\n";
151 static char version2[] __devinitdata =
152 " http://www.scyld.com/network/epic100.html\n";
153 static char version3[] __devinitdata =
154 " (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";
156 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
157 MODULE_DESCRIPTION("SMC 83c170 EPIC series Ethernet driver");
158 MODULE_LICENSE("GPL");
160 MODULE_PARM(debug, "i");
161 MODULE_PARM(max_interrupt_work, "i");
162 MODULE_PARM(rx_copybreak, "i");
163 MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
164 MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
165 MODULE_PARM_DESC(debug, "EPIC/100 debug level (0-5)");
166 MODULE_PARM_DESC(max_interrupt_work, "EPIC/100 maximum events handled per interrupt");
167 MODULE_PARM_DESC(options, "EPIC/100: Bits 0-3: media type, bit 4: full duplex");
168 MODULE_PARM_DESC(rx_copybreak, "EPIC/100 copy breakpoint for copy-only-tiny-frames");
169 MODULE_PARM_DESC(full_duplex, "EPIC/100 full duplex setting(s) (1)");
174 I. Board Compatibility
176 This device driver is designed for the SMC "EPIC/100", the SMC
177 single-chip Ethernet controllers for PCI. This chip is used on
178 the SMC EtherPower II boards.
180 II. Board-specific settings
182 PCI bus devices are configured by the system at boot time, so no jumpers
183 need to be set on the board. The system BIOS will assign the
184 PCI INTA signal to a (preferably otherwise unused) system IRQ line.
185 Note: Kernel versions earlier than 1.3.73 do not support shared PCI
188 III. Driver operation
194 http://www.smsc.com/main/datasheets/83c171.pdf
195 http://www.smsc.com/main/datasheets/83c175.pdf
196 http://scyld.com/expert/NWay.html
197 http://www.national.com/pf/DP/DP83840A.html
204 enum pci_id_flags_bits {
205 /* Set PCI command register bits before calling probe1(). */
206 PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
207 /* Read and map the single following PCI BAR. */
208 PCI_ADDR0=0<<4, PCI_ADDR1=1<<4, PCI_ADDR2=2<<4, PCI_ADDR3=3<<4,
209 PCI_ADDR_64BITS=0x100, PCI_NO_ACPI_WAKE=0x200, PCI_NO_MIN_LATENCY=0x400,
212 enum chip_capability_flags { MII_PWRDWN=1, TYPE2_INTR=2, NO_MII=4 };
214 #define EPIC_TOTAL_SIZE 0x100
217 #define EPIC_IOTYPE PCI_USES_MASTER|PCI_USES_IO|PCI_ADDR0
219 #define EPIC_IOTYPE PCI_USES_MASTER|PCI_USES_MEM|PCI_ADDR1
229 struct epic_chip_info {
231 enum pci_id_flags_bits pci_flags;
232 int io_size; /* Needed for I/O region check or ioremap(). */
233 int drv_flags; /* Driver use, intended as capability flags. */
237 /* indexed by chip_t */
238 static struct epic_chip_info pci_id_tbl[] = {
239 { "SMSC EPIC/100 83c170",
240 EPIC_IOTYPE, EPIC_TOTAL_SIZE, TYPE2_INTR | NO_MII | MII_PWRDWN },
241 { "SMSC EPIC/100 83c170",
242 EPIC_IOTYPE, EPIC_TOTAL_SIZE, TYPE2_INTR },
243 { "SMSC EPIC/C 83c175",
244 EPIC_IOTYPE, EPIC_TOTAL_SIZE, TYPE2_INTR | MII_PWRDWN },
248 static struct pci_device_id epic_pci_tbl[] = {
249 { 0x10B8, 0x0005, 0x1092, 0x0AB4, 0, 0, SMSC_83C170_0 },
250 { 0x10B8, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMSC_83C170 },
251 { 0x10B8, 0x0006, PCI_ANY_ID, PCI_ANY_ID,
252 PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, SMSC_83C175 },
255 MODULE_DEVICE_TABLE (pci, epic_pci_tbl);
273 /* Offsets to registers, using the (ugh) SMC names. */
274 enum epic_registers {
275 COMMAND=0, INTSTAT=4, INTMASK=8, GENCTL=0x0C, NVCTL=0x10, EECTL=0x14,
277 TEST1=0x1C, CRCCNT=0x20, ALICNT=0x24, MPCNT=0x28, /* Rx error counters. */
278 MIICtrl=0x30, MIIData=0x34, MIICfg=0x38,
279 LAN0=64, /* MAC address. */
280 MC0=80, /* Multicast filter table. */
281 RxCtrl=96, TxCtrl=112, TxSTAT=0x74,
282 PRxCDAR=0x84, RxSTAT=0xA4, EarlyRx=0xB0, PTxCDAR=0xC4, TxThresh=0xDC,
285 /* Interrupt register bits, using my own meaningful names. */
287 TxIdle=0x40000, RxIdle=0x20000, IntrSummary=0x010000,
288 PCIBusErr170=0x7000, PCIBusErr175=0x1000, PhyEvent175=0x8000,
289 RxStarted=0x0800, RxEarlyWarn=0x0400, CntFull=0x0200, TxUnderrun=0x0100,
290 TxEmpty=0x0080, TxDone=0x0020, RxError=0x0010,
291 RxOverflow=0x0008, RxFull=0x0004, RxHeader=0x0002, RxDone=0x0001,
294 StopRx=1, StartRx=2, TxQueued=4, RxQueued=8,
295 StopTxDMA=0x20, StopRxDMA=0x40, RestartTx=0x80,
298 static u16 media2miictl[16] = {
299 0, 0x0C00, 0x0C00, 0x2000, 0x0100, 0x2100, 0, 0,
300 0, 0, 0, 0, 0, 0, 0, 0 };
302 /* The EPIC100 Rx and Tx buffer descriptors. */
304 struct epic_tx_desc {
311 struct epic_rx_desc {
318 enum desc_status_bits {
322 #define PRIV_ALIGN 15 /* Required alignment mask */
323 struct epic_private {
324 struct epic_rx_desc *rx_ring;
325 struct epic_tx_desc *tx_ring;
326 /* The saved address of a sent-in-place packet/buffer, for skfree(). */
327 struct sk_buff* tx_skbuff[TX_RING_SIZE];
328 /* The addresses of receive-in-place skbuffs. */
329 struct sk_buff* rx_skbuff[RX_RING_SIZE];
331 dma_addr_t tx_ring_dma;
332 dma_addr_t rx_ring_dma;
335 spinlock_t lock; /* Group with Tx control cache line. */
336 unsigned int cur_tx, dirty_tx;
338 unsigned int cur_rx, dirty_rx;
339 unsigned int rx_buf_sz; /* Based on MTU+slack. */
341 struct pci_dev *pci_dev; /* PCI bus location. */
342 int chip_id, chip_flags;
344 struct net_device_stats stats;
345 struct timer_list timer; /* Media selection timer. */
347 unsigned char mc_filter[8];
348 signed char phys[4]; /* MII device addresses. */
349 u16 advertising; /* NWay media advertisement */
351 struct mii_if_info mii;
352 unsigned int tx_full:1; /* The Tx queue is full. */
353 unsigned int default_port:4; /* Last dev->if_port value. */
356 static int epic_open(struct net_device *dev);
357 static int read_eeprom(long ioaddr, int location);
358 static int mdio_read(struct net_device *dev, int phy_id, int location);
359 static void mdio_write(struct net_device *dev, int phy_id, int loc, int val);
360 static void epic_restart(struct net_device *dev);
361 static void epic_timer(unsigned long data);
362 static void epic_tx_timeout(struct net_device *dev);
363 static void epic_init_ring(struct net_device *dev);
364 static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev);
365 static int epic_rx(struct net_device *dev);
366 static irqreturn_t epic_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
367 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
368 static struct ethtool_ops netdev_ethtool_ops;
369 static int epic_close(struct net_device *dev);
370 static struct net_device_stats *epic_get_stats(struct net_device *dev);
371 static void set_rx_mode(struct net_device *dev);
375 static int __devinit epic_init_one (struct pci_dev *pdev,
376 const struct pci_device_id *ent)
378 static int card_idx = -1;
380 int chip_idx = (int) ent->driver_data;
382 struct net_device *dev;
383 struct epic_private *ep;
384 int i, option = 0, duplex = 0;
388 /* when built into the kernel, we only print version if device is found */
390 static int printed_version;
391 if (!printed_version++)
392 printk (KERN_INFO "%s" KERN_INFO "%s" KERN_INFO "%s",
393 version, version2, version3);
398 i = pci_enable_device(pdev);
403 if (pci_resource_len(pdev, 0) < pci_id_tbl[chip_idx].io_size) {
404 printk (KERN_ERR "card %d: no PCI region space\n", card_idx);
408 pci_set_master(pdev);
410 dev = alloc_etherdev(sizeof (*ep));
412 printk (KERN_ERR "card %d: no memory for eth device\n", card_idx);
415 SET_MODULE_OWNER(dev);
416 SET_NETDEV_DEV(dev, &pdev->dev);
418 if (pci_request_regions(pdev, DRV_NAME))
419 goto err_out_free_netdev;
422 ioaddr = pci_resource_start (pdev, 0);
424 ioaddr = pci_resource_start (pdev, 1);
425 ioaddr = (long) ioremap (ioaddr, pci_resource_len (pdev, 1));
427 printk (KERN_ERR DRV_NAME " %d: ioremap failed\n", card_idx);
428 goto err_out_free_res;
432 pci_set_drvdata(pdev, dev);
435 ep->mii.mdio_read = mdio_read;
436 ep->mii.mdio_write = mdio_write;
437 ep->mii.phy_id_mask = 0x1f;
438 ep->mii.reg_num_mask = 0x1f;
440 ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
442 goto err_out_iounmap;
443 ep->tx_ring = (struct epic_tx_desc *)ring_space;
444 ep->tx_ring_dma = ring_dma;
446 ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
448 goto err_out_unmap_tx;
449 ep->rx_ring = (struct epic_rx_desc *)ring_space;
450 ep->rx_ring_dma = ring_dma;
452 if (dev->mem_start) {
453 option = dev->mem_start;
454 duplex = (dev->mem_start & 16) ? 1 : 0;
455 } else if (card_idx >= 0 && card_idx < MAX_UNITS) {
456 if (options[card_idx] >= 0)
457 option = options[card_idx];
458 if (full_duplex[card_idx] >= 0)
459 duplex = full_duplex[card_idx];
462 dev->base_addr = ioaddr;
465 spin_lock_init (&ep->lock);
467 /* Bring the chip out of low-power mode. */
468 outl(0x4200, ioaddr + GENCTL);
469 /* Magic?! If we don't set this bit the MII interface won't work. */
470 /* This magic is documented in SMSC app note 7.15 */
471 for (i = 16; i > 0; i--)
472 outl(0x0008, ioaddr + TEST1);
474 /* Turn on the MII transceiver. */
475 outl(0x12, ioaddr + MIICfg);
477 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
478 outl(0x0200, ioaddr + GENCTL);
480 /* Note: the '175 does not have a serial EEPROM. */
481 for (i = 0; i < 3; i++)
482 ((u16 *)dev->dev_addr)[i] = le16_to_cpu(inw(ioaddr + LAN0 + i*4));
485 printk(KERN_DEBUG DRV_NAME "(%s): EEPROM contents\n",
487 for (i = 0; i < 64; i++)
488 printk(" %4.4x%s", read_eeprom(ioaddr, i),
489 i % 16 == 15 ? "\n" : "");
493 ep->chip_id = chip_idx;
494 ep->chip_flags = pci_id_tbl[chip_idx].drv_flags;
496 /* Find the connected MII xcvrs.
497 Doing this in open() would allow detecting external xcvrs later, but
498 takes much time and no cards have external MII. */
500 int phy, phy_idx = 0;
501 for (phy = 1; phy < 32 && phy_idx < sizeof(ep->phys); phy++) {
502 int mii_status = mdio_read(dev, phy, MII_BMSR);
503 if (mii_status != 0xffff && mii_status != 0x0000) {
504 ep->phys[phy_idx++] = phy;
505 printk(KERN_INFO DRV_NAME "(%s): MII transceiver #%d control "
506 "%4.4x status %4.4x.\n",
507 pci_name(pdev), phy, mdio_read(dev, phy, 0), mii_status);
510 ep->mii_phy_cnt = phy_idx;
513 ep->mii.advertising = mdio_read(dev, phy, MII_ADVERTISE);
514 printk(KERN_INFO DRV_NAME "(%s): Autonegotiation advertising %4.4x link "
516 pci_name(pdev), ep->mii.advertising, mdio_read(dev, phy, 5));
517 } else if ( ! (ep->chip_flags & NO_MII)) {
518 printk(KERN_WARNING DRV_NAME "(%s): ***WARNING***: No MII transceiver found!\n",
520 /* Use the known PHY address of the EPII. */
523 ep->mii.phy_id = ep->phys[0];
526 /* Turn off the MII xcvr (175 only!), leave the chip in low-power mode. */
527 if (ep->chip_flags & MII_PWRDWN)
528 outl(inl(ioaddr + NVCTL) & ~0x483C, ioaddr + NVCTL);
529 outl(0x0008, ioaddr + GENCTL);
531 /* The lower four bits are the media type. */
533 ep->mii.force_media = ep->mii.full_duplex = 1;
534 printk(KERN_INFO DRV_NAME "(%s): Forced full duplex operation requested.\n",
537 dev->if_port = ep->default_port = option;
539 /* The Epic-specific entries in the device structure. */
540 dev->open = &epic_open;
541 dev->hard_start_xmit = &epic_start_xmit;
542 dev->stop = &epic_close;
543 dev->get_stats = &epic_get_stats;
544 dev->set_multicast_list = &set_rx_mode;
545 dev->do_ioctl = &netdev_ioctl;
546 dev->ethtool_ops = &netdev_ethtool_ops;
547 dev->watchdog_timeo = TX_TIMEOUT;
548 dev->tx_timeout = &epic_tx_timeout;
550 i = register_netdev(dev);
552 goto err_out_unmap_tx;
554 printk(KERN_INFO "%s: %s at %#lx, IRQ %d, ",
555 dev->name, pci_id_tbl[chip_idx].name, ioaddr, dev->irq);
556 for (i = 0; i < 5; i++)
557 printk("%2.2x:", dev->dev_addr[i]);
558 printk("%2.2x.\n", dev->dev_addr[i]);
563 pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
569 pci_release_regions(pdev);
575 /* Serial EEPROM section. */
577 /* EEPROM_Ctrl bits. */
578 #define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
579 #define EE_CS 0x02 /* EEPROM chip select. */
580 #define EE_DATA_WRITE 0x08 /* EEPROM chip data in. */
581 #define EE_WRITE_0 0x01
582 #define EE_WRITE_1 0x09
583 #define EE_DATA_READ 0x10 /* EEPROM chip data out. */
584 #define EE_ENB (0x0001 | EE_CS)
586 /* Delay between EEPROM clock transitions.
587 This serves to flush the operation to the PCI bus.
590 #define eeprom_delay() inl(ee_addr)
592 /* The EEPROM commands include the alway-set leading bit. */
593 #define EE_WRITE_CMD (5 << 6)
594 #define EE_READ64_CMD (6 << 6)
595 #define EE_READ256_CMD (6 << 8)
596 #define EE_ERASE_CMD (7 << 6)
598 static int __devinit read_eeprom(long ioaddr, int location)
602 long ee_addr = ioaddr + EECTL;
603 int read_cmd = location |
604 (inl(ee_addr) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD);
606 outl(EE_ENB & ~EE_CS, ee_addr);
607 outl(EE_ENB, ee_addr);
609 /* Shift the read command bits out. */
610 for (i = 12; i >= 0; i--) {
611 short dataval = (read_cmd & (1 << i)) ? EE_WRITE_1 : EE_WRITE_0;
612 outl(EE_ENB | dataval, ee_addr);
614 outl(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
617 outl(EE_ENB, ee_addr);
619 for (i = 16; i > 0; i--) {
620 outl(EE_ENB | EE_SHIFT_CLK, ee_addr);
622 retval = (retval << 1) | ((inl(ee_addr) & EE_DATA_READ) ? 1 : 0);
623 outl(EE_ENB, ee_addr);
627 /* Terminate the EEPROM access. */
628 outl(EE_ENB & ~EE_CS, ee_addr);
633 #define MII_WRITEOP 2
634 static int mdio_read(struct net_device *dev, int phy_id, int location)
636 long ioaddr = dev->base_addr;
637 int read_cmd = (phy_id << 9) | (location << 4) | MII_READOP;
640 outl(read_cmd, ioaddr + MIICtrl);
641 /* Typical operation takes 25 loops. */
642 for (i = 400; i > 0; i--) {
644 if ((inl(ioaddr + MIICtrl) & MII_READOP) == 0) {
645 /* Work around read failure bug. */
646 if (phy_id == 1 && location < 6
647 && inw(ioaddr + MIIData) == 0xffff) {
648 outl(read_cmd, ioaddr + MIICtrl);
651 return inw(ioaddr + MIIData);
657 static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
659 long ioaddr = dev->base_addr;
662 outw(value, ioaddr + MIIData);
663 outl((phy_id << 9) | (loc << 4) | MII_WRITEOP, ioaddr + MIICtrl);
664 for (i = 10000; i > 0; i--) {
666 if ((inl(ioaddr + MIICtrl) & MII_WRITEOP) == 0)
673 static int epic_open(struct net_device *dev)
675 struct epic_private *ep = dev->priv;
676 long ioaddr = dev->base_addr;
680 /* Soft reset the chip. */
681 outl(0x4001, ioaddr + GENCTL);
683 if ((retval = request_irq(dev->irq, &epic_interrupt, SA_SHIRQ, dev->name, dev)))
688 outl(0x4000, ioaddr + GENCTL);
689 /* This magic is documented in SMSC app note 7.15 */
690 for (i = 16; i > 0; i--)
691 outl(0x0008, ioaddr + TEST1);
693 /* Pull the chip out of low-power mode, enable interrupts, and set for
694 PCI read multiple. The MIIcfg setting and strange write order are
695 required by the details of which bits are reset and the transceiver
696 wiring on the Ositech CardBus card.
699 outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg);
701 if (ep->chip_flags & MII_PWRDWN)
702 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
704 #if defined(__powerpc__) || defined(__sparc__) /* Big endian */
705 outl(0x4432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
706 inl(ioaddr + GENCTL);
707 outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
709 outl(0x4412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
710 inl(ioaddr + GENCTL);
711 outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
714 udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */
716 for (i = 0; i < 3; i++)
717 outl(cpu_to_le16(((u16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);
719 ep->tx_threshold = TX_FIFO_THRESH;
720 outl(ep->tx_threshold, ioaddr + TxThresh);
722 if (media2miictl[dev->if_port & 15]) {
724 mdio_write(dev, ep->phys[0], MII_BMCR, media2miictl[dev->if_port&15]);
725 if (dev->if_port == 1) {
727 printk(KERN_INFO "%s: Using the 10base2 transceiver, MII "
729 dev->name, mdio_read(dev, ep->phys[0], MII_BMSR));
732 int mii_lpa = mdio_read(dev, ep->phys[0], MII_LPA);
733 if (mii_lpa != 0xffff) {
734 if ((mii_lpa & LPA_100FULL) || (mii_lpa & 0x01C0) == LPA_10FULL)
735 ep->mii.full_duplex = 1;
736 else if (! (mii_lpa & LPA_LPACK))
737 mdio_write(dev, ep->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
739 printk(KERN_INFO "%s: Setting %s-duplex based on MII xcvr %d"
740 " register read of %4.4x.\n", dev->name,
741 ep->mii.full_duplex ? "full" : "half",
742 ep->phys[0], mii_lpa);
746 outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
747 outl(ep->rx_ring_dma, ioaddr + PRxCDAR);
748 outl(ep->tx_ring_dma, ioaddr + PTxCDAR);
750 /* Start the chip's Rx process. */
752 outl(StartRx | RxQueued, ioaddr + COMMAND);
754 netif_start_queue(dev);
756 /* Enable interrupts by setting the interrupt mask. */
757 outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
758 | CntFull | TxUnderrun | TxDone | TxEmpty
759 | RxError | RxOverflow | RxFull | RxHeader | RxDone,
763 printk(KERN_DEBUG "%s: epic_open() ioaddr %lx IRQ %d status %4.4x "
765 dev->name, ioaddr, dev->irq, (int)inl(ioaddr + GENCTL),
766 ep->mii.full_duplex ? "full" : "half");
768 /* Set the timer to switch to check for link beat and perhaps switch
769 to an alternate media type. */
770 init_timer(&ep->timer);
771 ep->timer.expires = jiffies + 3*HZ;
772 ep->timer.data = (unsigned long)dev;
773 ep->timer.function = &epic_timer; /* timer handler */
774 add_timer(&ep->timer);
779 /* Reset the chip to recover from a PCI transaction error.
780 This may occur at interrupt time. */
781 static void epic_pause(struct net_device *dev)
783 long ioaddr = dev->base_addr;
784 struct epic_private *ep = dev->priv;
786 netif_stop_queue (dev);
788 /* Disable interrupts by clearing the interrupt mask. */
789 outl(0x00000000, ioaddr + INTMASK);
790 /* Stop the chip's Tx and Rx DMA processes. */
791 outw(StopRx | StopTxDMA | StopRxDMA, ioaddr + COMMAND);
793 /* Update the error counts. */
794 if (inw(ioaddr + COMMAND) != 0xffff) {
795 ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
796 ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
797 ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
800 /* Remove the packets on the Rx queue. */
804 static void epic_restart(struct net_device *dev)
806 long ioaddr = dev->base_addr;
807 struct epic_private *ep = dev->priv;
810 /* Soft reset the chip. */
811 outl(0x4001, ioaddr + GENCTL);
813 printk(KERN_DEBUG "%s: Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",
814 dev->name, ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
817 /* This magic is documented in SMSC app note 7.15 */
818 for (i = 16; i > 0; i--)
819 outl(0x0008, ioaddr + TEST1);
821 #if defined(__powerpc__) || defined(__sparc__) /* Big endian */
822 outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
824 outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
826 outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg);
827 if (ep->chip_flags & MII_PWRDWN)
828 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
830 for (i = 0; i < 3; i++)
831 outl(cpu_to_le16(((u16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);
833 ep->tx_threshold = TX_FIFO_THRESH;
834 outl(ep->tx_threshold, ioaddr + TxThresh);
835 outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
836 outl(ep->rx_ring_dma + (ep->cur_rx%RX_RING_SIZE)*
837 sizeof(struct epic_rx_desc), ioaddr + PRxCDAR);
838 outl(ep->tx_ring_dma + (ep->dirty_tx%TX_RING_SIZE)*
839 sizeof(struct epic_tx_desc), ioaddr + PTxCDAR);
841 /* Start the chip's Rx process. */
843 outl(StartRx | RxQueued, ioaddr + COMMAND);
845 /* Enable interrupts by setting the interrupt mask. */
846 outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
847 | CntFull | TxUnderrun | TxDone | TxEmpty
848 | RxError | RxOverflow | RxFull | RxHeader | RxDone,
850 printk(KERN_DEBUG "%s: epic_restart() done, cmd status %4.4x, ctl %4.4x"
851 " interrupt %4.4x.\n",
852 dev->name, (int)inl(ioaddr + COMMAND), (int)inl(ioaddr + GENCTL),
853 (int)inl(ioaddr + INTSTAT));
857 static void check_media(struct net_device *dev)
859 struct epic_private *ep = dev->priv;
860 long ioaddr = dev->base_addr;
861 int mii_lpa = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], MII_LPA) : 0;
862 int negotiated = mii_lpa & ep->mii.advertising;
863 int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
865 if (ep->mii.force_media)
867 if (mii_lpa == 0xffff) /* Bogus read */
869 if (ep->mii.full_duplex != duplex) {
870 ep->mii.full_duplex = duplex;
871 printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
872 " partner capability of %4.4x.\n", dev->name,
873 ep->mii.full_duplex ? "full" : "half", ep->phys[0], mii_lpa);
874 outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
878 static void epic_timer(unsigned long data)
880 struct net_device *dev = (struct net_device *)data;
881 struct epic_private *ep = dev->priv;
882 long ioaddr = dev->base_addr;
883 int next_tick = 5*HZ;
886 printk(KERN_DEBUG "%s: Media monitor tick, Tx status %8.8x.\n",
887 dev->name, (int)inl(ioaddr + TxSTAT));
888 printk(KERN_DEBUG "%s: Other registers are IntMask %4.4x "
889 "IntStatus %4.4x RxStatus %4.4x.\n",
890 dev->name, (int)inl(ioaddr + INTMASK),
891 (int)inl(ioaddr + INTSTAT), (int)inl(ioaddr + RxSTAT));
896 ep->timer.expires = jiffies + next_tick;
897 add_timer(&ep->timer);
900 static void epic_tx_timeout(struct net_device *dev)
902 struct epic_private *ep = dev->priv;
903 long ioaddr = dev->base_addr;
906 printk(KERN_WARNING "%s: Transmit timeout using MII device, "
907 "Tx status %4.4x.\n",
908 dev->name, (int)inw(ioaddr + TxSTAT));
910 printk(KERN_DEBUG "%s: Tx indices: dirty_tx %d, cur_tx %d.\n",
911 dev->name, ep->dirty_tx, ep->cur_tx);
914 if (inw(ioaddr + TxSTAT) & 0x10) { /* Tx FIFO underflow. */
915 ep->stats.tx_fifo_errors++;
916 outl(RestartTx, ioaddr + COMMAND);
919 outl(TxQueued, dev->base_addr + COMMAND);
922 dev->trans_start = jiffies;
923 ep->stats.tx_errors++;
925 netif_wake_queue(dev);
928 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
929 static void epic_init_ring(struct net_device *dev)
931 struct epic_private *ep = dev->priv;
935 ep->lock = (spinlock_t) SPIN_LOCK_UNLOCKED;
936 ep->dirty_tx = ep->cur_tx = 0;
937 ep->cur_rx = ep->dirty_rx = 0;
938 ep->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
940 /* Initialize all Rx descriptors. */
941 for (i = 0; i < RX_RING_SIZE; i++) {
942 ep->rx_ring[i].rxstatus = 0;
943 ep->rx_ring[i].buflength = cpu_to_le32(ep->rx_buf_sz);
944 ep->rx_ring[i].next = ep->rx_ring_dma +
945 (i+1)*sizeof(struct epic_rx_desc);
946 ep->rx_skbuff[i] = 0;
948 /* Mark the last entry as wrapping the ring. */
949 ep->rx_ring[i-1].next = ep->rx_ring_dma;
951 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
952 for (i = 0; i < RX_RING_SIZE; i++) {
953 struct sk_buff *skb = dev_alloc_skb(ep->rx_buf_sz);
954 ep->rx_skbuff[i] = skb;
957 skb->dev = dev; /* Mark as being used by this device. */
958 skb_reserve(skb, 2); /* 16 byte align the IP header. */
959 ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev,
960 skb->tail, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
961 ep->rx_ring[i].rxstatus = cpu_to_le32(DescOwn);
963 ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
965 /* The Tx buffer descriptor is filled in as needed, but we
966 do need to clear the ownership bit. */
967 for (i = 0; i < TX_RING_SIZE; i++) {
968 ep->tx_skbuff[i] = 0;
969 ep->tx_ring[i].txstatus = 0x0000;
970 ep->tx_ring[i].next = ep->tx_ring_dma +
971 (i+1)*sizeof(struct epic_tx_desc);
973 ep->tx_ring[i-1].next = ep->tx_ring_dma;
977 static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
979 struct epic_private *ep = dev->priv;
980 int entry, free_count;
984 if (skb->len < ETH_ZLEN) {
985 skb = skb_padto(skb, ETH_ZLEN);
990 /* Caution: the write order is important here, set the field with the
991 "ownership" bit last. */
993 /* Calculate the next Tx descriptor entry. */
994 spin_lock_irqsave(&ep->lock, flags);
995 free_count = ep->cur_tx - ep->dirty_tx;
996 entry = ep->cur_tx % TX_RING_SIZE;
998 ep->tx_skbuff[entry] = skb;
999 ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data,
1000 skb->len, PCI_DMA_TODEVICE);
1001 if (free_count < TX_QUEUE_LEN/2) {/* Typical path */
1002 ctrl_word = cpu_to_le32(0x100000); /* No interrupt */
1003 } else if (free_count == TX_QUEUE_LEN/2) {
1004 ctrl_word = cpu_to_le32(0x140000); /* Tx-done intr. */
1005 } else if (free_count < TX_QUEUE_LEN - 1) {
1006 ctrl_word = cpu_to_le32(0x100000); /* No Tx-done intr. */
1008 /* Leave room for an additional entry. */
1009 ctrl_word = cpu_to_le32(0x140000); /* Tx-done intr. */
1012 ep->tx_ring[entry].buflength = ctrl_word | cpu_to_le32(skb->len);
1013 ep->tx_ring[entry].txstatus =
1014 ((skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN) << 16)
1015 | cpu_to_le32(DescOwn);
1019 netif_stop_queue(dev);
1021 spin_unlock_irqrestore(&ep->lock, flags);
1022 /* Trigger an immediate transmit demand. */
1023 outl(TxQueued, dev->base_addr + COMMAND);
1025 dev->trans_start = jiffies;
1027 printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, "
1028 "flag %2.2x Tx status %8.8x.\n",
1029 dev->name, (int)skb->len, entry, ctrl_word,
1030 (int)inl(dev->base_addr + TxSTAT));
1035 /* The interrupt handler does all of the Rx thread work and cleans up
1036 after the Tx thread. */
1037 static irqreturn_t epic_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
1039 struct net_device *dev = dev_instance;
1040 struct epic_private *ep = dev->priv;
1041 long ioaddr = dev->base_addr;
/*
 * Interrupt service body: latch and acknowledge INTSTAT, drain the Rx
 * ring, reap completed Tx descriptors under ep->lock, then handle the
 * rarer error/status events, with a bogus-count guard against storms.
 */
1042 int status, boguscnt = max_interrupt_work;
1043 unsigned int handled = 0;
/* Read the pending sources and acknowledge them immediately so the
   chip can post new events while these are serviced. */
1046 status = inl(ioaddr + INTSTAT);
1047 /* Acknowledge all of the current interrupt sources ASAP. */
1048 outl(status & 0x00007fff, ioaddr + INTSTAT);
1051 printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new "
1052 "intstat=%#8.8x.\n",
1053 dev->name, status, (int)inl(ioaddr + INTSTAT));
/* None of our summary bits set: probably a shared-IRQ neighbour. */
1055 if ((status & IntrSummary) == 0)
/* Any Rx-related source: drain completed receive descriptors. */
1059 if (status & (RxDone | RxStarted | RxEarlyWarn | RxOverflow))
/* Tx completion: reclaim descriptors from dirty_tx up to cur_tx. */
1062 if (status & (TxEmpty | TxDone)) {
1063 unsigned int dirty_tx, cur_tx;
1065 /* Note: if this lock becomes a problem we can narrow the locked
1066 region at the cost of occasionally grabbing the lock more
1068 spin_lock(&ep->lock);
1069 cur_tx = ep->cur_tx;
1070 dirty_tx = ep->dirty_tx;
1071 for (; cur_tx - dirty_tx > 0; dirty_tx++) {
1072 struct sk_buff *skb;
1073 int entry = dirty_tx % TX_RING_SIZE;
1074 int txstatus = le32_to_cpu(ep->tx_ring[entry].txstatus);
/* Still owned by the chip: stop reclaiming at this entry. */
1076 if (txstatus & DescOwn)
1077 break; /* It still hasn't been Txed */
/* Bit 0x0001 clear means the frame did not transmit cleanly; the
   bit tests below classify the failure into the stats counters. */
1079 if ( ! (txstatus & 0x0001)) {
1080 /* There was an major error, log it. */
1081 #ifndef final_version
1083 printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
1084 dev->name, txstatus);
1086 ep->stats.tx_errors++;
1087 if (txstatus & 0x1050) ep->stats.tx_aborted_errors++;
1088 if (txstatus & 0x0008) ep->stats.tx_carrier_errors++;
1089 if (txstatus & 0x0040) ep->stats.tx_window_errors++;
1090 if (txstatus & 0x0010) ep->stats.tx_fifo_errors++;
/* Successful transmit: collision count is in bits 8-11. */
1092 ep->stats.collisions += (txstatus >> 8) & 15;
1093 ep->stats.tx_packets++;
1094 ep->stats.tx_bytes += ep->tx_skbuff[entry]->len;
1097 /* Free the original skb. */
1098 skb = ep->tx_skbuff[entry];
1099 pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
1100 skb->len, PCI_DMA_TODEVICE);
1101 dev_kfree_skb_irq(skb);
1102 ep->tx_skbuff[entry] = 0;
1105 #ifndef final_version
/* Sanity check: dirty_tx must never trail cur_tx by more than a
   full ring; resynchronize if it somehow does. */
1106 if (cur_tx - dirty_tx > TX_RING_SIZE) {
1107 printk(KERN_WARNING "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
1108 dev->name, dirty_tx, cur_tx, ep->tx_full);
1109 dirty_tx += TX_RING_SIZE;
1112 ep->dirty_tx = dirty_tx;
/* Wake the queue once enough slots (< TX_QUEUE_LEN - 4 used) free up. */
1114 && cur_tx - dirty_tx < TX_QUEUE_LEN - 4) {
1115 /* The ring is no longer full, allow new TX entries. */
1117 spin_unlock(&ep->lock);
1118 netif_wake_queue(dev);
1120 spin_unlock(&ep->lock);
1123 /* Check uncommon events all at once. */
1124 if (status & (CntFull | TxUnderrun | RxOverflow | RxFull |
1125 PCIBusErr170 | PCIBusErr175)) {
1126 if (status == 0xffffffff) /* Chip failed or removed (CardBus). */
1128 /* Always update the error counts to avoid overhead later. */
1129 ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
1130 ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
1131 ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
1133 if (status & TxUnderrun) { /* Tx FIFO underflow. */
1134 ep->stats.tx_fifo_errors++;
/* Raise the Tx start threshold so more of the frame is buffered
   before transmission begins, then restart the transmitter. */
1135 outl(ep->tx_threshold += 128, ioaddr + TxThresh);
1136 /* Restart the transmit process. */
1137 outl(RestartTx, ioaddr + COMMAND);
1139 if (status & RxOverflow) { /* Missed a Rx frame. */
1140 ep->stats.rx_errors++;
/* Re-queue the receiver after overflow/ring-full conditions. */
1142 if (status & (RxOverflow | RxFull))
1143 outw(RxQueued, ioaddr + COMMAND);
1144 if (status & PCIBusErr170) {
1145 printk(KERN_ERR "%s: PCI Bus Error! EPIC status %4.4x.\n",
1150 /* Clear all error sources. */
1151 outl(status & 0x7f18, ioaddr + INTSTAT);
/* Storm guard: after max_interrupt_work events, ack everything and
   bail out so the interrupt line is released. */
1153 if (--boguscnt < 0) {
1154 printk(KERN_ERR "%s: Too much work at interrupt, "
1155 "IntrStatus=0x%8.8x.\n",
1157 /* Clear all interrupt sources. */
1158 outl(0x0001ffff, ioaddr + INTSTAT);
1164 printk(KERN_DEBUG "%s: exiting interrupt, intr_status=%#4.4x.\n",
1167 return IRQ_RETVAL(handled);
/*
 * epic_rx(): drain completed descriptors from the Rx ring, deliver the
 * packets to the network stack, then refill the ring with fresh skbs.
 */
1170 static int epic_rx(struct net_device *dev)
1172 struct epic_private *ep = dev->priv;
1173 int entry = ep->cur_rx % RX_RING_SIZE;
/* Never process more entries than the ring currently holds. */
1174 int rx_work_limit = ep->dirty_rx + RX_RING_SIZE - ep->cur_rx;
1178 printk(KERN_DEBUG " In epic_rx(), entry %d %8.8x.\n", entry,
1179 ep->rx_ring[entry].rxstatus);
1180 /* If we own the next entry, it's a new packet. Send it up. */
1181 while ((ep->rx_ring[entry].rxstatus & cpu_to_le32(DescOwn)) == 0) {
1182 int status = le32_to_cpu(ep->rx_ring[entry].rxstatus);
1185 printk(KERN_DEBUG " epic_rx() status was %8.8x.\n", status);
1186 if (--rx_work_limit < 0)
/* 0x2000: frame spanned multiple buffers; 0x0006: receive errors
   already counted by the hardware. */
1188 if (status & 0x2006) {
1190 printk(KERN_DEBUG "%s: epic_rx() error status was %8.8x.\n",
1192 if (status & 0x2000) {
1193 printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
1194 "multiple buffers, status %4.4x!\n", dev->name, status);
1195 ep->stats.rx_length_errors++;
1196 } else if (status & 0x0006)
1197 /* Rx Frame errors are counted in hardware. */
1198 ep->stats.rx_errors++;
1200 /* Malloc up new buffer, compatible with net-2e. */
1201 /* Omit the four octet CRC from the length. */
/* The high 16 bits of the status word hold the received byte count. */
1202 short pkt_len = (status >> 16) - 4;
1203 struct sk_buff *skb;
1205 if (pkt_len > PKT_BUF_SZ - 4) {
1206 printk(KERN_ERR "%s: Oversized Ethernet frame, status %x "
1208 dev->name, status, pkt_len);
1211 /* Check if the packet is long enough to accept without copying
1212 to a minimally-sized skbuff. */
/* Short frames (< rx_copybreak) are copied into a small fresh skb so
   the large ring buffer stays mapped and can be reused in place. */
1213 if (pkt_len < rx_copybreak
1214 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1216 skb_reserve(skb, 2); /* 16 byte align the IP header */
1217 pci_dma_sync_single_for_cpu(ep->pci_dev,
1218 ep->rx_ring[entry].bufaddr,
1220 PCI_DMA_FROMDEVICE);
1221 #if 1 /* HAS_IP_COPYSUM */
1222 eth_copy_and_sum(skb, ep->rx_skbuff[entry]->tail, pkt_len, 0);
1223 skb_put(skb, pkt_len);
1225 memcpy(skb_put(skb, pkt_len), ep->rx_skbuff[entry]->tail,
1228 pci_dma_sync_single_for_device(ep->pci_dev,
1229 ep->rx_ring[entry].bufaddr,
1231 PCI_DMA_FROMDEVICE);
/* Large frame: hand the ring buffer itself up the stack and unmap
   it; the refill loop below allocates a replacement. */
1233 pci_unmap_single(ep->pci_dev,
1234 ep->rx_ring[entry].bufaddr,
1235 ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
1236 skb_put(skb = ep->rx_skbuff[entry], pkt_len);
1237 ep->rx_skbuff[entry] = NULL;
1239 skb->protocol = eth_type_trans(skb, dev);
1241 dev->last_rx = jiffies;
1242 ep->stats.rx_packets++;
1243 ep->stats.rx_bytes += pkt_len;
1246 entry = (++ep->cur_rx) % RX_RING_SIZE;
1249 /* Refill the Rx ring buffers. */
1250 for (; ep->cur_rx - ep->dirty_rx > 0; ep->dirty_rx++) {
1251 entry = ep->dirty_rx % RX_RING_SIZE;
1252 if (ep->rx_skbuff[entry] == NULL) {
1253 struct sk_buff *skb;
1254 skb = ep->rx_skbuff[entry] = dev_alloc_skb(ep->rx_buf_sz);
1257 skb->dev = dev; /* Mark as being used by this device. */
1258 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1259 ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev,
1260 skb->tail, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
/* Hand ownership back to the chip only after bufaddr is valid. */
1263 ep->rx_ring[entry].rxstatus = cpu_to_le32(DescOwn);
/*
 * epic_close(): take the interface down — stop the Tx queue, kill the
 * timer, release the IRQ, free both descriptor rings' buffers, and
 * leave the chip in low-power mode.
 */
1268 static int epic_close(struct net_device *dev)
1270 long ioaddr = dev->base_addr;
1271 struct epic_private *ep = dev->priv;
1272 struct sk_buff *skb;
1275 netif_stop_queue(dev);
1278 printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
1279 dev->name, (int)inl(ioaddr + INTSTAT));
/* del_timer_sync() also waits out a timer running on another CPU. */
1281 del_timer_sync(&ep->timer);
1283 free_irq(dev->irq, dev);
1285 /* Free all the skbuffs in the Rx queue. */
1286 for (i = 0; i < RX_RING_SIZE; i++) {
1287 skb = ep->rx_skbuff[i];
1288 ep->rx_skbuff[i] = 0;
/* Clear the descriptor before unmapping so the chip can never see a
   stale owned entry pointing at freed memory. */
1289 ep->rx_ring[i].rxstatus = 0; /* Not owned by Epic chip. */
1290 ep->rx_ring[i].buflength = 0;
1292 pci_unmap_single(ep->pci_dev, ep->rx_ring[i].bufaddr,
1293 ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
1296 ep->rx_ring[i].bufaddr = 0xBADF00D0; /* An invalid address. */
/* Unmap and free any Tx skbs still queued at close time. */
1298 for (i = 0; i < TX_RING_SIZE; i++) {
1299 skb = ep->tx_skbuff[i];
1300 ep->tx_skbuff[i] = 0;
1303 pci_unmap_single(ep->pci_dev, ep->tx_ring[i].bufaddr,
1304 skb->len, PCI_DMA_TODEVICE);
1308 /* Green! Leave the chip in low-power mode. */
1309 outl(0x0008, ioaddr + GENCTL);
1314 static struct net_device_stats *epic_get_stats(struct net_device *dev)
1316 struct epic_private *ep = dev->priv;
1317 long ioaddr = dev->base_addr;
1319 if (netif_running(dev)) {
1320 /* Update the error counts. */
1321 ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
1322 ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
1323 ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
1329 /* Set or clear the multicast filter for this adaptor.
1330 Note that we only use exclusion around actually queueing the
1331 new frame, not around filling ep->setup_frame. This is non-deterministic
1332 when re-entered but still correct. */
1334 static void set_rx_mode(struct net_device *dev)
1336 long ioaddr = dev->base_addr;
1337 struct epic_private *ep = dev->priv;
1338 unsigned char mc_filter[8]; /* Multicast hash filter */
1341 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1342 outl(0x002C, ioaddr + RxCtrl);
1343 /* Unconditionally log net taps. */
1344 printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
1345 memset(mc_filter, 0xff, sizeof(mc_filter));
1346 } else if ((dev->mc_count > 0) || (dev->flags & IFF_ALLMULTI)) {
1347 /* There is apparently a chip bug, so the multicast filter
1348 is never enabled. */
1349 /* Too many to filter perfectly -- accept all multicasts. */
1350 memset(mc_filter, 0xff, sizeof(mc_filter));
1351 outl(0x000C, ioaddr + RxCtrl);
1352 } else if (dev->mc_count == 0) {
1353 outl(0x0004, ioaddr + RxCtrl);
1355 } else { /* Never executed, for now. */
1356 struct dev_mc_list *mclist;
1358 memset(mc_filter, 0, sizeof(mc_filter));
1359 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1360 i++, mclist = mclist->next) {
1361 unsigned int bit_nr =
1362 ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f;
1363 mc_filter[bit_nr >> 3] |= (1 << bit_nr);
1366 /* ToDo: perhaps we need to stop the Tx and Rx process here? */
1367 if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) {
1368 for (i = 0; i < 4; i++)
1369 outw(((u16 *)mc_filter)[i], ioaddr + MC0 + i*4);
1370 memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter));
1375 static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1377 struct epic_private *np = dev->priv;
1379 strcpy (info->driver, DRV_NAME);
1380 strcpy (info->version, DRV_VERSION);
1381 strcpy (info->bus_info, pci_name(np->pci_dev));
1384 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1386 struct epic_private *np = dev->priv;
1389 spin_lock_irq(&np->lock);
1390 rc = mii_ethtool_gset(&np->mii, cmd);
1391 spin_unlock_irq(&np->lock);
1396 static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1398 struct epic_private *np = dev->priv;
1401 spin_lock_irq(&np->lock);
1402 rc = mii_ethtool_sset(&np->mii, cmd);
1403 spin_unlock_irq(&np->lock);
1408 static int netdev_nway_reset(struct net_device *dev)
1410 struct epic_private *np = dev->priv;
1411 return mii_nway_restart(&np->mii);
1414 static u32 netdev_get_link(struct net_device *dev)
1416 struct epic_private *np = dev->priv;
1417 return mii_link_ok(&np->mii);
1420 static u32 netdev_get_msglevel(struct net_device *dev)
1425 static void netdev_set_msglevel(struct net_device *dev, u32 value)
1430 static int ethtool_begin(struct net_device *dev)
1432 unsigned long ioaddr = dev->base_addr;
1433 /* power-up, if interface is down */
1434 if (! netif_running(dev)) {
1435 outl(0x0200, ioaddr + GENCTL);
1436 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
1441 static void ethtool_complete(struct net_device *dev)
1443 unsigned long ioaddr = dev->base_addr;
1444 /* power-down, if interface is down */
1445 if (! netif_running(dev)) {
1446 outl(0x0008, ioaddr + GENCTL);
1447 outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL);
/* ethtool entry points. .begin/.complete bracket every operation so a
   chip on a closed interface is powered up and then back down. */
1451 static struct ethtool_ops netdev_ethtool_ops = {
1452 .get_drvinfo = netdev_get_drvinfo,
1453 .get_settings = netdev_get_settings,
1454 .set_settings = netdev_set_settings,
1455 .nway_reset = netdev_nway_reset,
1456 .get_link = netdev_get_link,
1457 .get_msglevel = netdev_get_msglevel,
1458 .set_msglevel = netdev_set_msglevel,
1459 .get_sg = ethtool_op_get_sg,
1460 .get_tx_csum = ethtool_op_get_tx_csum,
1461 .begin = ethtool_begin,
1462 .complete = ethtool_complete
1465 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1467 struct epic_private *np = dev->priv;
1468 long ioaddr = dev->base_addr;
1469 struct mii_ioctl_data *data = if_mii(rq);
1472 /* power-up, if interface is down */
1473 if (! netif_running(dev)) {
1474 outl(0x0200, ioaddr + GENCTL);
1475 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
1478 /* all non-ethtool ioctls (the SIOC[GS]MIIxxx ioctls) */
1479 spin_lock_irq(&np->lock);
1480 rc = generic_mii_ioctl(&np->mii, data, cmd, NULL);
1481 spin_unlock_irq(&np->lock);
1483 /* power-down, if interface is down */
1484 if (! netif_running(dev)) {
1485 outl(0x0008, ioaddr + GENCTL);
1486 outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL);
/* PCI hot-unplug / driver-unload teardown for one adapter. */
1492 static void __devexit epic_remove_one (struct pci_dev *pdev)
1494 struct net_device *dev = pci_get_drvdata(pdev);
1495 struct epic_private *ep = dev->priv;
/* NOTE(review): the descriptor rings are returned to the DMA pool
   before unregister_netdev(); confirm the interface is guaranteed
   closed (rings quiesced) whenever remove runs. */
1497 pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
1498 pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
1499 unregister_netdev(dev);
1501 iounmap((void*) dev->base_addr);
1503 pci_release_regions(pdev);
1505 pci_set_drvdata(pdev, NULL);
1506 /* pci_power_off(pdev, -1); */
/* PM suspend: a down interface is already in low-power mode, so only
   a running one needs to be put to sleep here. */
1512 static int epic_suspend (struct pci_dev *pdev, u32 state)
1514 struct net_device *dev = pci_get_drvdata(pdev);
1515 long ioaddr = dev->base_addr;
1517 if (!netif_running(dev))
1520 /* Put the chip into low-power mode. */
1521 outl(0x0008, ioaddr + GENCTL);
1522 /* pci_power_off(pdev, -1); */
/* PM resume: nothing to do for a down interface; a running one is
   brought back up below. */
1527 static int epic_resume (struct pci_dev *pdev)
1529 struct net_device *dev = pci_get_drvdata(pdev);
1531 if (!netif_running(dev))
1534 /* pci_power_on(pdev); */
1538 #endif /* CONFIG_PM */
/* PCI driver glue: probe/remove hooks plus power management when
   CONFIG_PM is enabled. */
1541 static struct pci_driver epic_driver = {
1543 .id_table = epic_pci_tbl,
1544 .probe = epic_init_one,
1545 .remove = __devexit_p(epic_remove_one),
1547 .suspend = epic_suspend,
1548 .resume = epic_resume,
1549 #endif /* CONFIG_PM */
/* Module entry point: print the version banner, then register the
   driver with the PCI core and return its result. */
1553 static int __init epic_init (void)
1555 /* when a module, this is printed whether or not devices are found in probe */
1557 printk (KERN_INFO "%s" KERN_INFO "%s" KERN_INFO "%s",
1558 version, version2, version3);
1561 return pci_module_init (&epic_driver);
1565 static void __exit epic_cleanup (void)
1567 pci_unregister_driver (&epic_driver);
/* Register the module's load/unload entry points. */
1571 module_init(epic_init);
1572 module_exit(epic_cleanup);