1 /* epic100.c: A SMC 83c170 EPIC/100 Fast Ethernet driver for Linux. */
3 Written/copyright 1997-2001 by Donald Becker.
5 This software may be used and distributed according to the terms of
6 the GNU General Public License (GPL), incorporated herein by reference.
7 Drivers based on or derived from this code fall under the GPL and must
8 retain the authorship, copyright and license notice. This file is not
9 a complete program and may only be used when the entire operating
10 system is licensed under the GPL.
12 This driver is for the SMC83c170/175 "EPIC" series, as used on the
13 SMC EtherPower II 9432 PCI adapter, and several CardBus cards.
15 The author may be reached as becker@scyld.com, or C/O
16 Scyld Computing Corporation
17 410 Severn Ave., Suite 210
20 Information and updates available at
21 http://www.scyld.com/network/epic100.html
23 ---------------------------------------------------------------------
25 Linux kernel-specific changes:
28 * Merge becker version 1.09 (4/08/2000)
31 * Major bugfix to 1.09 driver (Francis Romieu)
34 * Merge becker test version 1.09 (5/29/2000)
37 * Fix locking (jgarzik)
38 * Limit 83c175 probe to ethernet-class PCI devices (rgooch)
41 * Merge becker version 1.11
42 * Move pci_enable_device before any PCI BAR len checks
48 * ethtool driver info support (jgarzik)
51 * ethtool media get/set support (jgarzik)
54 * revert MII transceiver init change (jgarzik)
57 * implement ETHTOOL_[GS]SET, _NWAY_RST, _[GS]MSGLVL, _GLINK (jgarzik)
58 * replace some MII-related magic numbers with constants
61 * fix power-up sequence
64 * revert version 1.1.12, power-up sequence "fix"
66 LK1.1.14 (Krzysztof Halasa):
67 * fix spurious bad initializations
68 * pound phy a la SMSC's app note on the subject
72 #define DRV_NAME "epic100"
73 #define DRV_VERSION "1.11+LK1.1.14"
74 #define DRV_RELDATE "Aug 4, 2002"
76 /* The user-configurable values.
77 These may be modified when a driver module is loaded.*/
79 static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
80 /* Maximum events (Rx packets, etc.) to handle at each interrupt. */
81 static int max_interrupt_work = 32;
83 /* Used to pass the full-duplex flag, etc. */
84 #define MAX_UNITS 8 /* More are supported, limit only on options */
85 static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
86 static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
88 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
89 Setting to > 1518 effectively disables this feature. */
90 static int rx_copybreak;
92 /* Operational parameters that are set at compile time. */
94 /* Keep the ring sizes a power of two for operational efficiency.
95 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
96 Making the Tx ring too large decreases the effectiveness of channel
97 bonding and packet priority.
98 There are no ill effects from too-large receive rings. */
99 #define TX_RING_SIZE 16
100 #define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
101 #define RX_RING_SIZE 32
102 #define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct epic_tx_desc)
103 #define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct epic_rx_desc)
105 /* Operational parameters that usually are not changed. */
106 /* Time in jiffies before concluding the transmitter is hung. */
107 #define TX_TIMEOUT (2*HZ)
109 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
111 /* Bytes transferred to chip before transmission starts. */
112 /* Initial threshold, increased on underflow, rounded down to 4 byte units. */
113 #define TX_FIFO_THRESH 256
114 #define RX_FIFO_THRESH 1 /* 0-3, 0==32, 64,96, or 3==128 bytes */
116 #if !defined(__OPTIMIZE__)
117 #warning You must compile this file with the correct options!
118 #warning See the last lines of the source file.
119 #error You must compile this driver with "-O".
122 #include <linux/config.h>
123 #include <linux/module.h>
124 #include <linux/kernel.h>
125 #include <linux/string.h>
126 #include <linux/timer.h>
127 #include <linux/errno.h>
128 #include <linux/ioport.h>
129 #include <linux/slab.h>
130 #include <linux/interrupt.h>
131 #include <linux/pci.h>
132 #include <linux/delay.h>
133 #include <linux/netdevice.h>
134 #include <linux/etherdevice.h>
135 #include <linux/skbuff.h>
136 #include <linux/init.h>
137 #include <linux/spinlock.h>
138 #include <linux/ethtool.h>
139 #include <linux/mii.h>
140 #include <linux/crc32.h>
141 #include <asm/bitops.h>
143 #include <asm/uaccess.h>
145 /* These identify the driver base version and may not be removed. */
146 static char version[] __devinitdata =
147 DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>\n";
148 static char version2[] __devinitdata =
149 " http://www.scyld.com/network/epic100.html\n";
150 static char version3[] __devinitdata =
151 " (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";
153 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
154 MODULE_DESCRIPTION("SMC 83c170 EPIC series Ethernet driver");
155 MODULE_LICENSE("GPL");
157 MODULE_PARM(debug, "i");
158 MODULE_PARM(max_interrupt_work, "i");
159 MODULE_PARM(rx_copybreak, "i");
160 MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
161 MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
162 MODULE_PARM_DESC(debug, "EPIC/100 debug level (0-5)");
163 MODULE_PARM_DESC(max_interrupt_work, "EPIC/100 maximum events handled per interrupt");
164 MODULE_PARM_DESC(options, "EPIC/100: Bits 0-3: media type, bit 4: full duplex");
165 MODULE_PARM_DESC(rx_copybreak, "EPIC/100 copy breakpoint for copy-only-tiny-frames");
166 MODULE_PARM_DESC(full_duplex, "EPIC/100 full duplex setting(s) (1)");
171 I. Board Compatibility
173 This device driver is designed for the SMC "EPIC/100", the SMC
174 single-chip Ethernet controllers for PCI. This chip is used on
175 the SMC EtherPower II boards.
177 II. Board-specific settings
179 PCI bus devices are configured by the system at boot time, so no jumpers
180 need to be set on the board. The system BIOS will assign the
181 PCI INTA signal to a (preferably otherwise unused) system IRQ line.
182 Note: Kernel versions earlier than 1.3.73 do not support shared PCI
185 III. Driver operation
191 http://www.smsc.com/main/datasheets/83c171.pdf
192 http://www.smsc.com/main/datasheets/83c175.pdf
193 http://scyld.com/expert/NWay.html
194 http://www.national.com/pf/DP/DP83840A.html
201 enum pci_id_flags_bits {
202 /* Set PCI command register bits before calling probe1(). */
203 PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
204 /* Read and map the single following PCI BAR. */
205 PCI_ADDR0=0<<4, PCI_ADDR1=1<<4, PCI_ADDR2=2<<4, PCI_ADDR3=3<<4,
206 PCI_ADDR_64BITS=0x100, PCI_NO_ACPI_WAKE=0x200, PCI_NO_MIN_LATENCY=0x400,
209 enum chip_capability_flags { MII_PWRDWN=1, TYPE2_INTR=2, NO_MII=4 };
211 #define EPIC_TOTAL_SIZE 0x100
214 #define EPIC_IOTYPE PCI_USES_MASTER|PCI_USES_IO|PCI_ADDR0
216 #define EPIC_IOTYPE PCI_USES_MASTER|PCI_USES_MEM|PCI_ADDR1
226 struct epic_chip_info {
228 enum pci_id_flags_bits pci_flags;
229 int io_size; /* Needed for I/O region check or ioremap(). */
230 int drv_flags; /* Driver use, intended as capability flags. */
234 /* indexed by chip_t */
235 static struct epic_chip_info pci_id_tbl[] = {
236 { "SMSC EPIC/100 83c170",
237 EPIC_IOTYPE, EPIC_TOTAL_SIZE, TYPE2_INTR | NO_MII | MII_PWRDWN },
238 { "SMSC EPIC/100 83c170",
239 EPIC_IOTYPE, EPIC_TOTAL_SIZE, TYPE2_INTR },
240 { "SMSC EPIC/C 83c175",
241 EPIC_IOTYPE, EPIC_TOTAL_SIZE, TYPE2_INTR | MII_PWRDWN },
245 static struct pci_device_id epic_pci_tbl[] = {
246 { 0x10B8, 0x0005, 0x1092, 0x0AB4, 0, 0, SMSC_83C170_0 },
247 { 0x10B8, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMSC_83C170 },
248 { 0x10B8, 0x0006, PCI_ANY_ID, PCI_ANY_ID,
249 PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, SMSC_83C175 },
252 MODULE_DEVICE_TABLE (pci, epic_pci_tbl);
270 /* Offsets to registers, using the (ugh) SMC names. */
271 enum epic_registers {
272 COMMAND=0, INTSTAT=4, INTMASK=8, GENCTL=0x0C, NVCTL=0x10, EECTL=0x14,
274 TEST1=0x1C, CRCCNT=0x20, ALICNT=0x24, MPCNT=0x28, /* Rx error counters. */
275 MIICtrl=0x30, MIIData=0x34, MIICfg=0x38,
276 LAN0=64, /* MAC address. */
277 MC0=80, /* Multicast filter table. */
278 RxCtrl=96, TxCtrl=112, TxSTAT=0x74,
279 PRxCDAR=0x84, RxSTAT=0xA4, EarlyRx=0xB0, PTxCDAR=0xC4, TxThresh=0xDC,
282 /* Interrupt register bits, using my own meaningful names. */
284 TxIdle=0x40000, RxIdle=0x20000, IntrSummary=0x010000,
285 PCIBusErr170=0x7000, PCIBusErr175=0x1000, PhyEvent175=0x8000,
286 RxStarted=0x0800, RxEarlyWarn=0x0400, CntFull=0x0200, TxUnderrun=0x0100,
287 TxEmpty=0x0080, TxDone=0x0020, RxError=0x0010,
288 RxOverflow=0x0008, RxFull=0x0004, RxHeader=0x0002, RxDone=0x0001,
291 StopRx=1, StartRx=2, TxQueued=4, RxQueued=8,
292 StopTxDMA=0x20, StopRxDMA=0x40, RestartTx=0x80,
295 static u16 media2miictl[16] = {
296 0, 0x0C00, 0x0C00, 0x2000, 0x0100, 0x2100, 0, 0,
297 0, 0, 0, 0, 0, 0, 0, 0 };
299 /* The EPIC100 Rx and Tx buffer descriptors. */
301 struct epic_tx_desc {
308 struct epic_rx_desc {
315 enum desc_status_bits {
319 #define PRIV_ALIGN 15 /* Required alignment mask */
320 struct epic_private {
321 struct epic_rx_desc *rx_ring;
322 struct epic_tx_desc *tx_ring;
323 /* The saved address of a sent-in-place packet/buffer, for skfree(). */
324 struct sk_buff* tx_skbuff[TX_RING_SIZE];
325 /* The addresses of receive-in-place skbuffs. */
326 struct sk_buff* rx_skbuff[RX_RING_SIZE];
328 dma_addr_t tx_ring_dma;
329 dma_addr_t rx_ring_dma;
332 spinlock_t lock; /* Group with Tx control cache line. */
333 unsigned int cur_tx, dirty_tx;
335 unsigned int cur_rx, dirty_rx;
336 unsigned int rx_buf_sz; /* Based on MTU+slack. */
338 struct pci_dev *pci_dev; /* PCI bus location. */
339 int chip_id, chip_flags;
341 struct net_device_stats stats;
342 struct timer_list timer; /* Media selection timer. */
344 unsigned char mc_filter[8];
345 signed char phys[4]; /* MII device addresses. */
346 u16 advertising; /* NWay media advertisement */
348 struct mii_if_info mii;
349 unsigned int tx_full:1; /* The Tx queue is full. */
350 unsigned int default_port:4; /* Last dev->if_port value. */
353 static int epic_open(struct net_device *dev);
354 static int read_eeprom(long ioaddr, int location);
355 static int mdio_read(struct net_device *dev, int phy_id, int location);
356 static void mdio_write(struct net_device *dev, int phy_id, int loc, int val);
357 static void epic_restart(struct net_device *dev);
358 static void epic_timer(unsigned long data);
359 static void epic_tx_timeout(struct net_device *dev);
360 static void epic_init_ring(struct net_device *dev);
361 static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev);
362 static int epic_rx(struct net_device *dev);
363 static irqreturn_t epic_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
364 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
365 static struct ethtool_ops netdev_ethtool_ops;
366 static int epic_close(struct net_device *dev);
367 static struct net_device_stats *epic_get_stats(struct net_device *dev);
368 static void set_rx_mode(struct net_device *dev);
/* PCI probe callback: bring up one EPIC/100 board.
 * Enables the PCI device, maps its BAR, allocates the net_device and the
 * DMA-coherent Tx/Rx descriptor rings, reads the station address from the
 * chip, scans the MII bus, wires up the net_device method pointers, and
 * registers the interface.
 * NOTE(review): this listing is gapped (original line numbers jump), so
 * declarations such as ioaddr/ring_space/ring_dma and several braces are
 * elided here — do not infer control flow across the gaps. */
372 static int __devinit epic_init_one (struct pci_dev *pdev,
373 				    const struct pci_device_id *ent)
/* card_idx is static so it survives across probe calls and indexes the
 * module-parameter arrays options[]/full_duplex[]. */
375 static int card_idx = -1;
377 int chip_idx = (int) ent->driver_data;
379 struct net_device *dev;
380 struct epic_private *ep;
381 int i, option = 0, duplex = 0;
385 /* when built into the kernel, we only print version if device is found */
387 static int printed_version;
388 if (!printed_version++)
389 printk (KERN_INFO "%s" KERN_INFO "%s" KERN_INFO "%s",
390 version, version2, version3);
395 i = pci_enable_device(pdev);
/* Sanity-check that BAR 0 is large enough for the register block. */
400 if (pci_resource_len(pdev, 0) < pci_id_tbl[chip_idx].io_size) {
401 printk (KERN_ERR "card %d: no PCI region space\n", card_idx);
405 pci_set_master(pdev);
407 dev = alloc_etherdev(sizeof (*ep));
409 printk (KERN_ERR "card %d: no memory for eth device\n", card_idx);
412 SET_MODULE_OWNER(dev);
413 SET_NETDEV_DEV(dev, &pdev->dev);
415 if (pci_request_regions(pdev, DRV_NAME))
416 goto err_out_free_netdev;
/* BAR 0 (I/O) or BAR 1 (memory, via ioremap) depending on EPIC_IOTYPE;
 * the #ifdef selecting between these is elided in this listing. */
419 ioaddr = pci_resource_start (pdev, 0);
421 ioaddr = pci_resource_start (pdev, 1);
422 ioaddr = (long) ioremap (ioaddr, pci_resource_len (pdev, 1));
424 printk (KERN_ERR DRV_NAME " %d: ioremap failed\n", card_idx);
425 goto err_out_free_res;
429 pci_set_drvdata(pdev, dev);
/* Hook the generic MII helper library to our MDIO accessors. */
432 ep->mii.mdio_read = mdio_read;
433 ep->mii.mdio_write = mdio_write;
434 ep->mii.phy_id_mask = 0x1f;
435 ep->mii.reg_num_mask = 0x1f;
/* DMA-coherent Tx descriptor ring. */
437 ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
439 goto err_out_iounmap;
440 ep->tx_ring = (struct epic_tx_desc *)ring_space;
441 ep->tx_ring_dma = ring_dma;
/* DMA-coherent Rx descriptor ring. */
443 ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
445 goto err_out_unmap_tx;
446 ep->rx_ring = (struct epic_rx_desc *)ring_space;
447 ep->rx_ring_dma = ring_dma;
/* Media/duplex overrides: boot-time dev->mem_start wins, else the
 * per-card module parameters. */
449 if (dev->mem_start) {
450 option = dev->mem_start;
451 duplex = (dev->mem_start & 16) ? 1 : 0;
452 } else if (card_idx >= 0 && card_idx < MAX_UNITS) {
453 if (options[card_idx] >= 0)
454 option = options[card_idx];
455 if (full_duplex[card_idx] >= 0)
456 duplex = full_duplex[card_idx];
459 dev->base_addr = ioaddr;
462 spin_lock_init (&ep->lock);
464 /* Bring the chip out of low-power mode. */
465 outl(0x4200, ioaddr + GENCTL);
466 /* Magic?! If we don't set this bit the MII interface won't work. */
467 /* This magic is documented in SMSC app note 7.15 */
468 for (i = 16; i > 0; i--)
469 outl(0x0008, ioaddr + TEST1);
471 /* Turn on the MII transceiver. */
472 outl(0x12, ioaddr + MIICfg);
474 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
475 outl(0x0200, ioaddr + GENCTL);
477 /* Note: the '175 does not have a serial EEPROM. */
/* Station address lives in the LAN0 registers, one 16-bit word per
 * 4-byte register stride. */
478 for (i = 0; i < 3; i++)
479 ((u16 *)dev->dev_addr)[i] = le16_to_cpu(inw(ioaddr + LAN0 + i*4));
482 printk(KERN_DEBUG DRV_NAME "(%s): EEPROM contents\n",
484 for (i = 0; i < 64; i++)
485 printk(" %4.4x%s", read_eeprom(ioaddr, i),
486 i % 16 == 15 ? "\n" : "");
490 ep->chip_id = chip_idx;
491 ep->chip_flags = pci_id_tbl[chip_idx].drv_flags;
493 /* Find the connected MII xcvrs.
494 Doing this in open() would allow detecting external xcvrs later, but
495 takes much time and no cards have external MII. */
497 int phy, phy_idx = 0;
/* A PHY answers with a BMSR that is neither all-ones nor all-zeros. */
498 for (phy = 1; phy < 32 && phy_idx < sizeof(ep->phys); phy++) {
499 int mii_status = mdio_read(dev, phy, MII_BMSR);
500 if (mii_status != 0xffff && mii_status != 0x0000) {
501 ep->phys[phy_idx++] = phy;
502 printk(KERN_INFO DRV_NAME "(%s): MII transceiver #%d control "
503 "%4.4x status %4.4x.\n",
504 pci_name(pdev), phy, mdio_read(dev, phy, 0), mii_status);
507 ep->mii_phy_cnt = phy_idx;
/* NOTE(review): `phy` is used here after the scan loop; the elided lines
 * presumably reassign it to ep->phys[0] first — confirm against the
 * full source. */
510 ep->mii.advertising = mdio_read(dev, phy, MII_ADVERTISE);
511 printk(KERN_INFO DRV_NAME "(%s): Autonegotiation advertising %4.4x link "
513 pci_name(pdev), ep->mii.advertising, mdio_read(dev, phy, 5));
514 } else if ( ! (ep->chip_flags & NO_MII)) {
515 printk(KERN_WARNING DRV_NAME "(%s): ***WARNING***: No MII transceiver found!\n",
517 /* Use the known PHY address of the EPII. */
520 ep->mii.phy_id = ep->phys[0];
523 /* Turn off the MII xcvr (175 only!), leave the chip in low-power mode. */
524 if (ep->chip_flags & MII_PWRDWN)
525 outl(inl(ioaddr + NVCTL) & ~0x483C, ioaddr + NVCTL);
526 outl(0x0008, ioaddr + GENCTL);
528 /* The lower four bits are the media type. */
530 ep->mii.force_media = ep->mii.full_duplex = 1;
531 printk(KERN_INFO DRV_NAME "(%s): Forced full duplex operation requested.\n",
534 dev->if_port = ep->default_port = option;
536 /* The Epic-specific entries in the device structure. */
537 dev->open = &epic_open;
538 dev->hard_start_xmit = &epic_start_xmit;
539 dev->stop = &epic_close;
540 dev->get_stats = &epic_get_stats;
541 dev->set_multicast_list = &set_rx_mode;
542 dev->do_ioctl = &netdev_ioctl;
543 dev->ethtool_ops = &netdev_ethtool_ops;
544 dev->watchdog_timeo = TX_TIMEOUT;
545 dev->tx_timeout = &epic_tx_timeout;
547 i = register_netdev(dev);
549 goto err_out_unmap_tx;
551 printk(KERN_INFO "%s: %s at %#lx, IRQ %d, ",
552 dev->name, pci_id_tbl[chip_idx].name, ioaddr, dev->irq);
/* Print the MAC address as xx:xx:xx:xx:xx:xx. */
553 for (i = 0; i < 5; i++)
554 printk("%2.2x:", dev->dev_addr[i]);
555 printk("%2.2x.\n", dev->dev_addr[i]);
/* Error unwind labels (goto-cleanup pattern); intermediate labels are
 * elided in this listing. */
560 pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
566 pci_release_regions(pdev);
572 /* Serial EEPROM section. */
574 /* EEPROM_Ctrl bits. */
575 #define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
576 #define EE_CS 0x02 /* EEPROM chip select. */
577 #define EE_DATA_WRITE 0x08 /* EEPROM chip data in. */
578 #define EE_WRITE_0 0x01
579 #define EE_WRITE_1 0x09
580 #define EE_DATA_READ 0x10 /* EEPROM chip data out. */
581 #define EE_ENB (0x0001 | EE_CS)
583 /* Delay between EEPROM clock transitions.
584 This serves to flush the operation to the PCI bus.
587 #define eeprom_delay() inl(ee_addr)
589 /* The EEPROM commands include the always-set leading bit. */
590 #define EE_WRITE_CMD (5 << 6)
591 #define EE_READ64_CMD (6 << 6)
592 #define EE_READ256_CMD (6 << 8)
593 #define EE_ERASE_CMD (7 << 6)
/* Bit-bang one 16-bit word out of the board's serial EEPROM.
 * @ioaddr: chip register base; @location: word address to read.
 * Returns the 16-bit word (accumulated in `retval`, whose declaration is
 * elided in this gapped listing along with `i` and eeprom_delay() calls). */
595 static int __devinit read_eeprom(long ioaddr, int location)
599 	long ee_addr = ioaddr + EECTL;
/* Bit 0x40 in EECTL distinguishes the small (64-word) part, which takes
 * a shorter address field, from the 256-word part. */
600 	int read_cmd = location |
601 		(inl(ee_addr) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD);
/* Pulse chip-select to start a fresh transaction. */
603 	outl(EE_ENB & ~EE_CS, ee_addr);
604 	outl(EE_ENB, ee_addr);
606 	/* Shift the read command bits out. */
607 	for (i = 12; i >= 0; i--) {
608 		short dataval = (read_cmd & (1 << i)) ? EE_WRITE_1 : EE_WRITE_0;
/* Present the data bit, then clock it in on the rising SHIFT_CLK edge. */
609 		outl(EE_ENB | dataval, ee_addr);
611 		outl(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
614 	outl(EE_ENB, ee_addr);
/* Clock the 16 result bits back in, MSB first. */
616 	for (i = 16; i > 0; i--) {
617 		outl(EE_ENB | EE_SHIFT_CLK, ee_addr);
619 		retval = (retval << 1) | ((inl(ee_addr) & EE_DATA_READ) ? 1 : 0);
620 		outl(EE_ENB, ee_addr);
624 	/* Terminate the EEPROM access. */
625 	outl(EE_ENB & ~EE_CS, ee_addr);
630 #define MII_WRITEOP 2
/* Read one MII management register via the chip's MDIO engine.
 * Writes the {phy, register, READ} command to MIICtrl, polls until the
 * READ bit self-clears, then returns the latched MIIData word.
 * (Loop-variable declaration, delay, and timeout fall-through are elided
 * in this gapped listing.) */
631 static int mdio_read(struct net_device *dev, int phy_id, int location)
633 long ioaddr = dev->base_addr;
634 int read_cmd = (phy_id << 9) | (location << 4) | MII_READOP;
637 outl(read_cmd, ioaddr + MIICtrl);
638 /* Typical operation takes 25 loops. */
639 for (i = 400; i > 0; i--) {
/* Hardware clears MII_READOP in MIICtrl when the transaction is done. */
641 if ((inl(ioaddr + MIICtrl) & MII_READOP) == 0) {
642 /* Work around read failure bug. */
/* Some PHYs return 0xffff on the first read of low registers; reissue
 * the command once and keep polling. */
643 if (phy_id == 1 && location < 6
644 && inw(ioaddr + MIIData) == 0xffff) {
645 outl(read_cmd, ioaddr + MIICtrl);
648 return inw(ioaddr + MIIData);
/* Write one MII management register: latch the value into MIIData, issue
 * the {phy, register, WRITE} command, and poll until the WRITE bit
 * self-clears (bounded by 10000 iterations; loop body/exit elided in this
 * gapped listing). */
654 static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
656 long ioaddr = dev->base_addr;
659 outw(value, ioaddr + MIIData);
660 outl((phy_id << 9) | (loc << 4) | MII_WRITEOP, ioaddr + MIICtrl);
661 for (i = 10000; i > 0; i--) {
663 if ((inl(ioaddr + MIICtrl) & MII_WRITEOP) == 0)
/* net_device->open: reset the chip, grab the IRQ, program the station
 * address and thresholds, set up media/duplex, start Rx, unmask
 * interrupts and arm the media-watch timer.
 * NOTE(review): gapped listing — declarations of i/retval, the
 * epic_init_ring() call, several closing braces and the INTMASK operand
 * of the final mask write are elided; don't infer flow across gaps. */
670 static int epic_open(struct net_device *dev)
672 struct epic_private *ep = dev->priv;
673 long ioaddr = dev->base_addr;
677 /* Soft reset the chip. */
678 outl(0x4001, ioaddr + GENCTL);
/* Shared IRQ: the handler must cope with calls before the chip is up. */
680 if ((retval = request_irq(dev->irq, &epic_interrupt, SA_SHIRQ, dev->name, dev)))
685 outl(0x4000, ioaddr + GENCTL);
686 /* This magic is documented in SMSC app note 7.15 */
687 for (i = 16; i > 0; i--)
688 outl(0x0008, ioaddr + TEST1);
690 /* Pull the chip out of low-power mode, enable interrupts, and set for
691 PCI read multiple. The MIIcfg setting and strange write order are
692 required by the details of which bits are reset and the transceiver
693 wiring on the Ositech CardBus card.
696 outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg);
698 if (ep->chip_flags & MII_PWRDWN)
699 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
/* Descriptor byte order differs per host endianness: 0x44xx sets the
 * big-endian bits during wake-up, then the same value minus the reset
 * bit is written to complete the sequence. */
701 #if defined(__powerpc__) || defined(__sparc__) /* Big endian */
702 outl(0x4432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
703 inl(ioaddr + GENCTL);
704 outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
706 outl(0x4412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
707 inl(ioaddr + GENCTL);
708 outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
711 udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */
/* Load the station address into the LAN0 registers (16 bits per 4-byte
 * register stride). */
713 for (i = 0; i < 3; i++)
714 outl(cpu_to_le16(((u16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);
716 ep->tx_threshold = TX_FIFO_THRESH;
717 outl(ep->tx_threshold, ioaddr + TxThresh);
/* Non-zero table entry means a forced media type: program BMCR directly
 * instead of autonegotiating. */
719 if (media2miictl[dev->if_port & 15]) {
721 mdio_write(dev, ep->phys[0], MII_BMCR, media2miictl[dev->if_port&15]);
722 if (dev->if_port == 1) {
724 printk(KERN_INFO "%s: Using the 10base2 transceiver, MII "
726 dev->name, mdio_read(dev, ep->phys[0], MII_BMSR));
/* Otherwise derive duplex from the link partner's advertisement, and
 * kick off autonegotiation if the partner never acknowledged. */
729 int mii_lpa = mdio_read(dev, ep->phys[0], MII_LPA);
730 if (mii_lpa != 0xffff) {
731 if ((mii_lpa & LPA_100FULL) || (mii_lpa & 0x01C0) == LPA_10FULL)
732 ep->mii.full_duplex = 1;
733 else if (! (mii_lpa & LPA_LPACK))
734 mdio_write(dev, ep->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
736 printk(KERN_INFO "%s: Setting %s-duplex based on MII xcvr %d"
737 " register read of %4.4x.\n", dev->name,
738 ep->mii.full_duplex ? "full" : "half",
739 ep->phys[0], mii_lpa);
/* TxCtrl 0x7F = full duplex, 0x79 = half; then point the chip at the
 * descriptor rings. */
743 outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
744 outl(ep->rx_ring_dma, ioaddr + PRxCDAR);
745 outl(ep->tx_ring_dma, ioaddr + PTxCDAR);
747 /* Start the chip's Rx process. */
749 outl(StartRx | RxQueued, ioaddr + COMMAND);
751 netif_start_queue(dev);
753 /* Enable interrupts by setting the interrupt mask. */
754 outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
755 | CntFull | TxUnderrun | TxDone | TxEmpty
756 | RxError | RxOverflow | RxFull | RxHeader | RxDone,
760 printk(KERN_DEBUG "%s: epic_open() ioaddr %lx IRQ %d status %4.4x "
762 dev->name, ioaddr, dev->irq, (int)inl(ioaddr + GENCTL),
763 ep->mii.full_duplex ? "full" : "half");
765 /* Set the timer to switch to check for link beat and perhaps switch
766 to an alternate media type. */
767 init_timer(&ep->timer);
768 ep->timer.expires = jiffies + 3*HZ;
769 ep->timer.data = (unsigned long)dev;
770 ep->timer.function = &epic_timer; /* timer handler */
771 add_timer(&ep->timer);
776 /* Reset the chip to recover from a PCI transaction error.
777 This may occur at interrupt time. */
/* Quiesce the chip without a full reset: stop the queue, mask all
 * interrupts, halt the Tx/Rx DMA engines, and harvest the hardware Rx
 * error counters into stats while the registers are still readable.
 * (Rx-queue drain code and closing braces are elided in this listing.) */
778 static void epic_pause(struct net_device *dev)
780 long ioaddr = dev->base_addr;
781 struct epic_private *ep = dev->priv;
783 netif_stop_queue (dev);
785 /* Disable interrupts by clearing the interrupt mask. */
786 outl(0x00000000, ioaddr + INTMASK);
787 /* Stop the chip's Tx and Rx DMA processes. */
788 outw(StopRx | StopTxDMA | StopRxDMA, ioaddr + COMMAND);
790 /* Update the error counts. */
/* 0xffff from COMMAND means the chip is gone (e.g. CardBus eject);
 * skip the counter reads in that case. */
791 if (inw(ioaddr + COMMAND) != 0xffff) {
792 ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
793 ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
794 ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
797 /* Remove the packets on the Rx queue. */
/* Re-initialize the chip after a PCI bus error or similar fault,
 * preserving the software ring state: soft reset, replay the app-note
 * 7.15 wake-up, reload the MAC address and thresholds, and point the
 * chip at the *current* ring positions (cur_rx/dirty_tx) rather than the
 * ring bases. May be called from interrupt context.
 * NOTE(review): gapped listing — `i` declaration, #else/#endif of the
 * endian block, and the INTMASK operand of the final write are elided. */
801 static void epic_restart(struct net_device *dev)
803 long ioaddr = dev->base_addr;
804 struct epic_private *ep = dev->priv;
807 /* Soft reset the chip. */
808 outl(0x4001, ioaddr + GENCTL);
810 printk(KERN_DEBUG "%s: Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",
811 dev->name, ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
814 /* This magic is documented in SMSC app note 7.15 */
815 for (i = 16; i > 0; i--)
816 outl(0x0008, ioaddr + TEST1);
818 #if defined(__powerpc__) || defined(__sparc__) /* Big endian */
819 outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
821 outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
823 outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg);
824 if (ep->chip_flags & MII_PWRDWN)
825 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
/* Reload the station address (lost across the soft reset). */
827 for (i = 0; i < 3; i++)
828 outl(cpu_to_le16(((u16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);
830 ep->tx_threshold = TX_FIFO_THRESH;
831 outl(ep->tx_threshold, ioaddr + TxThresh);
832 outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
/* Resume descriptor processing where the rings left off. */
833 outl(ep->rx_ring_dma + (ep->cur_rx%RX_RING_SIZE)*
834 sizeof(struct epic_rx_desc), ioaddr + PRxCDAR);
835 outl(ep->tx_ring_dma + (ep->dirty_tx%TX_RING_SIZE)*
836 sizeof(struct epic_tx_desc), ioaddr + PTxCDAR);
838 /* Start the chip's Rx process. */
840 outl(StartRx | RxQueued, ioaddr + COMMAND);
842 /* Enable interrupts by setting the interrupt mask. */
843 outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
844 | CntFull | TxUnderrun | TxDone | TxEmpty
845 | RxError | RxOverflow | RxFull | RxHeader | RxDone,
847 printk(KERN_DEBUG "%s: epic_restart() done, cmd status %4.4x, ctl %4.4x"
848 " interrupt %4.4x.\n",
849 dev->name, (int)inl(ioaddr + COMMAND), (int)inl(ioaddr + GENCTL),
850 (int)inl(ioaddr + INTSTAT));
/* Periodic duplex check (called from the media timer): intersect our
 * advertisement with the link partner's ability word and switch the
 * chip's TxCtrl duplex setting if the negotiated duplex changed.
 * Early-out bodies for forced media / bogus reads are elided in this
 * gapped listing. */
854 static void check_media(struct net_device *dev)
856 struct epic_private *ep = dev->priv;
857 long ioaddr = dev->base_addr;
/* With no PHY found, treat the partner ability as 0 (never full duplex). */
858 int mii_lpa = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], MII_LPA) : 0;
859 int negotiated = mii_lpa & ep->mii.advertising;
/* 0x0100 = 100baseTx-FD; (negotiated & 0x01C0) == 0x0040 = 10baseT-FD. */
860 int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
862 if (ep->mii.force_media)
864 if (mii_lpa == 0xffff) /* Bogus read */
866 if (ep->mii.full_duplex != duplex) {
867 ep->mii.full_duplex = duplex;
868 printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
869 " partner capability of %4.4x.\n", dev->name,
870 ep->mii.full_duplex ? "full" : "half", ep->phys[0], mii_lpa);
/* 0x7F enables full-duplex in TxCtrl, 0x79 is half-duplex. */
871 outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
/* Media-watch timer handler: optionally dump chip status when debugging,
 * run check_media() (call elided in this gapped listing), and re-arm
 * itself every 5 seconds. @data is the net_device pointer cast to
 * unsigned long, as required by the 2.4/2.6 timer API. */
875 static void epic_timer(unsigned long data)
877 struct net_device *dev = (struct net_device *)data;
878 struct epic_private *ep = dev->priv;
879 long ioaddr = dev->base_addr;
880 int next_tick = 5*HZ;
883 printk(KERN_DEBUG "%s: Media monitor tick, Tx status %8.8x.\n",
884 dev->name, (int)inl(ioaddr + TxSTAT));
885 printk(KERN_DEBUG "%s: Other registers are IntMask %4.4x "
886 "IntStatus %4.4x RxStatus %4.4x.\n",
887 dev->name, (int)inl(ioaddr + INTMASK),
888 (int)inl(ioaddr + INTSTAT), (int)inl(ioaddr + RxSTAT));
/* Self re-arming timer. */
893 ep->timer.expires = jiffies + next_tick;
894 add_timer(&ep->timer);
/* net_device->tx_timeout watchdog: log the stall, and if the TxSTAT
 * underflow bit (0x10) is set just restart the transmitter in place;
 * the else-branch (full epic_restart, elided here along with its
 * braces) handles the harder cases. Finally re-kick the Tx queue. */
897 static void epic_tx_timeout(struct net_device *dev)
899 struct epic_private *ep = dev->priv;
900 long ioaddr = dev->base_addr;
903 printk(KERN_WARNING "%s: Transmit timeout using MII device, "
904 "Tx status %4.4x.\n",
905 dev->name, (int)inw(ioaddr + TxSTAT));
907 printk(KERN_DEBUG "%s: Tx indices: dirty_tx %d, cur_tx %d.\n",
908 dev->name, ep->dirty_tx, ep->cur_tx);
911 if (inw(ioaddr + TxSTAT) & 0x10) { /* Tx FIFO underflow. */
912 ep->stats.tx_fifo_errors++;
913 outl(RestartTx, ioaddr + COMMAND);
/* Prod the chip in case descriptors are still queued. */
916 outl(TxQueued, dev->base_addr + COMMAND);
/* Reset the watchdog clock so we don't immediately re-trigger. */
919 dev->trans_start = jiffies;
920 ep->stats.tx_errors++;
922 netif_wake_queue(dev);
925 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
/* Initialize the Rx and Tx descriptor rings: reset the ring indices,
 * size the Rx buffers from the MTU, link each descriptor to the next
 * (with the last wrapping to the base), allocate and DMA-map one skb per
 * Rx slot, and clear Tx ownership. Loop-variable declaration, skb-NULL
 * handling and some closing braces are elided in this gapped listing. */
926 static void epic_init_ring(struct net_device *dev)
928 struct epic_private *ep = dev->priv;
932 ep->lock = (spinlock_t) SPIN_LOCK_UNLOCKED;
933 ep->dirty_tx = ep->cur_tx = 0;
934 ep->cur_rx = ep->dirty_rx = 0;
/* PKT_BUF_SZ (1536) covers a standard frame; oversized MTUs get
 * MTU + 32 bytes of slack. */
935 ep->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
937 /* Initialize all Rx descriptors. */
938 for (i = 0; i < RX_RING_SIZE; i++) {
939 ep->rx_ring[i].rxstatus = 0;
940 ep->rx_ring[i].buflength = cpu_to_le32(ep->rx_buf_sz);
/* Chain descriptors by bus address so the chip can follow the ring. */
941 ep->rx_ring[i].next = ep->rx_ring_dma +
942 (i+1)*sizeof(struct epic_rx_desc);
943 ep->rx_skbuff[i] = 0;
945 /* Mark the last entry as wrapping the ring. */
946 ep->rx_ring[i-1].next = ep->rx_ring_dma;
948 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
949 for (i = 0; i < RX_RING_SIZE; i++) {
950 struct sk_buff *skb = dev_alloc_skb(ep->rx_buf_sz);
951 ep->rx_skbuff[i] = skb;
954 skb->dev = dev; /* Mark as being used by this device. */
955 skb_reserve(skb, 2); /* 16 byte align the IP header. */
956 ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev,
957 skb->tail, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
/* Hand the descriptor to the hardware only after bufaddr is valid. */
958 ep->rx_ring[i].rxstatus = cpu_to_le32(DescOwn);
/* If allocation stopped early, dirty_rx goes "negative" (unsigned wrap)
 * so the refill logic knows how many slots still lack buffers. */
960 ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
962 /* The Tx buffer descriptor is filled in as needed, but we
963 do need to clear the ownership bit. */
964 for (i = 0; i < TX_RING_SIZE; i++) {
965 ep->tx_skbuff[i] = 0;
966 ep->tx_ring[i].txstatus = 0x0000;
967 ep->tx_ring[i].next = ep->tx_ring_dma +
968 (i+1)*sizeof(struct epic_tx_desc);
970 ep->tx_ring[i-1].next = ep->tx_ring_dma;
/* net_device->hard_start_xmit: queue one skb on the Tx ring.
 * Pads short frames to ETH_ZLEN, claims a ring slot under ep->lock, DMA-
 * maps the data, picks a control word that requests a Tx-done interrupt
 * only at queue-pressure thresholds (to batch completions), writes the
 * ownership bit last, and kicks the chip.
 * NOTE(review): gapped listing — declarations of ctrl_word/flags, the
 * skb_padto NULL check, and the queue-full branch around
 * netif_stop_queue() are elided; don't infer flow across the gaps. */
974 static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
976 struct epic_private *ep = dev->priv;
977 int entry, free_count;
/* Hardware needs minimum-length frames; skb_padto may return a new skb. */
981 if (skb->len < ETH_ZLEN) {
982 skb = skb_padto(skb, ETH_ZLEN);
987 /* Caution: the write order is important here, set the field with the
988 "ownership" bit last. */
990 /* Calculate the next Tx descriptor entry. */
991 spin_lock_irqsave(&ep->lock, flags);
992 free_count = ep->cur_tx - ep->dirty_tx;
993 entry = ep->cur_tx % TX_RING_SIZE;
995 ep->tx_skbuff[entry] = skb;
996 ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data,
997 skb->len, PCI_DMA_TODEVICE);
/* 0x100000 = no completion interrupt, 0x140000 = interrupt on Tx done;
 * an interrupt is requested at the half-full mark and near the limit. */
998 if (free_count < TX_QUEUE_LEN/2) {/* Typical path */
999 ctrl_word = cpu_to_le32(0x100000); /* No interrupt */
1000 } else if (free_count == TX_QUEUE_LEN/2) {
1001 ctrl_word = cpu_to_le32(0x140000); /* Tx-done intr. */
1002 } else if (free_count < TX_QUEUE_LEN - 1) {
1003 ctrl_word = cpu_to_le32(0x100000); /* No Tx-done intr. */
1005 /* Leave room for an additional entry. */
1006 ctrl_word = cpu_to_le32(0x140000); /* Tx-done intr. */
1009 ep->tx_ring[entry].buflength = ctrl_word | cpu_to_le32(skb->len);
/* Ownership bit handed to hardware last, after all other fields. */
1010 ep->tx_ring[entry].txstatus =
1011 ((skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN) << 16)
1012 | cpu_to_le32(DescOwn);
1016 netif_stop_queue(dev);
1018 spin_unlock_irqrestore(&ep->lock, flags);
1019 /* Trigger an immediate transmit demand. */
1020 outl(TxQueued, dev->base_addr + COMMAND);
1022 dev->trans_start = jiffies;
1024 printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, "
1025 "flag %2.2x Tx status %8.8x.\n",
1026 dev->name, (int)skb->len, entry, ctrl_word,
1027 (int)inl(dev->base_addr + TxSTAT));
1032 /* The interrupt handler does all of the Rx thread work and cleans up
1033 after the Tx thread. */
/* Shared interrupt handler: acknowledges pending sources, dispatches Rx
 * work to epic_rx() (call elided in this gapped listing), reaps
 * completed Tx descriptors under ep->lock, and handles the rare error
 * events (counter overflow, Tx underrun, Rx overflow, PCI bus errors).
 * Bounded by max_interrupt_work iterations of the outer do/while, whose
 * braces are elided here along with several other lines. */
1034 static irqreturn_t epic_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
1036 struct net_device *dev = dev_instance;
1037 struct epic_private *ep = dev->priv;
1038 long ioaddr = dev->base_addr;
1039 int status, boguscnt = max_interrupt_work;
1040 unsigned int handled = 0;
1043 status = inl(ioaddr + INTSTAT);
1044 /* Acknowledge all of the current interrupt sources ASAP. */
1045 outl(status & 0x00007fff, ioaddr + INTSTAT);
1048 printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new "
1049 "intstat=%#8.8x.\n",
1050 dev->name, status, (int)inl(ioaddr + INTSTAT));
/* Nothing for us (shared line): leave without claiming the interrupt. */
1052 if ((status & IntrSummary) == 0)
1056 if (status & (RxDone | RxStarted | RxEarlyWarn | RxOverflow))
1059 if (status & (TxEmpty | TxDone)) {
1060 unsigned int dirty_tx, cur_tx;
1062 /* Note: if this lock becomes a problem we can narrow the locked
1063 region at the cost of occasionally grabbing the lock more
1065 spin_lock(&ep->lock);
1066 cur_tx = ep->cur_tx;
1067 dirty_tx = ep->dirty_tx;
/* Reap descriptors the hardware has released, oldest first. */
1068 for (; cur_tx - dirty_tx > 0; dirty_tx++) {
1069 struct sk_buff *skb;
1070 int entry = dirty_tx % TX_RING_SIZE;
1071 int txstatus = le32_to_cpu(ep->tx_ring[entry].txstatus);
1073 if (txstatus & DescOwn)
1074 break; /* It still hasn't been Txed */
/* Bit 0 set means transmit OK; otherwise classify the error bits. */
1076 if ( ! (txstatus & 0x0001)) {
1077 /* There was an major error, log it. */
1078 #ifndef final_version
1080 printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
1081 dev->name, txstatus);
1083 ep->stats.tx_errors++;
1084 if (txstatus & 0x1050) ep->stats.tx_aborted_errors++;
1085 if (txstatus & 0x0008) ep->stats.tx_carrier_errors++;
1086 if (txstatus & 0x0040) ep->stats.tx_window_errors++;
1087 if (txstatus & 0x0010) ep->stats.tx_fifo_errors++;
/* Success path: collision count lives in bits 8-11. */
1089 ep->stats.collisions += (txstatus >> 8) & 15;
1090 ep->stats.tx_packets++;
1091 ep->stats.tx_bytes += ep->tx_skbuff[entry]->len;
1094 /* Free the original skb. */
1095 skb = ep->tx_skbuff[entry];
1096 pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
1097 skb->len, PCI_DMA_TODEVICE);
/* Interrupt context: must use the IRQ-safe free. */
1098 dev_kfree_skb_irq(skb);
1099 ep->tx_skbuff[entry] = 0;
1102 #ifndef final_version
1103 if (cur_tx - dirty_tx > TX_RING_SIZE) {
1104 printk(KERN_WARNING "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
1105 dev->name, dirty_tx, cur_tx, ep->tx_full);
1106 dirty_tx += TX_RING_SIZE;
1109 ep->dirty_tx = dirty_tx;
/* Wake the queue only once enough slots have drained (condition's first
 * half — the tx_full test — is elided in this listing). */
1111 && cur_tx - dirty_tx < TX_QUEUE_LEN - 4) {
1112 /* The ring is no longer full, allow new TX entries. */
1114 spin_unlock(&ep->lock);
1115 netif_wake_queue(dev);
1117 spin_unlock(&ep->lock);
1120 /* Check uncommon events all at once. */
1121 if (status & (CntFull | TxUnderrun | RxOverflow | RxFull |
1122 PCIBusErr170 | PCIBusErr175)) {
1123 if (status == 0xffffffff) /* Chip failed or removed (CardBus). */
1125 /* Always update the error counts to avoid overhead later. */
1126 ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
1127 ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
1128 ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
1130 if (status & TxUnderrun) { /* Tx FIFO underflow. */
1131 ep->stats.tx_fifo_errors++;
/* Raise the Tx threshold each underrun so it stops recurring. */
1132 outl(ep->tx_threshold += 128, ioaddr + TxThresh);
1133 /* Restart the transmit process. */
1134 outl(RestartTx, ioaddr + COMMAND);
1136 if (status & RxOverflow) { /* Missed a Rx frame. */
1137 ep->stats.rx_errors++;
1139 if (status & (RxOverflow | RxFull))
1140 outw(RxQueued, ioaddr + COMMAND);
1141 if (status & PCIBusErr170) {
1142 printk(KERN_ERR "%s: PCI Bus Error! EPIC status %4.4x.\n",
1147 /* Clear all error sources. */
1148 outl(status & 0x7f18, ioaddr + INTSTAT);
/* Emergency exit: too many loop iterations means a stuck source. */
1150 if (--boguscnt < 0) {
1151 printk(KERN_ERR "%s: Too much work at interrupt, "
1152 "IntrStatus=0x%8.8x.\n",
1154 /* Clear all interrupt sources. */
1155 outl(0x0001ffff, ioaddr + INTSTAT);
1161 printk(KERN_DEBUG "%s: exiting interrupt, intr_status=%#4.4x.\n",
1164 return IRQ_RETVAL(handled);
/* Drain completed Rx descriptors, hand the packets to the network stack,
   and refill the ring with fresh buffers.  Small frames (< rx_copybreak)
   are copied into a right-sized skb so the large ring buffer can be
   reused; large frames hand the ring buffer itself upstream.
   NOTE(review): the function tail (return value, closing brace) is elided
   from this excerpt. */
1167 static int epic_rx(struct net_device *dev)
1169 struct epic_private *ep = dev->priv;
1170 int entry = ep->cur_rx % RX_RING_SIZE;
/* At most one full ring's worth of packets per call. */
1171 int rx_work_limit = ep->dirty_rx + RX_RING_SIZE - ep->cur_rx;
1175 printk(KERN_DEBUG " In epic_rx(), entry %d %8.8x.\n", entry,
1176 ep->rx_ring[entry].rxstatus);
1177 /* If we own the next entry, it's a new packet. Send it up. */
1178 while ((ep->rx_ring[entry].rxstatus & cpu_to_le32(DescOwn)) == 0) {
1179 int status = le32_to_cpu(ep->rx_ring[entry].rxstatus);
1182 printk(KERN_DEBUG " epic_rx() status was %8.8x.\n", status);
1183 if (--rx_work_limit < 0)
/* Error bits: 0x2000 = frame spanned multiple buffers (too long),
   0x0006 = hardware-counted frame errors. */
1185 if (status & 0x2006) {
1187 printk(KERN_DEBUG "%s: epic_rx() error status was %8.8x.\n",
1189 if (status & 0x2000) {
1190 printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
1191 "multiple buffers, status %4.4x!\n", dev->name, status);
1192 ep->stats.rx_length_errors++;
1193 } else if (status & 0x0006)
1194 /* Rx Frame errors are counted in hardware. */
1195 ep->stats.rx_errors++;
1197 /* Malloc up new buffer, compatible with net-2e. */
1198 /* Omit the four octet CRC from the length. */
1199 short pkt_len = (status >> 16) - 4;
1200 struct sk_buff *skb;
1202 if (pkt_len > PKT_BUF_SZ - 4) {
1203 printk(KERN_ERR "%s: Oversized Ethernet frame, status %x "
1205 dev->name, status, pkt_len);
1208 /* Check if the packet is long enough to accept without copying
1209 to a minimally-sized skbuff. */
1210 if (pkt_len < rx_copybreak
1211 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
/* Copybreak path: sync the DMA buffer for CPU access, copy the data
   into the small skb, then give the buffer back to the device. */
1213 skb_reserve(skb, 2); /* 16 byte align the IP header */
1214 pci_dma_sync_single_for_cpu(ep->pci_dev,
1215 ep->rx_ring[entry].bufaddr,
1217 PCI_DMA_FROMDEVICE);
1218 #if 1 /* HAS_IP_COPYSUM */
1219 eth_copy_and_sum(skb, ep->rx_skbuff[entry]->tail, pkt_len, 0);
1220 skb_put(skb, pkt_len);
1222 memcpy(skb_put(skb, pkt_len), ep->rx_skbuff[entry]->tail,
1225 pci_dma_sync_single_for_device(ep->pci_dev,
1226 ep->rx_ring[entry].bufaddr,
1228 PCI_DMA_FROMDEVICE);
/* Large-frame path: unmap and pass the ring buffer itself upstream;
   the refill loop below replaces it. */
1230 pci_unmap_single(ep->pci_dev,
1231 ep->rx_ring[entry].bufaddr,
1232 ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
1233 skb_put(skb = ep->rx_skbuff[entry], pkt_len);
1234 ep->rx_skbuff[entry] = NULL;
1236 skb->protocol = eth_type_trans(skb, dev);
1238 dev->last_rx = jiffies;
1239 ep->stats.rx_packets++;
1240 ep->stats.rx_bytes += pkt_len;
1243 entry = (++ep->cur_rx) % RX_RING_SIZE;
1246 /* Refill the Rx ring buffers. */
1247 for (; ep->cur_rx - ep->dirty_rx > 0; ep->dirty_rx++) {
1248 entry = ep->dirty_rx % RX_RING_SIZE;
1249 if (ep->rx_skbuff[entry] == NULL) {
1250 struct sk_buff *skb;
1251 skb = ep->rx_skbuff[entry] = dev_alloc_skb(ep->rx_buf_sz);
1254 skb->dev = dev; /* Mark as being used by this device. */
1255 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1256 ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev,
1257 skb->tail, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
/* Hand the descriptor back to the chip last, after bufaddr is set. */
1260 ep->rx_ring[entry].rxstatus = cpu_to_le32(DescOwn);
/* Shut the interface down: stop the queue and timer, release the IRQ,
   free every Rx/Tx skb and unmap its DMA buffer, then drop the chip into
   low-power mode.  NOTE(review): the final return and closing brace are
   elided from this excerpt. */
1265 static int epic_close(struct net_device *dev)
1267 long ioaddr = dev->base_addr;
1268 struct epic_private *ep = dev->priv;
1269 struct sk_buff *skb;
1272 netif_stop_queue(dev);
1275 printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
1276 dev->name, (int)inl(ioaddr + INTSTAT));
/* del_timer_sync() guarantees the media timer is not still running. */
1278 del_timer_sync(&ep->timer);
1280 free_irq(dev->irq, dev);
1282 /* Free all the skbuffs in the Rx queue. */
1283 for (i = 0; i < RX_RING_SIZE; i++) {
1284 skb = ep->rx_skbuff[i];
1285 ep->rx_skbuff[i] = 0;
1286 ep->rx_ring[i].rxstatus = 0; /* Not owned by Epic chip. */
1287 ep->rx_ring[i].buflength = 0;
1289 pci_unmap_single(ep->pci_dev, ep->rx_ring[i].bufaddr,
1290 ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
/* Poison the address so late DMA writes fault loudly instead of
   corrupting memory. */
1293 ep->rx_ring[i].bufaddr = 0xBADF00D0; /* An invalid address. */
1295 for (i = 0; i < TX_RING_SIZE; i++) {
1296 skb = ep->tx_skbuff[i];
1297 ep->tx_skbuff[i] = 0;
1300 pci_unmap_single(ep->pci_dev, ep->tx_ring[i].bufaddr,
1301 skb->len, PCI_DMA_TODEVICE);
1305 /* Green! Leave the chip in low-power mode. */
1306 outl(0x0008, ioaddr + GENCTL);
/* Return the accumulated statistics, first folding in the chip's
   hardware error counters (missed/alignment/CRC) when the interface is
   up.  NOTE(review): the return statement is elided from this excerpt;
   it presumably returns &ep->stats -- confirm against full source. */
1311 static struct net_device_stats *epic_get_stats(struct net_device *dev)
1313 struct epic_private *ep = dev->priv;
1314 long ioaddr = dev->base_addr;
/* Only touch the chip registers while it is running/powered. */
1316 if (netif_running(dev)) {
1317 /* Update the error counts. */
1318 ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
1319 ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
1320 ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
1326 /* Set or clear the multicast filter for this adaptor.
1327 Note that we only use exclusion around actually queueing the
1328 new frame, not around filling ep->setup_frame. This is non-deterministic
1329 when re-entered but still correct. */
/* RxCtrl values written below: 0x002C = promiscuous, 0x000C = all
   multicast, 0x0004 = normal (directed + broadcast). */
1331 static void set_rx_mode(struct net_device *dev)
1333 long ioaddr = dev->base_addr;
1334 struct epic_private *ep = dev->priv;
1335 unsigned char mc_filter[8]; /* Multicast hash filter */
1338 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1339 outl(0x002C, ioaddr + RxCtrl);
1340 /* Unconditionally log net taps. */
1341 printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
1342 memset(mc_filter, 0xff, sizeof(mc_filter));
1343 } else if ((dev->mc_count > 0) || (dev->flags & IFF_ALLMULTI)) {
1344 /* There is apparently a chip bug, so the multicast filter
1345 is never enabled. */
1346 /* Too many to filter perfectly -- accept all multicasts. */
1347 memset(mc_filter, 0xff, sizeof(mc_filter));
1348 outl(0x000C, ioaddr + RxCtrl);
1349 } else if (dev->mc_count == 0) {
1350 outl(0x0004, ioaddr + RxCtrl);
/* Perfect-hash branch, kept for reference but unreachable while the
   chip-bug workaround above accepts all multicasts. */
1352 } else { /* Never executed, for now. */
1353 struct dev_mc_list *mclist;
1355 memset(mc_filter, 0, sizeof(mc_filter));
1356 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1357 i++, mclist = mclist->next) {
1358 unsigned int bit_nr =
1359 ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f;
1360 mc_filter[bit_nr >> 3] |= (1 << bit_nr);
1363 /* ToDo: perhaps we need to stop the Tx and Rx process here? */
/* Only rewrite the MC0..MC3 hash registers when the filter changed. */
1364 if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) {
1365 for (i = 0; i < 4; i++)
1366 outw(((u16 *)mc_filter)[i], ioaddr + MC0 + i*4);
1367 memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter));
/* ethtool GDRVINFO: report driver name, version, and PCI bus location. */
1372 static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1374 struct epic_private *np = dev->priv;
1376 strcpy (info->driver, DRV_NAME);
1377 strcpy (info->version, DRV_VERSION);
1378 strcpy (info->bus_info, pci_name(np->pci_dev));
/* ethtool GSET: read link settings via the generic MII helper, under the
   device lock so it cannot race the interrupt/timer paths. */
1381 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1383 struct epic_private *np = dev->priv;
1386 spin_lock_irq(&np->lock);
1387 rc = mii_ethtool_gset(&np->mii, cmd);
1388 spin_unlock_irq(&np->lock);
/* ethtool SSET: write link settings via the generic MII helper, under the
   device lock (mirrors netdev_get_settings). */
1393 static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1395 struct epic_private *np = dev->priv;
1398 spin_lock_irq(&np->lock);
1399 rc = mii_ethtool_sset(&np->mii, cmd);
1400 spin_unlock_irq(&np->lock);
/* ethtool NWAY_RST: restart PHY autonegotiation via the MII library. */
1405 static int netdev_nway_reset(struct net_device *dev)
1407 struct epic_private *np = dev->priv;
1408 return mii_nway_restart(&np->mii);
/* ethtool GLINK: report current link state from the MII library. */
1411 static u32 netdev_get_link(struct net_device *dev)
1413 struct epic_private *np = dev->priv;
1414 return mii_link_ok(&np->mii);
/* ethtool G/SMSGLVL accessors.  Bodies are elided from this excerpt;
   presumably they read/write the module-level `debug` value -- confirm
   against full source. */
1417 static u32 netdev_get_msglevel(struct net_device *dev)
1422 static void netdev_set_msglevel(struct net_device *dev, u32 value)
/* ethtool operations table wired to the helpers above; sg/tx-csum use
   the generic read-only stubs. */
1427 static struct ethtool_ops netdev_ethtool_ops = {
1428 .get_drvinfo = netdev_get_drvinfo,
1429 .get_settings = netdev_get_settings,
1430 .set_settings = netdev_set_settings,
1431 .nway_reset = netdev_nway_reset,
1432 .get_link = netdev_get_link,
1433 .get_msglevel = netdev_get_msglevel,
1434 .set_msglevel = netdev_set_msglevel,
1435 .get_sg = ethtool_op_get_sg,
1436 .get_tx_csum = ethtool_op_get_tx_csum,
/* SIOC[GS]MII ioctl handler.  If the interface is down, the chip is
   temporarily powered up around the MII access and powered back down
   afterwards so PHY registers remain reachable. */
1439 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1441 struct epic_private *np = dev->priv;
1442 long ioaddr = dev->base_addr;
1443 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&rq->ifr_data;
1446 /* power-up, if interface is down */
1447 if (! netif_running(dev)) {
1448 outl(0x0200, ioaddr + GENCTL);
1449 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
1452 /* all non-ethtool ioctls (the SIOC[GS]MIIxxx ioctls) */
1453 spin_lock_irq(&np->lock);
1454 rc = generic_mii_ioctl(&np->mii, data, cmd, NULL);
1455 spin_unlock_irq(&np->lock);
1457 /* power-down, if interface is down */
1458 if (! netif_running(dev)) {
1459 outl(0x0008, ioaddr + GENCTL);
1460 outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL);
/* PCI hot-unplug / module-unload teardown: free the descriptor rings,
   unregister the net device, unmap the BAR, and release PCI resources.
   NOTE(review): the DMA rings are freed BEFORE unregister_netdev(); later
   kernels unregister first -- confirm the device is quiesced (closed) at
   this point so no Rx/Tx can still reference the rings. */
1466 static void __devexit epic_remove_one (struct pci_dev *pdev)
1468 struct net_device *dev = pci_get_drvdata(pdev);
1469 struct epic_private *ep = dev->priv;
1471 pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
1472 pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
1473 unregister_netdev(dev);
1475 iounmap((void*) dev->base_addr);
1477 pci_release_regions(pdev);
1479 pci_set_drvdata(pdev, NULL);
1480 /* pci_power_off(pdev, -1); */
/* PM suspend hook: if the interface is up, put the chip into low-power
   mode via GENCTL.  (Early-return body and final return are elided from
   this excerpt.) */
1486 static int epic_suspend (struct pci_dev *pdev, u32 state)
1488 struct net_device *dev = pci_get_drvdata(pdev);
1489 long ioaddr = dev->base_addr;
1491 if (!netif_running(dev))
1494 /* Put the chip into low-power mode. */
1495 outl(0x0008, ioaddr + GENCTL);
1496 /* pci_power_off(pdev, -1); */
/* PM resume hook: no-op when the interface is down; the restart path for
   a running interface is elided from this excerpt. */
1501 static int epic_resume (struct pci_dev *pdev)
1503 struct net_device *dev = pci_get_drvdata(pdev);
1505 if (!netif_running(dev))
1508 /* pci_power_on(pdev); */
1512 #endif /* CONFIG_PM */
/* PCI driver glue: matches epic_pci_tbl, with PM hooks compiled in only
   under CONFIG_PM. */
1515 static struct pci_driver epic_driver = {
1517 .id_table = epic_pci_tbl,
1518 .probe = epic_init_one,
1519 .remove = __devexit_p(epic_remove_one),
1521 .suspend = epic_suspend,
1522 .resume = epic_resume,
1523 #endif /* CONFIG_PM */
/* Module entry: print the version banner (each KERN_INFO starts a new
   log line -- the accepted multi-line printk idiom of this era) and
   register the PCI driver. */
1527 static int __init epic_init (void)
1529 /* when a module, this is printed whether or not devices are found in probe */
1531 printk (KERN_INFO "%s" KERN_INFO "%s" KERN_INFO "%s",
1532 version, version2, version3);
1535 return pci_module_init (&epic_driver);
/* Module exit: unregister the PCI driver; per-device teardown happens in
   epic_remove_one(). */
1539 static void __exit epic_cleanup (void)
1541 pci_unregister_driver (&epic_driver);
1545 module_init(epic_init);
1546 module_exit(epic_cleanup);