1 /* epic100.c: A SMC 83c170 EPIC/100 Fast Ethernet driver for Linux. */
3 Written/copyright 1997-2001 by Donald Becker.
5 This software may be used and distributed according to the terms of
6 the GNU General Public License (GPL), incorporated herein by reference.
7 Drivers based on or derived from this code fall under the GPL and must
8 retain the authorship, copyright and license notice. This file is not
9 a complete program and may only be used when the entire operating
10 system is licensed under the GPL.
12 This driver is for the SMC83c170/175 "EPIC" series, as used on the
13 SMC EtherPower II 9432 PCI adapter, and several CardBus cards.
15 The author may be reached as becker@scyld.com, or C/O
16 Scyld Computing Corporation
17 410 Severn Ave., Suite 210
20 Information and updates available at
21 http://www.scyld.com/network/epic100.html
23 ---------------------------------------------------------------------
25 Linux kernel-specific changes:
28 * Merge becker version 1.09 (4/08/2000)
31 * Major bugfix to 1.09 driver (Francis Romieu)
34 * Merge becker test version 1.09 (5/29/2000)
37 * Fix locking (jgarzik)
38 * Limit 83c175 probe to ethernet-class PCI devices (rgooch)
41 * Merge becker version 1.11
42 * Move pci_enable_device before any PCI BAR len checks
48 * ethtool driver info support (jgarzik)
51 * ethtool media get/set support (jgarzik)
54 * revert MII transceiver init change (jgarzik)
57 * implement ETHTOOL_[GS]SET, _NWAY_RST, _[GS]MSGLVL, _GLINK (jgarzik)
58 * replace some MII-related magic numbers with constants
61 * fix power-up sequence
64 * revert version 1.1.12, power-up sequence "fix"
66 LK1.1.14 (Krzysztof Halasa):
67 * fix spurious bad initializations
68 * pound phy a la SMSC's app note on the subject
71 * fix power up/down for ethtool that broke in 1.11
/* Driver identity strings used in log messages and ethtool driver info. */
75 #define DRV_NAME "epic100"
76 #define DRV_VERSION "1.11+LK1.1.14+AC1.1.14"
77 #define DRV_RELDATE "June 2, 2004"
79 /* The user-configurable values.
80 These may be modified when a driver module is loaded.*/
82 static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
83 /* Maximum events (Rx packets, etc.) to handle at each interrupt. */
84 static int max_interrupt_work = 32;
86 /* Used to pass the full-duplex flag, etc. */
87 #define MAX_UNITS 8 /* More are supported, limit only on options */
/* Per-card media/duplex overrides; -1 means "not set, use autodetect". */
88 static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
89 static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
91 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
92 Setting to > 1518 effectively disables this feature. */
93 static int rx_copybreak;
95 /* Operational parameters that are set at compile time. */
97 /* Keep the ring sizes a power of two for operational efficiency.
98 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
99 Making the Tx ring too large decreases the effectiveness of channel
100 bonding and packet priority.
101 There are no ill effects from too-large receive rings. */
102 #define TX_RING_SIZE 16
103 #define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
104 #define RX_RING_SIZE 32
/* Byte sizes of the descriptor rings, as handed to pci_alloc_consistent(). */
105 #define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct epic_tx_desc)
106 #define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct epic_rx_desc)
108 /* Operational parameters that usually are not changed. */
109 /* Time in jiffies before concluding the transmitter is hung. */
110 #define TX_TIMEOUT (2*HZ)
112 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
114 /* Bytes transferred to chip before transmission starts. */
115 /* Initial threshold, increased on underflow, rounded down to 4 byte units. */
116 #define TX_FIFO_THRESH 256
117 #define RX_FIFO_THRESH 1 /* 0-3, 0==32, 64,96, or 3==128 bytes */
119 #include <linux/config.h>
120 #include <linux/module.h>
121 #include <linux/kernel.h>
122 #include <linux/string.h>
123 #include <linux/timer.h>
124 #include <linux/errno.h>
125 #include <linux/ioport.h>
126 #include <linux/slab.h>
127 #include <linux/interrupt.h>
128 #include <linux/pci.h>
129 #include <linux/delay.h>
130 #include <linux/netdevice.h>
131 #include <linux/etherdevice.h>
132 #include <linux/skbuff.h>
133 #include <linux/init.h>
134 #include <linux/spinlock.h>
135 #include <linux/ethtool.h>
136 #include <linux/mii.h>
137 #include <linux/crc32.h>
138 #include <asm/bitops.h>
140 #include <asm/uaccess.h>
142 /* These identify the driver base version and may not be removed. */
143 static char version[] __devinitdata =
144 DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>\n";
145 static char version2[] __devinitdata =
146 " http://www.scyld.com/network/epic100.html\n";
147 static char version3[] __devinitdata =
148 " (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";
150 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
151 MODULE_DESCRIPTION("SMC 83c170 EPIC series Ethernet driver");
152 MODULE_LICENSE("GPL");
/* Legacy (pre-module_param) parameter declarations: "i" = one int,
   "1-Ni" = an array of 1..MAX_UNITS ints, one slot per card. */
154 MODULE_PARM(debug, "i");
155 MODULE_PARM(max_interrupt_work, "i");
156 MODULE_PARM(rx_copybreak, "i");
157 MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
158 MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
159 MODULE_PARM_DESC(debug, "EPIC/100 debug level (0-5)");
160 MODULE_PARM_DESC(max_interrupt_work, "EPIC/100 maximum events handled per interrupt");
161 MODULE_PARM_DESC(options, "EPIC/100: Bits 0-3: media type, bit 4: full duplex");
162 MODULE_PARM_DESC(rx_copybreak, "EPIC/100 copy breakpoint for copy-only-tiny-frames");
163 MODULE_PARM_DESC(full_duplex, "EPIC/100 full duplex setting(s) (1)");
168 I. Board Compatibility
170 This device driver is designed for the SMC "EPIC/100", the SMC
171 single-chip Ethernet controllers for PCI. This chip is used on
172 the SMC EtherPower II boards.
174 II. Board-specific settings
176 PCI bus devices are configured by the system at boot time, so no jumpers
177 need to be set on the board. The system BIOS will assign the
178 PCI INTA signal to a (preferably otherwise unused) system IRQ line.
179 Note: Kernel versions earlier than 1.3.73 do not support shared PCI
182 III. Driver operation
188 http://www.smsc.com/main/datasheets/83c171.pdf
189 http://www.smsc.com/main/datasheets/83c175.pdf
190 http://scyld.com/expert/NWay.html
191 http://www.national.com/pf/DP/DP83840A.html
/* Flags describing how each supported chip maps its PCI resources. */
198 enum pci_id_flags_bits {
199 /* Set PCI command register bits before calling probe1(). */
200 PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
201 /* Read and map the single following PCI BAR. */
202 PCI_ADDR0=0<<4, PCI_ADDR1=1<<4, PCI_ADDR2=2<<4, PCI_ADDR3=3<<4,
203 PCI_ADDR_64BITS=0x100, PCI_NO_ACPI_WAKE=0x200, PCI_NO_MIN_LATENCY=0x400,
/* Per-chip capability flags kept in epic_private.chip_flags. */
206 enum chip_capability_flags { MII_PWRDWN=1, TYPE2_INTR=2, NO_MII=4 };
/* Length of the chip's register window (for region checks / ioremap). */
208 #define EPIC_TOTAL_SIZE 0x100
/* EPIC_IOTYPE selects port I/O (BAR0) or memory-mapped I/O (BAR1);
   the surrounding #if/#else was elided from this extract. */
211 #define EPIC_IOTYPE PCI_USES_MASTER|PCI_USES_IO|PCI_ADDR0
213 #define EPIC_IOTYPE PCI_USES_MASTER|PCI_USES_MEM|PCI_ADDR1
/* Static description of each supported chip variant. */
223 struct epic_chip_info {
225 enum pci_id_flags_bits pci_flags;
226 int io_size; /* Needed for I/O region check or ioremap(). */
227 int drv_flags; /* Driver use, intended as capability flags. */
231 /* indexed by chip_t */
232 static struct epic_chip_info pci_id_tbl[] = {
233 { "SMSC EPIC/100 83c170",
234 EPIC_IOTYPE, EPIC_TOTAL_SIZE, TYPE2_INTR | NO_MII | MII_PWRDWN },
235 { "SMSC EPIC/100 83c170",
236 EPIC_IOTYPE, EPIC_TOTAL_SIZE, TYPE2_INTR },
237 { "SMSC EPIC/C 83c175",
238 EPIC_IOTYPE, EPIC_TOTAL_SIZE, TYPE2_INTR | MII_PWRDWN },
/* PCI match table: vendor 0x10B8 is SMC/SMSC. driver_data indexes
   pci_id_tbl above. The 83c175 entry also matches on PCI class so only
   ethernet-class functions are claimed (see changelog LK1.1.2). */
242 static struct pci_device_id epic_pci_tbl[] = {
243 { 0x10B8, 0x0005, 0x1092, 0x0AB4, 0, 0, SMSC_83C170_0 },
244 { 0x10B8, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMSC_83C170 },
245 { 0x10B8, 0x0006, PCI_ANY_ID, PCI_ANY_ID,
246 PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, SMSC_83C175 },
249 MODULE_DEVICE_TABLE (pci, epic_pci_tbl);
267 /* Offsets to registers, using the (ugh) SMC names. */
268 enum epic_registers {
269 COMMAND=0, INTSTAT=4, INTMASK=8, GENCTL=0x0C, NVCTL=0x10, EECTL=0x14,
271 TEST1=0x1C, CRCCNT=0x20, ALICNT=0x24, MPCNT=0x28, /* Rx error counters. */
272 MIICtrl=0x30, MIIData=0x34, MIICfg=0x38,
273 LAN0=64, /* MAC address. */
274 MC0=80, /* Multicast filter table. */
275 RxCtrl=96, TxCtrl=112, TxSTAT=0x74,
276 PRxCDAR=0x84, RxSTAT=0xA4, EarlyRx=0xB0, PTxCDAR=0xC4, TxThresh=0xDC,
279 /* Interrupt register bits, using my own meaningful names. */
281 TxIdle=0x40000, RxIdle=0x20000, IntrSummary=0x010000,
282 PCIBusErr170=0x7000, PCIBusErr175=0x1000, PhyEvent175=0x8000,
283 RxStarted=0x0800, RxEarlyWarn=0x0400, CntFull=0x0200, TxUnderrun=0x0100,
284 TxEmpty=0x0080, TxDone=0x0020, RxError=0x0010,
285 RxOverflow=0x0008, RxFull=0x0004, RxHeader=0x0002, RxDone=0x0001,
/* COMMAND register bits. */
288 StopRx=1, StartRx=2, TxQueued=4, RxQueued=8,
289 StopTxDMA=0x20, StopRxDMA=0x40, RestartTx=0x80,
/* Map dev->if_port media index (low 4 bits) to a forced MII BMCR value;
   0 means "leave the transceiver to autonegotiation". */
292 static u16 media2miictl[16] = {
293 0, 0x0C00, 0x0C00, 0x2000, 0x0100, 0x2100, 0, 0,
294 0, 0, 0, 0, 0, 0, 0, 0 };
296 /* The EPIC100 Rx and Tx buffer descriptors. */
/* Hardware descriptor layouts; fields elided from this extract. The chip
   reads/writes these in little-endian byte order (see cpu_to_le32 use). */
298 struct epic_tx_desc {
305 struct epic_rx_desc {
/* Descriptor status bits (DescOwn etc.); values elided from this extract. */
312 enum desc_status_bits {
316 #define PRIV_ALIGN 15 /* Required alignment mask */
/* Per-device driver state, hung off dev->priv. */
317 struct epic_private {
318 struct epic_rx_desc *rx_ring;
319 struct epic_tx_desc *tx_ring;
320 /* The saved address of a sent-in-place packet/buffer, for skfree(). */
321 struct sk_buff* tx_skbuff[TX_RING_SIZE];
322 /* The addresses of receive-in-place skbuffs. */
323 struct sk_buff* rx_skbuff[RX_RING_SIZE];
/* DMA (bus) addresses of the two descriptor rings. */
325 dma_addr_t tx_ring_dma;
326 dma_addr_t rx_ring_dma;
329 spinlock_t lock; /* Group with Tx control cache line. */
330 unsigned int cur_tx, dirty_tx;
332 unsigned int cur_rx, dirty_rx;
333 unsigned int rx_buf_sz; /* Based on MTU+slack. */
335 struct pci_dev *pci_dev; /* PCI bus location. */
336 int chip_id, chip_flags;
338 struct net_device_stats stats;
339 struct timer_list timer; /* Media selection timer. */
341 unsigned char mc_filter[8];
342 signed char phys[4]; /* MII device addresses. */
343 u16 advertising; /* NWay media advertisement */
345 struct mii_if_info mii;
346 unsigned int tx_full:1; /* The Tx queue is full. */
347 unsigned int default_port:4; /* Last dev->if_port value. */
/* Forward declarations for the net_device method implementations below. */
350 static int epic_open(struct net_device *dev);
351 static int read_eeprom(long ioaddr, int location);
352 static int mdio_read(struct net_device *dev, int phy_id, int location);
353 static void mdio_write(struct net_device *dev, int phy_id, int loc, int val);
354 static void epic_restart(struct net_device *dev);
355 static void epic_timer(unsigned long data);
356 static void epic_tx_timeout(struct net_device *dev);
357 static void epic_init_ring(struct net_device *dev);
358 static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev);
359 static int epic_rx(struct net_device *dev);
360 static irqreturn_t epic_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
361 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
362 static struct ethtool_ops netdev_ethtool_ops;
363 static int epic_close(struct net_device *dev);
364 static struct net_device_stats *epic_get_stats(struct net_device *dev);
365 static void set_rx_mode(struct net_device *dev);
/* PCI probe: enable the device, map its registers, allocate the net_device
   and DMA descriptor rings, wake the chip, read the MAC address, scan for
   MII transceivers, then register the interface.  Error paths unwind via
   the goto labels near the bottom.  NOTE(review): several lines of the
   original function were elided from this extract. */
369 static int __devinit epic_init_one (struct pci_dev *pdev,
370 const struct pci_device_id *ent)
372 static int card_idx = -1;
374 int chip_idx = (int) ent->driver_data;
376 struct net_device *dev;
377 struct epic_private *ep;
378 int i, option = 0, duplex = 0;
382 /* when built into the kernel, we only print version if device is found */
384 static int printed_version;
385 if (!printed_version++)
386 printk (KERN_INFO "%s" KERN_INFO "%s" KERN_INFO "%s",
387 version, version2, version3);
392 i = pci_enable_device(pdev);
/* Sanity-check the BAR length before trusting the register window. */
397 if (pci_resource_len(pdev, 0) < pci_id_tbl[chip_idx].io_size) {
398 printk (KERN_ERR "card %d: no PCI region space\n", card_idx);
/* Enable bus mastering: the chip DMAs descriptors and packet buffers. */
402 pci_set_master(pdev);
404 dev = alloc_etherdev(sizeof (*ep));
406 printk (KERN_ERR "card %d: no memory for eth device\n", card_idx);
409 SET_MODULE_OWNER(dev);
410 SET_NETDEV_DEV(dev, &pdev->dev);
412 if (pci_request_regions(pdev, DRV_NAME))
413 goto err_out_free_netdev;
/* Port I/O build uses BAR0 directly; MMIO build ioremaps BAR1. */
416 ioaddr = pci_resource_start (pdev, 0);
418 ioaddr = pci_resource_start (pdev, 1);
419 ioaddr = (long) ioremap (ioaddr, pci_resource_len (pdev, 1));
421 printk (KERN_ERR DRV_NAME " %d: ioremap failed\n", card_idx);
422 goto err_out_free_res;
426 pci_set_drvdata(pdev, dev);
/* Hook the generic MII helper library to our MDIO accessors. */
429 ep->mii.mdio_read = mdio_read;
430 ep->mii.mdio_write = mdio_write;
431 ep->mii.phy_id_mask = 0x1f;
432 ep->mii.reg_num_mask = 0x1f;
/* Coherent DMA memory for the Tx then Rx descriptor rings. */
434 ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
436 goto err_out_iounmap;
437 ep->tx_ring = (struct epic_tx_desc *)ring_space;
438 ep->tx_ring_dma = ring_dma;
440 ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
442 goto err_out_unmap_tx;
443 ep->rx_ring = (struct epic_rx_desc *)ring_space;
444 ep->rx_ring_dma = ring_dma;
/* Media/duplex overrides: boot-time mem_start value wins, else the
   per-card module parameters. */
446 if (dev->mem_start) {
447 option = dev->mem_start;
448 duplex = (dev->mem_start & 16) ? 1 : 0;
449 } else if (card_idx >= 0 && card_idx < MAX_UNITS) {
450 if (options[card_idx] >= 0)
451 option = options[card_idx];
452 if (full_duplex[card_idx] >= 0)
453 duplex = full_duplex[card_idx];
456 dev->base_addr = ioaddr;
459 spin_lock_init (&ep->lock);
461 /* Bring the chip out of low-power mode. */
462 outl(0x4200, ioaddr + GENCTL);
463 /* Magic?! If we don't set this bit the MII interface won't work. */
464 /* This magic is documented in SMSC app note 7.15 */
465 for (i = 16; i > 0; i--)
466 outl(0x0008, ioaddr + TEST1);
468 /* Turn on the MII transceiver. */
469 outl(0x12, ioaddr + MIICfg);
471 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
472 outl(0x0200, ioaddr + GENCTL);
474 /* Note: the '175 does not have a serial EEPROM. */
/* The MAC address is read 16 bits at a time from the LAN0 registers. */
475 for (i = 0; i < 3; i++)
476 ((u16 *)dev->dev_addr)[i] = le16_to_cpu(inw(ioaddr + LAN0 + i*4));
/* Debug-only: dump the raw EEPROM contents. */
479 printk(KERN_DEBUG DRV_NAME "(%s): EEPROM contents\n",
481 for (i = 0; i < 64; i++)
482 printk(" %4.4x%s", read_eeprom(ioaddr, i),
483 i % 16 == 15 ? "\n" : "");
487 ep->chip_id = chip_idx;
488 ep->chip_flags = pci_id_tbl[chip_idx].drv_flags;
490 /* Find the connected MII xcvrs.
491 Doing this in open() would allow detecting external xcvrs later, but
492 takes much time and no cards have external MII. */
494 int phy, phy_idx = 0;
495 for (phy = 1; phy < 32 && phy_idx < sizeof(ep->phys); phy++) {
496 int mii_status = mdio_read(dev, phy, MII_BMSR);
/* All-ones or all-zeros BMSR means no PHY at this address. */
497 if (mii_status != 0xffff && mii_status != 0x0000) {
498 ep->phys[phy_idx++] = phy;
499 printk(KERN_INFO DRV_NAME "(%s): MII transceiver #%d control "
500 "%4.4x status %4.4x.\n",
501 pci_name(pdev), phy, mdio_read(dev, phy, 0), mii_status);
504 ep->mii_phy_cnt = phy_idx;
507 ep->mii.advertising = mdio_read(dev, phy, MII_ADVERTISE);
508 printk(KERN_INFO DRV_NAME "(%s): Autonegotiation advertising %4.4x link "
510 pci_name(pdev), ep->mii.advertising, mdio_read(dev, phy, 5));
511 } else if ( ! (ep->chip_flags & NO_MII)) {
512 printk(KERN_WARNING DRV_NAME "(%s): ***WARNING***: No MII transceiver found!\n",
514 /* Use the known PHY address of the EPII. */
517 ep->mii.phy_id = ep->phys[0];
520 /* Turn off the MII xcvr (175 only!), leave the chip in low-power mode. */
521 if (ep->chip_flags & MII_PWRDWN)
522 outl(inl(ioaddr + NVCTL) & ~0x483C, ioaddr + NVCTL);
523 outl(0x0008, ioaddr + GENCTL);
525 /* The lower four bits are the media type. */
527 ep->mii.force_media = ep->mii.full_duplex = 1;
528 printk(KERN_INFO DRV_NAME "(%s): Forced full duplex operation requested.\n",
531 dev->if_port = ep->default_port = option;
533 /* The Epic-specific entries in the device structure. */
534 dev->open = &epic_open;
535 dev->hard_start_xmit = &epic_start_xmit;
536 dev->stop = &epic_close;
537 dev->get_stats = &epic_get_stats;
538 dev->set_multicast_list = &set_rx_mode;
539 dev->do_ioctl = &netdev_ioctl;
540 dev->ethtool_ops = &netdev_ethtool_ops;
541 dev->watchdog_timeo = TX_TIMEOUT;
542 dev->tx_timeout = &epic_tx_timeout;
544 i = register_netdev(dev);
546 goto err_out_unmap_tx;
548 printk(KERN_INFO "%s: %s at %#lx, IRQ %d, ",
549 dev->name, pci_id_tbl[chip_idx].name, ioaddr, dev->irq);
/* Print the MAC as aa:bb:cc:dd:ee:ff (last octet outside the loop). */
550 for (i = 0; i < 5; i++)
551 printk("%2.2x:", dev->dev_addr[i]);
552 printk("%2.2x.\n", dev->dev_addr[i]);
/* Error unwind labels (partially elided in this extract). */
557 pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
563 pci_release_regions(pdev);
569 /* Serial EEPROM section. */
571 /* EEPROM_Ctrl bits. */
572 #define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
573 #define EE_CS 0x02 /* EEPROM chip select. */
574 #define EE_DATA_WRITE 0x08 /* EEPROM chip data in. */
575 #define EE_WRITE_0 0x01
576 #define EE_WRITE_1 0x09
577 #define EE_DATA_READ 0x10 /* EEPROM chip data out. */
578 #define EE_ENB (0x0001 | EE_CS)
580 /* Delay between EEPROM clock transitions.
581 This serves to flush the operation to the PCI bus.
/* A dummy read of EECTL acts as the delay / PCI write flush. */
584 #define eeprom_delay() inl(ee_addr)
586 /* The EEPROM commands include the always-set leading bit. */
587 #define EE_WRITE_CMD (5 << 6)
588 #define EE_READ64_CMD (6 << 6)
589 #define EE_READ256_CMD (6 << 8)
590 #define EE_ERASE_CMD (7 << 6)
/* Bit-bang a 16-bit word out of the serial EEPROM at @location via the
   EECTL register.  Only used at probe time (__devinit). */
592 static int __devinit read_eeprom(long ioaddr, int location)
596 long ee_addr = ioaddr + EECTL;
/* Bit 0x40 of EECTL indicates a small (64-word) EEPROM, which uses a
   shorter address field in the read command. */
597 int read_cmd = location |
598 (inl(ee_addr) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD);
/* Pulse chip select to start a new transaction. */
600 outl(EE_ENB & ~EE_CS, ee_addr);
601 outl(EE_ENB, ee_addr);
603 /* Shift the read command bits out. */
604 for (i = 12; i >= 0; i--) {
605 short dataval = (read_cmd & (1 << i)) ? EE_WRITE_1 : EE_WRITE_0;
606 outl(EE_ENB | dataval, ee_addr);
608 outl(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
611 outl(EE_ENB, ee_addr);
/* Clock the 16 data bits back in, MSB first. */
613 for (i = 16; i > 0; i--) {
614 outl(EE_ENB | EE_SHIFT_CLK, ee_addr);
616 retval = (retval << 1) | ((inl(ee_addr) & EE_DATA_READ) ? 1 : 0);
617 outl(EE_ENB, ee_addr);
621 /* Terminate the EEPROM access. */
622 outl(EE_ENB & ~EE_CS, ee_addr);
/* MIICtrl opcode bits (MII_READOP definition elided from this extract). */
627 #define MII_WRITEOP 2
/* Read an MII management register.  Issues the command via MIICtrl and
   polls for completion; typical hardware finishes in ~25 iterations. */
628 static int mdio_read(struct net_device *dev, int phy_id, int location)
630 long ioaddr = dev->base_addr;
631 int read_cmd = (phy_id << 9) | (location << 4) | MII_READOP;
634 outl(read_cmd, ioaddr + MIICtrl);
635 /* Typical operation takes 25 loops. */
636 for (i = 400; i > 0; i--) {
/* The chip clears the opcode bit when the transaction completes. */
638 if ((inl(ioaddr + MIICtrl) & MII_READOP) == 0) {
639 /* Work around read failure bug. */
/* Retry once if a low register of PHY 1 reads back all-ones. */
640 if (phy_id == 1 && location < 6
641 && inw(ioaddr + MIIData) == 0xffff) {
642 outl(read_cmd, ioaddr + MIICtrl);
645 return inw(ioaddr + MIIData);
/* Write @value to an MII management register, then poll MIICtrl until the
   chip clears the write opcode bit (completion). */
651 static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
653 long ioaddr = dev->base_addr;
656 outw(value, ioaddr + MIIData);
657 outl((phy_id << 9) | (loc << 4) | MII_WRITEOP, ioaddr + MIICtrl);
658 for (i = 10000; i > 0; i--) {
660 if ((inl(ioaddr + MIICtrl) & MII_WRITEOP) == 0)
/* dev->open handler: reset and power up the chip, program the MAC address
   and Rx/Tx ring base addresses, negotiate duplex, start the receiver,
   enable interrupts, and arm the media-watch timer.
   NOTE(review): some lines were elided from this extract. */
667 static int epic_open(struct net_device *dev)
669 struct epic_private *ep = dev->priv;
670 long ioaddr = dev->base_addr;
674 /* Soft reset the chip. */
675 outl(0x4001, ioaddr + GENCTL);
/* IRQ is shared-capable; fail open() if we cannot get it. */
677 if ((retval = request_irq(dev->irq, &epic_interrupt, SA_SHIRQ, dev->name, dev)))
682 outl(0x4000, ioaddr + GENCTL);
683 /* This magic is documented in SMSC app note 7.15 */
684 for (i = 16; i > 0; i--)
685 outl(0x0008, ioaddr + TEST1);
687 /* Pull the chip out of low-power mode, enable interrupts, and set for
688 PCI read multiple. The MIIcfg setting and strange write order are
689 required by the details of which bits are reset and the transceiver
690 wiring on the Ositech CardBus card.
/* if_port 1 (10base2/serial media) needs an extra MIICfg bit. */
693 outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg);
695 if (ep->chip_flags & MII_PWRDWN)
696 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
/* GENCTL value differs by host endianness (byte-swap bit). */
698 #if defined(__powerpc__) || defined(__sparc__) /* Big endian */
699 outl(0x4432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
700 inl(ioaddr + GENCTL);
701 outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
703 outl(0x4412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
704 inl(ioaddr + GENCTL);
705 outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
708 udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */
/* Program the station address back into the LAN0 registers. */
710 for (i = 0; i < 3; i++)
711 outl(cpu_to_le16(((u16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);
713 ep->tx_threshold = TX_FIFO_THRESH;
714 outl(ep->tx_threshold, ioaddr + TxThresh);
/* Forced-media setup if the user selected a media type; otherwise read
   the link partner ability and pick duplex, or restart autoneg. */
716 if (media2miictl[dev->if_port & 15]) {
718 mdio_write(dev, ep->phys[0], MII_BMCR, media2miictl[dev->if_port&15]);
719 if (dev->if_port == 1) {
721 printk(KERN_INFO "%s: Using the 10base2 transceiver, MII "
723 dev->name, mdio_read(dev, ep->phys[0], MII_BMSR));
726 int mii_lpa = mdio_read(dev, ep->phys[0], MII_LPA);
727 if (mii_lpa != 0xffff) {
728 if ((mii_lpa & LPA_100FULL) || (mii_lpa & 0x01C0) == LPA_10FULL)
729 ep->mii.full_duplex = 1;
730 else if (! (mii_lpa & LPA_LPACK))
731 mdio_write(dev, ep->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
733 printk(KERN_INFO "%s: Setting %s-duplex based on MII xcvr %d"
734 " register read of %4.4x.\n", dev->name,
735 ep->mii.full_duplex ? "full" : "half",
736 ep->phys[0], mii_lpa);
/* TxCtrl 0x7F = full duplex, 0x79 = half duplex. */
740 outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
741 outl(ep->rx_ring_dma, ioaddr + PRxCDAR);
742 outl(ep->tx_ring_dma, ioaddr + PTxCDAR);
744 /* Start the chip's Rx process. */
746 outl(StartRx | RxQueued, ioaddr + COMMAND);
748 netif_start_queue(dev);
750 /* Enable interrupts by setting the interrupt mask. */
751 outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
752 | CntFull | TxUnderrun | TxDone | TxEmpty
753 | RxError | RxOverflow | RxFull | RxHeader | RxDone,
757 printk(KERN_DEBUG "%s: epic_open() ioaddr %lx IRQ %d status %4.4x "
759 dev->name, ioaddr, dev->irq, (int)inl(ioaddr + GENCTL),
760 ep->mii.full_duplex ? "full" : "half");
762 /* Set the timer to switch to check for link beat and perhaps switch
763 to an alternate media type. */
764 init_timer(&ep->timer);
765 ep->timer.expires = jiffies + 3*HZ;
766 ep->timer.data = (unsigned long)dev;
767 ep->timer.function = &epic_timer; /* timer handler */
768 add_timer(&ep->timer);
773 /* Reset the chip to recover from a PCI transaction error.
774 This may occur at interrupt time. */
/* Quiesce the device: stop the queue, mask interrupts, halt DMA, and
   fold the hardware error counters into the software stats. */
775 static void epic_pause(struct net_device *dev)
777 long ioaddr = dev->base_addr;
778 struct epic_private *ep = dev->priv;
780 netif_stop_queue (dev);
782 /* Disable interrupts by clearing the interrupt mask. */
783 outl(0x00000000, ioaddr + INTMASK);
784 /* Stop the chip's Tx and Rx DMA processes. */
785 outw(StopRx | StopTxDMA | StopRxDMA, ioaddr + COMMAND);
787 /* Update the error counts. */
/* 0xffff from COMMAND means the chip has vanished (e.g. CardBus eject). */
788 if (inw(ioaddr + COMMAND) != 0xffff) {
789 ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
790 ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
791 ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
794 /* Remove the packets on the Rx queue. */
/* Soft-reset and fully re-initialize the chip, resuming the rings from
   the current cur_rx/dirty_tx positions rather than from entry 0.
   Mirrors the init sequence in epic_open(). */
798 static void epic_restart(struct net_device *dev)
800 long ioaddr = dev->base_addr;
801 struct epic_private *ep = dev->priv;
804 /* Soft reset the chip. */
805 outl(0x4001, ioaddr + GENCTL);
807 printk(KERN_DEBUG "%s: Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",
808 dev->name, ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
811 /* This magic is documented in SMSC app note 7.15 */
812 for (i = 16; i > 0; i--)
813 outl(0x0008, ioaddr + TEST1);
815 #if defined(__powerpc__) || defined(__sparc__) /* Big endian */
816 outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
818 outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
820 outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg);
821 if (ep->chip_flags & MII_PWRDWN)
822 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
/* Re-program the station address. */
824 for (i = 0; i < 3; i++)
825 outl(cpu_to_le16(((u16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);
827 ep->tx_threshold = TX_FIFO_THRESH;
828 outl(ep->tx_threshold, ioaddr + TxThresh);
829 outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
/* Point the chip at the next descriptor each ring should process. */
830 outl(ep->rx_ring_dma + (ep->cur_rx%RX_RING_SIZE)*
831 sizeof(struct epic_rx_desc), ioaddr + PRxCDAR);
832 outl(ep->tx_ring_dma + (ep->dirty_tx%TX_RING_SIZE)*
833 sizeof(struct epic_tx_desc), ioaddr + PTxCDAR);
835 /* Start the chip's Rx process. */
837 outl(StartRx | RxQueued, ioaddr + COMMAND);
839 /* Enable interrupts by setting the interrupt mask. */
840 outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
841 | CntFull | TxUnderrun | TxDone | TxEmpty
842 | RxError | RxOverflow | RxFull | RxHeader | RxDone,
844 printk(KERN_DEBUG "%s: epic_restart() done, cmd status %4.4x, ctl %4.4x"
845 " interrupt %4.4x.\n",
846 dev->name, (int)inl(ioaddr + COMMAND), (int)inl(ioaddr + GENCTL),
847 (int)inl(ioaddr + INTSTAT));
/* Re-read the MII link partner ability and switch the chip between full
   and half duplex if the negotiated result changed.  No-op when media is
   forced or the PHY read is bogus (0xffff). */
851 static void check_media(struct net_device *dev)
853 struct epic_private *ep = dev->priv;
854 long ioaddr = dev->base_addr;
855 int mii_lpa = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], MII_LPA) : 0;
856 int negotiated = mii_lpa & ep->mii.advertising;
/* Full duplex if 100baseTx-FD negotiated, or the 10Mb bits resolve to
   10baseT-FD only. */
857 int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
859 if (ep->mii.force_media)
861 if (mii_lpa == 0xffff) /* Bogus read */
863 if (ep->mii.full_duplex != duplex) {
864 ep->mii.full_duplex = duplex;
865 printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
866 " partner capability of %4.4x.\n", dev->name,
867 ep->mii.full_duplex ? "full" : "half", ep->phys[0], mii_lpa);
868 outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
/* Periodic media-watch timer (armed in epic_open); dumps status registers
   at high debug levels and re-arms itself every 5 seconds. */
872 static void epic_timer(unsigned long data)
874 struct net_device *dev = (struct net_device *)data;
875 struct epic_private *ep = dev->priv;
876 long ioaddr = dev->base_addr;
877 int next_tick = 5*HZ;
880 printk(KERN_DEBUG "%s: Media monitor tick, Tx status %8.8x.\n",
881 dev->name, (int)inl(ioaddr + TxSTAT));
882 printk(KERN_DEBUG "%s: Other registers are IntMask %4.4x "
883 "IntStatus %4.4x RxStatus %4.4x.\n",
884 dev->name, (int)inl(ioaddr + INTMASK),
885 (int)inl(ioaddr + INTSTAT), (int)inl(ioaddr + RxSTAT));
/* Re-arm for the next tick. */
890 ep->timer.expires = jiffies + next_tick;
891 add_timer(&ep->timer);
/* Watchdog (dev->tx_timeout) handler: log diagnostics, restart the
   transmitter after a FIFO underflow, and re-wake the queue. */
894 static void epic_tx_timeout(struct net_device *dev)
896 struct epic_private *ep = dev->priv;
897 long ioaddr = dev->base_addr;
900 printk(KERN_WARNING "%s: Transmit timeout using MII device, "
901 "Tx status %4.4x.\n",
902 dev->name, (int)inw(ioaddr + TxSTAT));
904 printk(KERN_DEBUG "%s: Tx indices: dirty_tx %d, cur_tx %d.\n",
905 dev->name, ep->dirty_tx, ep->cur_tx);
908 if (inw(ioaddr + TxSTAT) & 0x10) { /* Tx FIFO underflow. */
909 ep->stats.tx_fifo_errors++;
910 outl(RestartTx, ioaddr + COMMAND);
/* Kick the Tx DMA engine to re-fetch pending descriptors. */
913 outl(TxQueued, dev->base_addr + COMMAND);
/* Reset the watchdog clock so we don't immediately time out again. */
916 dev->trans_start = jiffies;
917 ep->stats.tx_errors++;
919 netif_wake_queue(dev);
922 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
923 static void epic_init_ring(struct net_device *dev)
925 struct epic_private *ep = dev->priv;
929 ep->lock = (spinlock_t) SPIN_LOCK_UNLOCKED;
930 ep->dirty_tx = ep->cur_tx = 0;
931 ep->cur_rx = ep->dirty_rx = 0;
932 ep->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
934 /* Initialize all Rx descriptors. */
935 for (i = 0; i < RX_RING_SIZE; i++) {
936 ep->rx_ring[i].rxstatus = 0;
937 ep->rx_ring[i].buflength = cpu_to_le32(ep->rx_buf_sz);
/* Chain each descriptor to the next by bus address. */
938 ep->rx_ring[i].next = ep->rx_ring_dma +
939 (i+1)*sizeof(struct epic_rx_desc);
940 ep->rx_skbuff[i] = 0;
942 /* Mark the last entry as wrapping the ring. */
943 ep->rx_ring[i-1].next = ep->rx_ring_dma;
945 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
946 for (i = 0; i < RX_RING_SIZE; i++) {
947 struct sk_buff *skb = dev_alloc_skb(ep->rx_buf_sz);
948 ep->rx_skbuff[i] = skb;
951 skb->dev = dev; /* Mark as being used by this device. */
952 skb_reserve(skb, 2); /* 16 byte align the IP header. */
953 ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev,
954 skb->tail, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
/* Hand the descriptor to the chip. */
955 ep->rx_ring[i].rxstatus = cpu_to_le32(DescOwn);
/* If any allocation failed, dirty_rx goes negative (mod 2^32) so the
   refill logic knows how many buffers are missing. */
957 ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
959 /* The Tx buffer descriptor is filled in as needed, but we
960 do need to clear the ownership bit. */
961 for (i = 0; i < TX_RING_SIZE; i++) {
962 ep->tx_skbuff[i] = 0;
963 ep->tx_ring[i].txstatus = 0x0000;
964 ep->tx_ring[i].next = ep->tx_ring_dma +
965 (i+1)*sizeof(struct epic_tx_desc);
/* Wrap the Tx ring as well. */
967 ep->tx_ring[i-1].next = ep->tx_ring_dma;
/* dev->hard_start_xmit handler: place @skb into the next free Tx
   descriptor, choose whether this entry should raise a Tx-done interrupt,
   hand ownership to the chip, and kick the Tx DMA engine. */
971 static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
973 struct epic_private *ep = dev->priv;
974 int entry, free_count;
/* Pad runt frames up to the 60-byte Ethernet minimum. */
978 if (skb->len < ETH_ZLEN) {
979 skb = skb_padto(skb, ETH_ZLEN);
984 /* Caution: the write order is important here, set the field with the
985 "ownership" bit last. */
987 /* Calculate the next Tx descriptor entry. */
988 spin_lock_irqsave(&ep->lock, flags);
989 free_count = ep->cur_tx - ep->dirty_tx;
990 entry = ep->cur_tx % TX_RING_SIZE;
992 ep->tx_skbuff[entry] = skb;
993 ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data,
994 skb->len, PCI_DMA_TODEVICE);
/* Interrupt mitigation: only request a Tx-done interrupt when the queue
   is half full or nearly exhausted; otherwise transmit silently. */
995 if (free_count < TX_QUEUE_LEN/2) {/* Typical path */
996 ctrl_word = cpu_to_le32(0x100000); /* No interrupt */
997 } else if (free_count == TX_QUEUE_LEN/2) {
998 ctrl_word = cpu_to_le32(0x140000); /* Tx-done intr. */
999 } else if (free_count < TX_QUEUE_LEN - 1) {
1000 ctrl_word = cpu_to_le32(0x100000); /* No Tx-done intr. */
1002 /* Leave room for an additional entry. */
1003 ctrl_word = cpu_to_le32(0x140000); /* Tx-done intr. */
1006 ep->tx_ring[entry].buflength = ctrl_word | cpu_to_le32(skb->len);
/* Written last: setting DescOwn hands the descriptor to the chip. */
1007 ep->tx_ring[entry].txstatus =
1008 ((skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN) << 16)
1009 | cpu_to_le32(DescOwn);
/* Ring (nearly) full: stop the stack from queueing more packets. */
1013 netif_stop_queue(dev);
1015 spin_unlock_irqrestore(&ep->lock, flags);
1016 /* Trigger an immediate transmit demand. */
1017 outl(TxQueued, dev->base_addr + COMMAND);
1019 dev->trans_start = jiffies;
1021 printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, "
1022 "flag %2.2x Tx status %8.8x.\n",
1023 dev->name, (int)skb->len, entry, ctrl_word,
1024 (int)inl(dev->base_addr + TxSTAT));
1029 /* The interrupt handler does all of the Rx thread work and cleans up
1030 after the Tx thread. */
/* Shared IRQ handler: acknowledge the interrupt sources, run Rx, reclaim
   completed Tx descriptors under ep->lock, then handle the uncommon
   error/counter events.  Bounded by max_interrupt_work iterations. */
1031 static irqreturn_t epic_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
1033 struct net_device *dev = dev_instance;
1034 struct epic_private *ep = dev->priv;
1035 long ioaddr = dev->base_addr;
1036 int status, boguscnt = max_interrupt_work;
1037 unsigned int handled = 0;
1040 status = inl(ioaddr + INTSTAT);
1041 /* Acknowledge all of the current interrupt sources ASAP. */
1042 outl(status & 0x00007fff, ioaddr + INTSTAT);
1045 printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new "
1046 "intstat=%#8.8x.\n",
1047 dev->name, status, (int)inl(ioaddr + INTSTAT));
/* Nothing of ours pending (shared IRQ) -- bail out. */
1049 if ((status & IntrSummary) == 0)
1053 if (status & (RxDone | RxStarted | RxEarlyWarn | RxOverflow))
1056 if (status & (TxEmpty | TxDone)) {
1057 unsigned int dirty_tx, cur_tx;
1059 /* Note: if this lock becomes a problem we can narrow the locked
1060 region at the cost of occasionally grabbing the lock more
1062 spin_lock(&ep->lock);
1063 cur_tx = ep->cur_tx;
1064 dirty_tx = ep->dirty_tx;
/* Reclaim descriptors the chip has finished with. */
1065 for (; cur_tx - dirty_tx > 0; dirty_tx++) {
1066 struct sk_buff *skb;
1067 int entry = dirty_tx % TX_RING_SIZE;
1068 int txstatus = le32_to_cpu(ep->tx_ring[entry].txstatus);
1070 if (txstatus & DescOwn)
1071 break; /* It still hasn't been Txed */
/* Bit 0 set means the packet transmitted successfully. */
1073 if ( ! (txstatus & 0x0001)) {
1074 /* There was an major error, log it. */
1075 #ifndef final_version
1077 printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
1078 dev->name, txstatus);
1080 ep->stats.tx_errors++;
1081 if (txstatus & 0x1050) ep->stats.tx_aborted_errors++;
1082 if (txstatus & 0x0008) ep->stats.tx_carrier_errors++;
1083 if (txstatus & 0x0040) ep->stats.tx_window_errors++;
1084 if (txstatus & 0x0010) ep->stats.tx_fifo_errors++;
1086 ep->stats.collisions += (txstatus >> 8) & 15;
1087 ep->stats.tx_packets++;
1088 ep->stats.tx_bytes += ep->tx_skbuff[entry]->len;
1091 /* Free the original skb. */
1092 skb = ep->tx_skbuff[entry];
1093 pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
1094 skb->len, PCI_DMA_TODEVICE);
1095 dev_kfree_skb_irq(skb);
1096 ep->tx_skbuff[entry] = 0;
1099 #ifndef final_version
1100 if (cur_tx - dirty_tx > TX_RING_SIZE) {
1101 printk(KERN_WARNING "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
1102 dev->name, dirty_tx, cur_tx, ep->tx_full);
1103 dirty_tx += TX_RING_SIZE;
1106 ep->dirty_tx = dirty_tx;
1108 && cur_tx - dirty_tx < TX_QUEUE_LEN - 4) {
1109 /* The ring is no longer full, allow new TX entries. */
1111 spin_unlock(&ep->lock);
1112 netif_wake_queue(dev);
1114 spin_unlock(&ep->lock);
1117 /* Check uncommon events all at once. */
1118 if (status & (CntFull | TxUnderrun | RxOverflow | RxFull |
1119 PCIBusErr170 | PCIBusErr175)) {
1120 if (status == 0xffffffff) /* Chip failed or removed (CardBus). */
1122 /* Always update the error counts to avoid overhead later. */
1123 ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
1124 ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
1125 ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
1127 if (status & TxUnderrun) { /* Tx FIFO underflow. */
1128 ep->stats.tx_fifo_errors++;
/* Raise the FIFO threshold so the next frame is buffered deeper
   before transmission begins. */
1129 outl(ep->tx_threshold += 128, ioaddr + TxThresh);
1130 /* Restart the transmit process. */
1131 outl(RestartTx, ioaddr + COMMAND);
1133 if (status & RxOverflow) { /* Missed a Rx frame. */
1134 ep->stats.rx_errors++;
1136 if (status & (RxOverflow | RxFull))
1137 outw(RxQueued, ioaddr + COMMAND);
1138 if (status & PCIBusErr170) {
1139 printk(KERN_ERR "%s: PCI Bus Error! EPIC status %4.4x.\n",
1144 /* Clear all error sources. */
1145 outl(status & 0x7f18, ioaddr + INTSTAT);
/* Runaway interrupt: ack everything and give up for this invocation. */
1147 if (--boguscnt < 0) {
1148 printk(KERN_ERR "%s: Too much work at interrupt, "
1149 "IntrStatus=0x%8.8x.\n",
1151 /* Clear all interrupt sources. */
1152 outl(0x0001ffff, ioaddr + INTSTAT);
1158 printk(KERN_DEBUG "%s: exiting interrupt, intr_status=%#4.4x.\n",
1161 return IRQ_RETVAL(handled);
/*
 * Rx handler: drain completed descriptors from the Rx ring, pass the
 * frames up the network stack, then refill the ring with fresh buffers.
 * NOTE(review): this excerpt is missing several source lines (braces,
 * break statements, buflength arguments and the final return), so the
 * comments below describe only the visible code.
 */
static int epic_rx(struct net_device *dev)
	struct epic_private *ep = dev->priv;
	int entry = ep->cur_rx % RX_RING_SIZE;
	/* Never examine more descriptors than are currently outstanding. */
	int rx_work_limit = ep->dirty_rx + RX_RING_SIZE - ep->cur_rx;
		printk(KERN_DEBUG " In epic_rx(), entry %d %8.8x.\n", entry,
			   ep->rx_ring[entry].rxstatus);
	/* If we own the next entry, it's a new packet. Send it up. */
	while ((ep->rx_ring[entry].rxstatus & cpu_to_le32(DescOwn)) == 0) {
		int status = le32_to_cpu(ep->rx_ring[entry].rxstatus);
			printk(KERN_DEBUG "  epic_rx() status was %8.8x.\n", status);
		if (--rx_work_limit < 0)
		/* 0x2006 masks the error bits of the descriptor status word. */
		if (status & 0x2006) {
				printk(KERN_DEBUG "%s: epic_rx() error status was %8.8x.\n",
			if (status & 0x2000) {
				/* One frame spilled across several ring buffers. */
				printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
					   "multiple buffers, status %4.4x!\n", dev->name, status);
				ep->stats.rx_length_errors++;
			} else if (status & 0x0006)
				/* Rx Frame errors are counted in hardware. */
				ep->stats.rx_errors++;
			/* Malloc up new buffer, compatible with net-2e. */
			/* Omit the four octet CRC from the length. */
			short pkt_len = (status >> 16) - 4;
			struct sk_buff *skb;
			if (pkt_len > PKT_BUF_SZ - 4) {
				printk(KERN_ERR "%s: Oversized Ethernet frame, status %x "
					   dev->name, status, pkt_len);
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				/* Small frame: copy out of the DMA buffer so the ring
				   skb can be reused in place. */
				pci_dma_sync_single_for_cpu(ep->pci_dev,
							    ep->rx_ring[entry].bufaddr,
							    PCI_DMA_FROMDEVICE);
				eth_copy_and_sum(skb, ep->rx_skbuff[entry]->tail, pkt_len, 0);
				skb_put(skb, pkt_len);
				pci_dma_sync_single_for_device(ep->pci_dev,
							       ep->rx_ring[entry].bufaddr,
							       PCI_DMA_FROMDEVICE);
				/* Large frame: unmap and hand the ring skb itself up;
				   the slot is refilled in the loop below. */
				pci_unmap_single(ep->pci_dev,
						 ep->rx_ring[entry].bufaddr,
						 ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
				skb_put(skb = ep->rx_skbuff[entry], pkt_len);
				ep->rx_skbuff[entry] = NULL;
			skb->protocol = eth_type_trans(skb, dev);
			dev->last_rx = jiffies;
			ep->stats.rx_packets++;
			ep->stats.rx_bytes += pkt_len;
		entry = (++ep->cur_rx) % RX_RING_SIZE;
	/* Refill the Rx ring buffers. */
	for (; ep->cur_rx - ep->dirty_rx > 0; ep->dirty_rx++) {
		entry = ep->dirty_rx % RX_RING_SIZE;
		if (ep->rx_skbuff[entry] == NULL) {
			struct sk_buff *skb;
			skb = ep->rx_skbuff[entry] = dev_alloc_skb(ep->rx_buf_sz);
			skb->dev = dev;			/* Mark as being used by this device. */
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev,
				skb->tail, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
		/* Give the descriptor back to the chip. */
		ep->rx_ring[entry].rxstatus = cpu_to_le32(DescOwn);
/*
 * ifdown handler: stop the queue and media timer, release the IRQ, free
 * every skb still held by the Rx and Tx rings, and leave the chip in
 * low-power mode.  NOTE(review): braces and the final return are not
 * visible in this excerpt.
 */
static int epic_close(struct net_device *dev)
	long ioaddr = dev->base_addr;
	struct epic_private *ep = dev->priv;
	struct sk_buff *skb;
	netif_stop_queue(dev);
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
			   dev->name, (int)inl(ioaddr + INTSTAT));
	del_timer_sync(&ep->timer);
	free_irq(dev->irq, dev);
	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		skb = ep->rx_skbuff[i];
		ep->rx_skbuff[i] = 0;
		ep->rx_ring[i].rxstatus = 0;		/* Not owned by Epic chip. */
		ep->rx_ring[i].buflength = 0;
		/* Undo the pci_map_single() done when the buffer was queued. */
		pci_unmap_single(ep->pci_dev, ep->rx_ring[i].bufaddr,
				 ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
		ep->rx_ring[i].bufaddr = 0xBADF00D0; /* An invalid address. */
	/* Release any skbs still queued for transmit. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		skb = ep->tx_skbuff[i];
		ep->tx_skbuff[i] = 0;
		pci_unmap_single(ep->pci_dev, ep->tx_ring[i].bufaddr,
				 skb->len, PCI_DMA_TODEVICE);
	/* Green! Leave the chip in low-power mode. */
	outl(0x0008, ioaddr + GENCTL);
/*
 * Return the interface statistics.  While the interface is up, first
 * fold the chip's on-board error counters (missed / alignment / CRC)
 * into the software counts; presumably the registers are only readable
 * while the chip is powered — TODO confirm.  NOTE(review): the return
 * statement and closing braces are not visible in this excerpt.
 */
static struct net_device_stats *epic_get_stats(struct net_device *dev)
	struct epic_private *ep = dev->priv;
	long ioaddr = dev->base_addr;
	if (netif_running(dev)) {
		/* Update the error counts. */
		ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
		ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
		ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
1318 /* Set or clear the multicast filter for this adaptor.
1319 Note that we only use exclusion around actually queueing the
1320 new frame, not around filling ep->setup_frame. This is non-deterministic
1321 when re-entered but still correct. */
1323 static void set_rx_mode(struct net_device *dev)
1325 long ioaddr = dev->base_addr;
1326 struct epic_private *ep = dev->priv;
1327 unsigned char mc_filter[8]; /* Multicast hash filter */
1330 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1331 outl(0x002C, ioaddr + RxCtrl);
1332 /* Unconditionally log net taps. */
1333 printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
1334 memset(mc_filter, 0xff, sizeof(mc_filter));
1335 } else if ((dev->mc_count > 0) || (dev->flags & IFF_ALLMULTI)) {
1336 /* There is apparently a chip bug, so the multicast filter
1337 is never enabled. */
1338 /* Too many to filter perfectly -- accept all multicasts. */
1339 memset(mc_filter, 0xff, sizeof(mc_filter));
1340 outl(0x000C, ioaddr + RxCtrl);
1341 } else if (dev->mc_count == 0) {
1342 outl(0x0004, ioaddr + RxCtrl);
1344 } else { /* Never executed, for now. */
1345 struct dev_mc_list *mclist;
1347 memset(mc_filter, 0, sizeof(mc_filter));
1348 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1349 i++, mclist = mclist->next) {
1350 unsigned int bit_nr =
1351 ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f;
1352 mc_filter[bit_nr >> 3] |= (1 << bit_nr);
1355 /* ToDo: perhaps we need to stop the Tx and Rx process here? */
1356 if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) {
1357 for (i = 0; i < 4; i++)
1358 outw(((u16 *)mc_filter)[i], ioaddr + MC0 + i*4);
1359 memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter));
1364 static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1366 struct epic_private *np = dev->priv;
1368 strcpy (info->driver, DRV_NAME);
1369 strcpy (info->version, DRV_VERSION);
1370 strcpy (info->bus_info, pci_name(np->pci_dev));
1373 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1375 struct epic_private *np = dev->priv;
1378 spin_lock_irq(&np->lock);
1379 rc = mii_ethtool_gset(&np->mii, cmd);
1380 spin_unlock_irq(&np->lock);
1385 static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1387 struct epic_private *np = dev->priv;
1390 spin_lock_irq(&np->lock);
1391 rc = mii_ethtool_sset(&np->mii, cmd);
1392 spin_unlock_irq(&np->lock);
1397 static int netdev_nway_reset(struct net_device *dev)
1399 struct epic_private *np = dev->priv;
1400 return mii_nway_restart(&np->mii);
1403 static u32 netdev_get_link(struct net_device *dev)
1405 struct epic_private *np = dev->priv;
1406 return mii_link_ok(&np->mii);
/* ethtool get_msglevel hook; the body is not visible in this excerpt —
   presumably it returns the module-level debug setting (TODO confirm). */
static u32 netdev_get_msglevel(struct net_device *dev)
/* ethtool set_msglevel hook; the body is not visible in this excerpt —
   presumably it stores `value` into the module-level debug setting. */
static void netdev_set_msglevel(struct net_device *dev, u32 value)
1419 static int ethtool_begin(struct net_device *dev)
1421 unsigned long ioaddr = dev->base_addr;
1422 /* power-up, if interface is down */
1423 if (! netif_running(dev)) {
1424 outl(0x0200, ioaddr + GENCTL);
1425 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
1430 static void ethtool_complete(struct net_device *dev)
1432 unsigned long ioaddr = dev->base_addr;
1433 /* power-down, if interface is down */
1434 if (! netif_running(dev)) {
1435 outl(0x0008, ioaddr + GENCTL);
1436 outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL);
/* ethtool method table.  begin/complete bracket every operation so the
   chip is powered up even when the interface is down.  NOTE(review): the
   closing brace of this initializer is not visible in this excerpt. */
static struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo = netdev_get_drvinfo,
	.get_settings = netdev_get_settings,
	.set_settings = netdev_set_settings,
	.nway_reset = netdev_nway_reset,
	.get_link = netdev_get_link,
	.get_msglevel = netdev_get_msglevel,
	.set_msglevel = netdev_set_msglevel,
	.get_sg = ethtool_op_get_sg,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.begin = ethtool_begin,
	.complete = ethtool_complete
/*
 * ioctl entry point for the SIOC[GS]MII* requests.  If the interface is
 * down, the chip is powered up for the duration of the MII access and
 * powered back down afterwards (same sequence as ethtool_begin/
 * ethtool_complete).  NOTE(review): the declaration of `rc`, braces and
 * the final return are not visible in this excerpt.
 */
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
	struct epic_private *np = dev->priv;
	long ioaddr = dev->base_addr;
	struct mii_ioctl_data *data = if_mii(rq);
	/* power-up, if interface is down */
	if (! netif_running(dev)) {
		outl(0x0200, ioaddr + GENCTL);
		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
	/* all non-ethtool ioctls (the SIOC[GS]MIIxxx ioctls) */
	spin_lock_irq(&np->lock);
	rc = generic_mii_ioctl(&np->mii, data, cmd, NULL);
	spin_unlock_irq(&np->lock);
	/* power-down, if interface is down */
	if (! netif_running(dev)) {
		outl(0x0008, ioaddr + GENCTL);
		outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL);
/*
 * PCI hot-unplug / module-unload teardown: release descriptor rings,
 * unregister the net device and give back the I/O resources.
 * NOTE(review): the DMA rings are freed *before* unregister_netdev(),
 * so they could in principle still be in use by an open interface —
 * the conventional order is unregister first; verify against the rest
 * of the driver.  Several lines (#ifdef pair around iounmap, and
 * presumably a free_netdev()) are not visible in this excerpt.
 */
static void __devexit epic_remove_one (struct pci_dev *pdev)
	struct net_device *dev = pci_get_drvdata(pdev);
	struct epic_private *ep = dev->priv;
	pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
	pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
	unregister_netdev(dev);
	iounmap((void*) dev->base_addr);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	/* pci_power_off(pdev, -1); */
/*
 * PM suspend hook: nothing to do if the interface is down; otherwise
 * put the chip into low-power mode.  NOTE(review): the early-return
 * body and the function's final return are not visible in this excerpt.
 */
static int epic_suspend (struct pci_dev *pdev, u32 state)
	struct net_device *dev = pci_get_drvdata(pdev);
	long ioaddr = dev->base_addr;
	if (!netif_running(dev))
	/* Put the chip into low-power mode. */
	outl(0x0008, ioaddr + GENCTL);
	/* pci_power_off(pdev, -1); */
/*
 * PM resume hook: nothing to do if the interface is down.  The restart
 * path for a running interface is not visible in this excerpt —
 * presumably it re-initializes the chip (TODO confirm).
 */
static int epic_resume (struct pci_dev *pdev)
	struct net_device *dev = pci_get_drvdata(pdev);
	if (!netif_running(dev))
	/* pci_power_on(pdev); */
1527 #endif /* CONFIG_PM */
/* PCI driver glue: probe/remove plus (under CONFIG_PM) suspend/resume.
   NOTE(review): the .name field, the matching #ifdef CONFIG_PM and the
   closing brace are not visible in this excerpt. */
static struct pci_driver epic_driver = {
	.id_table = epic_pci_tbl,
	.probe = epic_init_one,
	.remove = __devexit_p(epic_remove_one),
	.suspend = epic_suspend,
	.resume = epic_resume,
#endif /* CONFIG_PM */
/*
 * Module entry point: announce the driver and register with the PCI
 * core.  The "%s" KERN_INFO "%s" pattern prefixes each version string
 * with its own log level — presumably each string ends in '\n' (the
 * strings are defined outside this excerpt).  NOTE(review): the #ifdef
 * MODULE pair around the printk is not visible here.
 */
static int __init epic_init (void)
/* when a module, this is printed whether or not devices are found in probe */
	printk (KERN_INFO "%s" KERN_INFO "%s" KERN_INFO "%s",
		version, version2, version3);
	return pci_module_init (&epic_driver);
1554 static void __exit epic_cleanup (void)
1556 pci_unregister_driver (&epic_driver);
1560 module_init(epic_init);
1561 module_exit(epic_cleanup);