1 /* drivers/net/eepro100.c: An Intel i82557-559 Ethernet driver for Linux. */
3 Written 1996-1999 by Donald Becker.
5 The driver also contains updates by different kernel developers
6 (see incomplete list below).
7 Current maintainer is Andrey V. Savochkin <saw@saw.sw.com.sg>.
8 Please use this email address and linux-kernel mailing list for bug reports.
10 This software may be used and distributed according to the terms
11 of the GNU General Public License, incorporated herein by reference.
13 This driver is for the Intel EtherExpress Pro100 (Speedo3) design.
14 It should work with all i82557/558/559 boards.
17 1998 Apr - 2000 Feb Andrey V. Savochkin <saw@saw.sw.com.sg>
18 Serious fixes for multicast filter list setting, TX timeout routine;
19 RX ring refilling logic; other stuff
20 2000 Feb Jeff Garzik <jgarzik@pobox.com>
21 Convert to new PCI driver interface
22 2000 Mar 24 Dragan Stancevic <visitor@valinux.com>
23 Disabled FC and ER, to avoid lockups when we get FCP interrupts.
24 2000 Jul 17 Goutham Rao <goutham.rao@intel.com>
25 PCI DMA API fixes, adding pci_dma_sync_single calls where necessary
26 2000 Aug 31 David Mosberger <davidm@hpl.hp.com>
27 rx_align support: enables rx DMA without causing unaligned accesses.
30 static const char *version =
31 "eepro100.c:v1.09j-t 9/29/99 Donald Becker http://www.scyld.com/network/eepro100.html\n"
32 "eepro100.c: $Revision: 1.36 $ 2000/11/17 Modified by Andrey V. Savochkin <saw@saw.sw.com.sg> and others\n";
34 /* A few user-configurable values that apply to all boards.
35 First set is undocumented and spelled per Intel recommendations. */
37 static int congenb /* = 0 */; /* Enable congestion control in the DP83840. */
38 static int txfifo = 8; /* Tx FIFO threshold in 4 byte units, 0-15 */
39 static int rxfifo = 8; /* Rx FIFO threshold, default 32 bytes. */
40 /* Tx/Rx DMA burst length, 0-127, 0 == no preemption, tx==128 -> disabled. */
41 static int txdmacount = 128;
42 static int rxdmacount /* = 0 */;
44 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__) || defined(__mips__) || \
46 /* align rx buffers to 2 bytes so that IP header is aligned */
47 # define rx_align(skb) skb_reserve((skb), 2)
48 # define RxFD_ALIGNMENT __attribute__ ((aligned (2), packed))
50 # define rx_align(skb)
51 # define RxFD_ALIGNMENT
54 /* Set the copy breakpoint for the copy-only-tiny-buffer Rx method.
55 Lower values use more memory, but are faster. */
56 static int rx_copybreak = 200;
58 /* Maximum events (Rx packets, etc.) to handle at each interrupt. */
59 static int max_interrupt_work = 20;
61 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast) */
62 static int multicast_filter_limit = 64;
64 /* 'options' is used to pass a transceiver override or full-duplex flag
65 e.g. "options=16" for FD, "options=32" for 100mbps-only. */
66 static int full_duplex[] = {-1, -1, -1, -1, -1, -1, -1, -1};
67 static int options[] = {-1, -1, -1, -1, -1, -1, -1, -1};
69 /* A few values that may be tweaked. */
70 /* The ring sizes should be a power of two for efficiency. */
71 #define TX_RING_SIZE 64
72 #define RX_RING_SIZE 64
73 /* How many slots multicast filter setup may take.
74 Do not decrease without changing set_rx_mode() implementation. */
75 #define TX_MULTICAST_SIZE 2
76 #define TX_MULTICAST_RESERV (TX_MULTICAST_SIZE*2)
77 /* Actual number of TX packets queued, must be
78 <= TX_RING_SIZE-TX_MULTICAST_RESERV. */
79 #define TX_QUEUE_LIMIT (TX_RING_SIZE-TX_MULTICAST_RESERV)
80 /* Hysteresis marking queue as no longer full. */
81 #define TX_QUEUE_UNFULL (TX_QUEUE_LIMIT-4)
83 /* Operational parameters that usually are not changed. */
85 /* Time in jiffies before concluding the transmitter is hung. */
86 #define TX_TIMEOUT (2*HZ)
87 /* Size of a pre-allocated Rx buffer: <Ethernet MTU> + slack.*/
88 #define PKT_BUF_SZ 1536
90 #if !defined(__OPTIMIZE__) || !defined(__KERNEL__)
91 #warning You must compile this file with the correct options!
92 #warning See the last lines of the source file.
93 #error You must compile this driver with "-O".
96 #include <linux/config.h>
97 #include <linux/version.h>
98 #include <linux/module.h>
100 #include <linux/kernel.h>
101 #include <linux/string.h>
102 #include <linux/errno.h>
103 #include <linux/ioport.h>
104 #include <linux/slab.h>
105 #include <linux/interrupt.h>
106 #include <linux/timer.h>
107 #include <linux/pci.h>
108 #include <linux/spinlock.h>
109 #include <linux/init.h>
110 #include <linux/mii.h>
111 #include <linux/delay.h>
113 #include <asm/bitops.h>
115 #include <asm/uaccess.h>
118 #include <linux/netdevice.h>
119 #include <linux/etherdevice.h>
120 #include <linux/rtnetlink.h>
121 #include <linux/skbuff.h>
122 #include <linux/ethtool.h>
124 /* enable PIO instead of MMIO, if CONFIG_EEPRO100_PIO is selected */
125 #ifdef CONFIG_EEPRO100_PIO
129 static int debug = -1;
130 #define DEBUG_DEFAULT (NETIF_MSG_DRV | \
134 #define DEBUG ((debug >= 0) ? (1<<debug)-1 : DEBUG_DEFAULT)
137 MODULE_AUTHOR("Maintainer: Andrey V. Savochkin <saw@saw.sw.com.sg>");
138 MODULE_DESCRIPTION("Intel i82557/i82558/i82559 PCI EtherExpressPro driver");
139 MODULE_LICENSE("GPL");
140 MODULE_PARM(debug, "i");
141 MODULE_PARM(options, "1-" __MODULE_STRING(8) "i");
142 MODULE_PARM(full_duplex, "1-" __MODULE_STRING(8) "i");
143 MODULE_PARM(congenb, "i");
144 MODULE_PARM(txfifo, "i");
145 MODULE_PARM(rxfifo, "i");
146 MODULE_PARM(txdmacount, "i");
147 MODULE_PARM(rxdmacount, "i");
148 MODULE_PARM(rx_copybreak, "i");
149 MODULE_PARM(max_interrupt_work, "i");
150 MODULE_PARM(multicast_filter_limit, "i");
151 MODULE_PARM_DESC(debug, "debug level (0-6)");
152 MODULE_PARM_DESC(options, "Bits 0-3: transceiver type, bit 4: full duplex, bit 5: 100Mbps");
153 MODULE_PARM_DESC(full_duplex, "full duplex setting(s) (1)");
154 MODULE_PARM_DESC(congenb, "Enable congestion control (1)");
155 MODULE_PARM_DESC(txfifo, "Tx FIFO threshold in 4 byte units, (0-15)");
156 MODULE_PARM_DESC(rxfifo, "Rx FIFO threshold in 4 byte units, (0-15)");
157 MODULE_PARM_DESC(txdmaccount, "Tx DMA burst length; 128 - disable (0-128)");
158 MODULE_PARM_DESC(rxdmaccount, "Rx DMA burst length; 128 - disable (0-128)");
159 MODULE_PARM_DESC(rx_copybreak, "copy breakpoint for copy-only-tiny-frames");
160 MODULE_PARM_DESC(max_interrupt_work, "maximum events handled per interrupt");
161 MODULE_PARM_DESC(multicast_filter_limit, "maximum number of filtered multicast addresses");
163 #define RUN_AT(x) (jiffies + (x))
165 /* ACPI power states don't universally work (yet) */
167 #undef pci_set_power_state
168 #define pci_set_power_state null_set_power_state
169 static inline int null_set_power_state(struct pci_dev *dev, int state)
173 #endif /* CONFIG_PM */
175 #define netdevice_start(dev)
176 #define netdevice_stop(dev)
177 #define netif_set_tx_timeout(dev, tf, tm) \
179 (dev)->tx_timeout = (tf); \
180 (dev)->watchdog_timeo = (tm); \
188 I. Board Compatibility
190 This device driver is designed for the Intel i82557 "Speedo3" chip, Intel's
191 single-chip fast Ethernet controller for PCI, as used on the Intel
192 EtherExpress Pro 100 adapter.
194 II. Board-specific settings
196 PCI bus devices are configured by the system at boot time, so no jumpers
197 need to be set on the board. The system BIOS should be set to assign the
198 PCI INTA signal to an otherwise unused system IRQ line. While it's
199 possible to share PCI interrupt lines, it negatively impacts performance and
200 only recent kernels support it.
202 III. Driver operation
205 The Speedo3 is very similar to other Intel network chips, that is to say
206 "apparently designed on a different planet". This chip retains the complex
207 Rx and Tx descriptors and multiple buffers pointers as previous chips, but
208 also has simplified Tx and Rx buffer modes. This driver uses the "flexible"
209 Tx mode, but in a simplified lower-overhead manner: it associates only a
210 single buffer descriptor with each frame descriptor.
212 Despite the extra space overhead in each receive skbuff, the driver must use
213 the simplified Rx buffer mode to assure that only a single data buffer is
214 associated with each RxFD. The driver implements this by reserving space
215 for the Rx descriptor at the head of each Rx skbuff.
217 The Speedo-3 has receive and command unit base addresses that are added to
218 almost all descriptor pointers. The driver sets these to zero, so that all
219 pointer fields are absolute addresses.
221 The System Control Block (SCB) of some previous Intel chips exists on the
222 chip in both PCI I/O and memory space. This driver uses the I/O space
223 registers, but might switch to memory mapped mode to better support non-x86
226 IIIB. Transmit structure
228 The driver must use the complex Tx command+descriptor mode in order to
229 have an indirect pointer to the skbuff data section. Each Tx command block
230 (TxCB) is associated with two immediately appended Tx Buffer Descriptor
231 (TxBD). A fixed ring of these TxCB+TxBD pairs are kept as part of the
232 speedo_private data structure for each adapter instance.
234 The newer i82558 explicitly supports this structure, and can read the two
235 TxBDs in the same PCI burst as the TxCB.
237 This ring structure is used for all normal transmit packets, but the
238 transmit packet descriptors aren't long enough for most non-Tx commands such
239 as CmdConfigure. This is complicated by the possibility that the chip has
240 already loaded the link address in the previous descriptor. So for these
241 commands we convert the next free descriptor on the ring to a NoOp, and point
242 that descriptor's link to the complex command.
244 An additional complexity of these non-transmit commands are that they may be
245 added asynchronous to the normal transmit queue, so we disable interrupts
246 whenever the Tx descriptor ring is manipulated.
248 A notable aspect of these special configure commands is that they do
249 work with the normal Tx ring entry scavenge method. The Tx ring scavenge
250 is done at interrupt time using the 'dirty_tx' index, and checking for the
251 command-complete bit. While the setup frames may have the NoOp command on the
252 Tx ring marked as complete, but not have completed the setup command, this
253 is not a problem. The tx_ring entry can be still safely reused, as the
254 tx_skbuff[] entry is always empty for config_cmd and mc_setup frames.
256 Commands may have bits set e.g. CmdSuspend in the command word to either
257 suspend or stop the transmit/command unit. This driver always flags the last
258 command with CmdSuspend, erases the CmdSuspend in the previous command, and
259 then issues a CU_RESUME.
260 Note: Watch out for the potential race condition here: imagine
261 erasing the previous suspend
262 the chip processes the previous command
263 the chip processes the final command, and suspends
265 the chip processes the next-yet-valid post-final-command.
266 So blindly sending a CU_RESUME is only safe if we do it immediately
267 after erasing the previous CmdSuspend, without the possibility of an
268 intervening delay. Thus the resume command is always within the
269 interrupts-disabled region. This is a timing dependence, but handling this
270 condition in a timing-independent way would considerably complicate the code.
272 Note: In previous generation Intel chips, restarting the command unit was a
273 notoriously slow process. This is presumably no longer true.
275 IIIC. Receive structure
277 Because of the bus-master support on the Speedo3 this driver uses the new
278 SKBUFF_RX_COPYBREAK scheme, rather than a fixed intermediate receive buffer.
279 This scheme allocates full-sized skbuffs as receive buffers. The value
280 SKBUFF_RX_COPYBREAK is used as the copying breakpoint: it is chosen to
281 trade-off the memory wasted by passing the full-sized skbuff to the queue
282 layer for all frames vs. the copying cost of copying a frame to a
283 correctly-sized skbuff.
285 For small frames the copying cost is negligible (esp. considering that we
286 are pre-loading the cache with immediately useful header information), so we
287 allocate a new, minimally-sized skbuff. For large frames the copying cost
288 is non-trivial, and the larger copy might flush the cache of useful data, so
289 we pass up the skbuff the packet was received into.
293 Thanks to Steve Williams of Intel for arranging the non-disclosure agreement
294 that stated that I could disclose the information. But I still resent
295 having to sign an Intel NDA when I'm helping Intel sell their own product!
299 static int speedo_found1(struct pci_dev *pdev, long ioaddr, int fnd_cnt, int acpi_idle_state);
302 PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
303 PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3,
306 static inline unsigned int io_inw(unsigned long port)
310 static inline void io_outw(unsigned int val, unsigned long port)
316 /* Currently alpha headers define in/out macros.
317 Undefine them. 2000/03/30 SAW */
332 /* Offsets to the various registers.
333 All accesses need not be longword aligned. */
334 enum speedo_offsets {
335 SCBStatus = 0, SCBCmd = 2, /* Rx/Command Unit command and status. */
337 SCBPointer = 4, /* General purpose pointer. */
338 SCBPort = 8, /* Misc. commands and operands. */
339 SCBflash = 12, SCBeeprom = 14, /* EEPROM and flash memory control. */
340 SCBCtrlMDI = 16, /* MDI interface control. */
341 SCBEarlyRx = 20, /* Early receive byte count. */
343 /* Commands that can be put in a command list entry. */
345 CmdNOp = 0, CmdIASetup = 0x10000, CmdConfigure = 0x20000,
346 CmdMulticastList = 0x30000, CmdTx = 0x40000, CmdTDR = 0x50000,
347 CmdDump = 0x60000, CmdDiagnose = 0x70000,
348 CmdSuspend = 0x40000000, /* Suspend after completion. */
349 CmdIntr = 0x20000000, /* Interrupt after completion. */
350 CmdTxFlex = 0x00080000, /* Use "Flexible mode" for CmdTx command. */
352 /* Clear CmdSuspend (1<<30) avoiding interference with the card access to the
353 status bits. Previous driver versions used separate 16 bit fields for
354 commands and statuses. --SAW
356 #if defined(__alpha__)
357 # define clear_suspend(cmd) clear_bit(30, &(cmd)->cmd_status);
359 # if defined(__LITTLE_ENDIAN)
360 # define clear_suspend(cmd) ((__u16 *)&(cmd)->cmd_status)[1] &= ~0x4000
361 # elif defined(__BIG_ENDIAN)
362 # define clear_suspend(cmd) ((__u16 *)&(cmd)->cmd_status)[1] &= ~0x0040
364 # error Unsupported byteorder
369 SCBMaskCmdDone=0x8000, SCBMaskRxDone=0x4000, SCBMaskCmdIdle=0x2000,
370 SCBMaskRxSuspend=0x1000, SCBMaskEarlyRx=0x0800, SCBMaskFlowCtl=0x0400,
371 SCBTriggerIntr=0x0200, SCBMaskAll=0x0100,
372 /* The rest are Rx and Tx commands. */
373 CUStart=0x0010, CUResume=0x0020, CUStatsAddr=0x0040, CUShowStats=0x0050,
374 CUCmdBase=0x0060, /* CU Base address (set to zero) . */
375 CUDumpStats=0x0070, /* Dump then reset stats counters. */
376 RxStart=0x0001, RxResume=0x0002, RxAbort=0x0004, RxAddrLoad=0x0006,
377 RxResumeNoResources=0x0007,
381 PortReset=0, PortSelfTest=1, PortPartialReset=2, PortDump=3,
384 /* The Speedo3 Rx and Tx frame/buffer descriptors. */
385 struct descriptor { /* A generic descriptor. */
386 volatile s32 cmd_status; /* All command and status fields. */
387 u32 link; /* struct descriptor * */
388 unsigned char params[0];
391 /* The Speedo3 Rx and Tx buffer descriptors. */
392 struct RxFD { /* Receive frame descriptor. */
394 u32 link; /* struct RxFD * */
395 u32 rx_buf_addr; /* void * */
399 /* Selected elements of the Tx/RxFD.status word. */
401 RxComplete=0x8000, RxOK=0x2000,
402 RxErrCRC=0x0800, RxErrAlign=0x0400, RxErrTooBig=0x0200, RxErrSymbol=0x0010,
403 RxEth2Type=0x0020, RxNoMatch=0x0004, RxNoIAMatch=0x0002,
404 TxUnderrun=0x1000, StatusComplete=0x8000,
407 #define CONFIG_DATA_SIZE 22
408 struct TxFD { /* Transmit frame descriptor set. */
410 u32 link; /* void * */
411 u32 tx_desc_addr; /* Always points to the tx_buf_addr element. */
412 s32 count; /* # of TBD (=1), Tx start thresh., etc. */
413 /* This constitutes two "TBD" entries -- we only use one. */
414 #define TX_DESCR_BUF_OFFSET 16
415 u32 tx_buf_addr0; /* void *, frame to be transmitted. */
416 s32 tx_buf_size0; /* Length of Tx frame. */
417 u32 tx_buf_addr1; /* void *, frame to be transmitted. */
418 s32 tx_buf_size1; /* Length of Tx frame. */
419 /* the structure must have space for at least CONFIG_DATA_SIZE starting
420 * from tx_desc_addr field */
423 /* Multicast filter setting block. --SAW */
424 struct speedo_mc_block {
425 struct speedo_mc_block *next;
427 dma_addr_t frame_dma;
429 struct descriptor frame __attribute__ ((__aligned__(16)));
432 /* Elements of the dump_statistics block. This block must be lword aligned. */
433 struct speedo_stats {
446 u32 rx_resource_errs;
453 enum Rx_ring_state_bits {
454 RrNoMem=1, RrPostponed=2, RrNoResources=4, RrOOMReported=8,
457 /* Do not change the position (alignment) of the first few elements!
458 The later elements are grouped for cache locality.
460 Unfortunately, all the positions have been shifted since there.
461 A new re-alignment is required. 2000/03/06 SAW */
462 struct speedo_private {
463 struct TxFD *tx_ring; /* Commands (usually CmdTxPacket). */
464 struct RxFD *rx_ringp[RX_RING_SIZE]; /* Rx descriptor, used as ring. */
465 /* The addresses of a Tx/Rx-in-place packets/buffers. */
466 struct sk_buff *tx_skbuff[TX_RING_SIZE];
467 struct sk_buff *rx_skbuff[RX_RING_SIZE];
468 /* Mapped addresses of the rings. */
469 dma_addr_t tx_ring_dma;
470 #define TX_RING_ELEM_DMA(sp, n) ((sp)->tx_ring_dma + (n)*sizeof(struct TxFD))
471 dma_addr_t rx_ring_dma[RX_RING_SIZE];
472 struct descriptor *last_cmd; /* Last command sent. */
473 unsigned int cur_tx, dirty_tx; /* The ring entries to be free()ed. */
474 spinlock_t lock; /* Group with Tx control cache line. */
475 u32 tx_threshold; /* The value for txdesc.count. */
476 struct RxFD *last_rxf; /* Last filled RX buffer. */
477 dma_addr_t last_rxf_dma;
478 unsigned int cur_rx, dirty_rx; /* The next free ring entry */
479 long last_rx_time; /* Last Rx, in jiffies, to handle Rx hang. */
480 struct net_device_stats stats;
481 struct speedo_stats *lstats;
482 dma_addr_t lstats_dma;
484 struct pci_dev *pdev;
485 struct timer_list timer; /* Media selection timer. */
486 struct speedo_mc_block *mc_setup_head; /* Multicast setup frame list head. */
487 struct speedo_mc_block *mc_setup_tail; /* Multicast setup frame list tail. */
488 long in_interrupt; /* Word-aligned dev->interrupt */
489 unsigned char acpi_pwr;
490 signed char rx_mode; /* Current PROMISC/ALLMULTI setting. */
491 unsigned int tx_full:1; /* The Tx queue is full. */
492 unsigned int flow_ctrl:1; /* Use 802.3x flow control. */
493 unsigned int rx_bug:1; /* Work around receiver hang errata. */
494 unsigned char default_port:8; /* Last dev->if_port value. */
495 unsigned char rx_ring_state; /* RX ring status flags. */
496 unsigned short phy[2]; /* PHY media interfaces available. */
497 unsigned short partner; /* Link partner caps. */
498 struct mii_if_info mii_if; /* MII API hooks, info */
499 u32 msg_enable; /* debug message level */
505 /* The parameters for a CmdConfigure operation.
506 There are so many options that it would be difficult to document each bit.
507 We mostly use the default or recommended settings. */
508 static const char i82557_config_cmd[CONFIG_DATA_SIZE] = {
509 22, 0x08, 0, 0, 0, 0, 0x32, 0x03, 1, /* 1=Use MII 0=Use AUI */
511 0xf2, 0x48, 0, 0x40, 0xf2, 0x80, /* 0x40=Force full-duplex */
513 static const char i82558_config_cmd[CONFIG_DATA_SIZE] = {
514 22, 0x08, 0, 1, 0, 0, 0x22, 0x03, 1, /* 1=Use MII 0=Use AUI */
515 0, 0x2E, 0, 0x60, 0x08, 0x88,
516 0x68, 0, 0x40, 0xf2, 0x84, /* Disable FC */
519 /* PHY media interface chips. */
520 static const char *phys[] = {
521 "None", "i82553-A/B", "i82553-C", "i82503",
522 "DP83840", "80c240", "80c24", "i82555",
523 "unknown-8", "unknown-9", "DP83840A", "unknown-11",
524 "unknown-12", "unknown-13", "unknown-14", "unknown-15", };
525 enum phy_chips { NonSuchPhy=0, I82553AB, I82553C, I82503, DP83840, S80C240,
526 S80C24, I82555, DP83840A=10, };
527 static const char is_mii[] = { 0, 1, 1, 0, 1, 1, 0, 1 };
528 #define EE_READ_CMD (6)
530 static int eepro100_init_one(struct pci_dev *pdev,
531 const struct pci_device_id *ent);
533 static int do_eeprom_cmd(long ioaddr, int cmd, int cmd_len);
534 static int mdio_read(struct net_device *dev, int phy_id, int location);
535 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
536 static int speedo_open(struct net_device *dev);
537 static void speedo_resume(struct net_device *dev);
538 static void speedo_timer(unsigned long data);
539 static void speedo_init_rx_ring(struct net_device *dev);
540 static void speedo_tx_timeout(struct net_device *dev);
541 static int speedo_start_xmit(struct sk_buff *skb, struct net_device *dev);
542 static void speedo_refill_rx_buffers(struct net_device *dev, int force);
543 static int speedo_rx(struct net_device *dev);
544 static void speedo_tx_buffer_gc(struct net_device *dev);
545 static irqreturn_t speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
546 static int speedo_close(struct net_device *dev);
547 static struct net_device_stats *speedo_get_stats(struct net_device *dev);
548 static int speedo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
549 static void set_rx_mode(struct net_device *dev);
550 static void speedo_show_state(struct net_device *dev);
554 #ifdef honor_default_port
555 /* Optional driver feature to allow forcing the transceiver setting.
557 static int mii_ctrl[8] = { 0x3300, 0x3100, 0x0000, 0x0100,
558 0x2000, 0x2100, 0x0400, 0x3100};
561 /* How to wait for the command unit to accept a command.
562 Typically this takes 0 ticks. */
563 static inline unsigned char wait_for_cmd_done(struct net_device *dev)
566 long cmd_ioaddr = dev->base_addr + SCBCmd;
572 } while(r && --wait >= 0);
575 printk(KERN_ALERT "%s: wait_for_cmd_done timeout!\n", dev->name);
579 static int __devinit eepro100_init_one (struct pci_dev *pdev,
580 const struct pci_device_id *ent)
582 unsigned long ioaddr;
584 int acpi_idle_state = 0, pm;
585 static int cards_found /* = 0 */;
588 /* when built-in, we only print version if device is found */
589 static int did_version;
590 if (did_version++ == 0)
594 /* save power state before pci_enable_device overwrites it */
595 pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
598 pci_read_config_word(pdev, pm + PCI_PM_CTRL, &pwr_command);
599 acpi_idle_state = pwr_command & PCI_PM_CTRL_STATE_MASK;
602 if (pci_enable_device(pdev))
603 goto err_out_free_mmio_region;
605 pci_set_master(pdev);
607 if (!request_region(pci_resource_start(pdev, 1),
608 pci_resource_len(pdev, 1), "eepro100")) {
609 printk (KERN_ERR "eepro100: cannot reserve I/O ports\n");
612 if (!request_mem_region(pci_resource_start(pdev, 0),
613 pci_resource_len(pdev, 0), "eepro100")) {
614 printk (KERN_ERR "eepro100: cannot reserve MMIO region\n");
615 goto err_out_free_pio_region;
620 ioaddr = pci_resource_start(pdev, 1);
621 if (DEBUG & NETIF_MSG_PROBE)
622 printk("Found Intel i82557 PCI Speedo at I/O %#lx, IRQ %d.\n",
625 ioaddr = (unsigned long)ioremap(pci_resource_start(pdev, 0),
626 pci_resource_len(pdev, 0));
628 printk (KERN_ERR "eepro100: cannot remap MMIO region %lx @ %lx\n",
629 pci_resource_len(pdev, 0), pci_resource_start(pdev, 0));
630 goto err_out_free_mmio_region;
632 if (DEBUG & NETIF_MSG_PROBE)
633 printk("Found Intel i82557 PCI Speedo, MMIO at %#lx, IRQ %d.\n",
634 pci_resource_start(pdev, 0), irq);
638 if (speedo_found1(pdev, ioaddr, cards_found, acpi_idle_state) == 0)
641 goto err_out_iounmap;
647 iounmap ((void *)ioaddr);
649 err_out_free_mmio_region:
650 release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
651 err_out_free_pio_region:
652 release_region(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
657 #ifdef CONFIG_NET_POLL_CONTROLLER
659 * Polling 'interrupt' - used by things like netconsole to send skbs
660 * without having to re-enable interrupts. It's not called while
661 * the interrupt routine is executing.
664 static void poll_speedo (struct net_device *dev)
666 /* disable_irq is not very nice, but with the funny lockless design
667 we have no other choice. */
668 disable_irq(dev->irq);
669 speedo_interrupt (dev->irq, dev, NULL);
670 enable_irq(dev->irq);
674 static int __devinit speedo_found1(struct pci_dev *pdev,
675 long ioaddr, int card_idx, int acpi_idle_state)
677 struct net_device *dev;
678 struct speedo_private *sp;
684 dma_addr_t tx_ring_dma;
686 size = TX_RING_SIZE * sizeof(struct TxFD) + sizeof(struct speedo_stats);
687 tx_ring_space = pci_alloc_consistent(pdev, size, &tx_ring_dma);
688 if (tx_ring_space == NULL)
691 dev = alloc_etherdev(sizeof(struct speedo_private));
693 printk(KERN_ERR "eepro100: Could not allocate ethernet device.\n");
694 pci_free_consistent(pdev, size, tx_ring_space, tx_ring_dma);
698 SET_MODULE_OWNER(dev);
699 SET_NETDEV_DEV(dev, &pdev->dev);
701 if (dev->mem_start > 0)
702 option = dev->mem_start;
703 else if (card_idx >= 0 && options[card_idx] >= 0)
704 option = options[card_idx];
709 if (dev_alloc_name(dev, dev->name) < 0)
710 goto err_free_unlock;
712 /* Read the station address EEPROM before doing the reset.
713 Nominally this should even be done before accepting the device, but
714 then we wouldn't have a device name with which to report the error.
715 The size test is for 6 bit vs. 8 bit address serial EEPROMs.
718 unsigned long iobase;
719 int read_cmd, ee_size;
723 /* Use IO only to avoid postponed writes and satisfy EEPROM timing
725 iobase = pci_resource_start(pdev, 1);
726 if ((do_eeprom_cmd(iobase, EE_READ_CMD << 24, 27) & 0xffe0000)
729 read_cmd = EE_READ_CMD << 24;
732 read_cmd = EE_READ_CMD << 22;
735 for (j = 0, i = 0, sum = 0; i < ee_size; i++) {
736 u16 value = do_eeprom_cmd(iobase, read_cmd | (i << 16), 27);
740 dev->dev_addr[j++] = value;
741 dev->dev_addr[j++] = value >> 8;
745 printk(KERN_WARNING "%s: Invalid EEPROM checksum %#4.4x, "
746 "check settings before activating this device!\n",
748 /* Don't unregister_netdev(dev); as the EEPro may actually be
749 usable, especially if the MAC address is set later.
750 On the other hand, it may be unusable if MDI data is corrupted. */
753 /* Reset the chip: stop Tx and Rx processes and clear counters.
754 This takes less than 10usec and will easily finish before the next
756 outl(PortReset, ioaddr + SCBPort);
757 inl(ioaddr + SCBPort);
760 if (eeprom[3] & 0x0100)
761 product = "OEM i82557/i82558 10/100 Ethernet";
763 product = pci_name(pdev);
765 printk(KERN_INFO "%s: %s, ", dev->name, product);
767 for (i = 0; i < 5; i++)
768 printk("%2.2X:", dev->dev_addr[i]);
769 printk("%2.2X, ", dev->dev_addr[i]);
771 printk("I/O at %#3lx, ", ioaddr);
773 printk("IRQ %d.\n", pdev->irq);
775 /* we must initialize base_addr early, for mdio_{read,write} */
776 dev->base_addr = ioaddr;
778 #if 1 || defined(kernel_bloat)
779 /* OK, this is pure kernel bloat. I don't like it when other drivers
780 waste non-pageable kernel space to emit similar messages, but I need
781 them for bug reports. */
783 const char *connectors[] = {" RJ45", " BNC", " AUI", " MII"};
784 /* The self-test results must be paragraph aligned. */
785 volatile s32 *self_test_results;
786 int boguscnt = 16000; /* Timeout for set-test. */
787 if ((eeprom[3] & 0x03) != 0x03)
788 printk(KERN_INFO " Receiver lock-up bug exists -- enabling"
790 printk(KERN_INFO " Board assembly %4.4x%2.2x-%3.3d, Physical"
791 " connectors present:",
792 eeprom[8], eeprom[9]>>8, eeprom[9] & 0xff);
793 for (i = 0; i < 4; i++)
794 if (eeprom[5] & (1<<i))
795 printk(connectors[i]);
796 printk("\n"KERN_INFO" Primary interface chip %s PHY #%d.\n",
797 phys[(eeprom[6]>>8)&15], eeprom[6] & 0x1f);
798 if (eeprom[7] & 0x0700)
799 printk(KERN_INFO " Secondary interface chip %s.\n",
800 phys[(eeprom[7]>>8)&7]);
801 if (((eeprom[6]>>8) & 0x3f) == DP83840
802 || ((eeprom[6]>>8) & 0x3f) == DP83840A) {
803 int mdi_reg23 = mdio_read(dev, eeprom[6] & 0x1f, 23) | 0x0422;
806 printk(KERN_INFO" DP83840 specific setup, setting register 23 to %4.4x.\n",
808 mdio_write(dev, eeprom[6] & 0x1f, 23, mdi_reg23);
810 if ((option >= 0) && (option & 0x70)) {
811 printk(KERN_INFO " Forcing %dMbs %s-duplex operation.\n",
812 (option & 0x20 ? 100 : 10),
813 (option & 0x10 ? "full" : "half"));
814 mdio_write(dev, eeprom[6] & 0x1f, MII_BMCR,
815 ((option & 0x20) ? 0x2000 : 0) | /* 100mbps? */
816 ((option & 0x10) ? 0x0100 : 0)); /* Full duplex? */
819 /* Perform a system self-test. */
820 self_test_results = (s32*) ((((long) tx_ring_space) + 15) & ~0xf);
821 self_test_results[0] = 0;
822 self_test_results[1] = -1;
823 outl(tx_ring_dma | PortSelfTest, ioaddr + SCBPort);
826 } while (self_test_results[1] == -1 && --boguscnt >= 0);
828 if (boguscnt < 0) { /* Test optimized out. */
829 printk(KERN_ERR "Self test failed, status %8.8x:\n"
830 KERN_ERR " Failure to initialize the i82557.\n"
831 KERN_ERR " Verify that the card is a bus-master"
833 self_test_results[1]);
835 printk(KERN_INFO " General self-test: %s.\n"
836 KERN_INFO " Serial sub-system self-test: %s.\n"
837 KERN_INFO " Internal registers self-test: %s.\n"
838 KERN_INFO " ROM checksum self-test: %s (%#8.8x).\n",
839 self_test_results[1] & 0x1000 ? "failed" : "passed",
840 self_test_results[1] & 0x0020 ? "failed" : "passed",
841 self_test_results[1] & 0x0008 ? "failed" : "passed",
842 self_test_results[1] & 0x0004 ? "failed" : "passed",
843 self_test_results[0]);
845 #endif /* kernel_bloat */
847 outl(PortReset, ioaddr + SCBPort);
848 inl(ioaddr + SCBPort);
851 /* Return the chip to its original power state. */
852 pci_set_power_state(pdev, acpi_idle_state);
854 pci_set_drvdata (pdev, dev);
855 SET_NETDEV_DEV(dev, &pdev->dev);
857 dev->irq = pdev->irq;
859 sp = netdev_priv(dev);
861 sp->msg_enable = DEBUG;
862 sp->acpi_pwr = acpi_idle_state;
863 sp->tx_ring = tx_ring_space;
864 sp->tx_ring_dma = tx_ring_dma;
865 sp->lstats = (struct speedo_stats *)(sp->tx_ring + TX_RING_SIZE);
866 sp->lstats_dma = TX_RING_ELEM_DMA(sp, TX_RING_SIZE);
867 init_timer(&sp->timer); /* used in ioctl() */
868 spin_lock_init(&sp->lock);
870 sp->mii_if.full_duplex = option >= 0 && (option & 0x10) ? 1 : 0;
872 if (full_duplex[card_idx] >= 0)
873 sp->mii_if.full_duplex = full_duplex[card_idx];
875 sp->default_port = option >= 0 ? (option & 0x0f) : 0;
877 sp->phy[0] = eeprom[6];
878 sp->phy[1] = eeprom[7];
880 sp->mii_if.phy_id = eeprom[6] & 0x1f;
881 sp->mii_if.phy_id_mask = 0x1f;
882 sp->mii_if.reg_num_mask = 0x1f;
883 sp->mii_if.dev = dev;
884 sp->mii_if.mdio_read = mdio_read;
885 sp->mii_if.mdio_write = mdio_write;
887 sp->rx_bug = (eeprom[3] & 0x03) == 3 ? 0 : 1;
888 if (((pdev->device > 0x1030 && (pdev->device < 0x103F)))
889 || (pdev->device == 0x2449) || (pdev->device == 0x2459)
890 || (pdev->device == 0x245D)) {
895 printk(KERN_INFO " Receiver lock-up workaround activated.\n");
897 /* The Speedo-specific entries in the device structure. */
898 dev->open = &speedo_open;
899 dev->hard_start_xmit = &speedo_start_xmit;
900 netif_set_tx_timeout(dev, &speedo_tx_timeout, TX_TIMEOUT);
901 dev->stop = &speedo_close;
902 dev->get_stats = &speedo_get_stats;
903 dev->set_multicast_list = &set_rx_mode;
904 dev->do_ioctl = &speedo_ioctl;
905 #ifdef CONFIG_NET_POLL_CONTROLLER
906 dev->poll_controller = &poll_speedo;
909 if (register_netdevice(dev))
910 goto err_free_unlock;
921 static void do_slow_command(struct net_device *dev, int cmd)
923 long cmd_ioaddr = dev->base_addr + SCBCmd;
926 if (inb(cmd_ioaddr) == 0) break;
927 while(++wait <= 200);
929 printk(KERN_ERR "Command %4.4x never accepted (%d polls)!\n",
930 inb(cmd_ioaddr), wait);
932 outb(cmd, cmd_ioaddr);
934 for (wait = 0; wait <= 100; wait++)
935 if (inb(cmd_ioaddr) == 0) return;
936 for (; wait <= 20000; wait++)
937 if (inb(cmd_ioaddr) == 0) return;
939 printk(KERN_ERR "Command %4.4x was not accepted after %d polls!"
940 " Current status %8.8x.\n",
941 cmd, wait, inl(dev->base_addr + SCBStatus));
944 /* Serial EEPROM section.
945 A "bit" grungy, but we work our way through bit-by-bit :->. */
946 /* EEPROM_Ctrl bits. */
947 #define EE_SHIFT_CLK 0x01 /* EEPROM shift clock. */
948 #define EE_CS 0x02 /* EEPROM chip select. */
949 #define EE_DATA_WRITE 0x04 /* EEPROM chip data in. */
950 #define EE_DATA_READ 0x08 /* EEPROM chip data out. */
/* EE_ENB asserts chip select with the upper control bits set; the two
   EE_WRITE_* values are EE_ENB plus a 0 or 1 on the data-in line. */
951 #define EE_ENB (0x4800 | EE_CS)
952 #define EE_WRITE_0 0x4802
953 #define EE_WRITE_1 0x4806
954 #define EE_OFFSET SCBeeprom
956 /* The fixes for the code were kindly provided by Dragan Stancevic
957 <visitor@valinux.com> to strictly follow Intel specifications of EEPROM
959 The publicly available sheet 64486302 (sec. 3.1) specifies 1us access
960 interval for serial EEPROM. However, it looks like that there is an
961 additional requirement dictating larger udelay's in the code below.
/* Bit-bang one command through the serial EEPROM interface and clock the
   response back in.  `cmd` carries the opcode/address bits; `cmd_len` is
   the index of the highest bit, shifted out MSB-first.  Presumably returns
   the accumulated `retval` bits (declaration/return elided in this
   listing -- TODO confirm).  udelay(2) after each edge satisfies the
   chip's serial EEPROM access-time requirement. */
963 static int __devinit do_eeprom_cmd(long ioaddr, int cmd, int cmd_len)
966 long ee_addr = ioaddr + SCBeeprom;
/* Assert chip select, then give the EEPROM one clock edge. */
968 io_outw(EE_ENB, ee_addr); udelay(2);
969 io_outw(EE_ENB | EE_SHIFT_CLK, ee_addr); udelay(2);
971 /* Shift the command bits out. */
973 short dataval = (cmd & (1 << cmd_len)) ? EE_WRITE_1 : EE_WRITE_0;
974 io_outw(dataval, ee_addr); udelay(2);
975 io_outw(dataval | EE_SHIFT_CLK, ee_addr); udelay(2);
/* Sample the data-out pin on every clock; bits accumulate MSB-first. */
976 retval = (retval << 1) | ((io_inw(ee_addr) & EE_DATA_READ) ? 1 : 0);
977 } while (--cmd_len >= 0);
978 io_outw(EE_ENB, ee_addr); udelay(2);
980 /* Terminate the EEPROM access. */
981 io_outw(EE_ENB & ~EE_CS, ee_addr);
/* Read one MII management register through the SCB MDI control register.
   Busy-polls for completion with a bounded counter so a dead PHY cannot
   hang the kernel.  (Return statement elided in this listing.) */
985 static int mdio_read(struct net_device *dev, int phy_id, int location)
987 long ioaddr = dev->base_addr;
988 int val, boguscnt = 64*10; /* <64 usec. to complete, typ 27 ticks */
/* 0x08000000 = MDI read opcode; PHY address in bits 25-21, register in 20-16. */
989 outl(0x08000000 | (location<<16) | (phy_id<<21), ioaddr + SCBCtrlMDI);
991 val = inl(ioaddr + SCBCtrlMDI);
992 if (--boguscnt < 0) {
993 printk(KERN_ERR " mdio_read() timed out with val = %8.8x.\n", val);
/* Spin until the command-complete bit (0x10000000) is set. */
996 } while (! (val & 0x10000000));
/* Write one MII management register through the SCB MDI control register.
   Mirrors mdio_read(): bounded busy-poll on the completion bit. */
1000 static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
1002 long ioaddr = dev->base_addr;
1003 int val, boguscnt = 64*10; /* <64 usec. to complete, typ 27 ticks */
/* 0x04000000 = MDI write opcode; value goes in the low 16 bits. */
1004 outl(0x04000000 | (location<<16) | (phy_id<<21) | value,
1005 ioaddr + SCBCtrlMDI);
1007 val = inl(ioaddr + SCBCtrlMDI);
1008 if (--boguscnt < 0) {
1009 printk(KERN_ERR" mdio_write() timed out with val = %8.8x.\n", val);
/* Spin until the command-complete bit (0x10000000) is set. */
1012 } while (! (val & 0x10000000));
/* Bring the interface up: power the chip, install the shared IRQ handler,
   build the Rx ring, start the hardware and queueing, and arm the media
   monitoring timer.  Returns 0 on success or the request_irq() error
   (return-type line and error paths elided in this listing). */
1016 speedo_open(struct net_device *dev)
1018 struct speedo_private *sp = netdev_priv(dev);
1019 long ioaddr = dev->base_addr;
1022 if (netif_msg_ifup(sp))
1023 printk(KERN_DEBUG "%s: speedo_open() irq %d.\n", dev->name, dev->irq);
/* Make sure the device is at full power (D0) before touching registers. */
1025 pci_set_power_state(sp->pdev, 0);
1027 /* Set up the Tx queue early.. */
1032 sp->in_interrupt = 0;
1034 /* .. we can safely take handler calls during init. */
1035 retval = request_irq(dev->irq, &speedo_interrupt, SA_SHIRQ, dev->name, dev);
1040 dev->if_port = sp->default_port;
1042 #ifdef oh_no_you_dont_unless_you_honour_the_options_passed_in_to_us
1043 /* Retrigger negotiation to reset previous errors. */
1044 if ((sp->phy[0] & 0x8000) == 0) {
1045 int phy_addr = sp->phy[0] & 0x1f ;
1046 /* Use 0x3300 for restarting NWay, other values to force xcvr:
1052 #ifdef honor_default_port
1053 mdio_write(dev, phy_addr, MII_BMCR, mii_ctrl[dev->default_port & 7]);
1055 mdio_write(dev, phy_addr, MII_BMCR, 0x3300);
1060 speedo_init_rx_ring(dev);
1062 /* Fire up the hardware. */
1063 outw(SCBMaskAll, ioaddr + SCBCmd);
1066 netdevice_start(dev);
1067 netif_start_queue(dev);
1069 /* Setup the chip and configure the multicast list. */
1070 sp->mc_setup_head = NULL;
1071 sp->mc_setup_tail = NULL;
1072 sp->flow_ctrl = sp->partner = 0;
1073 sp->rx_mode = -1; /* Invalid -> always reset the mode. */
/* Bit 15 set in phy[0] means "no MII xcvr"; only use MDIO when clear. */
1075 if ((sp->phy[0] & 0x8000) == 0)
1076 sp->mii_if.advertising = mdio_read(dev, sp->phy[0] & 0x1f, MII_ADVERTISE);
1078 mii_check_link(&sp->mii_if);
1080 if (netif_msg_ifup(sp)) {
1081 printk(KERN_DEBUG "%s: Done speedo_open(), status %8.8x.\n",
1082 dev->name, inw(ioaddr + SCBStatus));
1085 /* Set the timer. The timer serves a dual purpose:
1086 1) to monitor the media interface (e.g. link beat) and perhaps switch
1087 to an alternate media type
1088 2) to monitor Rx activity, and restart the Rx process if the receiver
1090 sp->timer.expires = RUN_AT((24*HZ)/10); /* 2.4 sec. */
1091 sp->timer.data = (unsigned long)dev;
1092 sp->timer.function = &speedo_timer; /* timer handler */
1093 add_timer(&sp->timer);
1095 /* No need to wait for the command unit to accept here. */
1096 if ((sp->phy[0] & 0x8000) == 0)
1097 mdio_read(dev, sp->phy[0] & 0x1f, MII_BMCR);
1102 /* Start the chip hardware after a full reset.
    Reloads the command/receive unit base registers, the statistics block
    address and Rx ring address, queues an IASetup (individual address)
    command on the Tx ring, then starts the CU with interrupts unmasked
    (except FCP/ER, which the ISR does not yet acknowledge). */
1103 static void speedo_resume(struct net_device *dev)
1105 struct speedo_private *sp = netdev_priv(dev);
1106 long ioaddr = dev->base_addr;
1108 /* Start with a Tx threshold of 256 (0x..20.... 8 byte units). */
1109 sp->tx_threshold = 0x01208000;
1111 /* Set the segment registers to '0'. */
1112 if (wait_for_cmd_done(dev) != 0) {
/* Previous command stalled: partially reset the port before continuing. */
1113 outl(PortPartialReset, ioaddr + SCBPort);
1117 outl(0, ioaddr + SCBPointer);
1118 inl(ioaddr + SCBPointer); /* Flush to PCI. */
1119 udelay(10); /* Bogus, but it avoids the bug. */
1121 /* Note: these next two operations can take a while. */
1122 do_slow_command(dev, RxAddrLoad);
1123 do_slow_command(dev, CUCmdBase);
1125 /* Load the statistics block and rx ring addresses. */
1126 outl(sp->lstats_dma, ioaddr + SCBPointer);
1127 inl(ioaddr + SCBPointer); /* Flush to PCI */
1129 outb(CUStatsAddr, ioaddr + SCBCmd);
1130 sp->lstats->done_marker = 0;
1131 wait_for_cmd_done(dev);
1133 if (sp->rx_ringp[sp->cur_rx % RX_RING_SIZE] == NULL) {
1134 if (netif_msg_rx_err(sp))
1135 printk(KERN_DEBUG "%s: NULL cur_rx in speedo_resume().\n",
/* Normal path (else branch elided): point the RU at the current RxFD. */
1138 outl(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
1139 ioaddr + SCBPointer);
1140 inl(ioaddr + SCBPointer); /* Flush to PCI */
1143 /* Note: RxStart should complete instantly. */
1144 do_slow_command(dev, RxStart);
1145 do_slow_command(dev, CUDumpStats);
1147 /* Fill the first command with our physical address. */
1149 struct descriptor *ias_cmd;
1152 (struct descriptor *)&sp->tx_ring[sp->cur_tx++ % TX_RING_SIZE];
1153 /* Avoid a bug(?!) here by marking the command already completed. */
1154 ias_cmd->cmd_status = cpu_to_le32((CmdSuspend | CmdIASetup) | 0xa000);
1156 cpu_to_le32(TX_RING_ELEM_DMA(sp, sp->cur_tx % TX_RING_SIZE));
/* 6 = ETH_ALEN: load the station MAC address into the IASetup command. */
1157 memcpy(ias_cmd->params, dev->dev_addr, 6);
1159 clear_suspend(sp->last_cmd);
1160 sp->last_cmd = ias_cmd;
1163 /* Start the chip's Tx process and unmask interrupts. */
1164 outl(TX_RING_ELEM_DMA(sp, sp->dirty_tx % TX_RING_SIZE),
1165 ioaddr + SCBPointer);
1166 /* We are not ACK-ing FCP and ER in the interrupt handler yet so they should
1167 remain masked --Dragan */
1168 outw(CUStart | SCBMaskEarlyRx | SCBMaskFlowCtl, ioaddr + SCBCmd);
1172 * Sometimes the receiver stops making progress. This routine knows how to
1173 * get it going again, without losing packets or being otherwise nasty like
1174 * a chip reset would be. Previously the driver had a whole sequence
1175 * of if RxSuspended, if it's no buffers do one thing, if it's no resources,
1176 * do another, etc. But those things don't really matter. Separate logic
1177 * in the ISR provides for allocating buffers--the other half of operation
1178 * is just making sure the receiver is active. speedo_rx_soft_reset does that.
1179 * The problem with the old, more involved algorithm showed up under
1180 * ping floods on the order of 60K packets/second on a 100Mbps fdx network.
/* Restart a stalled receiver without a full chip reset: abort the RU,
   invalidate the current RxFD's buffer pointer, then re-point the RU at
   the current ring slot and issue RxStart.  See the block comment above
   for why this replaced the old multi-case recovery logic. */
1183 speedo_rx_soft_reset(struct net_device *dev)
1185 struct speedo_private *sp = netdev_priv(dev);
1189 ioaddr = dev->base_addr;
1190 if (wait_for_cmd_done(dev) != 0) {
1191 printk("%s: previous command stalled\n", dev->name);
1195 * Put the hardware into a known state.
1197 outb(RxAbort, ioaddr + SCBCmd);
1199 rfd = sp->rx_ringp[sp->cur_rx % RX_RING_SIZE];
/* 0xffffffff marks the simplified-mode buffer address as "unused". */
1201 rfd->rx_buf_addr = 0xffffffff;
1203 if (wait_for_cmd_done(dev) != 0) {
1204 printk("%s: RxAbort command stalled\n", dev->name);
1207 outl(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
1208 ioaddr + SCBPointer);
1209 outb(RxStart, ioaddr + SCBCmd);
1213 /* Media monitoring and control. */
/* Periodic timer (rearms itself every 2s): tracks the MII link partner,
   reloads the Rx mode when flow-control capability changes, and works
   around the receiver-hang bug by forcing a multicast-list reload when
   no packet has arrived for 2s on affected chips. */
1214 static void speedo_timer(unsigned long data)
1216 struct net_device *dev = (struct net_device *)data;
1217 struct speedo_private *sp = netdev_priv(dev);
1218 long ioaddr = dev->base_addr;
1219 int phy_num = sp->phy[0] & 0x1f;
1221 /* We have MII and lost link beat. */
1222 if ((sp->phy[0] & 0x8000) == 0) {
1223 int partner = mdio_read(dev, phy_num, MII_LPA);
1224 if (partner != sp->partner) {
/* 0x0400 = pause capability bit in both advertisement and LPA. */
1225 int flow_ctrl = sp->mii_if.advertising & partner & 0x0400 ? 1 : 0;
1226 if (netif_msg_link(sp)) {
1227 printk(KERN_DEBUG "%s: Link status change.\n", dev->name);
1228 printk(KERN_DEBUG "%s: Old partner %x, new %x, adv %x.\n",
1229 dev->name, sp->partner, partner, sp->mii_if.advertising);
1231 sp->partner = partner;
1232 if (flow_ctrl != sp->flow_ctrl) {
1233 sp->flow_ctrl = flow_ctrl;
1234 sp->rx_mode = -1; /* Trigger a reload. */
1238 mii_check_link(&sp->mii_if);
1239 if (netif_msg_timer(sp)) {
1240 printk(KERN_DEBUG "%s: Media control tick, status %4.4x.\n",
1241 dev->name, inw(ioaddr + SCBStatus));
1243 if (sp->rx_mode < 0 ||
1244 (sp->rx_bug && jiffies - sp->last_rx_time > 2*HZ)) {
1245 /* We haven't received a packet in a Long Time. We might have been
1246 bitten by the receiver hang bug. This can be cleared by sending
1247 a set multicast list command. */
1248 if (netif_msg_timer(sp))
1249 printk(KERN_DEBUG "%s: Sending a multicast list set command"
1250 " from a timer routine,"
1251 " m=%d, j=%ld, l=%ld.\n",
1252 dev->name, sp->rx_mode, jiffies, sp->last_rx_time);
1255 /* We must continue to monitor the media. */
1256 sp->timer.expires = RUN_AT(2*HZ); /* 2.0 sec. */
1257 add_timer(&sp->timer);
/* Debug helper: dump the Tx and Rx ring status words (when pktdata
   messages are enabled) and the first 16 PHY registers, marking the
   dirty ('*'), current ('='), and last ('l') ring entries. */
1260 static void speedo_show_state(struct net_device *dev)
1262 struct speedo_private *sp = netdev_priv(dev);
1265 if (netif_msg_pktdata(sp)) {
1266 printk(KERN_DEBUG "%s: Tx ring dump, Tx queue %u / %u:\n",
1267 dev->name, sp->cur_tx, sp->dirty_tx);
1268 for (i = 0; i < TX_RING_SIZE; i++)
1269 printk(KERN_DEBUG "%s: %c%c%2d %8.8x.\n", dev->name,
1270 i == sp->dirty_tx % TX_RING_SIZE ? '*' : ' ',
1271 i == sp->cur_tx % TX_RING_SIZE ? '=' : ' ',
1272 i, sp->tx_ring[i].status);
1274 printk(KERN_DEBUG "%s: Printing Rx ring"
1275 " (next to receive into %u, dirty index %u).\n",
1276 dev->name, sp->cur_rx, sp->dirty_rx);
1277 for (i = 0; i < RX_RING_SIZE; i++)
1278 printk(KERN_DEBUG "%s: %c%c%c%2d %8.8x.\n", dev->name,
1279 sp->rx_ringp[i] == sp->last_rxf ? 'l' : ' ',
1280 i == sp->dirty_rx % RX_RING_SIZE ? '*' : ' ',
1281 i == sp->cur_rx % RX_RING_SIZE ? '=' : ' ',
1282 i, (sp->rx_ringp[i] != NULL) ?
1283 (unsigned)sp->rx_ringp[i]->status : 0);
1288 long ioaddr = dev->base_addr;
1289 int phy_num = sp->phy[0] & 0x1f;
1290 for (i = 0; i < 16; i++) {
1291 /* FIXME: what does it mean? --SAW */
1293 printk(KERN_DEBUG "%s: PHY index %d register %d is %4.4x.\n",
1294 dev->name, phy_num, i, mdio_read(dev, phy_num, i));
1301 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
/* Allocate one skb per Rx slot, overlaying a struct RxFD at the head of
   each buffer (simplified/flexible Rx mode), DMA-map each one, and chain
   them via the RxFD link fields.  Stops early (ring partially filled) if
   an allocation fails; the refill logic recovers later. */
1303 speedo_init_rx_ring(struct net_device *dev)
1305 struct speedo_private *sp = netdev_priv(dev);
1306 struct RxFD *rxf, *last_rxf = NULL;
1307 dma_addr_t last_rxf_dma = 0 /* to shut up the compiler */;
1312 for (i = 0; i < RX_RING_SIZE; i++) {
1313 struct sk_buff *skb;
1314 skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
1315 /* XXX: do we really want to call this before the NULL check? --hch */
1316 rx_align(skb); /* Align IP on 16 byte boundary */
1317 sp->rx_skbuff[i] = skb;
1319 break; /* OK. Just initially short of Rx bufs. */
1320 skb->dev = dev; /* Mark as being used by this device. */
/* The RxFD lives at the head of the skb data; data follows it. */
1321 rxf = (struct RxFD *)skb->tail;
1322 sp->rx_ringp[i] = rxf;
1323 sp->rx_ring_dma[i] =
1324 pci_map_single(sp->pdev, rxf,
1325 PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_BIDIRECTIONAL);
1326 skb_reserve(skb, sizeof(struct RxFD));
/* Link the previous descriptor to this one and flush it to the device. */
1328 last_rxf->link = cpu_to_le32(sp->rx_ring_dma[i]);
1329 pci_dma_sync_single_for_device(sp->pdev, last_rxf_dma,
1330 sizeof(struct RxFD), PCI_DMA_TODEVICE);
1333 last_rxf_dma = sp->rx_ring_dma[i];
1334 rxf->status = cpu_to_le32(0x00000001); /* '1' is flag value only. */
1335 rxf->link = 0; /* None yet. */
1336 /* This field unused by i82557. */
1337 rxf->rx_buf_addr = 0xffffffff;
1338 rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
1339 pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[i],
1340 sizeof(struct RxFD), PCI_DMA_TODEVICE);
1342 sp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1343 /* Mark the last entry as end-of-list. */
1344 last_rxf->status = cpu_to_le32(0xC0000002); /* '2' is flag value only. */
1345 pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[RX_RING_SIZE-1],
1346 sizeof(struct RxFD), PCI_DMA_TODEVICE);
1347 sp->last_rxf = last_rxf;
1348 sp->last_rxf_dma = last_rxf_dma;
/* Drop every pending Tx frame (counting them as tx_errors) and free all
   queued multicast-setup blocks, then wake the queue.  Used from the
   timeout path to recover from a hang or out-of-memory condition. */
1351 static void speedo_purge_tx(struct net_device *dev)
1353 struct speedo_private *sp = netdev_priv(dev);
1356 while ((int)(sp->cur_tx - sp->dirty_tx) > 0) {
1357 entry = sp->dirty_tx % TX_RING_SIZE;
1358 if (sp->tx_skbuff[entry]) {
1359 sp->stats.tx_errors++;
1360 pci_unmap_single(sp->pdev,
1361 le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
1362 sp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
1363 dev_kfree_skb_irq(sp->tx_skbuff[entry]);
1364 sp->tx_skbuff[entry] = 0;
/* Free the whole multicast-setup chain; tail pointer cleared below. */
1368 while (sp->mc_setup_head != NULL) {
1369 struct speedo_mc_block *t;
1370 if (netif_msg_tx_err(sp))
1371 printk(KERN_DEBUG "%s: freeing mc frame.\n", dev->name);
1372 pci_unmap_single(sp->pdev, sp->mc_setup_head->frame_dma,
1373 sp->mc_setup_head->len, PCI_DMA_TODEVICE);
1374 t = sp->mc_setup_head->next;
1375 kfree(sp->mc_setup_head);
1376 sp->mc_setup_head = t;
1378 sp->mc_setup_tail = NULL;
1380 netif_wake_queue(dev);
/* Fully reset the MII transceiver, then restore the previous BMCR and
   advertisement register values so negotiated settings survive. */
1383 static void reset_mii(struct net_device *dev)
1385 struct speedo_private *sp = netdev_priv(dev);
1387 /* Reset the MII transceiver, suggested by Fred Young @ scalable.com. */
1388 if ((sp->phy[0] & 0x8000) == 0) {
1389 int phy_addr = sp->phy[0] & 0x1f;
/* Save state to restore after the reset. */
1390 int advertising = mdio_read(dev, phy_addr, MII_ADVERTISE);
1391 int mii_bmcr = mdio_read(dev, phy_addr, MII_BMCR);
/* 0x0400 isolates the PHY; zeroing BMSR/ADVERTISE then 0x8000 resets it. */
1392 mdio_write(dev, phy_addr, MII_BMCR, 0x0400);
1393 mdio_write(dev, phy_addr, MII_BMSR, 0x0000);
1394 mdio_write(dev, phy_addr, MII_ADVERTISE, 0x0000);
1395 mdio_write(dev, phy_addr, MII_BMCR, 0x8000);
1396 #ifdef honor_default_port
1397 mdio_write(dev, phy_addr, MII_BMCR, mii_ctrl[dev->default_port & 7]);
1399 mdio_read(dev, phy_addr, MII_BMCR);
1400 mdio_write(dev, phy_addr, MII_BMCR, mii_bmcr);
1401 mdio_write(dev, phy_addr, MII_ADVERTISE, advertising);
1406 static void speedo_tx_timeout(struct net_device *dev)
1408 struct speedo_private *sp = netdev_priv(dev);
1409 long ioaddr = dev->base_addr;
1410 int status = inw(ioaddr + SCBStatus);
1411 unsigned long flags;
1413 if (netif_msg_tx_err(sp)) {
1414 printk(KERN_WARNING "%s: Transmit timed out: status %4.4x "
1415 " %4.4x at %d/%d command %8.8x.\n",
1416 dev->name, status, inw(ioaddr + SCBCmd),
1417 sp->dirty_tx, sp->cur_tx,
1418 sp->tx_ring[sp->dirty_tx % TX_RING_SIZE].status);
1421 speedo_show_state(dev);
1423 if ((status & 0x00C0) != 0x0080
1424 && (status & 0x003C) == 0x0010) {
1425 /* Only the command unit has stopped. */
1426 printk(KERN_WARNING "%s: Trying to restart the transmitter...\n",
1428 outl(TX_RING_ELEM_DMA(sp, dirty_tx % TX_RING_SIZE]),
1429 ioaddr + SCBPointer);
1430 outw(CUStart, ioaddr + SCBCmd);
1436 del_timer_sync(&sp->timer);
1437 /* Reset the Tx and Rx units. */
1438 outl(PortReset, ioaddr + SCBPort);
1439 /* We may get spurious interrupts here. But I don't think that they
1440 may do much harm. 1999/12/09 SAW */
1442 /* Disable interrupts. */
1443 outw(SCBMaskAll, ioaddr + SCBCmd);
1444 synchronize_irq(dev->irq);
1445 speedo_tx_buffer_gc(dev);
1446 /* Free as much as possible.
1447 It helps to recover from a hang because of out-of-memory.
1448 It also simplifies speedo_resume() in case TX ring is full or
1449 close-to-be full. */
1450 speedo_purge_tx(dev);
1451 speedo_refill_rx_buffers(dev, 1);
1452 spin_lock_irqsave(&sp->lock, flags);
1455 dev->trans_start = jiffies;
1456 spin_unlock_irqrestore(&sp->lock, flags);
1457 set_rx_mode(dev); /* it takes the spinlock itself --SAW */
1458 /* Reset MII transceiver. Do it before starting the timer to serialize
1459 mdio_xxx operations. Yes, it's a paranoya :-) 2000/05/09 SAW */
1461 sp->timer.expires = RUN_AT(2*HZ);
1462 add_timer(&sp->timer);
/* Queue one skb on the Tx ring (flexible-mode Tx command with a single
   buffer descriptor), then resume the command unit.  Serialized against
   the ISR by sp->lock.  Stops the queue when fewer than the slots
   reserved for set_rx_mode() remain.  (Return-type line and some
   brace/return lines are elided in this listing.) */
1468 speedo_start_xmit(struct sk_buff *skb, struct net_device *dev)
1470 struct speedo_private *sp = netdev_priv(dev);
1471 long ioaddr = dev->base_addr;
1474 /* Prevent interrupts from changing the Tx ring from underneath us. */
1475 unsigned long flags;
1477 spin_lock_irqsave(&sp->lock, flags);
1479 /* Check if there is enough space. */
1480 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
1481 printk(KERN_ERR "%s: incorrect tbusy state, fixed.\n", dev->name);
1482 netif_stop_queue(dev);
1484 spin_unlock_irqrestore(&sp->lock, flags);
1488 /* Calculate the Tx descriptor entry. */
1489 entry = sp->cur_tx++ % TX_RING_SIZE;
1491 sp->tx_skbuff[entry] = skb;
1492 sp->tx_ring[entry].status =
1493 cpu_to_le32(CmdSuspend | CmdTx | CmdTxFlex);
/* Request a Tx interrupt only every quarter of the ring. */
1494 if (!(entry & ((TX_RING_SIZE>>2)-1)))
1495 sp->tx_ring[entry].status |= cpu_to_le32(CmdIntr);
1496 sp->tx_ring[entry].link =
1497 cpu_to_le32(TX_RING_ELEM_DMA(sp, sp->cur_tx % TX_RING_SIZE));
1498 sp->tx_ring[entry].tx_desc_addr =
1499 cpu_to_le32(TX_RING_ELEM_DMA(sp, entry) + TX_DESCR_BUF_OFFSET);
1500 /* The data region is always in one buffer descriptor. */
1501 sp->tx_ring[entry].count = cpu_to_le32(sp->tx_threshold);
1502 sp->tx_ring[entry].tx_buf_addr0 =
1503 cpu_to_le32(pci_map_single(sp->pdev, skb->data,
1504 skb->len, PCI_DMA_TODEVICE));
1505 sp->tx_ring[entry].tx_buf_size0 = cpu_to_le32(skb->len);
1507 /* workaround for hardware bug on 10 mbit half duplex */
1509 if ((sp->partner == 0) && (sp->chip_id == 1)) {
1510 wait_for_cmd_done(dev);
/* A NOP command nudges the chip out of the hung state. */
1511 outb(0 , ioaddr + SCBCmd);
1515 /* Trigger the command unit resume. */
1516 wait_for_cmd_done(dev);
1517 clear_suspend(sp->last_cmd);
1518 /* We want the time window between clearing suspend flag on the previous
1519 command and resuming CU to be as small as possible.
1520 Interrupts in between are very undesired. --SAW */
1521 outb(CUResume, ioaddr + SCBCmd);
1522 sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
1524 /* Leave room for set_rx_mode(). If there is no more space than reserved
1525 for multicast filter mark the ring as full. */
1526 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
1527 netif_stop_queue(dev);
1531 spin_unlock_irqrestore(&sp->lock, flags);
1533 dev->trans_start = jiffies;
/* Reclaim completed Tx descriptors: count statistics, unmap and free the
   skbs, bump the Tx threshold on underrun, and release multicast-setup
   blocks whose commands have been consumed.  Called from the ISR and the
   timeout path; caller provides any needed locking. */
1538 static void speedo_tx_buffer_gc(struct net_device *dev)
1540 unsigned int dirty_tx;
1541 struct speedo_private *sp = netdev_priv(dev);
1543 dirty_tx = sp->dirty_tx;
1544 while ((int)(sp->cur_tx - dirty_tx) > 0) {
1545 int entry = dirty_tx % TX_RING_SIZE;
1546 int status = le32_to_cpu(sp->tx_ring[entry].status);
1548 if (netif_msg_tx_done(sp))
1549 printk(KERN_DEBUG " scavenge candidate %d status %4.4x.\n",
1551 if ((status & StatusComplete) == 0)
1552 break; /* It still hasn't been processed. */
/* On underrun, raise the Tx threshold (capped at 0x01e08000). */
1553 if (status & TxUnderrun)
1554 if (sp->tx_threshold < 0x01e08000) {
1555 if (netif_msg_tx_err(sp))
1556 printk(KERN_DEBUG "%s: TX underrun, threshold adjusted.\n",
1558 sp->tx_threshold += 0x00040000;
1560 /* Free the original skb. */
1561 if (sp->tx_skbuff[entry]) {
1562 sp->stats.tx_packets++; /* Count only user packets. */
1563 sp->stats.tx_bytes += sp->tx_skbuff[entry]->len;
1564 pci_unmap_single(sp->pdev,
1565 le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
1566 sp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
1567 dev_kfree_skb_irq(sp->tx_skbuff[entry]);
1568 sp->tx_skbuff[entry] = 0;
/* Sanity check: dirty pointer should never lag by more than a ring. */
1573 if (netif_msg_tx_err(sp) && (int)(sp->cur_tx - dirty_tx) > TX_RING_SIZE) {
1574 printk(KERN_ERR "out-of-sync dirty pointer, %d vs. %d,"
1576 dirty_tx, sp->cur_tx, sp->tx_full);
1577 dirty_tx += TX_RING_SIZE;
/* Free multicast blocks whose setup command slot has been reclaimed. */
1580 while (sp->mc_setup_head != NULL
1581 && (int)(dirty_tx - sp->mc_setup_head->tx - 1) > 0) {
1582 struct speedo_mc_block *t;
1583 if (netif_msg_tx_err(sp))
1584 printk(KERN_DEBUG "%s: freeing mc frame.\n", dev->name);
1585 pci_unmap_single(sp->pdev, sp->mc_setup_head->frame_dma,
1586 sp->mc_setup_head->len, PCI_DMA_TODEVICE);
1587 t = sp->mc_setup_head->next;
1588 kfree(sp->mc_setup_head);
1589 sp->mc_setup_head = t;
1591 if (sp->mc_setup_head == NULL)
1592 sp->mc_setup_tail = NULL;
1594 sp->dirty_tx = dirty_tx;
1597 /* The interrupt handler does all of the Rx thread work and cleans up
1598 after the Tx thread. */
/* Main ISR: acknowledges interrupt sources, runs the Rx thread, re-primes
   a suspended receiver, garbage-collects Tx, and wakes the queue when
   room opens up.  Bounded by max_interrupt_work iterations. */
1599 static irqreturn_t speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
1601 struct net_device *dev = (struct net_device *)dev_instance;
1602 struct speedo_private *sp;
1603 long ioaddr, boguscnt = max_interrupt_work;
1604 unsigned short status;
1605 unsigned int handled = 0;
1607 ioaddr = dev->base_addr;
1608 sp = netdev_priv(dev);
1610 #ifndef final_version
1611 /* A lock to prevent simultaneous entry on SMP machines. */
1612 if (test_and_set_bit(0, (void*)&sp->in_interrupt)) {
1613 printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
1615 sp->in_interrupt = 0; /* Avoid halting machine. */
1621 status = inw(ioaddr + SCBStatus);
1622 /* Acknowledge all of the current interrupt sources ASAP. */
1623 /* Will change from 0xfc00 to 0xff00 when we start handling
1624 FCP and ER interrupts --Dragan */
1625 outw(status & 0xfc00, ioaddr + SCBStatus);
1627 if (netif_msg_intr(sp))
1628 printk(KERN_DEBUG "%s: interrupt status=%#4.4x.\n",
/* No interrupt bits set: not ours (shared IRQ) -- exit the loop. */
1631 if ((status & 0xfc00) == 0)
1636 if ((status & 0x5000) || /* Packet received, or Rx error. */
1637 (sp->rx_ring_state&(RrNoMem|RrPostponed)) == RrPostponed)
1638 /* Need to gather the postponed packet. */
1641 /* Always check if all rx buffers are allocated. --SAW */
1642 speedo_refill_rx_buffers(dev, 0);
1644 spin_lock(&sp->lock);
1646 * The chip may have suspended reception for various reasons.
1647 * Check for that, and re-prime it should this be the case.
/* Bits 5:2 of SCBStatus encode the receive unit state. */
1649 switch ((status >> 2) & 0xf) {
1652 case 1: /* Suspended */
1653 case 2: /* No resources (RxFDs) */
1654 case 9: /* Suspended with no more RBDs */
1655 case 10: /* No resources due to no RBDs */
1656 case 12: /* Ready with no RBDs */
1657 speedo_rx_soft_reset(dev);
1659 case 3: case 5: case 6: case 7: case 8:
1660 case 11: case 13: case 14: case 15:
1661 /* these are all reserved values */
1666 /* User interrupt, Command/Tx unit interrupt or CU not active. */
1667 if (status & 0xA400) {
1668 speedo_tx_buffer_gc(dev);
1670 && (int)(sp->cur_tx - sp->dirty_tx) < TX_QUEUE_UNFULL) {
1671 /* The ring is no longer full. */
1673 netif_wake_queue(dev); /* Attention: under a spinlock. --SAW */
1677 spin_unlock(&sp->lock);
1679 if (--boguscnt < 0) {
1680 printk(KERN_ERR "%s: Too much work at interrupt, status=0x%4.4x.\n",
1682 /* Clear all interrupt sources. */
1683 /* Will change from 0xfc00 to 0xff00 when we start handling
1684 FCP and ER interrupts --Dragan */
1685 outw(0xfc00, ioaddr + SCBStatus);
1690 if (netif_msg_intr(sp))
1691 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1692 dev->name, inw(ioaddr + SCBStatus));
1694 clear_bit(0, (void*)&sp->in_interrupt);
1695 return IRQ_RETVAL(handled);
/* Allocate and DMA-map a fresh Rx skb for ring slot `entry`, overlaying
   the RxFD header at the start of the buffer.  On allocation failure the
   slot's ring pointer is cleared (NULL return elided in this listing). */
1698 static inline struct RxFD *speedo_rx_alloc(struct net_device *dev, int entry)
1700 struct speedo_private *sp = netdev_priv(dev);
1702 struct sk_buff *skb;
1703 /* Get a fresh skbuff to replace the consumed one. */
1704 skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
1705 /* XXX: do we really want to call this before the NULL check? --hch */
1706 rx_align(skb); /* Align IP on 16 byte boundary */
1707 sp->rx_skbuff[entry] = skb;
/* Allocation failed: mark this ring slot empty. */
1709 sp->rx_ringp[entry] = NULL;
1712 rxf = sp->rx_ringp[entry] = (struct RxFD *)skb->tail;
1713 sp->rx_ring_dma[entry] =
1714 pci_map_single(sp->pdev, rxf,
1715 PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
1717 skb_reserve(skb, sizeof(struct RxFD));
1718 rxf->rx_buf_addr = 0xffffffff;
1719 pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[entry],
1720 sizeof(struct RxFD), PCI_DMA_TODEVICE);
/* Splice a freshly prepared RxFD onto the end of the hardware Rx list:
   initialize its status/link/count, point the previous tail at it, clear
   the tail's end-of-list+suspend bits, and flush the tail to the device.
   Updates sp->last_rxf/last_rxf_dma (tail bookkeeping partially elided). */
1724 static inline void speedo_rx_link(struct net_device *dev, int entry,
1725 struct RxFD *rxf, dma_addr_t rxf_dma)
1727 struct speedo_private *sp = netdev_priv(dev);
1728 rxf->status = cpu_to_le32(0xC0000001); /* '1' for driver use only. */
1729 rxf->link = 0; /* None yet. */
1730 rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
1731 sp->last_rxf->link = cpu_to_le32(rxf_dma);
/* Clear EL+S on the old tail so the RU keeps going past it. */
1732 sp->last_rxf->status &= cpu_to_le32(~0xC0000000);
1733 pci_dma_sync_single_for_device(sp->pdev, sp->last_rxf_dma,
1734 sizeof(struct RxFD), PCI_DMA_TODEVICE);
1736 sp->last_rxf_dma = rxf_dma;
/* Refill the dirty Rx ring slot with a buffer and link it to the chip's
   list.  Returns -1 when out of memory (and `force` is not set); with
   `force`, steals an skb from a later ring entry so the hardware list is
   never left broken.  (Some brace/return lines elided in this listing.) */
1739 static int speedo_refill_rx_buf(struct net_device *dev, int force)
1741 struct speedo_private *sp = netdev_priv(dev);
1745 entry = sp->dirty_rx % RX_RING_SIZE;
1746 if (sp->rx_skbuff[entry] == NULL) {
1747 rxf = speedo_rx_alloc(dev, entry);
/* Allocation failed: report once, then either give up or borrow. */
1751 if (netif_msg_rx_err(sp) || !(sp->rx_ring_state & RrOOMReported)) {
1752 printk(KERN_WARNING "%s: can't fill rx buffer (force %d)!\n",
1754 sp->rx_ring_state |= RrOOMReported;
1756 speedo_show_state(dev);
1758 return -1; /* Better luck next time! */
1759 /* Borrow an skb from one of next entries. */
1760 for (forw = sp->dirty_rx + 1; forw != sp->cur_rx; forw++)
1761 if (sp->rx_skbuff[forw % RX_RING_SIZE] != NULL)
1763 if (forw == sp->cur_rx)
1765 forw_entry = forw % RX_RING_SIZE;
1766 sp->rx_skbuff[entry] = sp->rx_skbuff[forw_entry];
1767 sp->rx_skbuff[forw_entry] = NULL;
1768 rxf = sp->rx_ringp[forw_entry];
1769 sp->rx_ringp[forw_entry] = NULL;
1770 sp->rx_ringp[entry] = rxf;
/* Slot already had a buffer: just re-link the existing RxFD. */
1773 rxf = sp->rx_ringp[entry];
1775 speedo_rx_link(dev, entry, rxf, sp->rx_ring_dma[entry]);
1777 sp->rx_ring_state &= ~(RrNoMem|RrOOMReported); /* Mark the progress. */
/* Refill every dirty Rx slot up to cur_rx, stopping early if a refill
   fails (out of memory and not forced). */
1781 static void speedo_refill_rx_buffers(struct net_device *dev, int force)
1783 struct speedo_private *sp = netdev_priv(dev);
1785 /* Refill the RX ring. */
1786 while ((int)(sp->cur_rx - sp->dirty_rx) > 0 &&
1787 speedo_refill_rx_buf(dev, force) != -1);
/* Rx thread: walk completed RxFDs, copy small packets into fresh skbs
   (rx_copybreak) or pass large ones up directly, update statistics, and
   refill consumed ring slots one at a time.  Postpones the packet held in
   the last allocated buffer to avoid emptying the hardware list.
   (Return-type line and several brace lines elided in this listing.) */
1791 speedo_rx(struct net_device *dev)
1793 struct speedo_private *sp = netdev_priv(dev);
1794 int entry = sp->cur_rx % RX_RING_SIZE;
1795 int rx_work_limit = sp->dirty_rx + RX_RING_SIZE - sp->cur_rx;
1799 if (netif_msg_intr(sp))
1800 printk(KERN_DEBUG " In speedo_rx().\n");
1801 /* If we own the next entry, it's a new packet. Send it up. */
1802 while (sp->rx_ringp[entry] != NULL) {
/* Pull the descriptor back from the device before reading it. */
1806 pci_dma_sync_single_for_cpu(sp->pdev, sp->rx_ring_dma[entry],
1807 sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
1808 status = le32_to_cpu(sp->rx_ringp[entry]->status);
1809 pkt_len = le32_to_cpu(sp->rx_ringp[entry]->count) & 0x3fff;
1811 if (!(status & RxComplete))
1814 if (--rx_work_limit < 0)
1817 /* Check for a rare out-of-memory case: the current buffer is
1818 the last buffer allocated in the RX ring. --SAW */
1819 if (sp->last_rxf == sp->rx_ringp[entry]) {
1820 /* Postpone the packet. It'll be reaped at an interrupt when this
1821 packet is no longer the last packet in the ring. */
1822 if (netif_msg_rx_err(sp))
1823 printk(KERN_DEBUG "%s: RX packet postponed!\n",
1825 sp->rx_ring_state |= RrPostponed;
1829 if (netif_msg_rx_status(sp))
1830 printk(KERN_DEBUG " speedo_rx() status %8.8x len %d.\n", status,
1832 if ((status & (RxErrTooBig|RxOK|0x0f90)) != RxOK) {
1833 if (status & RxErrTooBig)
1834 printk(KERN_ERR "%s: Ethernet frame overran the Rx buffer, "
1835 "status %8.8x!\n", dev->name, status);
1836 else if (! (status & RxOK)) {
1837 /* There was a fatal error. This *should* be impossible. */
1838 sp->stats.rx_errors++;
1839 printk(KERN_ERR "%s: Anomalous event in speedo_rx(), "
1844 struct sk_buff *skb;
1846 /* Check if the packet is long enough to just accept without
1847 copying to a properly sized skbuff. */
1848 if (pkt_len < rx_copybreak
1849 && (skb = dev_alloc_skb(pkt_len + 2)) != 0) {
1851 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1852 /* 'skb_put()' points to the start of sk_buff data area. */
1853 pci_dma_sync_single_for_cpu(sp->pdev, sp->rx_ring_dma[entry],
1854 sizeof(struct RxFD) + pkt_len,
1855 PCI_DMA_FROMDEVICE);
1857 #if 1 || USE_IP_CSUM
1858 /* Packet is in one chunk -- we can copy + cksum. */
1859 eth_copy_and_sum(skb, sp->rx_skbuff[entry]->tail, pkt_len, 0);
1860 skb_put(skb, pkt_len);
1862 memcpy(skb_put(skb, pkt_len), sp->rx_skbuff[entry]->tail,
/* Hand the (still mapped) ring buffer back to the device. */
1865 pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[entry],
1866 sizeof(struct RxFD) + pkt_len,
1867 PCI_DMA_FROMDEVICE);
1870 /* Pass up the already-filled skbuff. */
1871 skb = sp->rx_skbuff[entry];
1873 printk(KERN_ERR "%s: Inconsistent Rx descriptor chain.\n",
1877 sp->rx_skbuff[entry] = NULL;
1878 skb_put(skb, pkt_len);
1880 sp->rx_ringp[entry] = NULL;
1881 pci_unmap_single(sp->pdev, sp->rx_ring_dma[entry],
1882 PKT_BUF_SZ + sizeof(struct RxFD),
1883 PCI_DMA_FROMDEVICE);
1885 skb->protocol = eth_type_trans(skb, dev);
1887 dev->last_rx = jiffies;
1888 sp->stats.rx_packets++;
1889 sp->stats.rx_bytes += pkt_len;
1891 entry = (++sp->cur_rx) % RX_RING_SIZE;
1892 sp->rx_ring_state &= ~RrPostponed;
1893 /* Refill the recently taken buffers.
1894 Do it one-by-one to handle traffic bursts better. */
1895 if (alloc_ok && speedo_refill_rx_buf(dev, 0) == -1)
1899 /* Try hard to refill the recently taken buffers. */
1900 speedo_refill_rx_buffers(dev, 1);
1903 sp->last_rx_time = jiffies;
/* Bring the interface down: stop the queue and timer, mask interrupts,
   partially reset the chip (to kill flow control), free the IRQ, then
   release every Rx/Tx skb, all multicast blocks, and drop the chip to a
   low-power state.  (Return-type and some brace lines elided.) */
1909 speedo_close(struct net_device *dev)
1911 long ioaddr = dev->base_addr;
1912 struct speedo_private *sp = netdev_priv(dev);
1915 netdevice_stop(dev);
1916 netif_stop_queue(dev);
1918 if (netif_msg_ifdown(sp))
1919 printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
1920 dev->name, inw(ioaddr + SCBStatus));
1922 /* Shut off the media monitoring timer. */
1923 del_timer_sync(&sp->timer);
1925 outw(SCBMaskAll, ioaddr + SCBCmd);
1927 /* Shutting down the chip nicely fails to disable flow control. So.. */
1928 outl(PortPartialReset, ioaddr + SCBPort);
1929 inl(ioaddr + SCBPort); /* flush posted write */
1931 * The chip requires a 10 microsecond quiet period. Wait here!
1935 free_irq(dev->irq, dev);
1936 speedo_show_state(dev);
1938 /* Free all the skbuffs in the Rx and Tx queues. */
1939 for (i = 0; i < RX_RING_SIZE; i++) {
1940 struct sk_buff *skb = sp->rx_skbuff[i];
1941 sp->rx_skbuff[i] = 0;
1942 /* Clear the Rx descriptors. */
1944 pci_unmap_single(sp->pdev,
1946 PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
1951 for (i = 0; i < TX_RING_SIZE; i++) {
1952 struct sk_buff *skb = sp->tx_skbuff[i];
1953 sp->tx_skbuff[i] = 0;
1954 /* Clear the Tx descriptors. */
1956 pci_unmap_single(sp->pdev,
1957 le32_to_cpu(sp->tx_ring[i].tx_buf_addr0),
1958 skb->len, PCI_DMA_TODEVICE);
1963 /* Free multicast setting blocks. */
1964 for (i = 0; sp->mc_setup_head != NULL; i++) {
1965 struct speedo_mc_block *t;
1966 t = sp->mc_setup_head->next;
1967 kfree(sp->mc_setup_head);
1968 sp->mc_setup_head = t;
1970 sp->mc_setup_tail = NULL;
1971 if (netif_msg_ifdown(sp))
1972 printk(KERN_DEBUG "%s: %d multicast blocks dropped.\n", dev->name, i);
/* 2 = PCI power state D2 (low power). */
1974 pci_set_power_state(sp->pdev, 2);
1979 /* The Speedo-3 has an especially awkward and unusable method of getting
1980 statistics out of the chip. It takes an unpredictable length of time
1981 for the dump-stats command to complete. To avoid a busy-wait loop we
1982 update the stats with the previous dump results, and then trigger a
1985 Oh, and incoming frames are dropped while executing dump-stats!
/* Folds the last completed hardware stats dump (done_marker == 0xA007)
   into sp->stats, then triggers a fresh CUDumpStats if the device is
   running.  The returned stats therefore lag by one dump cycle. */
1987 static struct net_device_stats *
1988 speedo_get_stats(struct net_device *dev)
1990 struct speedo_private *sp = netdev_priv(dev);
1991 long ioaddr = dev->base_addr;
1993 /* Update only if the previous dump finished. */
1994 if (sp->lstats->done_marker == le32_to_cpu(0xA007)) {
1995 sp->stats.tx_aborted_errors += le32_to_cpu(sp->lstats->tx_coll16_errs);
1996 sp->stats.tx_window_errors += le32_to_cpu(sp->lstats->tx_late_colls);
1997 sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats->tx_underruns);
1998 sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats->tx_lost_carrier);
1999 /*sp->stats.tx_deferred += le32_to_cpu(sp->lstats->tx_deferred);*/
2000 sp->stats.collisions += le32_to_cpu(sp->lstats->tx_total_colls);
2001 sp->stats.rx_crc_errors += le32_to_cpu(sp->lstats->rx_crc_errs);
2002 sp->stats.rx_frame_errors += le32_to_cpu(sp->lstats->rx_align_errs);
2003 sp->stats.rx_over_errors += le32_to_cpu(sp->lstats->rx_resource_errs);
2004 sp->stats.rx_fifo_errors += le32_to_cpu(sp->lstats->rx_overrun_errs);
2005 sp->stats.rx_length_errors += le32_to_cpu(sp->lstats->rx_runt_errs);
2006 sp->lstats->done_marker = 0x0000;
2007 if (netif_running(dev)) {
2008 unsigned long flags;
2009 /* Take a spinlock to make wait_for_cmd_done and sending the
2010 command atomic. --SAW */
2011 spin_lock_irqsave(&sp->lock, flags);
2012 wait_for_cmd_done(dev);
2013 outb(CUDumpStats, ioaddr + SCBCmd);
2014 spin_unlock_irqrestore(&sp->lock, flags);
2020 static int netdev_ethtool_ioctl(struct net_device *dev, void *useraddr)
2023 struct speedo_private *sp = netdev_priv(dev);
2025 if (copy_from_user(ðcmd, useraddr, sizeof(ethcmd)))
2029 /* get driver-specific version/etc. info */
2030 case ETHTOOL_GDRVINFO: {
2031 struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO};
2032 strncpy(info.driver, "eepro100", sizeof(info.driver)-1);
2033 strncpy(info.version, version, sizeof(info.version)-1);
2035 strcpy(info.bus_info, pci_name(sp->pdev));
2036 if (copy_to_user(useraddr, &info, sizeof(info)))
2042 case ETHTOOL_GSET: {
2043 struct ethtool_cmd ecmd = { ETHTOOL_GSET };
2044 spin_lock_irq(&sp->lock);
2045 mii_ethtool_gset(&sp->mii_if, &ecmd);
2046 spin_unlock_irq(&sp->lock);
2047 if (copy_to_user(useraddr, &ecmd, sizeof(ecmd)))
2052 case ETHTOOL_SSET: {
2054 struct ethtool_cmd ecmd;
2055 if (copy_from_user(&ecmd, useraddr, sizeof(ecmd)))
2057 spin_lock_irq(&sp->lock);
2058 r = mii_ethtool_sset(&sp->mii_if, &ecmd);
2059 spin_unlock_irq(&sp->lock);
2062 /* restart autonegotiation */
2063 case ETHTOOL_NWAY_RST: {
2064 return mii_nway_restart(&sp->mii_if);
2066 /* get link status */
2067 case ETHTOOL_GLINK: {
2068 struct ethtool_value edata = {ETHTOOL_GLINK};
2069 edata.data = mii_link_ok(&sp->mii_if);
2070 if (copy_to_user(useraddr, &edata, sizeof(edata)))
2074 /* get message-level */
2075 case ETHTOOL_GMSGLVL: {
2076 struct ethtool_value edata = {ETHTOOL_GMSGLVL};
2077 edata.data = sp->msg_enable;
2078 if (copy_to_user(useraddr, &edata, sizeof(edata)))
2082 /* set message-level */
2083 case ETHTOOL_SMSGLVL: {
2084 struct ethtool_value edata;
2085 if (copy_from_user(&edata, useraddr, sizeof(edata)))
2087 sp->msg_enable = edata.data;
2096 static int speedo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2098 struct speedo_private *sp = netdev_priv(dev);
2099 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&rq->ifr_data;
2100 int phy = sp->phy[0] & 0x1f;
2105 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
2108 case SIOCGMIIREG: /* Read MII PHY register. */
2109 /* FIXME: these operations need to be serialized with MDIO
2110 access from the timeout handler.
2111 They are currently serialized only with MDIO access from the
2112 timer routine. 2000/05/09 SAW */
2113 saved_acpi = pci_set_power_state(sp->pdev, 0);
2114 t = del_timer_sync(&sp->timer);
2115 data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
2117 add_timer(&sp->timer); /* may be set to the past --SAW */
2118 pci_set_power_state(sp->pdev, saved_acpi);
2121 case SIOCSMIIREG: /* Write MII PHY register. */
2122 if (!capable(CAP_NET_ADMIN))
2124 saved_acpi = pci_set_power_state(sp->pdev, 0);
2125 t = del_timer_sync(&sp->timer);
2126 mdio_write(dev, data->phy_id, data->reg_num, data->val_in);
2128 add_timer(&sp->timer); /* may be set to the past --SAW */
2129 pci_set_power_state(sp->pdev, saved_acpi);
2132 return netdev_ethtool_ioctl(dev, (void *) rq->ifr_data);
2138 /* Set or clear the multicast filter for this adaptor.
2139 This is very ugly with Intel chips -- we usually have to execute an
2140 entire configuration command, plus process a multicast command.
2141 This is complicated. We must put a large configuration command and
2142 an arbitrarily-sized multicast command in the transmit list.
2143 To minimize the disruption -- the previous command might have already
2144 loaded the link -- we convert the current command block, normally a Tx
2145 command, into a no-op and link it to the new command.
2147 static void set_rx_mode(struct net_device *dev)
2149 struct speedo_private *sp = netdev_priv(dev);
2150 long ioaddr = dev->base_addr;
2151 struct descriptor *last_cmd;
2153 unsigned long flags;
2156 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
2158 } else if ((dev->flags & IFF_ALLMULTI) ||
2159 dev->mc_count > multicast_filter_limit) {
2164 if (netif_msg_rx_status(sp))
2165 printk(KERN_DEBUG "%s: set_rx_mode %d -> %d\n", dev->name,
2166 sp->rx_mode, new_rx_mode);
2168 if ((int)(sp->cur_tx - sp->dirty_tx) > TX_RING_SIZE - TX_MULTICAST_SIZE) {
2169 /* The Tx ring is full -- don't add anything! Hope the mode will be
2170 * set again later. */
2175 if (new_rx_mode != sp->rx_mode) {
2176 u8 *config_cmd_data;
2178 spin_lock_irqsave(&sp->lock, flags);
2179 entry = sp->cur_tx++ % TX_RING_SIZE;
2180 last_cmd = sp->last_cmd;
2181 sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
2183 sp->tx_skbuff[entry] = 0; /* Redundant. */
2184 sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdConfigure);
2185 sp->tx_ring[entry].link =
2186 cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
2187 config_cmd_data = (void *)&sp->tx_ring[entry].tx_desc_addr;
2188 /* Construct a full CmdConfig frame. */
2189 memcpy(config_cmd_data, i82558_config_cmd, CONFIG_DATA_SIZE);
2190 config_cmd_data[1] = (txfifo << 4) | rxfifo;
2191 config_cmd_data[4] = rxdmacount;
2192 config_cmd_data[5] = txdmacount + 0x80;
2193 config_cmd_data[15] |= (new_rx_mode & 2) ? 1 : 0;
2194 /* 0x80 doesn't disable FC 0x84 does.
2195 Disable Flow control since we are not ACK-ing any FC interrupts
2196 for now. --Dragan */
2197 config_cmd_data[19] = 0x84;
2198 config_cmd_data[19] |= sp->mii_if.full_duplex ? 0x40 : 0;
2199 config_cmd_data[21] = (new_rx_mode & 1) ? 0x0D : 0x05;
2200 if (sp->phy[0] & 0x8000) { /* Use the AUI port instead. */
2201 config_cmd_data[15] |= 0x80;
2202 config_cmd_data[8] = 0;
2204 /* Trigger the command unit resume. */
2205 wait_for_cmd_done(dev);
2206 clear_suspend(last_cmd);
2207 outb(CUResume, ioaddr + SCBCmd);
2208 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
2209 netif_stop_queue(dev);
2212 spin_unlock_irqrestore(&sp->lock, flags);
2215 if (new_rx_mode == 0 && dev->mc_count < 4) {
2216 /* The simple case of 0-3 multicast list entries occurs often, and
2217 fits within one tx_ring[] entry. */
2218 struct dev_mc_list *mclist;
2219 u16 *setup_params, *eaddrs;
2221 spin_lock_irqsave(&sp->lock, flags);
2222 entry = sp->cur_tx++ % TX_RING_SIZE;
2223 last_cmd = sp->last_cmd;
2224 sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
2226 sp->tx_skbuff[entry] = 0;
2227 sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdMulticastList);
2228 sp->tx_ring[entry].link =
2229 cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
2230 sp->tx_ring[entry].tx_desc_addr = 0; /* Really MC list count. */
2231 setup_params = (u16 *)&sp->tx_ring[entry].tx_desc_addr;
2232 *setup_params++ = cpu_to_le16(dev->mc_count*6);
2233 /* Fill in the multicast addresses. */
2234 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
2235 i++, mclist = mclist->next) {
2236 eaddrs = (u16 *)mclist->dmi_addr;
2237 *setup_params++ = *eaddrs++;
2238 *setup_params++ = *eaddrs++;
2239 *setup_params++ = *eaddrs++;
2242 wait_for_cmd_done(dev);
2243 clear_suspend(last_cmd);
2244 /* Immediately trigger the command unit resume. */
2245 outb(CUResume, ioaddr + SCBCmd);
2247 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
2248 netif_stop_queue(dev);
2251 spin_unlock_irqrestore(&sp->lock, flags);
2252 } else if (new_rx_mode == 0) {
2253 struct dev_mc_list *mclist;
2254 u16 *setup_params, *eaddrs;
2255 struct speedo_mc_block *mc_blk;
2256 struct descriptor *mc_setup_frm;
2259 mc_blk = kmalloc(sizeof(*mc_blk) + 2 + multicast_filter_limit*6,
2261 if (mc_blk == NULL) {
2262 printk(KERN_ERR "%s: Failed to allocate a setup frame.\n",
2264 sp->rx_mode = -1; /* We failed, try again. */
2267 mc_blk->next = NULL;
2268 mc_blk->len = 2 + multicast_filter_limit*6;
2270 pci_map_single(sp->pdev, &mc_blk->frame, mc_blk->len,
2272 mc_setup_frm = &mc_blk->frame;
2274 /* Fill the setup frame. */
2275 if (netif_msg_ifup(sp))
2276 printk(KERN_DEBUG "%s: Constructing a setup frame at %p.\n",
2277 dev->name, mc_setup_frm);
2278 mc_setup_frm->cmd_status =
2279 cpu_to_le32(CmdSuspend | CmdIntr | CmdMulticastList);
2280 /* Link set below. */
2281 setup_params = (u16 *)&mc_setup_frm->params;
2282 *setup_params++ = cpu_to_le16(dev->mc_count*6);
2283 /* Fill in the multicast addresses. */
2284 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
2285 i++, mclist = mclist->next) {
2286 eaddrs = (u16 *)mclist->dmi_addr;
2287 *setup_params++ = *eaddrs++;
2288 *setup_params++ = *eaddrs++;
2289 *setup_params++ = *eaddrs++;
2292 /* Disable interrupts while playing with the Tx Cmd list. */
2293 spin_lock_irqsave(&sp->lock, flags);
2295 if (sp->mc_setup_tail)
2296 sp->mc_setup_tail->next = mc_blk;
2298 sp->mc_setup_head = mc_blk;
2299 sp->mc_setup_tail = mc_blk;
2300 mc_blk->tx = sp->cur_tx;
2302 entry = sp->cur_tx++ % TX_RING_SIZE;
2303 last_cmd = sp->last_cmd;
2304 sp->last_cmd = mc_setup_frm;
2306 /* Change the command to a NoOp, pointing to the CmdMulti command. */
2307 sp->tx_skbuff[entry] = 0;
2308 sp->tx_ring[entry].status = cpu_to_le32(CmdNOp);
2309 sp->tx_ring[entry].link = cpu_to_le32(mc_blk->frame_dma);
2311 /* Set the link in the setup frame. */
2312 mc_setup_frm->link =
2313 cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
2315 pci_dma_sync_single_for_device(sp->pdev, mc_blk->frame_dma,
2316 mc_blk->len, PCI_DMA_TODEVICE);
2318 wait_for_cmd_done(dev);
2319 clear_suspend(last_cmd);
2320 /* Immediately trigger the command unit resume. */
2321 outb(CUResume, ioaddr + SCBCmd);
2323 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
2324 netif_stop_queue(dev);
2327 spin_unlock_irqrestore(&sp->lock, flags);
2329 if (netif_msg_rx_status(sp))
2330 printk(" CmdMCSetup frame length %d in entry %d.\n",
2331 dev->mc_count, entry);
2334 sp->rx_mode = new_rx_mode;
2338 static int eepro100_suspend(struct pci_dev *pdev, u32 state)
2340 struct net_device *dev = pci_get_drvdata (pdev);
2341 struct speedo_private *sp = netdev_priv(dev);
2342 long ioaddr = dev->base_addr;
2344 pci_save_state(pdev, sp->pm_state);
2346 if (!netif_running(dev))
2349 del_timer_sync(&sp->timer);
2351 netif_device_detach(dev);
2352 outl(PortPartialReset, ioaddr + SCBPort);
2354 /* XXX call pci_set_power_state ()? */
2358 static int eepro100_resume(struct pci_dev *pdev)
2360 struct net_device *dev = pci_get_drvdata (pdev);
2361 struct speedo_private *sp = netdev_priv(dev);
2362 long ioaddr = dev->base_addr;
2364 pci_restore_state(pdev, sp->pm_state);
2366 if (!netif_running(dev))
2369 /* I'm absolutely uncertain if this part of code may work.
2371 - correct hardware reinitialization;
2372 - correct driver behavior between different steps of the
2374 - serialization with other driver calls.
2376 outw(SCBMaskAll, ioaddr + SCBCmd);
2378 netif_device_attach(dev);
2380 sp->flow_ctrl = sp->partner = 0;
2382 sp->timer.expires = RUN_AT(2*HZ);
2383 add_timer(&sp->timer);
2386 #endif /* CONFIG_PM */
2388 static void __devexit eepro100_remove_one (struct pci_dev *pdev)
2390 struct net_device *dev = pci_get_drvdata (pdev);
2391 struct speedo_private *sp = netdev_priv(dev);
2393 unregister_netdev(dev);
2395 release_region(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
2396 release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
2399 iounmap((char *)dev->base_addr);
2402 pci_free_consistent(pdev, TX_RING_SIZE * sizeof(struct TxFD)
2403 + sizeof(struct speedo_stats),
2404 sp->tx_ring, sp->tx_ring_dma);
2405 pci_disable_device(pdev);
2409 static struct pci_device_id eepro100_pci_tbl[] = {
2410 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82557,
2411 PCI_ANY_ID, PCI_ANY_ID, },
2412 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82559ER,
2413 PCI_ANY_ID, PCI_ANY_ID, },
2414 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_7,
2415 PCI_ANY_ID, PCI_ANY_ID, },
2416 { PCI_VENDOR_ID_INTEL, 0x1029, PCI_ANY_ID, PCI_ANY_ID, },
2417 { PCI_VENDOR_ID_INTEL, 0x1030, PCI_ANY_ID, PCI_ANY_ID, },
2418 { PCI_VENDOR_ID_INTEL, 0x1031, PCI_ANY_ID, PCI_ANY_ID, },
2419 { PCI_VENDOR_ID_INTEL, 0x1032, PCI_ANY_ID, PCI_ANY_ID, },
2420 { PCI_VENDOR_ID_INTEL, 0x1033, PCI_ANY_ID, PCI_ANY_ID, },
2421 { PCI_VENDOR_ID_INTEL, 0x1034, PCI_ANY_ID, PCI_ANY_ID, },
2422 { PCI_VENDOR_ID_INTEL, 0x1035, PCI_ANY_ID, PCI_ANY_ID, },
2423 { PCI_VENDOR_ID_INTEL, 0x1036, PCI_ANY_ID, PCI_ANY_ID, },
2424 { PCI_VENDOR_ID_INTEL, 0x1037, PCI_ANY_ID, PCI_ANY_ID, },
2425 { PCI_VENDOR_ID_INTEL, 0x1038, PCI_ANY_ID, PCI_ANY_ID, },
2426 { PCI_VENDOR_ID_INTEL, 0x1039, PCI_ANY_ID, PCI_ANY_ID, },
2427 { PCI_VENDOR_ID_INTEL, 0x103A, PCI_ANY_ID, PCI_ANY_ID, },
2428 { PCI_VENDOR_ID_INTEL, 0x103B, PCI_ANY_ID, PCI_ANY_ID, },
2429 { PCI_VENDOR_ID_INTEL, 0x103C, PCI_ANY_ID, PCI_ANY_ID, },
2430 { PCI_VENDOR_ID_INTEL, 0x103D, PCI_ANY_ID, PCI_ANY_ID, },
2431 { PCI_VENDOR_ID_INTEL, 0x103E, PCI_ANY_ID, PCI_ANY_ID, },
2432 { PCI_VENDOR_ID_INTEL, 0x1050, PCI_ANY_ID, PCI_ANY_ID, },
2433 { PCI_VENDOR_ID_INTEL, 0x1059, PCI_ANY_ID, PCI_ANY_ID, },
2434 { PCI_VENDOR_ID_INTEL, 0x1227, PCI_ANY_ID, PCI_ANY_ID, },
2435 { PCI_VENDOR_ID_INTEL, 0x1228, PCI_ANY_ID, PCI_ANY_ID, },
2436 { PCI_VENDOR_ID_INTEL, 0x2449, PCI_ANY_ID, PCI_ANY_ID, },
2437 { PCI_VENDOR_ID_INTEL, 0x2459, PCI_ANY_ID, PCI_ANY_ID, },
2438 { PCI_VENDOR_ID_INTEL, 0x245D, PCI_ANY_ID, PCI_ANY_ID, },
2439 { PCI_VENDOR_ID_INTEL, 0x5200, PCI_ANY_ID, PCI_ANY_ID, },
2440 { PCI_VENDOR_ID_INTEL, 0x5201, PCI_ANY_ID, PCI_ANY_ID, },
2443 MODULE_DEVICE_TABLE(pci, eepro100_pci_tbl);
2445 static struct pci_driver eepro100_driver = {
2447 .id_table = eepro100_pci_tbl,
2448 .probe = eepro100_init_one,
2449 .remove = __devexit_p(eepro100_remove_one),
2451 .suspend = eepro100_suspend,
2452 .resume = eepro100_resume,
2453 #endif /* CONFIG_PM */
2456 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,48)
2457 static int pci_module_init(struct pci_driver *pdev)
2461 rc = pci_register_driver(pdev);
2463 printk(KERN_INFO "%s: No cards found, driver not installed.\n",
2465 pci_unregister_driver(pdev);
2472 static int __init eepro100_init_module(void)
2477 return pci_module_init(&eepro100_driver);
2480 static void __exit eepro100_cleanup_module(void)
2482 pci_unregister_driver(&eepro100_driver);
2485 module_init(eepro100_init_module);
2486 module_exit(eepro100_cleanup_module);
2490 * compile-command: "gcc -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c eepro100.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"