1 /* drivers/net/eepro100.c: An Intel i82557-559 Ethernet driver for Linux. */
3 Written 1996-1999 by Donald Becker.
5 The driver also contains updates by different kernel developers
6 (see incomplete list below).
7 Current maintainer is Andrey V. Savochkin <saw@saw.sw.com.sg>.
8 Please use this email address and linux-kernel mailing list for bug reports.
10 This software may be used and distributed according to the terms
11 of the GNU General Public License, incorporated herein by reference.
13 This driver is for the Intel EtherExpress Pro100 (Speedo3) design.
14 It should work with all i82557/558/559 boards.
17 1998 Apr - 2000 Feb Andrey V. Savochkin <saw@saw.sw.com.sg>
18 Serious fixes for multicast filter list setting, TX timeout routine;
19 RX ring refilling logic; other stuff
20 2000 Feb Jeff Garzik <jgarzik@pobox.com>
21 Convert to new PCI driver interface
22 2000 Mar 24 Dragan Stancevic <visitor@valinux.com>
23 Disabled FC and ER, to avoid lockups when we get FCP interrupts.
24 2000 Jul 17 Goutham Rao <goutham.rao@intel.com>
25 PCI DMA API fixes, adding pci_dma_sync_single calls where necessary
26 2000 Aug 31 David Mosberger <davidm@hpl.hp.com>
27 rx_align support: enables rx DMA without causing unaligned accesses.
30 static const char *version =
31 "eepro100.c:v1.09j-t 9/29/99 Donald Becker http://www.scyld.com/network/eepro100.html\n"
32 "eepro100.c: $Revision: 1.36 $ 2000/11/17 Modified by Andrey V. Savochkin <saw@saw.sw.com.sg> and others\n";
34 /* A few user-configurable values that apply to all boards.
35 First set is undocumented and spelled per Intel recommendations. */
/* Undocumented Intel-recommended hardware tuning knobs; each is exposed as a
   module parameter via the MODULE_PARM() declarations further down. */
37 static int congenb /* = 0 */; /* Enable congestion control in the DP83840. */
38 static int txfifo = 8; /* Tx FIFO threshold in 4 byte units, 0-15 */
39 static int rxfifo = 8; /* Rx FIFO threshold, in 4 byte units (8 => 32 bytes). */
40 /* Tx/Rx DMA burst length, 0-127, 0 == no preemption, tx==128 -> disabled. */
41 static int txdmacount = 128;
42 static int rxdmacount /* = 0 */;
44 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__) || defined(__mips__) || \
46 /* align rx buffers to 2 bytes so that IP header is aligned */
47 # define rx_align(skb) skb_reserve((skb), 2)
48 # define RxFD_ALIGNMENT __attribute__ ((aligned (2), packed))
50 # define rx_align(skb)
51 # define RxFD_ALIGNMENT
54 /* Set the copy breakpoint for the copy-only-tiny-buffer Rx method.
55 Lower values use more memory, but are faster. */
56 static int rx_copybreak = 200;
58 /* Maximum events (Rx packets, etc.) to handle at each interrupt. */
59 static int max_interrupt_work = 20;
61 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast) */
62 static int multicast_filter_limit = 64;
64 /* 'options' is used to pass a transceiver override or full-duplex flag
65 e.g. "options=16" for FD, "options=32" for 100mbps-only. */
66 static int full_duplex[] = {-1, -1, -1, -1, -1, -1, -1, -1};
67 static int options[] = {-1, -1, -1, -1, -1, -1, -1, -1};
69 /* A few values that may be tweaked. */
70 /* The ring sizes should be a power of two for efficiency. */
71 #define TX_RING_SIZE 64
72 #define RX_RING_SIZE 64
73 /* How many Tx-ring slots a multicast filter setup may take.
74 Do not decrease without changing set_rx_mode() implementation. */
75 #define TX_MULTICAST_SIZE 2
76 #define TX_MULTICAST_RESERV (TX_MULTICAST_SIZE*2)
77 /* Actual number of TX packets queued, must be
78 <= TX_RING_SIZE-TX_MULTICAST_RESERV. */
79 #define TX_QUEUE_LIMIT (TX_RING_SIZE-TX_MULTICAST_RESERV)
80 /* Hysteresis marking queue as no longer full. */
81 #define TX_QUEUE_UNFULL (TX_QUEUE_LIMIT-4)
83 /* Operational parameters that usually are not changed. */
85 /* Time in jiffies before concluding the transmitter is hung. */
86 #define TX_TIMEOUT (2*HZ)
87 /* Size of a pre-allocated Rx buffer: <Ethernet MTU> + slack.*/
88 #define PKT_BUF_SZ 1536
90 #if !defined(__OPTIMIZE__) || !defined(__KERNEL__)
91 #warning You must compile this file with the correct options!
92 #warning See the last lines of the source file.
93 #error You must compile this driver with "-O".
96 #include <linux/config.h>
97 #include <linux/version.h>
98 #include <linux/module.h>
100 #include <linux/kernel.h>
101 #include <linux/string.h>
102 #include <linux/errno.h>
103 #include <linux/ioport.h>
104 #include <linux/slab.h>
105 #include <linux/interrupt.h>
106 #include <linux/timer.h>
107 #include <linux/pci.h>
108 #include <linux/spinlock.h>
109 #include <linux/init.h>
110 #include <linux/mii.h>
111 #include <linux/delay.h>
113 #include <asm/bitops.h>
115 #include <asm/uaccess.h>
118 #include <linux/netdevice.h>
119 #include <linux/etherdevice.h>
120 #include <linux/rtnetlink.h>
121 #include <linux/skbuff.h>
122 #include <linux/ethtool.h>
123 #include <linux/mii.h>
125 /* enable PIO instead of MMIO, if CONFIG_EEPRO100_PIO is selected */
126 #ifdef CONFIG_EEPRO100_PIO
130 static int debug = -1;
131 #define DEBUG_DEFAULT (NETIF_MSG_DRV | \
135 #define DEBUG ((debug >= 0) ? (1<<debug)-1 : DEBUG_DEFAULT)
138 MODULE_AUTHOR("Maintainer: Andrey V. Savochkin <saw@saw.sw.com.sg>");
139 MODULE_DESCRIPTION("Intel i82557/i82558/i82559 PCI EtherExpressPro driver");
140 MODULE_LICENSE("GPL");
141 MODULE_PARM(debug, "i");
142 MODULE_PARM(options, "1-" __MODULE_STRING(8) "i");
143 MODULE_PARM(full_duplex, "1-" __MODULE_STRING(8) "i");
144 MODULE_PARM(congenb, "i");
145 MODULE_PARM(txfifo, "i");
146 MODULE_PARM(rxfifo, "i");
147 MODULE_PARM(txdmacount, "i");
148 MODULE_PARM(rxdmacount, "i");
149 MODULE_PARM(rx_copybreak, "i");
150 MODULE_PARM(max_interrupt_work, "i");
151 MODULE_PARM(multicast_filter_limit, "i");
152 MODULE_PARM_DESC(debug, "debug level (0-6)");
153 MODULE_PARM_DESC(options, "Bits 0-3: transceiver type, bit 4: full duplex, bit 5: 100Mbps");
154 MODULE_PARM_DESC(full_duplex, "full duplex setting(s) (1)");
155 MODULE_PARM_DESC(congenb, "Enable congestion control (1)");
156 MODULE_PARM_DESC(txfifo, "Tx FIFO threshold in 4 byte units, (0-15)");
157 MODULE_PARM_DESC(rxfifo, "Rx FIFO threshold in 4 byte units, (0-15)");
/* Fixed: the descriptions below must name the actual parameters declared with
   MODULE_PARM above (txdmacount/rxdmacount); the previous misspellings
   "txdmaccount"/"rxdmaccount" attached descriptions to parameters that do
   not exist, so modinfo showed no help for the real ones. */
158 MODULE_PARM_DESC(txdmacount, "Tx DMA burst length; 128 - disable (0-128)");
159 MODULE_PARM_DESC(rxdmacount, "Rx DMA burst length; 128 - disable (0-128)");
160 MODULE_PARM_DESC(rx_copybreak, "copy breakpoint for copy-only-tiny-frames");
161 MODULE_PARM_DESC(max_interrupt_work, "maximum events handled per interrupt");
162 MODULE_PARM_DESC(multicast_filter_limit, "maximum number of filtered multicast addresses");
164 #define RUN_AT(x) (jiffies + (x))
166 /* ACPI power states don't universally work (yet) */
168 #undef pci_set_power_state
169 #define pci_set_power_state null_set_power_state
170 static inline int null_set_power_state(struct pci_dev *dev, int state)
174 #endif /* CONFIG_PM */
176 #define netdevice_start(dev)
177 #define netdevice_stop(dev)
178 #define netif_set_tx_timeout(dev, tf, tm) \
180 (dev)->tx_timeout = (tf); \
181 (dev)->watchdog_timeo = (tm); \
189 I. Board Compatibility
191 This device driver is designed for the Intel i82557 "Speedo3" chip, Intel's
192 single-chip fast Ethernet controller for PCI, as used on the Intel
193 EtherExpress Pro 100 adapter.
195 II. Board-specific settings
197 PCI bus devices are configured by the system at boot time, so no jumpers
198 need to be set on the board. The system BIOS should be set to assign the
199 PCI INTA signal to an otherwise unused system IRQ line. While it's
200 possible to share PCI interrupt lines, it negatively impacts performance and
201 only recent kernels support it.
203 III. Driver operation
206 The Speedo3 is very similar to other Intel network chips, that is to say
207 "apparently designed on a different planet". This chip retains the complex
208 Rx and Tx descriptors and multiple buffers pointers as previous chips, but
209 also has simplified Tx and Rx buffer modes. This driver uses the "flexible"
210 Tx mode, but in a simplified lower-overhead manner: it associates only a
211 single buffer descriptor with each frame descriptor.
213 Despite the extra space overhead in each receive skbuff, the driver must use
214 the simplified Rx buffer mode to assure that only a single data buffer is
215 associated with each RxFD. The driver implements this by reserving space
216 for the Rx descriptor at the head of each Rx skbuff.
218 The Speedo-3 has receive and command unit base addresses that are added to
219 almost all descriptor pointers. The driver sets these to zero, so that all
220 pointer fields are absolute addresses.
222 The System Control Block (SCB) of some previous Intel chips exists on the
223 chip in both PCI I/O and memory space. This driver uses the I/O space
224 registers, but might switch to memory mapped mode to better support non-x86
227 IIIB. Transmit structure
229 The driver must use the complex Tx command+descriptor mode in order to
230 have an indirect pointer to the skbuff data section. Each Tx command block
231 (TxCB) is associated with two immediately appended Tx Buffer Descriptor
232 (TxBD). A fixed ring of these TxCB+TxBD pairs are kept as part of the
233 speedo_private data structure for each adapter instance.
235 The newer i82558 explicitly supports this structure, and can read the two
236 TxBDs in the same PCI burst as the TxCB.
238 This ring structure is used for all normal transmit packets, but the
239 transmit packet descriptors aren't long enough for most non-Tx commands such
240 as CmdConfigure. This is complicated by the possibility that the chip has
241 already loaded the link address in the previous descriptor. So for these
242 commands we convert the next free descriptor on the ring to a NoOp, and point
243 that descriptor's link to the complex command.
245 An additional complexity of these non-transmit commands are that they may be
246 added asynchronous to the normal transmit queue, so we disable interrupts
247 whenever the Tx descriptor ring is manipulated.
249 A notable aspect of these special configure commands is that they do
250 work with the normal Tx ring entry scavenge method. The Tx ring scavenge
251 is done at interrupt time using the 'dirty_tx' index, and checking for the
252 command-complete bit. While the setup frames may have the NoOp command on the
253 Tx ring marked as complete, but not have completed the setup command, this
254 is not a problem. The tx_ring entry can be still safely reused, as the
255 tx_skbuff[] entry is always empty for config_cmd and mc_setup frames.
257 Commands may have bits set e.g. CmdSuspend in the command word to either
258 suspend or stop the transmit/command unit. This driver always flags the last
259 command with CmdSuspend, erases the CmdSuspend in the previous command, and
260 then issues a CU_RESUME.
261 Note: Watch out for the potential race condition here: imagine
262 erasing the previous suspend
263 the chip processes the previous command
264 the chip processes the final command, and suspends
266 the chip processes the next-yet-valid post-final-command.
267 So blindly sending a CU_RESUME is only safe if we do it immediately after
268 erasing the previous CmdSuspend, without the possibility of an
269 intervening delay. Thus the resume command is always within the
270 interrupts-disabled region. This is a timing dependence, but handling this
271 condition in a timing-independent way would considerably complicate the code.
273 Note: In previous generation Intel chips, restarting the command unit was a
274 notoriously slow process. This is presumably no longer true.
276 IIIC. Receive structure
278 Because of the bus-master support on the Speedo3 this driver uses the new
279 SKBUFF_RX_COPYBREAK scheme, rather than a fixed intermediate receive buffer.
280 This scheme allocates full-sized skbuffs as receive buffers. The value
281 SKBUFF_RX_COPYBREAK is used as the copying breakpoint: it is chosen to
282 trade-off the memory wasted by passing the full-sized skbuff to the queue
283 layer for all frames vs. the copying cost of copying a frame to a
284 correctly-sized skbuff.
286 For small frames the copying cost is negligible (esp. considering that we
287 are pre-loading the cache with immediately useful header information), so we
288 allocate a new, minimally-sized skbuff. For large frames the copying cost
289 is non-trivial, and the larger copy might flush the cache of useful data, so
290 we pass up the skbuff the packet was received into.
294 Thanks to Steve Williams of Intel for arranging the non-disclosure agreement
295 that stated that I could disclose the information. But I still resent
296 having to sign an Intel NDA when I'm helping Intel sell their own product!
300 static int speedo_found1(struct pci_dev *pdev, long ioaddr, int fnd_cnt, int acpi_idle_state);
303 PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
304 PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3,
307 static inline unsigned int io_inw(unsigned long port)
311 static inline void io_outw(unsigned int val, unsigned long port)
317 /* Currently alpha headers define in/out macros.
318 Undefine them. 2000/03/30 SAW */
333 /* Offsets to the various registers.
334 All accesses need not be longword aligned. */
335 enum speedo_offsets {
336 SCBStatus = 0, SCBCmd = 2, /* Rx/Command Unit command and status. */
338 SCBPointer = 4, /* General purpose pointer. */
339 SCBPort = 8, /* Misc. commands and operands. */
340 SCBflash = 12, SCBeeprom = 14, /* EEPROM and flash memory control. */
341 SCBCtrlMDI = 16, /* MDI interface control. */
342 SCBEarlyRx = 20, /* Early receive byte count. */
344 /* Commands that can be put in a command list entry. */
346 CmdNOp = 0, CmdIASetup = 0x10000, CmdConfigure = 0x20000,
347 CmdMulticastList = 0x30000, CmdTx = 0x40000, CmdTDR = 0x50000,
348 CmdDump = 0x60000, CmdDiagnose = 0x70000,
349 CmdSuspend = 0x40000000, /* Suspend after completion. */
350 CmdIntr = 0x20000000, /* Interrupt after completion. */
351 CmdTxFlex = 0x00080000, /* Use "Flexible mode" for CmdTx command. */
353 /* Clear CmdSuspend (1<<30) avoiding interference with the card access to the
354 status bits. Previous driver versions used separate 16 bit fields for
355 commands and statuses. --SAW
357 #if defined(__alpha__)
358 # define clear_suspend(cmd) clear_bit(30, &(cmd)->cmd_status);
360 # if defined(__LITTLE_ENDIAN)
361 # define clear_suspend(cmd) ((__u16 *)&(cmd)->cmd_status)[1] &= ~0x4000
362 # elif defined(__BIG_ENDIAN)
363 # define clear_suspend(cmd) ((__u16 *)&(cmd)->cmd_status)[1] &= ~0x0040
365 # error Unsupported byteorder
370 SCBMaskCmdDone=0x8000, SCBMaskRxDone=0x4000, SCBMaskCmdIdle=0x2000,
371 SCBMaskRxSuspend=0x1000, SCBMaskEarlyRx=0x0800, SCBMaskFlowCtl=0x0400,
372 SCBTriggerIntr=0x0200, SCBMaskAll=0x0100,
373 /* The rest are Rx and Tx commands. */
374 CUStart=0x0010, CUResume=0x0020, CUStatsAddr=0x0040, CUShowStats=0x0050,
375 CUCmdBase=0x0060, /* CU Base address (set to zero) . */
376 CUDumpStats=0x0070, /* Dump then reset stats counters. */
377 RxStart=0x0001, RxResume=0x0002, RxAbort=0x0004, RxAddrLoad=0x0006,
378 RxResumeNoResources=0x0007,
382 PortReset=0, PortSelfTest=1, PortPartialReset=2, PortDump=3,
385 /* The Speedo3 Rx and Tx frame/buffer descriptors. */
386 struct descriptor { /* A generic descriptor. */
387 volatile s32 cmd_status; /* All command and status fields. */
388 u32 link; /* struct descriptor * */
389 unsigned char params[0];
392 /* The Speedo3 Rx and Tx buffer descriptors. */
393 struct RxFD { /* Receive frame descriptor. */
395 u32 link; /* struct RxFD * */
396 u32 rx_buf_addr; /* void * */
400 /* Selected elements of the Tx/RxFD.status word. */
402 RxComplete=0x8000, RxOK=0x2000,
403 RxErrCRC=0x0800, RxErrAlign=0x0400, RxErrTooBig=0x0200, RxErrSymbol=0x0010,
404 RxEth2Type=0x0020, RxNoMatch=0x0004, RxNoIAMatch=0x0002,
405 TxUnderrun=0x1000, StatusComplete=0x8000,
408 #define CONFIG_DATA_SIZE 22
409 struct TxFD { /* Transmit frame descriptor set. */
411 u32 link; /* void * */
412 u32 tx_desc_addr; /* Always points to the tx_buf_addr element. */
413 s32 count; /* # of TBD (=1), Tx start thresh., etc. */
414 /* This constitutes two "TBD" entries -- we only use one. */
415 #define TX_DESCR_BUF_OFFSET 16
416 u32 tx_buf_addr0; /* void *, frame to be transmitted. */
417 s32 tx_buf_size0; /* Length of Tx frame. */
418 u32 tx_buf_addr1; /* void *, frame to be transmitted. */
419 s32 tx_buf_size1; /* Length of Tx frame. */
420 /* the structure must have space for at least CONFIG_DATA_SIZE starting
421 * from tx_desc_addr field */
424 /* Multicast filter setting block. --SAW */
425 struct speedo_mc_block {
426 struct speedo_mc_block *next;
428 dma_addr_t frame_dma;
430 struct descriptor frame __attribute__ ((__aligned__(16)));
433 /* Elements of the dump_statistics block. This block must be lword aligned. */
434 struct speedo_stats {
447 u32 rx_resource_errs;
454 enum Rx_ring_state_bits {
455 RrNoMem=1, RrPostponed=2, RrNoResources=4, RrOOMReported=8,
458 /* Do not change the position (alignment) of the first few elements!
459 The later elements are grouped for cache locality.
461 Unfortunately, all the positions have been shifted since there.
462 A new re-alignment is required. 2000/03/06 SAW */
463 struct speedo_private {
464 struct TxFD *tx_ring; /* Commands (usually CmdTxPacket). */
465 struct RxFD *rx_ringp[RX_RING_SIZE]; /* Rx descriptor, used as ring. */
466 /* The addresses of a Tx/Rx-in-place packets/buffers. */
467 struct sk_buff *tx_skbuff[TX_RING_SIZE];
468 struct sk_buff *rx_skbuff[RX_RING_SIZE];
469 /* Mapped addresses of the rings. */
470 dma_addr_t tx_ring_dma;
471 #define TX_RING_ELEM_DMA(sp, n) ((sp)->tx_ring_dma + (n)*sizeof(struct TxFD))
472 dma_addr_t rx_ring_dma[RX_RING_SIZE];
473 struct descriptor *last_cmd; /* Last command sent. */
474 unsigned int cur_tx, dirty_tx; /* The ring entries to be free()ed. */
475 spinlock_t lock; /* Group with Tx control cache line. */
476 u32 tx_threshold; /* The value for txdesc.count. */
477 struct RxFD *last_rxf; /* Last filled RX buffer. */
478 dma_addr_t last_rxf_dma;
479 unsigned int cur_rx, dirty_rx; /* The next free ring entry */
480 long last_rx_time; /* Last Rx, in jiffies, to handle Rx hang. */
481 struct net_device_stats stats;
482 struct speedo_stats *lstats;
483 dma_addr_t lstats_dma;
485 struct pci_dev *pdev;
486 struct timer_list timer; /* Media selection timer. */
487 struct speedo_mc_block *mc_setup_head; /* Multicast setup frame list head. */
488 struct speedo_mc_block *mc_setup_tail; /* Multicast setup frame list tail. */
489 long in_interrupt; /* Word-aligned dev->interrupt */
490 unsigned char acpi_pwr;
491 signed char rx_mode; /* Current PROMISC/ALLMULTI setting. */
492 unsigned int tx_full:1; /* The Tx queue is full. */
493 unsigned int flow_ctrl:1; /* Use 802.3x flow control. */
494 unsigned int rx_bug:1; /* Work around receiver hang errata. */
495 unsigned char default_port:8; /* Last dev->if_port value. */
496 unsigned char rx_ring_state; /* RX ring status flags. */
497 unsigned short phy[2]; /* PHY media interfaces available. */
498 unsigned short partner; /* Link partner caps. */
499 struct mii_if_info mii_if; /* MII API hooks, info */
500 u32 msg_enable; /* debug message level */
506 /* The parameters for a CmdConfigure operation.
507 There are so many options that it would be difficult to document each bit.
508 We mostly use the default or recommended settings. */
509 static const char i82557_config_cmd[CONFIG_DATA_SIZE] = {
510 22, 0x08, 0, 0, 0, 0, 0x32, 0x03, 1, /* 1=Use MII 0=Use AUI */
512 0xf2, 0x48, 0, 0x40, 0xf2, 0x80, /* 0x40=Force full-duplex */
514 static const char i82558_config_cmd[CONFIG_DATA_SIZE] = {
515 22, 0x08, 0, 1, 0, 0, 0x22, 0x03, 1, /* 1=Use MII 0=Use AUI */
516 0, 0x2E, 0, 0x60, 0x08, 0x88,
517 0x68, 0, 0x40, 0xf2, 0x84, /* Disable FC */
520 /* PHY media interface chips, indexed by the 4-bit PHY type field read from
   EEPROM word 6 (see the "(eeprom[6]>>8)&15" lookup in the probe code). */
521 static const char *phys[] = {
522 "None", "i82553-A/B", "i82553-C", "i82503",
523 "DP83840", "80c240", "80c24", "i82555",
524 "unknown-8", "unknown-9", "DP83840A", "unknown-11",
525 "unknown-12", "unknown-13", "unknown-14", "unknown-15", };
526 enum phy_chips { NonSuchPhy=0, I82553AB, I82553C, I82503, DP83840, S80C240,
527 S80C24, I82555, DP83840A=10, };
/* Indexed by phy_chips value (entries 0-7 only); nonzero presumably marks PHY
   types driven through the MII management interface -- confirm at call sites. */
528 static const char is_mii[] = { 0, 1, 1, 0, 1, 1, 0, 1 };
/* Serial EEPROM "read" opcode, shifted into place by do_eeprom_cmd() callers. */
529 #define EE_READ_CMD (6)
531 static int eepro100_init_one(struct pci_dev *pdev,
532 const struct pci_device_id *ent);
534 static int do_eeprom_cmd(long ioaddr, int cmd, int cmd_len);
535 static int mdio_read(struct net_device *dev, int phy_id, int location);
536 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
537 static int speedo_open(struct net_device *dev);
538 static void speedo_resume(struct net_device *dev);
539 static void speedo_timer(unsigned long data);
540 static void speedo_init_rx_ring(struct net_device *dev);
541 static void speedo_tx_timeout(struct net_device *dev);
542 static int speedo_start_xmit(struct sk_buff *skb, struct net_device *dev);
543 static void speedo_refill_rx_buffers(struct net_device *dev, int force);
544 static int speedo_rx(struct net_device *dev);
545 static void speedo_tx_buffer_gc(struct net_device *dev);
546 static irqreturn_t speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
547 static int speedo_close(struct net_device *dev);
548 static struct net_device_stats *speedo_get_stats(struct net_device *dev);
549 static int speedo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
550 static void set_rx_mode(struct net_device *dev);
551 static void speedo_show_state(struct net_device *dev);
555 #ifdef honor_default_port
556 /* Optional driver feature to allow forcing the transceiver setting.
558 static int mii_ctrl[8] = { 0x3300, 0x3100, 0x0000, 0x0100,
559 0x2000, 0x2100, 0x0400, 0x3100};
562 /* How to wait for the command unit to accept a command.
563 Typically this takes 0 ticks. */
564 static inline unsigned char wait_for_cmd_done(struct net_device *dev)
567 long cmd_ioaddr = dev->base_addr + SCBCmd;
573 } while(r && --wait >= 0);
576 printk(KERN_ALERT "%s: wait_for_cmd_done timeout!\n", dev->name);
580 static int __devinit eepro100_init_one (struct pci_dev *pdev,
581 const struct pci_device_id *ent)
583 unsigned long ioaddr;
585 int acpi_idle_state = 0, pm;
586 static int cards_found /* = 0 */;
589 /* when built-in, we only print version if device is found */
590 static int did_version;
591 if (did_version++ == 0)
595 /* save power state before pci_enable_device overwrites it */
596 pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
599 pci_read_config_word(pdev, pm + PCI_PM_CTRL, &pwr_command);
600 acpi_idle_state = pwr_command & PCI_PM_CTRL_STATE_MASK;
603 if (pci_enable_device(pdev))
604 goto err_out_free_mmio_region;
606 pci_set_master(pdev);
608 if (!request_region(pci_resource_start(pdev, 1),
609 pci_resource_len(pdev, 1), "eepro100")) {
610 printk (KERN_ERR "eepro100: cannot reserve I/O ports\n");
613 if (!request_mem_region(pci_resource_start(pdev, 0),
614 pci_resource_len(pdev, 0), "eepro100")) {
615 printk (KERN_ERR "eepro100: cannot reserve MMIO region\n");
616 goto err_out_free_pio_region;
621 ioaddr = pci_resource_start(pdev, 1);
622 if (DEBUG & NETIF_MSG_PROBE)
623 printk("Found Intel i82557 PCI Speedo at I/O %#lx, IRQ %d.\n",
626 ioaddr = (unsigned long)ioremap(pci_resource_start(pdev, 0),
627 pci_resource_len(pdev, 0));
629 printk (KERN_ERR "eepro100: cannot remap MMIO region %lx @ %lx\n",
630 pci_resource_len(pdev, 0), pci_resource_start(pdev, 0));
631 goto err_out_free_mmio_region;
633 if (DEBUG & NETIF_MSG_PROBE)
634 printk("Found Intel i82557 PCI Speedo, MMIO at %#lx, IRQ %d.\n",
635 pci_resource_start(pdev, 0), irq);
639 if (speedo_found1(pdev, ioaddr, cards_found, acpi_idle_state) == 0)
642 goto err_out_iounmap;
648 iounmap ((void *)ioaddr);
650 err_out_free_mmio_region:
651 release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
652 err_out_free_pio_region:
653 release_region(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
658 #ifdef CONFIG_NET_POLL_CONTROLLER
660 * Polling 'interrupt' - used by things like netconsole to send skbs
661 * without having to re-enable interrupts. It's not called while
662 * the interrupt routine is executing.
665 static void poll_speedo (struct net_device *dev)
667 /* disable_irq is not very nice, but with the funny lockless design
668 we have no other choice. */
669 disable_irq(dev->irq);
670 speedo_interrupt (dev->irq, dev, NULL);
671 enable_irq(dev->irq);
675 static int __devinit speedo_found1(struct pci_dev *pdev,
676 long ioaddr, int card_idx, int acpi_idle_state)
678 struct net_device *dev;
679 struct speedo_private *sp;
685 dma_addr_t tx_ring_dma;
687 size = TX_RING_SIZE * sizeof(struct TxFD) + sizeof(struct speedo_stats);
688 tx_ring_space = pci_alloc_consistent(pdev, size, &tx_ring_dma);
689 if (tx_ring_space == NULL)
692 dev = alloc_etherdev(sizeof(struct speedo_private));
694 printk(KERN_ERR "eepro100: Could not allocate ethernet device.\n");
695 pci_free_consistent(pdev, size, tx_ring_space, tx_ring_dma);
699 SET_MODULE_OWNER(dev);
700 SET_NETDEV_DEV(dev, &pdev->dev);
702 if (dev->mem_start > 0)
703 option = dev->mem_start;
704 else if (card_idx >= 0 && options[card_idx] >= 0)
705 option = options[card_idx];
710 if (dev_alloc_name(dev, dev->name) < 0)
711 goto err_free_unlock;
713 /* Read the station address EEPROM before doing the reset.
714 Nominally this should even be done before accepting the device, but
715 then we wouldn't have a device name with which to report the error.
716 The size test is for 6 bit vs. 8 bit address serial EEPROMs.
719 unsigned long iobase;
720 int read_cmd, ee_size;
724 /* Use IO only to avoid postponed writes and satisfy EEPROM timing
726 iobase = pci_resource_start(pdev, 1);
727 if ((do_eeprom_cmd(iobase, EE_READ_CMD << 24, 27) & 0xffe0000)
730 read_cmd = EE_READ_CMD << 24;
733 read_cmd = EE_READ_CMD << 22;
736 for (j = 0, i = 0, sum = 0; i < ee_size; i++) {
737 u16 value = do_eeprom_cmd(iobase, read_cmd | (i << 16), 27);
741 dev->dev_addr[j++] = value;
742 dev->dev_addr[j++] = value >> 8;
746 printk(KERN_WARNING "%s: Invalid EEPROM checksum %#4.4x, "
747 "check settings before activating this device!\n",
749 /* Don't unregister_netdev(dev); as the EEPro may actually be
750 usable, especially if the MAC address is set later.
751 On the other hand, it may be unusable if MDI data is corrupted. */
754 /* Reset the chip: stop Tx and Rx processes and clear counters.
755 This takes less than 10usec and will easily finish before the next
757 outl(PortReset, ioaddr + SCBPort);
758 inl(ioaddr + SCBPort);
761 if (eeprom[3] & 0x0100)
762 product = "OEM i82557/i82558 10/100 Ethernet";
764 product = pci_name(pdev);
766 printk(KERN_INFO "%s: %s, ", dev->name, product);
768 for (i = 0; i < 5; i++)
769 printk("%2.2X:", dev->dev_addr[i]);
770 printk("%2.2X, ", dev->dev_addr[i]);
772 printk("I/O at %#3lx, ", ioaddr);
774 printk("IRQ %d.\n", pdev->irq);
776 /* we must initialize base_addr early, for mdio_{read,write} */
777 dev->base_addr = ioaddr;
779 #if 1 || defined(kernel_bloat)
780 /* OK, this is pure kernel bloat. I don't like it when other drivers
781 waste non-pageable kernel space to emit similar messages, but I need
782 them for bug reports. */
784 const char *connectors[] = {" RJ45", " BNC", " AUI", " MII"};
785 /* The self-test results must be paragraph aligned. */
786 volatile s32 *self_test_results;
787 int boguscnt = 16000; /* Timeout for set-test. */
788 if ((eeprom[3] & 0x03) != 0x03)
789 printk(KERN_INFO " Receiver lock-up bug exists -- enabling"
791 printk(KERN_INFO " Board assembly %4.4x%2.2x-%3.3d, Physical"
792 " connectors present:",
793 eeprom[8], eeprom[9]>>8, eeprom[9] & 0xff);
794 for (i = 0; i < 4; i++)
795 if (eeprom[5] & (1<<i))
796 printk(connectors[i]);
797 printk("\n"KERN_INFO" Primary interface chip %s PHY #%d.\n",
798 phys[(eeprom[6]>>8)&15], eeprom[6] & 0x1f);
799 if (eeprom[7] & 0x0700)
800 printk(KERN_INFO " Secondary interface chip %s.\n",
801 phys[(eeprom[7]>>8)&7]);
802 if (((eeprom[6]>>8) & 0x3f) == DP83840
803 || ((eeprom[6]>>8) & 0x3f) == DP83840A) {
804 int mdi_reg23 = mdio_read(dev, eeprom[6] & 0x1f, 23) | 0x0422;
807 printk(KERN_INFO" DP83840 specific setup, setting register 23 to %4.4x.\n",
809 mdio_write(dev, eeprom[6] & 0x1f, 23, mdi_reg23);
811 if ((option >= 0) && (option & 0x70)) {
812 printk(KERN_INFO " Forcing %dMbs %s-duplex operation.\n",
813 (option & 0x20 ? 100 : 10),
814 (option & 0x10 ? "full" : "half"));
815 mdio_write(dev, eeprom[6] & 0x1f, MII_BMCR,
816 ((option & 0x20) ? 0x2000 : 0) | /* 100mbps? */
817 ((option & 0x10) ? 0x0100 : 0)); /* Full duplex? */
820 /* Perform a system self-test. */
821 self_test_results = (s32*) ((((long) tx_ring_space) + 15) & ~0xf);
822 self_test_results[0] = 0;
823 self_test_results[1] = -1;
824 outl(tx_ring_dma | PortSelfTest, ioaddr + SCBPort);
827 } while (self_test_results[1] == -1 && --boguscnt >= 0);
829 if (boguscnt < 0) { /* Test optimized out. */
830 printk(KERN_ERR "Self test failed, status %8.8x:\n"
831 KERN_ERR " Failure to initialize the i82557.\n"
832 KERN_ERR " Verify that the card is a bus-master"
834 self_test_results[1]);
836 printk(KERN_INFO " General self-test: %s.\n"
837 KERN_INFO " Serial sub-system self-test: %s.\n"
838 KERN_INFO " Internal registers self-test: %s.\n"
839 KERN_INFO " ROM checksum self-test: %s (%#8.8x).\n",
840 self_test_results[1] & 0x1000 ? "failed" : "passed",
841 self_test_results[1] & 0x0020 ? "failed" : "passed",
842 self_test_results[1] & 0x0008 ? "failed" : "passed",
843 self_test_results[1] & 0x0004 ? "failed" : "passed",
844 self_test_results[0]);
846 #endif /* kernel_bloat */
848 outl(PortReset, ioaddr + SCBPort);
849 inl(ioaddr + SCBPort);
852 /* Return the chip to its original power state. */
853 pci_set_power_state(pdev, acpi_idle_state);
855 pci_set_drvdata (pdev, dev);
856 SET_NETDEV_DEV(dev, &pdev->dev);
858 dev->irq = pdev->irq;
860 sp = netdev_priv(dev);
862 sp->msg_enable = DEBUG;
863 sp->acpi_pwr = acpi_idle_state;
864 sp->tx_ring = tx_ring_space;
865 sp->tx_ring_dma = tx_ring_dma;
866 sp->lstats = (struct speedo_stats *)(sp->tx_ring + TX_RING_SIZE);
867 sp->lstats_dma = TX_RING_ELEM_DMA(sp, TX_RING_SIZE);
868 init_timer(&sp->timer); /* used in ioctl() */
869 spin_lock_init(&sp->lock);
871 sp->mii_if.full_duplex = option >= 0 && (option & 0x10) ? 1 : 0;
873 if (full_duplex[card_idx] >= 0)
874 sp->mii_if.full_duplex = full_duplex[card_idx];
876 sp->default_port = option >= 0 ? (option & 0x0f) : 0;
878 sp->phy[0] = eeprom[6];
879 sp->phy[1] = eeprom[7];
881 sp->mii_if.phy_id = eeprom[6] & 0x1f;
882 sp->mii_if.phy_id_mask = 0x1f;
883 sp->mii_if.reg_num_mask = 0x1f;
884 sp->mii_if.dev = dev;
885 sp->mii_if.mdio_read = mdio_read;
886 sp->mii_if.mdio_write = mdio_write;
888 sp->rx_bug = (eeprom[3] & 0x03) == 3 ? 0 : 1;
889 if (((pdev->device > 0x1030 && (pdev->device < 0x103F)))
890 || (pdev->device == 0x2449) || (pdev->device == 0x2459)
891 || (pdev->device == 0x245D)) {
896 printk(KERN_INFO " Receiver lock-up workaround activated.\n");
898 /* The Speedo-specific entries in the device structure. */
899 dev->open = &speedo_open;
900 dev->hard_start_xmit = &speedo_start_xmit;
901 netif_set_tx_timeout(dev, &speedo_tx_timeout, TX_TIMEOUT);
902 dev->stop = &speedo_close;
903 dev->get_stats = &speedo_get_stats;
904 dev->set_multicast_list = &set_rx_mode;
905 dev->do_ioctl = &speedo_ioctl;
906 #ifdef CONFIG_NET_POLL_CONTROLLER
907 dev->poll_controller = &poll_speedo;
910 if (register_netdevice(dev))
911 goto err_free_unlock;
922 static void do_slow_command(struct net_device *dev, int cmd)
924 long cmd_ioaddr = dev->base_addr + SCBCmd;
927 if (inb(cmd_ioaddr) == 0) break;
928 while(++wait <= 200);
930 printk(KERN_ERR "Command %4.4x never accepted (%d polls)!\n",
931 inb(cmd_ioaddr), wait);
933 outb(cmd, cmd_ioaddr);
935 for (wait = 0; wait <= 100; wait++)
936 if (inb(cmd_ioaddr) == 0) return;
937 for (; wait <= 20000; wait++)
938 if (inb(cmd_ioaddr) == 0) return;
940 printk(KERN_ERR "Command %4.4x was not accepted after %d polls!"
941 " Current status %8.8x.\n",
942 cmd, wait, inl(dev->base_addr + SCBStatus));
945 /* Serial EEPROM section.
946 A "bit" grungy, but we work our way through bit-by-bit :->. */
947 /* EEPROM_Ctrl bits. */
948 #define EE_SHIFT_CLK 0x01 /* EEPROM shift clock. */
949 #define EE_CS 0x02 /* EEPROM chip select. */
950 #define EE_DATA_WRITE 0x04 /* EEPROM chip data in. */
951 #define EE_DATA_READ 0x08 /* EEPROM chip data out. */
952 #define EE_ENB (0x4800 | EE_CS)
953 #define EE_WRITE_0 0x4802
954 #define EE_WRITE_1 0x4806
955 #define EE_OFFSET SCBeeprom
957 /* The fixes for the code were kindly provided by Dragan Stancevic
958 <visitor@valinux.com> to strictly follow Intel specifications of EEPROM
960 The publicly available sheet 64486302 (sec. 3.1) specifies 1us access
961 interval for serial EEPROM. However, it looks like there is an
962 additional requirement dictating larger udelay's in the code below. */
964 static int __devinit do_eeprom_cmd(long ioaddr, int cmd, int cmd_len)
967 long ee_addr = ioaddr + SCBeeprom;
969 io_outw(EE_ENB, ee_addr); udelay(2);
970 io_outw(EE_ENB | EE_SHIFT_CLK, ee_addr); udelay(2);
972 /* Shift the command bits out. */
974 short dataval = (cmd & (1 << cmd_len)) ? EE_WRITE_1 : EE_WRITE_0;
975 io_outw(dataval, ee_addr); udelay(2);
976 io_outw(dataval | EE_SHIFT_CLK, ee_addr); udelay(2);
977 retval = (retval << 1) | ((io_inw(ee_addr) & EE_DATA_READ) ? 1 : 0);
978 } while (--cmd_len >= 0);
979 io_outw(EE_ENB, ee_addr); udelay(2);
981 /* Terminate the EEPROM access. */
982 io_outw(EE_ENB & ~EE_CS, ee_addr);
986 static int mdio_read(struct net_device *dev, int phy_id, int location)
988 long ioaddr = dev->base_addr;
989 int val, boguscnt = 64*10; /* <64 usec. to complete, typ 27 ticks */
990 outl(0x08000000 | (location<<16) | (phy_id<<21), ioaddr + SCBCtrlMDI);
992 val = inl(ioaddr + SCBCtrlMDI);
993 if (--boguscnt < 0) {
994 printk(KERN_ERR " mdio_read() timed out with val = %8.8x.\n", val);
997 } while (! (val & 0x10000000));
1001 static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
1003 long ioaddr = dev->base_addr;
1004 int val, boguscnt = 64*10; /* <64 usec. to complete, typ 27 ticks */
1005 outl(0x04000000 | (location<<16) | (phy_id<<21) | value,
1006 ioaddr + SCBCtrlMDI);
1008 val = inl(ioaddr + SCBCtrlMDI);
1009 if (--boguscnt < 0) {
1010 printk(KERN_ERR" mdio_write() timed out with val = %8.8x.\n", val);
1013 } while (! (val & 0x10000000));
/* net_device open() method: power the chip up, request the IRQ, build the
   Rx ring, start the hardware and the media-monitoring timer.
   NOTE(review): several lines (opening brace, Tx-queue init fields, the
   request_irq error path, #else/#endif of the disabled NWay block) appear
   to have been lost in extraction; code preserved verbatim below. */
1017 speedo_open(struct net_device *dev)
1019 struct speedo_private *sp = netdev_priv(dev);
1020 long ioaddr = dev->base_addr;
1023 if (netif_msg_ifup(sp))
1024 printk(KERN_DEBUG "%s: speedo_open() irq %d.\n", dev->name, dev->irq);
1026 pci_set_power_state(sp->pdev, 0);
1028 /* Set up the Tx queue early.. */
1033 sp->in_interrupt = 0;
1035 /* .. we can safely take handler calls during init. */
1036 retval = request_irq(dev->irq, &speedo_interrupt, SA_SHIRQ, dev->name, dev);
1041 dev->if_port = sp->default_port;
1043 #ifdef oh_no_you_dont_unless_you_honour_the_options_passed_in_to_us
1044 /* Retrigger negotiation to reset previous errors. */
1045 if ((sp->phy[0] & 0x8000) == 0) {
1046 int phy_addr = sp->phy[0] & 0x1f ;
1047 /* Use 0x3300 for restarting NWay, other values to force xcvr:
1053 #ifdef honor_default_port
1054 mdio_write(dev, phy_addr, MII_BMCR, mii_ctrl[dev->default_port & 7]);
1056 mdio_write(dev, phy_addr, MII_BMCR, 0x3300);
1061 speedo_init_rx_ring(dev);
1063 /* Fire up the hardware. */
1064 outw(SCBMaskAll, ioaddr + SCBCmd);
/* NOTE(review): a speedo_resume(dev) call presumably follows here (line
   lost) — the chip is started before the queue below; confirm upstream. */
1067 netdevice_start(dev);
1068 netif_start_queue(dev);
1070 /* Setup the chip and configure the multicast list. */
1071 sp->mc_setup_head = NULL;
1072 sp->mc_setup_tail = NULL;
1073 sp->flow_ctrl = sp->partner = 0;
1074 sp->rx_mode = -1; /* Invalid -> always reset the mode. */
1076 if ((sp->phy[0] & 0x8000) == 0)
1077 sp->mii_if.advertising = mdio_read(dev, sp->phy[0] & 0x1f, MII_ADVERTISE);
1079 mii_check_link(&sp->mii_if);
1081 if (netif_msg_ifup(sp)) {
1082 printk(KERN_DEBUG "%s: Done speedo_open(), status %8.8x.\n",
1083 dev->name, inw(ioaddr + SCBStatus));
1086 /* Set the timer. The timer serves a dual purpose:
1087 1) to monitor the media interface (e.g. link beat) and perhaps switch
1088 to an alternate media type
1089 2) to monitor Rx activity, and restart the Rx process if the receiver
1091 sp->timer.expires = RUN_AT((24*HZ)/10); /* 2.4 sec. */
1092 sp->timer.data = (unsigned long)dev;
1093 sp->timer.function = &speedo_timer; /* timer handler */
1094 add_timer(&sp->timer);
1096 /* No need to wait for the command unit to accept here. */
1097 if ((sp->phy[0] & 0x8000) == 0)
1098 mdio_read(dev, sp->phy[0] & 0x1f, MII_BMCR);
1103 /* Start the chip hardware after a full reset. */
/* Loads segment registers, the statistics block and Rx ring base, queues
   an IASetup command with the MAC address, then starts the CU with FCP/ER
   interrupts still masked.
   NOTE(review): several lines (braces, PortPartialReset error path,
   ias_cmd assignment/link lines) were lost in extraction; preserved
   verbatim. */
1104 static void speedo_resume(struct net_device *dev)
1106 struct speedo_private *sp = netdev_priv(dev);
1107 long ioaddr = dev->base_addr;
1109 /* Start with a Tx threshold of 256 (0x..20.... 8 byte units). */
1110 sp->tx_threshold = 0x01208000;
1112 /* Set the segment registers to '0'. */
1113 if (wait_for_cmd_done(dev) != 0) {
1114 outl(PortPartialReset, ioaddr + SCBPort);
1118 outl(0, ioaddr + SCBPointer);
1119 inl(ioaddr + SCBPointer); /* Flush to PCI. */
1120 udelay(10); /* Bogus, but it avoids the bug. */
1122 /* Note: these next two operations can take a while. */
1123 do_slow_command(dev, RxAddrLoad);
1124 do_slow_command(dev, CUCmdBase);
1126 /* Load the statistics block and rx ring addresses. */
1127 outl(sp->lstats_dma, ioaddr + SCBPointer);
1128 inl(ioaddr + SCBPointer); /* Flush to PCI */
1130 outb(CUStatsAddr, ioaddr + SCBCmd);
1131 sp->lstats->done_marker = 0;
1132 wait_for_cmd_done(dev);
1134 if (sp->rx_ringp[sp->cur_rx % RX_RING_SIZE] == NULL) {
1135 if (netif_msg_rx_err(sp))
1136 printk(KERN_DEBUG "%s: NULL cur_rx in speedo_resume().\n",
1139 outl(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
1140 ioaddr + SCBPointer);
1141 inl(ioaddr + SCBPointer); /* Flush to PCI */
1144 /* Note: RxStart should complete instantly. */
1145 do_slow_command(dev, RxStart);
1146 do_slow_command(dev, CUDumpStats);
1148 /* Fill the first command with our physical address. */
1150 struct descriptor *ias_cmd;
1153 (struct descriptor *)&sp->tx_ring[sp->cur_tx++ % TX_RING_SIZE];
1154 /* Avoid a bug(?!) here by marking the command already completed. */
1155 ias_cmd->cmd_status = cpu_to_le32((CmdSuspend | CmdIASetup) | 0xa000);
1157 cpu_to_le32(TX_RING_ELEM_DMA(sp, sp->cur_tx % TX_RING_SIZE));
1158 memcpy(ias_cmd->params, dev->dev_addr, 6);
1160 clear_suspend(sp->last_cmd);
1161 sp->last_cmd = ias_cmd;
1164 /* Start the chip's Tx process and unmask interrupts. */
1165 outl(TX_RING_ELEM_DMA(sp, sp->dirty_tx % TX_RING_SIZE),
1166 ioaddr + SCBPointer);
1167 /* We are not ACK-ing FCP and ER in the interrupt handler yet so they should
1168 remain masked --Dragan */
1169 outw(CUStart | SCBMaskEarlyRx | SCBMaskFlowCtl, ioaddr + SCBCmd);
/*
1173 * Sometimes the receiver stops making progress. This routine knows how to
1174 * get it going again, without losing packets or being otherwise nasty like
1175 * a chip reset would be. Previously the driver had a whole sequence
1176 * of if RxSuspended, if it's no buffers do one thing, if it's no resources,
1177 * do another, etc. But those things don't really matter. Separate logic
1178 * in the ISR provides for allocating buffers--the other half of operation
1179 * is just making sure the receiver is active. speedo_rx_soft_reset does that.
1180 * This problem with the old, more involved algorithm is shown up under
1181 * ping floods on the order of 60K packets/second on a 100Mbps fdx network.
 */
/* Restart a stalled receiver without a full chip reset: abort RX, poison
   the current RFD's buffer address, reload the ring pointer and RxStart.
   NOTE(review): return type, braces and error-path returns were lost in
   extraction; preserved verbatim. */
1184 speedo_rx_soft_reset(struct net_device *dev)
1186 struct speedo_private *sp = netdev_priv(dev);
1190 ioaddr = dev->base_addr;
1191 if (wait_for_cmd_done(dev) != 0) {
1192 printk("%s: previous command stalled\n", dev->name);
1196 * Put the hardware into a known state.
1198 outb(RxAbort, ioaddr + SCBCmd);
1200 rfd = sp->rx_ringp[sp->cur_rx % RX_RING_SIZE];
/* 0xffffffff marks the buffer address as unused (i82557 ignores it). */
1202 rfd->rx_buf_addr = 0xffffffff;
1204 if (wait_for_cmd_done(dev) != 0) {
1205 printk("%s: RxAbort command stalled\n", dev->name);
1208 outl(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
1209 ioaddr + SCBPointer);
1210 outb(RxStart, ioaddr + SCBCmd);
1214 /* Media monitoring and control. */
/* Periodic timer: tracks link/duplex partner changes via MII, and works
   around the i82557 receiver-hang bug by forcing a multicast-list reload
   when no packet has arrived for >2s.  Re-arms itself every 2 seconds.
   NOTE(review): braces and the set_rx_mode() call inside the hang
   workaround were lost in extraction; preserved verbatim. */
1215 static void speedo_timer(unsigned long data)
1217 struct net_device *dev = (struct net_device *)data;
1218 struct speedo_private *sp = netdev_priv(dev);
1219 long ioaddr = dev->base_addr;
1220 int phy_num = sp->phy[0] & 0x1f;
1222 /* We have MII and lost link beat. */
1223 if ((sp->phy[0] & 0x8000) == 0) {
1224 int partner = mdio_read(dev, phy_num, MII_LPA);
1225 if (partner != sp->partner) {
/* Bit 0x0400 in both advertising and partner = pause (flow control). */
1226 int flow_ctrl = sp->mii_if.advertising & partner & 0x0400 ? 1 : 0;
1227 if (netif_msg_link(sp)) {
1228 printk(KERN_DEBUG "%s: Link status change.\n", dev->name);
1229 printk(KERN_DEBUG "%s: Old partner %x, new %x, adv %x.\n",
1230 dev->name, sp->partner, partner, sp->mii_if.advertising);
1232 sp->partner = partner;
1233 if (flow_ctrl != sp->flow_ctrl) {
1234 sp->flow_ctrl = flow_ctrl;
1235 sp->rx_mode = -1; /* Trigger a reload. */
1239 mii_check_link(&sp->mii_if);
1240 if (netif_msg_timer(sp)) {
1241 printk(KERN_DEBUG "%s: Media control tick, status %4.4x.\n",
1242 dev->name, inw(ioaddr + SCBStatus));
1244 if (sp->rx_mode < 0 ||
1245 (sp->rx_bug && jiffies - sp->last_rx_time > 2*HZ)) {
1246 /* We haven't received a packet in a Long Time. We might have been
1247 bitten by the receiver hang bug. This can be cleared by sending
1248 a set multicast list command. */
1249 if (netif_msg_timer(sp))
1250 printk(KERN_DEBUG "%s: Sending a multicast list set command"
1251 " from a timer routine,"
1252 " m=%d, j=%ld, l=%ld.\n",
1253 dev->name, sp->rx_mode, jiffies, sp->last_rx_time);
1256 /* We must continue to monitor the media. */
1257 sp->timer.expires = RUN_AT(2*HZ); /* 2.0 sec. */
1258 add_timer(&sp->timer);
/* Debug helper: dump the Tx and Rx ring state (and, below, PHY registers)
   to the kernel log.  Gated on the pktdata message level.
   NOTE(review): braces and an #if 0/#endif around the PHY dump appear to
   have been lost in extraction; preserved verbatim. */
1261 static void speedo_show_state(struct net_device *dev)
1263 struct speedo_private *sp = netdev_priv(dev);
1266 if (netif_msg_pktdata(sp)) {
1267 printk(KERN_DEBUG "%s: Tx ring dump, Tx queue %u / %u:\n",
1268 dev->name, sp->cur_tx, sp->dirty_tx);
1269 for (i = 0; i < TX_RING_SIZE; i++)
1270 printk(KERN_DEBUG "%s: %c%c%2d %8.8x.\n", dev->name,
1271 i == sp->dirty_tx % TX_RING_SIZE ? '*' : ' ',
1272 i == sp->cur_tx % TX_RING_SIZE ? '=' : ' ',
1273 i, sp->tx_ring[i].status);
1275 printk(KERN_DEBUG "%s: Printing Rx ring"
1276 " (next to receive into %u, dirty index %u).\n",
1277 dev->name, sp->cur_rx, sp->dirty_rx);
1278 for (i = 0; i < RX_RING_SIZE; i++)
1279 printk(KERN_DEBUG "%s: %c%c%c%2d %8.8x.\n", dev->name,
1280 sp->rx_ringp[i] == sp->last_rxf ? 'l' : ' ',
1281 i == sp->dirty_rx % RX_RING_SIZE ? '*' : ' ',
1282 i == sp->cur_rx % RX_RING_SIZE ? '=' : ' ',
1283 i, (sp->rx_ringp[i] != NULL) ?
1284 (unsigned)sp->rx_ringp[i]->status : 0);
1289 long ioaddr = dev->base_addr;
1290 int phy_num = sp->phy[0] & 0x1f;
1291 for (i = 0; i < 16; i++) {
1292 /* FIXME: what does it mean? --SAW */
1294 printk(KERN_DEBUG "%s: PHY index %d register %d is %4.4x.\n",
1295 dev->name, phy_num, i, mdio_read(dev, phy_num, i));
1302 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
/* Allocates one skb per RX_RING_SIZE slot, maps it for DMA, overlays an
   RxFD header at the start of each buffer and chains them via ->link.
   The last descriptor gets the end-of-list status (0xC0000002).
   NOTE(review): braces, cur_rx init and the NULL-skb check around the
   early break were lost in extraction; preserved verbatim. */
1304 speedo_init_rx_ring(struct net_device *dev)
1306 struct speedo_private *sp = netdev_priv(dev);
1307 struct RxFD *rxf, *last_rxf = NULL;
1308 dma_addr_t last_rxf_dma = 0 /* to shut up the compiler */;
1313 for (i = 0; i < RX_RING_SIZE; i++) {
1314 struct sk_buff *skb;
1315 skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
1316 /* XXX: do we really want to call this before the NULL check? --hch */
1317 rx_align(skb); /* Align IP on 16 byte boundary */
1318 sp->rx_skbuff[i] = skb;
1320 break; /* OK. Just initially short of Rx bufs. */
1321 skb->dev = dev; /* Mark as being used by this device. */
1322 rxf = (struct RxFD *)skb->tail;
1323 sp->rx_ringp[i] = rxf;
1324 sp->rx_ring_dma[i] =
1325 pci_map_single(sp->pdev, rxf,
1326 PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_BIDIRECTIONAL);
1327 skb_reserve(skb, sizeof(struct RxFD));
/* Chain the previous descriptor to this one and flush it to the device. */
1329 last_rxf->link = cpu_to_le32(sp->rx_ring_dma[i]);
1330 pci_dma_sync_single_for_device(sp->pdev, last_rxf_dma,
1331 sizeof(struct RxFD), PCI_DMA_TODEVICE);
1334 last_rxf_dma = sp->rx_ring_dma[i];
1335 rxf->status = cpu_to_le32(0x00000001); /* '1' is flag value only. */
1336 rxf->link = 0; /* None yet. */
1337 /* This field unused by i82557. */
1338 rxf->rx_buf_addr = 0xffffffff;
1339 rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
1340 pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[i],
1341 sizeof(struct RxFD), PCI_DMA_TODEVICE);
1343 sp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1344 /* Mark the last entry as end-of-list. */
1345 last_rxf->status = cpu_to_le32(0xC0000002); /* '2' is flag value only. */
1346 pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[RX_RING_SIZE-1],
1347 sizeof(struct RxFD), PCI_DMA_TODEVICE);
1348 sp->last_rxf = last_rxf;
1349 sp->last_rxf_dma = last_rxf_dma;
1352 static void speedo_purge_tx(struct net_device *dev)
1354 struct speedo_private *sp = netdev_priv(dev);
1357 while ((int)(sp->cur_tx - sp->dirty_tx) > 0) {
1358 entry = sp->dirty_tx % TX_RING_SIZE;
1359 if (sp->tx_skbuff[entry]) {
1360 sp->stats.tx_errors++;
1361 pci_unmap_single(sp->pdev,
1362 le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
1363 sp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
1364 dev_kfree_skb_irq(sp->tx_skbuff[entry]);
1365 sp->tx_skbuff[entry] = 0;
1369 while (sp->mc_setup_head != NULL) {
1370 struct speedo_mc_block *t;
1371 if (netif_msg_tx_err(sp))
1372 printk(KERN_DEBUG "%s: freeing mc frame.\n", dev->name);
1373 pci_unmap_single(sp->pdev, sp->mc_setup_head->frame_dma,
1374 sp->mc_setup_head->len, PCI_DMA_TODEVICE);
1375 t = sp->mc_setup_head->next;
1376 kfree(sp->mc_setup_head);
1377 sp->mc_setup_head = t;
1379 sp->mc_setup_tail = NULL;
1381 netif_wake_queue(dev);
1384 static void reset_mii(struct net_device *dev)
1386 struct speedo_private *sp = netdev_priv(dev);
1388 /* Reset the MII transceiver, suggested by Fred Young @ scalable.com. */
1389 if ((sp->phy[0] & 0x8000) == 0) {
1390 int phy_addr = sp->phy[0] & 0x1f;
1391 int advertising = mdio_read(dev, phy_addr, MII_ADVERTISE);
1392 int mii_bmcr = mdio_read(dev, phy_addr, MII_BMCR);
1393 mdio_write(dev, phy_addr, MII_BMCR, 0x0400);
1394 mdio_write(dev, phy_addr, MII_BMSR, 0x0000);
1395 mdio_write(dev, phy_addr, MII_ADVERTISE, 0x0000);
1396 mdio_write(dev, phy_addr, MII_BMCR, 0x8000);
1397 #ifdef honor_default_port
1398 mdio_write(dev, phy_addr, MII_BMCR, mii_ctrl[dev->default_port & 7]);
1400 mdio_read(dev, phy_addr, MII_BMCR);
1401 mdio_write(dev, phy_addr, MII_BMCR, mii_bmcr);
1402 mdio_write(dev, phy_addr, MII_ADVERTISE, advertising);
1407 static void speedo_tx_timeout(struct net_device *dev)
1409 struct speedo_private *sp = netdev_priv(dev);
1410 long ioaddr = dev->base_addr;
1411 int status = inw(ioaddr + SCBStatus);
1412 unsigned long flags;
1414 if (netif_msg_tx_err(sp)) {
1415 printk(KERN_WARNING "%s: Transmit timed out: status %4.4x "
1416 " %4.4x at %d/%d command %8.8x.\n",
1417 dev->name, status, inw(ioaddr + SCBCmd),
1418 sp->dirty_tx, sp->cur_tx,
1419 sp->tx_ring[sp->dirty_tx % TX_RING_SIZE].status);
1422 speedo_show_state(dev);
1424 if ((status & 0x00C0) != 0x0080
1425 && (status & 0x003C) == 0x0010) {
1426 /* Only the command unit has stopped. */
1427 printk(KERN_WARNING "%s: Trying to restart the transmitter...\n",
1429 outl(TX_RING_ELEM_DMA(sp, dirty_tx % TX_RING_SIZE]),
1430 ioaddr + SCBPointer);
1431 outw(CUStart, ioaddr + SCBCmd);
1437 del_timer_sync(&sp->timer);
1438 /* Reset the Tx and Rx units. */
1439 outl(PortReset, ioaddr + SCBPort);
1440 /* We may get spurious interrupts here. But I don't think that they
1441 may do much harm. 1999/12/09 SAW */
1443 /* Disable interrupts. */
1444 outw(SCBMaskAll, ioaddr + SCBCmd);
1445 synchronize_irq(dev->irq);
1446 speedo_tx_buffer_gc(dev);
1447 /* Free as much as possible.
1448 It helps to recover from a hang because of out-of-memory.
1449 It also simplifies speedo_resume() in case TX ring is full or
1450 close-to-be full. */
1451 speedo_purge_tx(dev);
1452 speedo_refill_rx_buffers(dev, 1);
1453 spin_lock_irqsave(&sp->lock, flags);
1456 dev->trans_start = jiffies;
1457 spin_unlock_irqrestore(&sp->lock, flags);
1458 set_rx_mode(dev); /* it takes the spinlock itself --SAW */
1459 /* Reset MII transceiver. Do it before starting the timer to serialize
1460 mdio_xxx operations. Yes, it's a paranoya :-) 2000/05/09 SAW */
1462 sp->timer.expires = RUN_AT(2*HZ);
1463 add_timer(&sp->timer);
/* net_device hard_start_xmit: queue one skb on the Tx ring as a flexible
   Tx command (CmdTxFlex, single buffer descriptor), then resume the CU.
   Interrupt coalescing: only every TX_RING_SIZE/4-th descriptor requests
   an interrupt (CmdIntr).
   NOTE(review): braces, the return statements and parts of the 10mbit
   half-duplex workaround were lost in extraction; preserved verbatim. */
1469 speedo_start_xmit(struct sk_buff *skb, struct net_device *dev)
1471 struct speedo_private *sp = netdev_priv(dev);
1472 long ioaddr = dev->base_addr;
1475 /* Prevent interrupts from changing the Tx ring from underneath us. */
1476 unsigned long flags;
1478 spin_lock_irqsave(&sp->lock, flags);
1480 /* Check if there are enough space. */
1481 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
1482 printk(KERN_ERR "%s: incorrect tbusy state, fixed.\n", dev->name);
1483 netif_stop_queue(dev);
1485 spin_unlock_irqrestore(&sp->lock, flags);
1489 /* Calculate the Tx descriptor entry. */
1490 entry = sp->cur_tx++ % TX_RING_SIZE;
1492 sp->tx_skbuff[entry] = skb;
1493 sp->tx_ring[entry].status =
1494 cpu_to_le32(CmdSuspend | CmdTx | CmdTxFlex);
1495 if (!(entry & ((TX_RING_SIZE>>2)-1)))
1496 sp->tx_ring[entry].status |= cpu_to_le32(CmdIntr);
1497 sp->tx_ring[entry].link =
1498 cpu_to_le32(TX_RING_ELEM_DMA(sp, sp->cur_tx % TX_RING_SIZE));
1499 sp->tx_ring[entry].tx_desc_addr =
1500 cpu_to_le32(TX_RING_ELEM_DMA(sp, entry) + TX_DESCR_BUF_OFFSET);
1501 /* The data region is always in one buffer descriptor. */
1502 sp->tx_ring[entry].count = cpu_to_le32(sp->tx_threshold);
1503 sp->tx_ring[entry].tx_buf_addr0 =
1504 cpu_to_le32(pci_map_single(sp->pdev, skb->data,
1505 skb->len, PCI_DMA_TODEVICE));
1506 sp->tx_ring[entry].tx_buf_size0 = cpu_to_le32(skb->len);
1508 /* workaround for hardware bug on 10 mbit half duplex */
/* NOTE(review): a NOP command (outb 0) is pushed here — presumably to
   flush the CU before the real resume; confirm against upstream. */
1510 if ((sp->partner == 0) && (sp->chip_id == 1)) {
1511 wait_for_cmd_done(dev);
1512 outb(0 , ioaddr + SCBCmd);
1516 /* Trigger the command unit resume. */
1517 wait_for_cmd_done(dev);
1518 clear_suspend(sp->last_cmd);
1519 /* We want the time window between clearing suspend flag on the previous
1520 command and resuming CU to be as small as possible.
1521 Interrupts in between are very undesired. --SAW */
1522 outb(CUResume, ioaddr + SCBCmd);
1523 sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
1525 /* Leave room for set_rx_mode(). If there is no more space than reserved
1526 for multicast filter mark the ring as full. */
1527 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
1528 netif_stop_queue(dev);
1532 spin_unlock_irqrestore(&sp->lock, flags);
1534 dev->trans_start = jiffies;
/* Reclaim completed Tx descriptors: free their skbs, update tx stats,
   bump the Tx threshold on underrun, and release multicast setup frames
   whose commands have been consumed.  Called from the ISR and from the
   Tx-timeout path with interrupts off.
   NOTE(review): several closing braces and the tx_full handling were
   lost in extraction; preserved verbatim. */
1539 static void speedo_tx_buffer_gc(struct net_device *dev)
1541 unsigned int dirty_tx;
1542 struct speedo_private *sp = netdev_priv(dev);
1544 dirty_tx = sp->dirty_tx;
1545 while ((int)(sp->cur_tx - dirty_tx) > 0) {
1546 int entry = dirty_tx % TX_RING_SIZE;
1547 int status = le32_to_cpu(sp->tx_ring[entry].status);
1549 if (netif_msg_tx_done(sp))
1550 printk(KERN_DEBUG " scavenge candidate %d status %4.4x.\n",
1552 if ((status & StatusComplete) == 0)
1553 break; /* It still hasn't been processed. */
1554 if (status & TxUnderrun)
/* 0x01e08000 is the maximum Tx threshold; raise in 0x00040000 steps. */
1555 if (sp->tx_threshold < 0x01e08000) {
1556 if (netif_msg_tx_err(sp))
1557 printk(KERN_DEBUG "%s: TX underrun, threshold adjusted.\n",
1559 sp->tx_threshold += 0x00040000;
1561 /* Free the original skb. */
1562 if (sp->tx_skbuff[entry]) {
1563 sp->stats.tx_packets++; /* Count only user packets. */
1564 sp->stats.tx_bytes += sp->tx_skbuff[entry]->len;
1565 pci_unmap_single(sp->pdev,
1566 le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
1567 sp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
1568 dev_kfree_skb_irq(sp->tx_skbuff[entry]);
1569 sp->tx_skbuff[entry] = 0;
1574 if (netif_msg_tx_err(sp) && (int)(sp->cur_tx - dirty_tx) > TX_RING_SIZE) {
1575 printk(KERN_ERR "out-of-sync dirty pointer, %d vs. %d,"
1577 dirty_tx, sp->cur_tx, sp->tx_full);
1578 dirty_tx += TX_RING_SIZE;
1581 while (sp->mc_setup_head != NULL
1582 && (int)(dirty_tx - sp->mc_setup_head->tx - 1) > 0) {
1583 struct speedo_mc_block *t;
1584 if (netif_msg_tx_err(sp))
1585 printk(KERN_DEBUG "%s: freeing mc frame.\n", dev->name);
1586 pci_unmap_single(sp->pdev, sp->mc_setup_head->frame_dma,
1587 sp->mc_setup_head->len, PCI_DMA_TODEVICE);
1588 t = sp->mc_setup_head->next;
1589 kfree(sp->mc_setup_head);
1590 sp->mc_setup_head = t;
1592 if (sp->mc_setup_head == NULL)
1593 sp->mc_setup_tail = NULL;
1595 sp->dirty_tx = dirty_tx;
1598 /* The interrupt handler does all of the Rx thread work and cleans up
1599 after the Tx thread. */
/* Loops acknowledging SCB interrupt sources (FCP/ER stay masked, hence
   the 0xfc00 mask), dispatches Rx work, re-primes a suspended receiver
   based on the RU status field, garbage-collects Tx, and bails out after
   max_interrupt_work iterations.
   NOTE(review): loop keywords, braces and the return path were lost in
   extraction; preserved verbatim. */
1600 static irqreturn_t speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
1602 struct net_device *dev = (struct net_device *)dev_instance;
1603 struct speedo_private *sp;
1604 long ioaddr, boguscnt = max_interrupt_work;
1605 unsigned short status;
1606 unsigned int handled = 0;
1608 ioaddr = dev->base_addr;
1609 sp = netdev_priv(dev);
1611 #ifndef final_version
1612 /* A lock to prevent simultaneous entry on SMP machines. */
1613 if (test_and_set_bit(0, (void*)&sp->in_interrupt)) {
1614 printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
1616 sp->in_interrupt = 0; /* Avoid halting machine. */
1622 status = inw(ioaddr + SCBStatus);
1623 /* Acknowledge all of the current interrupt sources ASAP. */
1624 /* Will change from 0xfc00 to 0xff00 when we start handling
1625 FCP and ER interrupts --Dragan */
1626 outw(status & 0xfc00, ioaddr + SCBStatus);
1628 if (netif_msg_intr(sp))
1629 printk(KERN_DEBUG "%s: interrupt status=%#4.4x.\n",
1632 if ((status & 0xfc00) == 0)
1637 if ((status & 0x5000) || /* Packet received, or Rx error. */
1638 (sp->rx_ring_state&(RrNoMem|RrPostponed)) == RrPostponed)
1639 /* Need to gather the postponed packet. */
1642 /* Always check if all rx buffers are allocated. --SAW */
1643 speedo_refill_rx_buffers(dev, 0);
1645 spin_lock(&sp->lock);
1647 * The chip may have suspended reception for various reasons.
1648 * Check for that, and re-prime it should this be the case.
/* Bits 5:2 of the SCB status word are the Receive Unit state. */
1650 switch ((status >> 2) & 0xf) {
1653 case 1: /* Suspended */
1654 case 2: /* No resources (RxFDs) */
1655 case 9: /* Suspended with no more RBDs */
1656 case 10: /* No resources due to no RBDs */
1657 case 12: /* Ready with no RBDs */
1658 speedo_rx_soft_reset(dev);
1660 case 3: case 5: case 6: case 7: case 8:
1661 case 11: case 13: case 14: case 15:
1662 /* these are all reserved values */
1667 /* User interrupt, Command/Tx unit interrupt or CU not active. */
1668 if (status & 0xA400) {
1669 speedo_tx_buffer_gc(dev)
1671 && (int)(sp->cur_tx - sp->dirty_tx) < TX_QUEUE_UNFULL) {
1672 /* The ring is no longer full. */
1674 netif_wake_queue(dev); /* Attention: under a spinlock. --SAW */
1678 spin_unlock(&sp->lock);
1680 if (--boguscnt < 0) {
1681 printk(KERN_ERR "%s: Too much work at interrupt, status=0x%4.4x.\n",
1683 /* Clear all interrupt sources. */
1684 /* Will change from 0xfc00 to 0xff00 when we start handling
1685 FCP and ER interrupts --Dragan */
1686 outw(0xfc00, ioaddr + SCBStatus);
1691 if (netif_msg_intr(sp))
1692 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1693 dev->name, inw(ioaddr + SCBStatus));
1695 clear_bit(0, (void*)&sp->in_interrupt);
1696 return IRQ_RETVAL(handled);
1699 static inline struct RxFD *speedo_rx_alloc(struct net_device *dev, int entry)
1701 struct speedo_private *sp = netdev_priv(dev);
1703 struct sk_buff *skb;
1704 /* Get a fresh skbuff to replace the consumed one. */
1705 skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
1706 /* XXX: do we really want to call this before the NULL check? --hch */
1707 rx_align(skb); /* Align IP on 16 byte boundary */
1708 sp->rx_skbuff[entry] = skb;
1710 sp->rx_ringp[entry] = NULL;
1713 rxf = sp->rx_ringp[entry] = (struct RxFD *)skb->tail;
1714 sp->rx_ring_dma[entry] =
1715 pci_map_single(sp->pdev, rxf,
1716 PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
1718 skb_reserve(skb, sizeof(struct RxFD));
1719 rxf->rx_buf_addr = 0xffffffff;
1720 pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[entry],
1721 sizeof(struct RxFD), PCI_DMA_TODEVICE);
1725 static inline void speedo_rx_link(struct net_device *dev, int entry,
1726 struct RxFD *rxf, dma_addr_t rxf_dma)
1728 struct speedo_private *sp = netdev_priv(dev);
1729 rxf->status = cpu_to_le32(0xC0000001); /* '1' for driver use only. */
1730 rxf->link = 0; /* None yet. */
1731 rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
1732 sp->last_rxf->link = cpu_to_le32(rxf_dma);
1733 sp->last_rxf->status &= cpu_to_le32(~0xC0000000);
1734 pci_dma_sync_single_for_device(sp->pdev, sp->last_rxf_dma,
1735 sizeof(struct RxFD), PCI_DMA_TODEVICE);
1737 sp->last_rxf_dma = rxf_dma;
/* Refill the oldest empty Rx ring slot.  If allocation fails and @force
   is set, steal an skb from a later (not yet consumed) slot so the chip
   always has at least one buffer; returns -1 when nothing could be done.
   NOTE(review): local declarations, braces and several returns were lost
   in extraction; preserved verbatim. */
1740 static int speedo_refill_rx_buf(struct net_device *dev, int force)
1742 struct speedo_private *sp = netdev_priv(dev);
1746 entry = sp->dirty_rx % RX_RING_SIZE;
1747 if (sp->rx_skbuff[entry] == NULL) {
1748 rxf = speedo_rx_alloc(dev, entry);
1752 if (netif_msg_rx_err(sp) || !(sp->rx_ring_state & RrOOMReported)) {
1753 printk(KERN_WARNING "%s: can't fill rx buffer (force %d)!\n",
1755 sp->rx_ring_state |= RrOOMReported;
1757 speedo_show_state(dev);
1759 return -1; /* Better luck next time! */
1760 /* Borrow an skb from one of next entries. */
1761 for (forw = sp->dirty_rx + 1; forw != sp->cur_rx; forw++)
1762 if (sp->rx_skbuff[forw % RX_RING_SIZE] != NULL)
1764 if (forw == sp->cur_rx)
1766 forw_entry = forw % RX_RING_SIZE;
1767 sp->rx_skbuff[entry] = sp->rx_skbuff[forw_entry];
1768 sp->rx_skbuff[forw_entry] = NULL;
1769 rxf = sp->rx_ringp[forw_entry];
1770 sp->rx_ringp[forw_entry] = NULL;
1771 sp->rx_ringp[entry] = rxf;
1774 rxf = sp->rx_ringp[entry];
1776 speedo_rx_link(dev, entry, rxf, sp->rx_ring_dma[entry]);
/* NOTE(review): sp->dirty_rx++ presumably happens here (line lost). */
1778 sp->rx_ring_state &= ~(RrNoMem|RrOOMReported); /* Mark the progress. */
1782 static void speedo_refill_rx_buffers(struct net_device *dev, int force)
1784 struct speedo_private *sp = netdev_priv(dev);
1786 /* Refill the RX ring. */
1787 while ((int)(sp->cur_rx - sp->dirty_rx) > 0 &&
1788 speedo_refill_rx_buf(dev, force) != -1);
/* Rx bottom half: walk completed RxFDs from cur_rx, copy small packets
   (< rx_copybreak) into fresh skbs, hand large ones up directly, refill
   slots as it goes.  A packet that is the last buffer in the ring is
   postponed (RrPostponed) to avoid an out-of-memory corner case.
   NOTE(review): return type, braces, `break`s and the error-path details
   were lost in extraction; preserved verbatim. */
1792 speedo_rx(struct net_device *dev)
1794 struct speedo_private *sp = netdev_priv(dev);
1795 int entry = sp->cur_rx % RX_RING_SIZE;
1796 int rx_work_limit = sp->dirty_rx + RX_RING_SIZE - sp->cur_rx;
1800 if (netif_msg_intr(sp))
1801 printk(KERN_DEBUG " In speedo_rx().\n");
1802 /* If we own the next entry, it's a new packet. Send it up. */
1803 while (sp->rx_ringp[entry] != NULL) {
1807 pci_dma_sync_single_for_cpu(sp->pdev, sp->rx_ring_dma[entry],
1808 sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
1809 status = le32_to_cpu(sp->rx_ringp[entry]->status);
1810 pkt_len = le32_to_cpu(sp->rx_ringp[entry]->count) & 0x3fff;
1812 if (!(status & RxComplete))
1815 if (--rx_work_limit < 0)
1818 /* Check for a rare out-of-memory case: the current buffer is
1819 the last buffer allocated in the RX ring. --SAW */
1820 if (sp->last_rxf == sp->rx_ringp[entry]) {
1821 /* Postpone the packet. It'll be reaped at an interrupt when this
1822 packet is no longer the last packet in the ring. */
1823 if (netif_msg_rx_err(sp))
1824 printk(KERN_DEBUG "%s: RX packet postponed!\n",
1826 sp->rx_ring_state |= RrPostponed;
1830 if (netif_msg_rx_status(sp))
1831 printk(KERN_DEBUG " speedo_rx() status %8.8x len %d.\n", status,
1833 if ((status & (RxErrTooBig|RxOK|0x0f90)) != RxOK) {
1834 if (status & RxErrTooBig)
1835 printk(KERN_ERR "%s: Ethernet frame overran the Rx buffer, "
1836 "status %8.8x!\n", dev->name, status);
1837 else if (! (status & RxOK)) {
1838 /* There was a fatal error. This *should* be impossible. */
1839 sp->stats.rx_errors++;
1840 printk(KERN_ERR "%s: Anomalous event in speedo_rx(), "
1845 struct sk_buff *skb;
1847 /* Check if the packet is long enough to just accept without
1848 copying to a properly sized skbuff. */
1849 if (pkt_len < rx_copybreak
1850 && (skb = dev_alloc_skb(pkt_len + 2)) != 0) {
1852 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1853 /* 'skb_put()' points to the start of sk_buff data area. */
1854 pci_dma_sync_single_for_cpu(sp->pdev, sp->rx_ring_dma[entry],
1855 sizeof(struct RxFD) + pkt_len,
1856 PCI_DMA_FROMDEVICE);
1858 #if 1 || USE_IP_CSUM
1859 /* Packet is in one chunk -- we can copy + cksum. */
1860 eth_copy_and_sum(skb, sp->rx_skbuff[entry]->tail, pkt_len, 0);
1861 skb_put(skb, pkt_len);
1863 memcpy(skb_put(skb, pkt_len), sp->rx_skbuff[entry]->tail,
1866 pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[entry],
1867 sizeof(struct RxFD) + pkt_len,
1868 PCI_DMA_FROMDEVICE);
1871 /* Pass up the already-filled skbuff. */
1872 skb = sp->rx_skbuff[entry];
1874 printk(KERN_ERR "%s: Inconsistent Rx descriptor chain.\n",
1878 sp->rx_skbuff[entry] = NULL;
1879 skb_put(skb, pkt_len);
1881 sp->rx_ringp[entry] = NULL;
1882 pci_unmap_single(sp->pdev, sp->rx_ring_dma[entry],
1883 PKT_BUF_SZ + sizeof(struct RxFD),
1884 PCI_DMA_FROMDEVICE);
1886 skb->protocol = eth_type_trans(skb, dev);
/* NOTE(review): netif_rx(skb) presumably occurs here (line lost). */
1888 dev->last_rx = jiffies;
1889 sp->stats.rx_packets++;
1890 sp->stats.rx_bytes += pkt_len;
1892 entry = (++sp->cur_rx) % RX_RING_SIZE;
1893 sp->rx_ring_state &= ~RrPostponed;
1894 /* Refill the recently taken buffers.
1895 Do it one-by-one to handle traffic bursts better. */
1896 if (alloc_ok && speedo_refill_rx_buf(dev, 0) == -1)
1900 /* Try hard to refill the recently taken buffers. */
1901 speedo_refill_rx_buffers(dev, 1);
1904 sp->last_rx_time = jiffies;
/* net_device stop() method: quiesce the chip (PortPartialReset to kill
   flow control), free the IRQ, unmap and free all Rx/Tx skbs and queued
   multicast blocks, then drop the device to PCI power state D2.
   NOTE(review): braces, the udelay() quiet-period line and skb-NULL
   checks were lost in extraction; preserved verbatim. */
1910 speedo_close(struct net_device *dev)
1912 long ioaddr = dev->base_addr;
1913 struct speedo_private *sp = netdev_priv(dev);
1916 netdevice_stop(dev);
1917 netif_stop_queue(dev);
1919 if (netif_msg_ifdown(sp))
1920 printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
1921 dev->name, inw(ioaddr + SCBStatus));
1923 /* Shut off the media monitoring timer. */
1924 del_timer_sync(&sp->timer);
1926 outw(SCBMaskAll, ioaddr + SCBCmd);
1928 /* Shutting down the chip nicely fails to disable flow control. So.. */
1929 outl(PortPartialReset, ioaddr + SCBPort);
1930 inl(ioaddr + SCBPort); /* flush posted write */
1932 * The chip requires a 10 microsecond quiet period. Wait here!
1936 free_irq(dev->irq, dev);
1937 speedo_show_state(dev);
1939 /* Free all the skbuffs in the Rx and Tx queues. */
1940 for (i = 0; i < RX_RING_SIZE; i++) {
1941 struct sk_buff *skb = sp->rx_skbuff[i];
1942 sp->rx_skbuff[i] = 0;
1943 /* Clear the Rx descriptors. */
1945 pci_unmap_single(sp->pdev,
1947 PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
1952 for (i = 0; i < TX_RING_SIZE; i++) {
1953 struct sk_buff *skb = sp->tx_skbuff[i];
1954 sp->tx_skbuff[i] = 0;
1955 /* Clear the Tx descriptors. */
1957 pci_unmap_single(sp->pdev,
1958 le32_to_cpu(sp->tx_ring[i].tx_buf_addr0),
1959 skb->len, PCI_DMA_TODEVICE);
1964 /* Free multicast setting blocks. */
1965 for (i = 0; sp->mc_setup_head != NULL; i++) {
1966 struct speedo_mc_block *t;
1967 t = sp->mc_setup_head->next;
1968 kfree(sp->mc_setup_head);
1969 sp->mc_setup_head = t;
1971 sp->mc_setup_tail = NULL;
1972 if (netif_msg_ifdown(sp))
1973 printk(KERN_DEBUG "%s: %d multicast blocks dropped.\n", dev->name, i);
1975 pci_set_power_state(sp->pdev, 2);
1980 /* The Speedo-3 has an especially awkward and unusable method of getting
1981 statistics out of the chip. It takes an unpredictable length of time
1982 for the dump-stats command to complete. To avoid a busy-wait loop we
1983 update the stats with the previous dump results, and then trigger a
1986 Oh, and incoming frames are dropped while executing dump-stats!
*/
/* net_device get_stats: fold the previous CUDumpStats results (valid only
   when done_marker == 0xA007) into sp->stats, then trigger a new async
   dump if the interface is running.  Returns the accumulated stats.
   NOTE(review): braces and the `return &sp->stats;` tail were lost in
   extraction; preserved verbatim. */
1988 static struct net_device_stats *
1989 speedo_get_stats(struct net_device *dev)
1991 struct speedo_private *sp = netdev_priv(dev);
1992 long ioaddr = dev->base_addr;
1994 /* Update only if the previous dump finished. */
1995 if (sp->lstats->done_marker == le32_to_cpu(0xA007)) {
1996 sp->stats.tx_aborted_errors += le32_to_cpu(sp->lstats->tx_coll16_errs);
1997 sp->stats.tx_window_errors += le32_to_cpu(sp->lstats->tx_late_colls);
1998 sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats->tx_underruns);
1999 sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats->tx_lost_carrier);
2000 /*sp->stats.tx_deferred += le32_to_cpu(sp->lstats->tx_deferred);*/
2001 sp->stats.collisions += le32_to_cpu(sp->lstats->tx_total_colls);
2002 sp->stats.rx_crc_errors += le32_to_cpu(sp->lstats->rx_crc_errs);
2003 sp->stats.rx_frame_errors += le32_to_cpu(sp->lstats->rx_align_errs);
2004 sp->stats.rx_over_errors += le32_to_cpu(sp->lstats->rx_resource_errs);
2005 sp->stats.rx_fifo_errors += le32_to_cpu(sp->lstats->rx_overrun_errs);
2006 sp->stats.rx_length_errors += le32_to_cpu(sp->lstats->rx_runt_errs);
2007 sp->lstats->done_marker = 0x0000;
2008 if (netif_running(dev)) {
2009 unsigned long flags;
2010 /* Take a spinlock to make wait_for_cmd_done and sending the
2011 command atomic. --SAW */
2012 spin_lock_irqsave(&sp->lock, flags);
2013 wait_for_cmd_done(dev);
2014 outb(CUDumpStats, ioaddr + SCBCmd);
2015 spin_unlock_irqrestore(&sp->lock, flags);
/*
 * netdev_ethtool_ioctl - handle SIOCETHTOOL sub-commands: driver info,
 * link settings get/set (via the generic MII helpers), autonegotiation
 * restart, link status, and message-level get/set. All user-space data
 * moves through copy_from_user/copy_to_user.
 *
 * FIX: the first argument of the initial copy_from_user() had been
 * mangled by an HTML entity ("&eth" -> "ð"); restored to &ethcmd,
 * which matches the sizeof(ethcmd) on the same line.
 *
 * NOTE(review): this extract is missing several original lines (the
 * ethcmd declaration, the switch statement, early returns and closing
 * braces); only the visible statements are documented here.
 */
2021 static int netdev_ethtool_ioctl(struct net_device *dev, void *useraddr)
2024 struct speedo_private *sp = netdev_priv(dev);
/* The first word of the user buffer selects the ethtool sub-command. */
2026 if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
2030 /* get driver-specific version/etc. info */
2031 case ETHTOOL_GDRVINFO: {
/* Aggregate init zero-fills everything past the first member, so the
   size-1 strncpy calls below always leave a terminating NUL. */
2032 struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO};
2033 strncpy(info.driver, "eepro100", sizeof(info.driver)-1);
2034 strncpy(info.version, version, sizeof(info.version)-1);
2036 strcpy(info.bus_info, pci_name(sp->pdev));
2037 if (copy_to_user(useraddr, &info, sizeof(info)))
2043 case ETHTOOL_GSET: {
2044 struct ethtool_cmd ecmd = { ETHTOOL_GSET };
/* Serialize MII access against the interrupt path. */
2045 spin_lock_irq(&sp->lock);
2046 mii_ethtool_gset(&sp->mii_if, &ecmd);
2047 spin_unlock_irq(&sp->lock);
2048 if (copy_to_user(useraddr, &ecmd, sizeof(ecmd)))
2053 case ETHTOOL_SSET: {
2055 struct ethtool_cmd ecmd;
2056 if (copy_from_user(&ecmd, useraddr, sizeof(ecmd)))
2058 spin_lock_irq(&sp->lock);
2059 r = mii_ethtool_sset(&sp->mii_if, &ecmd);
2060 spin_unlock_irq(&sp->lock);
2063 /* restart autonegotiation */
2064 case ETHTOOL_NWAY_RST: {
2065 return mii_nway_restart(&sp->mii_if);
2067 /* get link status */
2068 case ETHTOOL_GLINK: {
2069 struct ethtool_value edata = {ETHTOOL_GLINK};
2070 edata.data = mii_link_ok(&sp->mii_if);
2071 if (copy_to_user(useraddr, &edata, sizeof(edata)))
2075 /* get message-level */
2076 case ETHTOOL_GMSGLVL: {
2077 struct ethtool_value edata = {ETHTOOL_GMSGLVL};
2078 edata.data = sp->msg_enable;
2079 if (copy_to_user(useraddr, &edata, sizeof(edata)))
2083 /* set message-level */
2084 case ETHTOOL_SMSGLVL: {
2085 struct ethtool_value edata;
2086 if (copy_from_user(&edata, useraddr, sizeof(edata)))
2088 sp->msg_enable = edata.data;
/*
 * speedo_ioctl - private ioctl handler: SIOCGMIIPHY / SIOCGMIIREG /
 * SIOCSMIIREG MII access, with everything else (SIOCETHTOOL) routed to
 * netdev_ethtool_ioctl().
 *
 * NOTE(review): this extract is missing original lines (the switch
 * statement, the declarations of 'saved_acpi' and 't', returns and
 * closing braces); comments cover only the visible statements.
 */
2097 static int speedo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2099 struct speedo_private *sp = netdev_priv(dev);
2100 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&rq->ifr_data;
2101 int phy = sp->phy[0] & 0x1f;
2106 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
2109 case SIOCGMIIREG: /* Read MII PHY register. */
2110 /* FIXME: these operations need to be serialized with MDIO
2111 access from the timeout handler.
2112 They are currently serialized only with MDIO access from the
2113 timer routine. 2000/05/09 SAW */
/* Wake the chip (D0) for MDIO access and stop the media timer so it
   cannot touch the MDIO interface concurrently. */
2114 saved_acpi = pci_set_power_state(sp->pdev, 0);
2115 t = del_timer_sync(&sp->timer);
2116 data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
2118 add_timer(&sp->timer); /* may be set to the past --SAW */
/* Restore the previous ACPI power state. */
2119 pci_set_power_state(sp->pdev, saved_acpi);
2122 case SIOCSMIIREG: /* Write MII PHY register. */
/* Writing PHY registers is privileged. */
2123 if (!capable(CAP_NET_ADMIN))
2125 saved_acpi = pci_set_power_state(sp->pdev, 0);
2126 t = del_timer_sync(&sp->timer);
2127 mdio_write(dev, data->phy_id, data->reg_num, data->val_in);
2129 add_timer(&sp->timer); /* may be set to the past --SAW */
2130 pci_set_power_state(sp->pdev, saved_acpi);
/* Everything else is an ethtool request. */
2133 return netdev_ethtool_ioctl(dev, (void *) rq->ifr_data);
2139 /* Set or clear the multicast filter for this adaptor.
2140 This is very ugly with Intel chips -- we usually have to execute an
2141 entire configuration command, plus process a multicast command.
2142 This is complicated. We must put a large configuration command and
2143 an arbitrarily-sized multicast command in the transmit list.
2144 To minimize the disruption -- the previous command might have already
2145 loaded the link -- we convert the current command block, normally a Tx
2146 command, into a no-op and link it to the new command.
/*
 * set_rx_mode - program promiscuous/allmulti/multicast filtering by
 * queueing CmdConfigure and CmdMulticastList frames on the Tx command
 * ring (see the explanatory comment above this function).
 *
 * Three paths: (1) rx_mode changed -> full CmdConfigure frame in a ring
 * slot; (2) 0-3 multicast addresses -> small CmdMulticastList that fits
 * in one ring slot; (3) larger lists -> out-of-ring setup frame in a
 * kmalloc'd speedo_mc_block linked in via a NoOp descriptor.
 *
 * NOTE(review): this extract is missing original lines (the computation
 * of new_rx_mode, several closing braces, early returns and kmalloc's
 * GFP flags); comments cover only the visible statements.
 */
2148 static void set_rx_mode(struct net_device *dev)
2150 struct speedo_private *sp = netdev_priv(dev);
2151 long ioaddr = dev->base_addr;
2152 struct descriptor *last_cmd;
2154 unsigned long flags;
/* Decide the new mode: promiscuous, allmulti (also forced when the
   list exceeds multicast_filter_limit), or filtered. The assignments
   themselves are in lines elided from this extract. */
2157 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
2159 } else if ((dev->flags & IFF_ALLMULTI) ||
2160 dev->mc_count > multicast_filter_limit) {
2165 if (netif_msg_rx_status(sp))
2166 printk(KERN_DEBUG "%s: set_rx_mode %d -> %d\n", dev->name,
2167 sp->rx_mode, new_rx_mode);
2169 if ((int)(sp->cur_tx - sp->dirty_tx) > TX_RING_SIZE - TX_MULTICAST_SIZE) {
2170 /* The Tx ring is full -- don't add anything! Hope the mode will be
2171 * set again later. */
/* Path (1): the receive mode changed -- emit a full configure frame. */
2176 if (new_rx_mode != sp->rx_mode) {
2177 u8 *config_cmd_data;
2179 spin_lock_irqsave(&sp->lock, flags);
2180 entry = sp->cur_tx++ % TX_RING_SIZE;
2181 last_cmd = sp->last_cmd;
2182 sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
2184 sp->tx_skbuff[entry] = 0; /* Redundant. */
2185 sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdConfigure);
2186 sp->tx_ring[entry].link =
2187 cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
2188 config_cmd_data = (void *)&sp->tx_ring[entry].tx_desc_addr;
2189 /* Construct a full CmdConfig frame. */
2190 memcpy(config_cmd_data, i82558_config_cmd, CONFIG_DATA_SIZE);
/* Patch the template with the module-parameter FIFO/DMA settings. */
2191 config_cmd_data[1] = (txfifo << 4) | rxfifo;
2192 config_cmd_data[4] = rxdmacount;
2193 config_cmd_data[5] = txdmacount + 0x80;
/* Bit 1 of new_rx_mode selects allmulti behavior in config byte 15. */
2194 config_cmd_data[15] |= (new_rx_mode & 2) ? 1 : 0;
2195 /* 0x80 doesn't disable FC 0x84 does.
2196 Disable Flow control since we are not ACK-ing any FC interrupts
2197 for now. --Dragan */
2198 config_cmd_data[19] = 0x84;
2199 config_cmd_data[19] |= sp->mii_if.full_duplex ? 0x40 : 0;
/* Bit 0 of new_rx_mode selects promiscuous reception. */
2200 config_cmd_data[21] = (new_rx_mode & 1) ? 0x0D : 0x05;
2201 if (sp->phy[0] & 0x8000) { /* Use the AUI port instead. */
2202 config_cmd_data[15] |= 0x80;
2203 config_cmd_data[8] = 0;
2205 /* Trigger the command unit resume. */
2206 wait_for_cmd_done(dev);
2207 clear_suspend(last_cmd);
2208 outb(CUResume, ioaddr + SCBCmd);
2209 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
2210 netif_stop_queue(dev);
2213 spin_unlock_irqrestore(&sp->lock, flags);
/* Path (2): small multicast list fits inside one Tx ring element. */
2216 if (new_rx_mode == 0 && dev->mc_count < 4) {
2217 /* The simple case of 0-3 multicast list entries occurs often, and
2218 fits within one tx_ring[] entry. */
2219 struct dev_mc_list *mclist;
2220 u16 *setup_params, *eaddrs;
2222 spin_lock_irqsave(&sp->lock, flags);
2223 entry = sp->cur_tx++ % TX_RING_SIZE;
2224 last_cmd = sp->last_cmd;
2225 sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
2227 sp->tx_skbuff[entry] = 0;
2228 sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdMulticastList);
2229 sp->tx_ring[entry].link =
2230 cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
2231 sp->tx_ring[entry].tx_desc_addr = 0; /* Really MC list count. */
2232 setup_params = (u16 *)&sp->tx_ring[entry].tx_desc_addr;
/* First 16-bit word is the byte count of the address list (6 per MAC). */
2233 *setup_params++ = cpu_to_le16(dev->mc_count*6);
2234 /* Fill in the multicast addresses. */
2235 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
2236 i++, mclist = mclist->next) {
/* Copy each 6-byte MAC address as three 16-bit words. */
2237 eaddrs = (u16 *)mclist->dmi_addr;
2238 *setup_params++ = *eaddrs++;
2239 *setup_params++ = *eaddrs++;
2240 *setup_params++ = *eaddrs++;
2243 wait_for_cmd_done(dev);
2244 clear_suspend(last_cmd);
2245 /* Immediately trigger the command unit resume. */
2246 outb(CUResume, ioaddr + SCBCmd);
2248 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
2249 netif_stop_queue(dev);
2252 spin_unlock_irqrestore(&sp->lock, flags);
/* Path (3): larger list -- build an out-of-ring setup frame. */
2253 } else if (new_rx_mode == 0) {
2254 struct dev_mc_list *mclist;
2255 u16 *setup_params, *eaddrs;
2256 struct speedo_mc_block *mc_blk;
2257 struct descriptor *mc_setup_frm;
/* Block holds the descriptor plus a 2-byte count and up to
   multicast_filter_limit 6-byte addresses. */
2260 mc_blk = kmalloc(sizeof(*mc_blk) + 2 + multicast_filter_limit*6,
2262 if (mc_blk == NULL) {
2263 printk(KERN_ERR "%s: Failed to allocate a setup frame.\n",
2265 sp->rx_mode = -1; /* We failed, try again. */
2268 mc_blk->next = NULL;
2269 mc_blk->len = 2 + multicast_filter_limit*6;
/* Map the frame for DMA; the result is stored in mc_blk->frame_dma
   on an elided line. */
2271 pci_map_single(sp->pdev, &mc_blk->frame, mc_blk->len,
2273 mc_setup_frm = &mc_blk->frame;
2275 /* Fill the setup frame. */
2276 if (netif_msg_ifup(sp))
2277 printk(KERN_DEBUG "%s: Constructing a setup frame at %p.\n",
2278 dev->name, mc_setup_frm);
/* CmdIntr so completion interrupts let us free the block later. */
2279 mc_setup_frm->cmd_status =
2280 cpu_to_le32(CmdSuspend | CmdIntr | CmdMulticastList);
2281 /* Link set below. */
2282 setup_params = (u16 *)&mc_setup_frm->params;
2283 *setup_params++ = cpu_to_le16(dev->mc_count*6);
2284 /* Fill in the multicast addresses. */
2285 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
2286 i++, mclist = mclist->next) {
2287 eaddrs = (u16 *)mclist->dmi_addr;
2288 *setup_params++ = *eaddrs++;
2289 *setup_params++ = *eaddrs++;
2290 *setup_params++ = *eaddrs++;
2293 /* Disable interrupts while playing with the Tx Cmd list. */
2294 spin_lock_irqsave(&sp->lock, flags);
/* Append the block to the mc_setup list so it can be reclaimed once
   the chip reports the command complete. */
2296 if (sp->mc_setup_tail)
2297 sp->mc_setup_tail->next = mc_blk;
2299 sp->mc_setup_head = mc_blk;
2300 sp->mc_setup_tail = mc_blk;
2301 mc_blk->tx = sp->cur_tx;
2303 entry = sp->cur_tx++ % TX_RING_SIZE;
2304 last_cmd = sp->last_cmd;
2305 sp->last_cmd = mc_setup_frm;
2307 /* Change the command to a NoOp, pointing to the CmdMulti command. */
2308 sp->tx_skbuff[entry] = 0;
2309 sp->tx_ring[entry].status = cpu_to_le32(CmdNOp);
2310 sp->tx_ring[entry].link = cpu_to_le32(mc_blk->frame_dma);
2312 /* Set the link in the setup frame. */
2313 mc_setup_frm->link =
2314 cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
/* Make the CPU-written frame visible to the device before resuming. */
2316 pci_dma_sync_single_for_device(sp->pdev, mc_blk->frame_dma,
2317 mc_blk->len, PCI_DMA_TODEVICE);
2319 wait_for_cmd_done(dev);
2320 clear_suspend(last_cmd);
2321 /* Immediately trigger the command unit resume. */
2322 outb(CUResume, ioaddr + SCBCmd);
2324 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
2325 netif_stop_queue(dev);
2328 spin_unlock_irqrestore(&sp->lock, flags);
2330 if (netif_msg_rx_status(sp))
2331 printk(" CmdMCSetup frame length %d in entry %d.\n",
2332 dev->mc_count, entry);
/* Remember the active mode so redundant reprogramming is skipped. */
2335 sp->rx_mode = new_rx_mode;
/*
 * eepro100_suspend - PCI power-management suspend hook: save PCI config
 * state, and if the interface is running, stop the media timer, detach
 * the net device and partially reset the chip.
 *
 * NOTE(review): this extract is missing original lines (braces, the
 * return statements); comments cover only the visible statements.
 */
2339 static int eepro100_suspend(struct pci_dev *pdev, u32 state)
2341 struct net_device *dev = pci_get_drvdata (pdev);
2342 struct speedo_private *sp = netdev_priv(dev);
2343 long ioaddr = dev->base_addr;
/* Preserve PCI config space so resume can restore it. */
2345 pci_save_state(pdev, sp->pm_state);
/* Nothing more to do if the interface is down. */
2347 if (!netif_running(dev))
2350 del_timer_sync(&sp->timer);
2352 netif_device_detach(dev);
/* Partial reset quiesces the chip without clearing all state. */
2353 outl(PortPartialReset, ioaddr + SCBPort);
2355 /* XXX call pci_set_power_state ()? */
/*
 * eepro100_resume - PCI power-management resume hook: restore PCI config
 * state and, if the interface was running, mask interrupts, reattach the
 * device, reset link-partner state and restart the media timer.
 *
 * NOTE(review): this extract is missing original lines (braces, returns,
 * and part of the author's caveat list); comments cover only the visible
 * statements.
 */
2359 static int eepro100_resume(struct pci_dev *pdev)
2361 struct net_device *dev = pci_get_drvdata (pdev);
2362 struct speedo_private *sp = netdev_priv(dev);
2363 long ioaddr = dev->base_addr;
2365 pci_restore_state(pdev, sp->pm_state);
2367 if (!netif_running(dev))
2370 /* I'm absolutely uncertain if this part of code may work.
2372 - correct hardware reinitialization;
2373 - correct driver behavior between different steps of the
2375 - serialization with other driver calls.
/* Keep interrupts masked until reinitialization is complete. */
2377 outw(SCBMaskAll, ioaddr + SCBCmd);
2379 netif_device_attach(dev);
/* Forget the old negotiated link parameters; the timer re-probes them. */
2381 sp->flow_ctrl = sp->partner = 0;
2383 sp->timer.expires = RUN_AT(2*HZ);
2384 add_timer(&sp->timer);
2387 #endif /* CONFIG_PM */
/*
 * eepro100_remove_one - PCI hot-unplug/teardown: unregister the netdev,
 * release the I/O and memory regions, unmap the registers, free the
 * consistent DMA area holding the Tx ring plus stats block, and disable
 * the PCI device.
 *
 * NOTE(review): braces and a free_netdev()-style final release are not
 * visible in this extract.
 */
2389 static void __devexit eepro100_remove_one (struct pci_dev *pdev)
2391 struct net_device *dev = pci_get_drvdata (pdev);
2392 struct speedo_private *sp = netdev_priv(dev);
2394 unregister_netdev(dev);
/* BAR1 is the I/O port range, BAR0 the memory-mapped registers. */
2396 release_region(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
2397 release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
2400 iounmap((char *)dev->base_addr);
/* Tx ring and the speedo_stats block were allocated together. */
2403 pci_free_consistent(pdev, TX_RING_SIZE * sizeof(struct TxFD)
2404 + sizeof(struct speedo_stats),
2405 sp->tx_ring, sp->tx_ring_dma);
2406 pci_disable_device(pdev);
/*
 * PCI IDs claimed by this driver: the Intel i82557/8/9 family, including
 * ICH-integrated (82801) LAN controllers and later 8255x variants.
 * NOTE(review): the terminating { 0, } sentinel entry is not visible in
 * this extract; it must exist for the table to be well-formed.
 */
2410 static struct pci_device_id eepro100_pci_tbl[] = {
2411 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82557,
2412 PCI_ANY_ID, PCI_ANY_ID, },
2413 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82559ER,
2414 PCI_ANY_ID, PCI_ANY_ID, },
2415 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_7,
2416 PCI_ANY_ID, PCI_ANY_ID, },
2417 { PCI_VENDOR_ID_INTEL, 0x1029, PCI_ANY_ID, PCI_ANY_ID, },
2418 { PCI_VENDOR_ID_INTEL, 0x1030, PCI_ANY_ID, PCI_ANY_ID, },
2419 { PCI_VENDOR_ID_INTEL, 0x1031, PCI_ANY_ID, PCI_ANY_ID, },
2420 { PCI_VENDOR_ID_INTEL, 0x1032, PCI_ANY_ID, PCI_ANY_ID, },
2421 { PCI_VENDOR_ID_INTEL, 0x1033, PCI_ANY_ID, PCI_ANY_ID, },
2422 { PCI_VENDOR_ID_INTEL, 0x1034, PCI_ANY_ID, PCI_ANY_ID, },
2423 { PCI_VENDOR_ID_INTEL, 0x1035, PCI_ANY_ID, PCI_ANY_ID, },
2424 { PCI_VENDOR_ID_INTEL, 0x1036, PCI_ANY_ID, PCI_ANY_ID, },
2425 { PCI_VENDOR_ID_INTEL, 0x1037, PCI_ANY_ID, PCI_ANY_ID, },
2426 { PCI_VENDOR_ID_INTEL, 0x1038, PCI_ANY_ID, PCI_ANY_ID, },
2427 { PCI_VENDOR_ID_INTEL, 0x1039, PCI_ANY_ID, PCI_ANY_ID, },
2428 { PCI_VENDOR_ID_INTEL, 0x103A, PCI_ANY_ID, PCI_ANY_ID, },
2429 { PCI_VENDOR_ID_INTEL, 0x103B, PCI_ANY_ID, PCI_ANY_ID, },
2430 { PCI_VENDOR_ID_INTEL, 0x103C, PCI_ANY_ID, PCI_ANY_ID, },
2431 { PCI_VENDOR_ID_INTEL, 0x103D, PCI_ANY_ID, PCI_ANY_ID, },
2432 { PCI_VENDOR_ID_INTEL, 0x103E, PCI_ANY_ID, PCI_ANY_ID, },
2433 { PCI_VENDOR_ID_INTEL, 0x1050, PCI_ANY_ID, PCI_ANY_ID, },
2434 { PCI_VENDOR_ID_INTEL, 0x1059, PCI_ANY_ID, PCI_ANY_ID, },
2435 { PCI_VENDOR_ID_INTEL, 0x1227, PCI_ANY_ID, PCI_ANY_ID, },
2436 { PCI_VENDOR_ID_INTEL, 0x1228, PCI_ANY_ID, PCI_ANY_ID, },
2437 { PCI_VENDOR_ID_INTEL, 0x2449, PCI_ANY_ID, PCI_ANY_ID, },
2438 { PCI_VENDOR_ID_INTEL, 0x2459, PCI_ANY_ID, PCI_ANY_ID, },
2439 { PCI_VENDOR_ID_INTEL, 0x245D, PCI_ANY_ID, PCI_ANY_ID, },
2440 { PCI_VENDOR_ID_INTEL, 0x5200, PCI_ANY_ID, PCI_ANY_ID, },
2441 { PCI_VENDOR_ID_INTEL, 0x5201, PCI_ANY_ID, PCI_ANY_ID, },
2444 MODULE_DEVICE_TABLE(pci, eepro100_pci_tbl);
/*
 * PCI driver registration glue: binds the probe/remove and (under
 * CONFIG_PM) suspend/resume callbacks to the device table above.
 * NOTE(review): the .name initializer and the opening #ifdef CONFIG_PM
 * guard are not visible in this extract.
 */
2446 static struct pci_driver eepro100_driver = {
2448 .id_table = eepro100_pci_tbl,
2449 .probe = eepro100_init_one,
2450 .remove = __devexit_p(eepro100_remove_one),
2452 .suspend = eepro100_suspend,
2453 .resume = eepro100_resume,
2454 #endif /* CONFIG_PM */
/*
 * Compatibility shim for pre-2.3.48 kernels: emulate pci_module_init()
 * on top of pci_register_driver(), logging and unregistering again when
 * no matching cards were found.
 * NOTE(review): braces, the 'rc' declaration and return statements are
 * not visible in this extract.
 */
2457 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,48)
2458 static int pci_module_init(struct pci_driver *pdev)
2462 rc = pci_register_driver(pdev);
2464 printk(KERN_INFO "%s: No cards found, driver not installed.\n",
2466 pci_unregister_driver(pdev);
/*
 * Module entry point: register the PCI driver.
 * NOTE(review): braces and a version printk are not visible here.
 */
2473 static int __init eepro100_init_module(void)
2478 return pci_module_init(&eepro100_driver);
/* Module exit point: unregister the PCI driver (which in turn removes
   all bound devices). NOTE(review): braces are not visible here. */
2481 static void __exit eepro100_cleanup_module(void)
2483 pci_unregister_driver(&eepro100_driver);
2486 module_init(eepro100_init_module);
2487 module_exit(eepro100_cleanup_module);
2491 * compile-command: "gcc -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c eepro100.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"