1 /*******************************************************************************
4 Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
6 This program is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 2 of the License, or (at your option)
11 This program is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc., 59
18 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 The full GNU General Public License is included in this distribution in the
24 Linux NICS <linux.nics@intel.com>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *******************************************************************************/
30 * e100.c: Intel(R) PRO/100 Ethernet driver
32 * (Re)written 2003 by scott.feldman@intel.com. Based loosely on
33 * original e100 driver, but better described as a munging of
34 * e100, e1000, eepro100, tg3, 8139cp, and other drivers.
37 * Intel 8255x 10/100 Mbps Ethernet Controller Family,
38 * Open Source Software Developers Manual,
39 * http://sourceforge.net/projects/e1000
46 * The driver supports Intel(R) 10/100 Mbps PCI Fast Ethernet
47 * controller family, which includes the 82557, 82558, 82559, 82550,
48 * 82551, and 82562 devices. 82558 and greater controllers
49 * integrate the Intel 82555 PHY. The controllers are used in
50 * server and client network interface cards, as well as in
51 * LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx
52 * configurations. 8255x supports a 32-bit linear addressing
53 * mode and operates at a 33 MHz PCI clock rate.
55 * II. Driver Operation
57 * Memory-mapped mode is used exclusively to access the device's
58 * shared-memory structure, the Control/Status Registers (CSR). All
59 * setup, configuration, and control of the device, including queuing
60 * of Tx, Rx, and configuration commands is through the CSR.
61 * cmd_lock serializes accesses to the CSR command register. cb_lock
62 * protects the shared Command Block List (CBL).
64 * 8255x is highly MII-compliant, and all accesses to the PHY go
65 * through the Management Data Interface (MDI). Consequently, the
66 * driver leverages the mii.c library shared with other MII-compliant
69 * Big- and Little-Endian byte order as well as 32- and 64-bit
70 * archs are supported. Weak-ordered memory and non-cache-coherent
71 * archs are supported.
75 * A Tx skb is mapped and hangs off of a TCB. TCBs are linked
76 * together in a fixed-size ring (CBL) thus forming the flexible mode
77 * memory structure. A TCB marked with the suspend-bit indicates
78 * the end of the ring. The last TCB processed suspends the
79 * controller, and the controller can be restarted by issue a CU
80 * resume command to continue from the suspend point, or a CU start
81 * command to start at a given position in the ring.
83 * Non-Tx commands (config, multicast setup, etc) are linked
84 * into the CBL ring along with Tx commands. The common structure
85 * used for both Tx and non-Tx commands is the Command Block (CB).
87 * cb_to_use is the next CB to use for queuing a command; cb_to_clean
88 * is the next CB to check for completion; cb_to_send is the first
89 * CB to start on in case of a previous failure to resume. CB clean
90 * up happens in interrupt context in response to a CU interrupt.
91 * cbs_avail keeps track of the number of free CB resources available.
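 *
 *	An illustrative sketch (added, not from the original text) of how
 *	the three pointers chase each other around the CBL ring:
 *
 *	  ... [clean][clean][sent][sent][queued][free][free] ...
 *	       ^cb_to_clean        ^cb_to_send   ^cb_to_use
 *
 *	CBs from cb_to_clean up to cb_to_send have been issued to the CU
 *	and await completion; CBs from cb_to_send up to cb_to_use are
 *	queued but not yet issued (only after a failed resume); the rest
 *	are free and counted by cbs_avail.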
93 * Hardware padding of short packets to minimum packet size is
94 * enabled. 82557 pads with 7Eh, while the later controllers pad
99 * The Receive Frame Area (RFA) comprises a ring of Receive Frame
100 * Descriptors (RFD) + data buffer, thus forming the simplified mode
101 * memory structure. Rx skbs are allocated to contain both the RFD
102 * and the data buffer, but the RFD is pulled off before the skb is
103 * indicated. The data buffer is aligned such that encapsulated
104 * protocol headers are u32-aligned. Since the RFD is part of the
105 * mapped shared memory, and completion status is contained within
106 * the RFD, the RFD must be dma_sync'ed to maintain a consistent
107 * view from software and hardware.
109 * Under typical operation, the receive unit (RU) is started once,
110 * and the controller happily fills RFDs as frames arrive. If
111 * replacement RFDs cannot be allocated, or the RU goes non-active,
112 * the RU must be restarted. Frame arrival generates an interrupt,
113 * and Rx indication and re-allocation happen in the same context,
114 * therefore no locking is required. A software-generated interrupt
115 * is generated from the watchdog to recover from a failed allocation
116 * scenario where all Rx resources have been indicated and none re-
121 * VLAN offloading of tagging, stripping and filtering is not
122 * supported, but the driver will accommodate the extra 4-byte VLAN tag
123 * for processing by upper layers. Tx/Rx Checksum offloading is not
124 * supported. Tx Scatter/Gather is not supported. Jumbo Frames are
125 * not supported (hardware limitation).
127 * MagicPacket(tm) WoL support is enabled/disabled via ethtool.
129 * Thanks to JC (jchapman@katalix.com) for helping with
130 * testing/troubleshooting the development driver.
133 * o several entry points race with dev->close
134 * o check for tx-no-resources/stop Q races with tx clean/wake Q
137 #include <linux/config.h>
138 #include <linux/module.h>
139 #include <linux/moduleparam.h>
140 #include <linux/kernel.h>
141 #include <linux/types.h>
142 #include <linux/slab.h>
143 #include <linux/delay.h>
144 #include <linux/init.h>
145 #include <linux/pci.h>
146 #include <linux/netdevice.h>
147 #include <linux/etherdevice.h>
148 #include <linux/mii.h>
149 #include <linux/if_vlan.h>
150 #include <linux/skbuff.h>
151 #include <linux/ethtool.h>
152 #include <linux/string.h>
153 #include <asm/unaligned.h>
156 #define DRV_NAME "e100"
157 #define DRV_EXT "-NAPI"
158 #define DRV_VERSION "3.0.27-k2"DRV_EXT
159 #define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver"
160 #define DRV_COPYRIGHT "Copyright(c) 1999-2004 Intel Corporation"
161 #define PFX DRV_NAME ": "
163 #define E100_WATCHDOG_PERIOD (2 * HZ)
164 #define E100_NAPI_WEIGHT 16
166 MODULE_DESCRIPTION(DRV_DESCRIPTION);
167 MODULE_AUTHOR(DRV_COPYRIGHT);
168 MODULE_LICENSE("GPL");
169 MODULE_VERSION(DRV_VERSION);
171 static int debug = 3;
172 module_param(debug, int, 0);
173 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
174 #define DPRINTK(nlevel, klevel, fmt, args...) \
175 (void)((NETIF_MSG_##nlevel & nic->msg_enable) && \
176 printk(KERN_##klevel PFX "%s: %s: " fmt, nic->netdev->name, \
177 __FUNCTION__ , ## args))
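/* Illustrative usage (added): the line below would print only when
 * NETIF_MSG_LINK is set in nic->msg_enable, which is seeded from the
 * "debug" module parameter, e.g. "modprobe e100 debug=5".  The "up"
 * variable is hypothetical.
 *
 *	DPRINTK(LINK, INFO, "link is %s\n", up ? "up" : "down");
 */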
179 #define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
180 PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
181 PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
182 static struct pci_device_id e100_id_table[] = {
183 INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
184 INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
185 INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
186 INTEL_8255X_ETHERNET_DEVICE(0x1032, 3),
187 INTEL_8255X_ETHERNET_DEVICE(0x1033, 3),
188 INTEL_8255X_ETHERNET_DEVICE(0x1034, 3),
189 INTEL_8255X_ETHERNET_DEVICE(0x1038, 3),
190 INTEL_8255X_ETHERNET_DEVICE(0x1039, 4),
191 INTEL_8255X_ETHERNET_DEVICE(0x103A, 4),
192 INTEL_8255X_ETHERNET_DEVICE(0x103B, 4),
193 INTEL_8255X_ETHERNET_DEVICE(0x103C, 4),
194 INTEL_8255X_ETHERNET_DEVICE(0x103D, 4),
195 INTEL_8255X_ETHERNET_DEVICE(0x103E, 4),
196 INTEL_8255X_ETHERNET_DEVICE(0x1050, 5),
197 INTEL_8255X_ETHERNET_DEVICE(0x1051, 5),
198 INTEL_8255X_ETHERNET_DEVICE(0x1052, 5),
199 INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
200 INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
201 INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
202 INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
203 INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
204 INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
205 INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
206 INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
207 INTEL_8255X_ETHERNET_DEVICE(0x1067, 6),
208 INTEL_8255X_ETHERNET_DEVICE(0x1068, 6),
209 INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
210 INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
211 INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
212 INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
213 INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
214 INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
215 INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
216 INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
217 INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
220 MODULE_DEVICE_TABLE(pci, e100_id_table);
223 mac_82557_D100_A = 0,
224 mac_82557_D100_B = 1,
225 mac_82557_D100_C = 2,
226 mac_82558_D101_A4 = 4,
227 mac_82558_D101_B0 = 5,
231 mac_82550_D102_C = 13,
239 phy_100a = 0x000003E0,
240 phy_100c = 0x035002A8,
241 phy_82555_tx = 0x015002A8,
242 phy_nsc_tx = 0x5C002000,
243 phy_82562_et = 0x033002A8,
244 phy_82562_em = 0x032002A8,
245 phy_82562_ek = 0x031002A8,
246 phy_82562_eh = 0x017002A8,
247 phy_unknown = 0xFFFFFFFF,
250 /* CSR (Control/Status Registers) */
273 stat_ack_not_ours = 0x00,
274 stat_ack_sw_gen = 0x04,
276 stat_ack_cu_idle = 0x20,
277 stat_ack_frame_rx = 0x40,
278 stat_ack_cu_cmd_done = 0x80,
279 stat_ack_not_present = 0xFF,
280 stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx),
281 stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done),
285 irq_mask_none = 0x00,
293 ruc_load_base = 0x06,
296 cuc_dump_addr = 0x40,
297 cuc_dump_stats = 0x50,
298 cuc_load_base = 0x60,
299 cuc_dump_reset = 0x70,
303 cuc_dump_complete = 0x0000A005,
304 cuc_dump_reset_complete = 0x0000A007,
308 software_reset = 0x0000,
310 selective_reset = 0x0002,
313 enum eeprom_ctrl_lo {
321 mdi_write = 0x04000000,
322 mdi_read = 0x08000000,
323 mdi_ready = 0x10000000,
333 enum eeprom_offsets {
334 eeprom_cnfg_mdix = 0x03,
336 eeprom_config_asf = 0x0D,
337 eeprom_smbus_addr = 0x90,
340 enum eeprom_cnfg_mdix {
341 eeprom_mdix_enabled = 0x0080,
345 eeprom_id_wol = 0x0020,
348 enum eeprom_config_asf {
354 cb_complete = 0x8000,
383 struct rx *next, *prev;
388 #if defined(__BIG_ENDIAN_BITFIELD)
394 /*0*/ u8 X(byte_count:6, pad0:2);
395 /*1*/ u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1);
396 /*2*/ u8 adaptive_ifs;
397 /*3*/ u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1),
398 term_write_cache_line:1), pad3:4);
399 /*4*/ u8 X(rx_dma_max_count:7, pad4:1);
400 /*5*/ u8 X(tx_dma_max_count:7, dma_max_count_enable:1);
401 /*6*/ u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1),
402 tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1),
403 rx_discard_overruns:1), rx_save_bad_frames:1);
404 /*7*/ u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2),
405 pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1),
407 /*8*/ u8 X(X(mii_mode:1, pad8:6), csma_disabled:1);
408 /*9*/ u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1),
409 link_status_wake:1), arp_wake:1), mcmatch_wake:1);
410 /*10*/ u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2),
412 /*11*/ u8 X(linear_priority:3, pad11:5);
413 /*12*/ u8 X(X(linear_priority_mode:1, pad12:3), ifs:4);
414 /*13*/ u8 ip_addr_lo;
415 /*14*/ u8 ip_addr_hi;
416 /*15*/ u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1),
417 wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1),
418 pad15_2:1), crs_or_cdt:1);
419 /*16*/ u8 fc_delay_lo;
420 /*17*/ u8 fc_delay_hi;
421 /*18*/ u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1),
422 rx_long_ok:1), fc_priority_threshold:3), pad18:1);
423 /*19*/ u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1),
424 fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1),
425 full_duplex_force:1), full_duplex_pin:1);
426 /*20*/ u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1);
427 /*21*/ u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4);
428 /*22*/ u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6);
432 #define E100_MAX_MULTICAST_ADDRS 64
435 u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/];
438 /* Important: keep total struct u32-aligned */
439 #define UCODE_SIZE 134
446 u32 ucode[UCODE_SIZE];
447 struct config config;
460 u32 dump_buffer_addr;
462 struct cb *next, *prev;
468 lb_none = 0, lb_mac = 1, lb_phy = 3,
472 u32 tx_good_frames, tx_max_collisions, tx_late_collisions,
473 tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions,
474 tx_multiple_collisions, tx_total_collisions;
475 u32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
476 rx_resource_errors, rx_overrun_errors, rx_cdt_errors,
477 rx_short_frame_errors;
478 u32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
479 u16 xmt_tco_frames, rcv_tco_frames;
499 struct param_range rfds;
500 struct param_range cbs;
504 /* Begin: frequently used values: keep adjacent for cache effect */
505 u32 msg_enable ____cacheline_aligned;
506 struct net_device *netdev;
507 struct pci_dev *pdev;
509 struct rx *rxs ____cacheline_aligned;
510 struct rx *rx_to_use;
511 struct rx *rx_to_clean;
512 struct rfd blank_rfd;
515 spinlock_t cb_lock ____cacheline_aligned;
518 enum scb_cmd_lo cuc_cmd;
519 unsigned int cbs_avail;
521 struct cb *cb_to_use;
522 struct cb *cb_to_send;
523 struct cb *cb_to_clean;
525 /* End: frequently used values: keep adjacent for cache effect */
529 promiscuous = (1 << 1),
530 multicast_all = (1 << 2),
531 wol_magic = (1 << 3),
532 ich_10h_workaround = (1 << 4),
533 } flags ____cacheline_aligned;
537 struct params params;
538 struct net_device_stats net_stats;
539 struct timer_list watchdog;
540 struct timer_list blink_timer;
541 struct mii_if_info mii;
542 enum loopback loopback;
547 dma_addr_t cbs_dma_addr;
553 u32 tx_single_collisions;
554 u32 tx_multiple_collisions;
559 u32 rx_fc_unsupported;
561 u32 rx_over_length_errors;
570 static inline void e100_write_flush(struct nic *nic)
572 /* Flush previous PCI writes through intermediate bridges
573 * by doing a benign read */
574 (void)readb(&nic->csr->scb.status);
577 static inline void e100_enable_irq(struct nic *nic)
581 spin_lock_irqsave(&nic->cmd_lock, flags);
582 writeb(irq_mask_none, &nic->csr->scb.cmd_hi);
583 spin_unlock_irqrestore(&nic->cmd_lock, flags);
584 e100_write_flush(nic);
587 static inline void e100_disable_irq(struct nic *nic)
591 spin_lock_irqsave(&nic->cmd_lock, flags);
592 writeb(irq_mask_all, &nic->csr->scb.cmd_hi);
593 spin_unlock_irqrestore(&nic->cmd_lock, flags);
594 e100_write_flush(nic);
597 static void e100_hw_reset(struct nic *nic)
599 /* Put CU and RU into idle with a selective reset to get
600 * device off of PCI bus */
601 writel(selective_reset, &nic->csr->port);
602 e100_write_flush(nic); udelay(20);
604 /* Now fully reset device */
605 writel(software_reset, &nic->csr->port);
606 e100_write_flush(nic); udelay(20);
608 /* TCO workaround - 82559 and greater */
609 if(nic->mac >= mac_82559_D101M) {
610 /* Issue a redundant CU load base without setting
611 * general pointer, and without waiting for scb to
612 * clear. This gets us into post-driver. Finally,
613 * wait 20 msec for reset to take effect. */
614 writeb(cuc_load_base, &nic->csr->scb.cmd_lo);
618 /* Mask off our interrupt line - it's unmasked after reset */
619 e100_disable_irq(nic);
622 static int e100_self_test(struct nic *nic)
624 u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest);
626 /* Passing the self-test is a pretty good indication
627 * that the device can DMA to/from host memory */
629 nic->mem->selftest.signature = 0;
630 nic->mem->selftest.result = 0xFFFFFFFF;
632 writel(selftest | dma_addr, &nic->csr->port);
633 e100_write_flush(nic);
634 /* Wait 10 msec for self-test to complete */
635 set_current_state(TASK_UNINTERRUPTIBLE);
636 schedule_timeout(HZ / 100 + 1);
638 /* Interrupts are enabled after self-test */
639 e100_disable_irq(nic);
641 /* Check results of self-test */
642 if(nic->mem->selftest.result != 0) {
643 DPRINTK(HW, ERR, "Self-test failed: result=0x%08X\n",
644 nic->mem->selftest.result);
647 if(nic->mem->selftest.signature == 0) {
648 DPRINTK(HW, ERR, "Self-test failed: timed out\n");
655 static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, u16 data)
657 u32 cmd_addr_data[3];
661 /* Three cmds: write/erase enable, write data, write/erase disable */
662 cmd_addr_data[0] = op_ewen << (addr_len - 2);
663 cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) |
665 cmd_addr_data[2] = op_ewds << (addr_len - 2);
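	/* Added note: all 32 bits of each word are clocked out MSB-first
	 * below; the EEPROM ignores leading zeros until the opcode's
	 * start bit arrives.  For the write op, opcode+address sit in
	 * the upper half of the word and the 16-bit payload in the
	 * lower half. */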
667 /* Bit-bang cmds to write word to eeprom */
668 for(j = 0; j < 3; j++) {
671 writeb(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
672 e100_write_flush(nic); udelay(4);
674 for(i = 31; i >= 0; i--) {
675 ctrl = (cmd_addr_data[j] & (1 << i)) ?
677 writeb(ctrl, &nic->csr->eeprom_ctrl_lo);
678 e100_write_flush(nic); udelay(4);
680 writeb(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
681 e100_write_flush(nic); udelay(4);
683 /* Wait 10 msec for cmd to complete */
684 set_current_state(TASK_UNINTERRUPTIBLE);
685 schedule_timeout(HZ / 100 + 1);
688 writeb(0, &nic->csr->eeprom_ctrl_lo);
689 e100_write_flush(nic); udelay(4);
693 /* General technique stolen from the eepro100 driver - very clever */
694 static u16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
701 cmd_addr_data = ((op_read << *addr_len) | addr) << 16;
704 writeb(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
705 e100_write_flush(nic); udelay(4);
707 /* Bit-bang to read word from eeprom */
708 for(i = 31; i >= 0; i--) {
709 ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
710 writeb(ctrl, &nic->csr->eeprom_ctrl_lo);
711 e100_write_flush(nic); udelay(4);
713 writeb(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
714 e100_write_flush(nic); udelay(4);
716 /* Eeprom drives a dummy zero to EEDO after receiving
717 * complete address. Use this to adjust addr_len. */
718 ctrl = readb(&nic->csr->eeprom_ctrl_lo);
719 if(!(ctrl & eedo) && i > 16) {
720 *addr_len -= (i - 16);
724 data = (data << 1) | (ctrl & eedo ? 1 : 0);
728 writeb(0, &nic->csr->eeprom_ctrl_lo);
729 e100_write_flush(nic); udelay(4);
731 return le16_to_cpu(data);
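/* Worked example (added): a 64-word EEPROM uses 6 address bits.  When
 * read with the assumed 8-bit address above, the dummy zero appears on
 * EEDO two clocks early (i == 18 instead of 16), so addr_len is
 * trimmed by (i - 16) = 2 down to 6, and e100_eeprom_load() then sets
 * eeprom_wc = 1 << 6 = 64. */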
734 /* Load entire EEPROM image into driver cache and validate checksum */
735 static int e100_eeprom_load(struct nic *nic)
737 u16 addr, addr_len = 8, checksum = 0;
739 /* Try reading with an 8-bit addr len to discover actual addr len */
740 e100_eeprom_read(nic, &addr_len, 0);
741 nic->eeprom_wc = 1 << addr_len;
743 for(addr = 0; addr < nic->eeprom_wc; addr++) {
744 nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
745 if(addr < nic->eeprom_wc - 1)
746 checksum += cpu_to_le16(nic->eeprom[addr]);
749 /* The checksum, stored in the last word, is calculated such that
750 * the sum of words should be 0xBABA */
751 checksum = le16_to_cpu(0xBABA - checksum);
752 if(checksum != nic->eeprom[nic->eeprom_wc - 1]) {
753 DPRINTK(PROBE, ERR, "EEPROM corrupted\n");
760 /* Save (portion of) driver EEPROM cache to device and update checksum */
761 static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
763 u16 addr, addr_len = 8, checksum = 0;
765 /* Try reading with an 8-bit addr len to discover actual addr len */
766 e100_eeprom_read(nic, &addr_len, 0);
767 nic->eeprom_wc = 1 << addr_len;
769 if(start + count >= nic->eeprom_wc)
772 for(addr = start; addr < start + count; addr++)
773 e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]);
775 /* The checksum, stored in the last word, is calculated such that
776 * the sum of words should be 0xBABA */
777 for(addr = 0; addr < nic->eeprom_wc - 1; addr++)
778 checksum += cpu_to_le16(nic->eeprom[addr]);
779 nic->eeprom[nic->eeprom_wc - 1] = le16_to_cpu(0xBABA - checksum);
780 e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
781 nic->eeprom[nic->eeprom_wc - 1]);
786 #define E100_WAIT_SCB_TIMEOUT 40
787 static inline int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
793 spin_lock_irqsave(&nic->cmd_lock, flags);
795 /* Previous command is accepted when SCB clears */
796 for(i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
797 if(likely(!readb(&nic->csr->scb.cmd_lo)))
800 if(unlikely(i > (E100_WAIT_SCB_TIMEOUT >> 1)))
803 if(unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
808 if(unlikely(cmd != cuc_resume))
809 writel(dma_addr, &nic->csr->scb.gen_ptr);
810 writeb(cmd, &nic->csr->scb.cmd_lo);
813 spin_unlock_irqrestore(&nic->cmd_lock, flags);
818 static inline int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
819 void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
825 spin_lock_irqsave(&nic->cb_lock, flags);
827 if(unlikely(!nic->cbs_avail)) {
833 nic->cb_to_use = cb->next;
837 if(unlikely(!nic->cbs_avail))
840 cb_prepare(nic, cb, skb);
842 /* Order is important otherwise we'll be in a race with h/w:
843 * set S-bit in current first, then clear S-bit in previous. */
844 cb->command |= cpu_to_le16(cb_s);
846 cb->prev->command &= cpu_to_le16(~cb_s);
848 while(nic->cb_to_send != nic->cb_to_use) {
849 if(unlikely(e100_exec_cmd(nic, nic->cuc_cmd,
850 nic->cb_to_send->dma_addr))) {
851 /* Ok, here's where things get sticky. It's
852 * possible that we can't schedule the command
853 * because the controller is too busy, so
854 * let's just queue the command and try again
855 * when another command is scheduled. */
858 nic->cuc_cmd = cuc_resume;
859 nic->cb_to_send = nic->cb_to_send->next;
864 spin_unlock_irqrestore(&nic->cb_lock, flags);
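/* Illustrative sketch (added, not part of the original driver): any
 * non-Tx command can be queued through e100_exec_cb() with a small
 * prepare callback, exactly like e100_configure()/e100_multi() further
 * down.  The hypothetical callback below would queue a bare NOP CB. */
static void e100_noop_prepare(struct nic *nic, struct cb *cb,
	struct sk_buff *skb)
{
	/* skb is unused for non-Tx commands (callers pass NULL) */
	cb->command = cpu_to_le16(cb_nop);
}
/* usage: e100_exec_cb(nic, NULL, e100_noop_prepare); */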
869 static u16 mdio_ctrl(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
874 writel((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);
876 for(i = 0; i < 100; i++) {
878 if((data_out = readl(&nic->csr->mdi_ctrl)) & mdi_ready)
883 "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
884 dir == mdi_read ? "READ" : "WRITE", addr, reg, data, data_out);
885 return (u16)data_out;
888 static int mdio_read(struct net_device *netdev, int addr, int reg)
890 return mdio_ctrl(netdev_priv(netdev), addr, mdi_read, reg, 0);
893 static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
895 mdio_ctrl(netdev_priv(netdev), addr, mdi_write, reg, data);
898 static void e100_get_defaults(struct nic *nic)
900 struct param_range rfds = { .min = 64, .max = 256, .count = 64 };
901 struct param_range cbs = { .min = 64, .max = 256, .count = 64 };
903 pci_read_config_byte(nic->pdev, PCI_REVISION_ID, &nic->rev_id);
904 /* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
905 nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->rev_id;
906 if(nic->mac == mac_unknown)
907 nic->mac = mac_82557_D100_A;
909 nic->params.rfds = rfds;
910 nic->params.cbs = cbs;
912 /* Quadwords to DMA into FIFO before starting frame transmit */
913 nic->tx_threshold = 0xE0;
915 nic->tx_command = cpu_to_le16(cb_tx | cb_i | cb_tx_sf |
916 ((nic->mac >= mac_82558_D101_A4) ? cb_cid : 0));
918 /* Template for a freshly allocated RFD */
919 nic->blank_rfd.command = cpu_to_le16(cb_el);
920 nic->blank_rfd.rbd = 0xFFFFFFFF;
921 nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN);
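	/* Added note: sizing the blank RFD to VLAN_ETH_FRAME_LEN leaves
	 * room for the 4-byte VLAN tag mentioned in the header comments,
	 * even though VLAN offload itself is not supported. */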
924 nic->mii.phy_id_mask = 0x1F;
925 nic->mii.reg_num_mask = 0x1F;
926 nic->mii.dev = nic->netdev;
927 nic->mii.mdio_read = mdio_read;
928 nic->mii.mdio_write = mdio_write;
931 static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
933 struct config *config = &cb->u.config;
934 u8 *c = (u8 *)config;
936 cb->command = cpu_to_le16(cb_config);
938 memset(config, 0, sizeof(struct config));
940 config->byte_count = 0x16; /* bytes in this struct */
941 config->rx_fifo_limit = 0x8; /* bytes in FIFO before DMA */
942 config->direct_rx_dma = 0x1; /* reserved */
943 config->standard_tcb = 0x1; /* 1=standard, 0=extended */
944 config->standard_stat_counter = 0x1; /* 1=standard, 0=extended */
945 config->rx_discard_short_frames = 0x1; /* 1=discard, 0=pass */
946 config->tx_underrun_retry = 0x3; /* # of underrun retries */
947 config->mii_mode = 0x1; /* 1=MII mode, 0=503 mode */
949 config->no_source_addr_insertion = 0x1; /* 1=no, 0=yes */
950 config->preamble_length = 0x2; /* 0=1, 1=3, 2=7, 3=15 bytes */
951 config->ifs = 0x6; /* x16 = inter frame spacing */
952 config->ip_addr_hi = 0xF2; /* ARP IP filter - not used */
953 config->pad15_1 = 0x1;
954 config->pad15_2 = 0x1;
955 config->crs_or_cdt = 0x0; /* 0=CRS only, 1=CRS or CDT */
956 config->fc_delay_hi = 0x40; /* time delay for fc frame */
957 config->tx_padding = 0x1; /* 1=pad short frames */
958 config->fc_priority_threshold = 0x7; /* 7=priority fc disabled */
960 config->full_duplex_pin = 0x1; /* 1=examine FDX# pin */
961 config->pad20_1 = 0x1F;
962 config->fc_priority_location = 0x1; /* 1=byte#31, 0=byte#19 */
963 config->pad21_1 = 0x5;
965 config->adaptive_ifs = nic->adaptive_ifs;
966 config->loopback = nic->loopback;
968 if(nic->mii.force_media && nic->mii.full_duplex)
969 config->full_duplex_force = 0x1; /* 1=force, 0=auto */
971 if(nic->flags & promiscuous || nic->loopback) {
972 config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */
973 config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */
974 config->promiscuous_mode = 0x1; /* 1=on, 0=off */
977 if(nic->flags & multicast_all)
978 config->multicast_all = 0x1; /* 1=accept, 0=no */
980 if(!(nic->flags & wol_magic))
981 config->magic_packet_disable = 0x1; /* 1=off, 0=on */
983 if(nic->mac >= mac_82558_D101_A4) {
984 config->fc_disable = 0x1; /* 1=Tx fc off, 0=Tx fc on */
985 config->mwi_enable = 0x1; /* 1=enable, 0=disable */
986 config->standard_tcb = 0x0; /* 1=standard, 0=extended */
987 config->rx_long_ok = 0x1; /* 1=VLANs ok, 0=standard */
988 if(nic->mac >= mac_82559_D101M)
989 config->tno_intr = 0x1; /* TCO stats enable */
991 config->standard_stat_counter = 0x0;
994 DPRINTK(HW, DEBUG, "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
995 c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
996 DPRINTK(HW, DEBUG, "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
997 c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
998 DPRINTK(HW, DEBUG, "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
999 c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
1002 static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1005 static const u32 ucode[UCODE_SIZE] = {
1006 /* NFS packets are misinterpreted as TCO packets and
1007 * incorrectly routed to the BMC over SMBus. This
1008 * microcode patch checks the fragmented IP bit in the
1009 * NFS/UDP header to distinguish between NFS and TCO. */
1010 0x0EF70E36, 0x1FFF1FFF, 0x1FFF1FFF, 0x1FFF1FFF, 0x1FFF1FFF,
1011 0x1FFF1FFF, 0x00906E41, 0x00800E3C, 0x00E00E39, 0x00000000,
1012 0x00906EFD, 0x00900EFD, 0x00E00EF8,
1015 if(nic->mac == mac_82551_F || nic->mac == mac_82551_10) {
1016 for(i = 0; i < UCODE_SIZE; i++)
1017 cb->u.ucode[i] = cpu_to_le32(ucode[i]);
1018 cb->command = cpu_to_le16(cb_ucode);
1020 cb->command = cpu_to_le16(cb_nop);
1023 static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
1024 struct sk_buff *skb)
1026 cb->command = cpu_to_le16(cb_iaaddr);
1027 memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
1030 static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1032 cb->command = cpu_to_le16(cb_dump);
1033 cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
1034 offsetof(struct mem, dump_buf));
1037 #define NCONFIG_AUTO_SWITCH 0x0080
1038 #define MII_NSC_CONG MII_RESV1
1039 #define NSC_CONG_ENABLE 0x0100
1040 #define NSC_CONG_TXREADY 0x0400
1041 #define ADVERTISE_FC_SUPPORTED 0x0400
1042 static int e100_phy_init(struct nic *nic)
1044 struct net_device *netdev = nic->netdev;
1046 u16 bmcr, stat, id_lo, id_hi, cong;
1048 /* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
1049 for(addr = 0; addr < 32; addr++) {
1050 nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
1051 bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
1052 stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
1053 stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
1054 if(!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
1057 DPRINTK(HW, DEBUG, "phy_addr = %d\n", nic->mii.phy_id);
1061 /* Select the phy and isolate the rest */
1062 for(addr = 0; addr < 32; addr++) {
1063 if(addr != nic->mii.phy_id) {
1064 mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
1066 bmcr = mdio_read(netdev, addr, MII_BMCR);
1067 mdio_write(netdev, addr, MII_BMCR,
1068 bmcr & ~BMCR_ISOLATE);
1073 id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
1074 id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
1075 nic->phy = (u32)id_hi << 16 | (u32)id_lo;
1076 DPRINTK(HW, DEBUG, "phy ID = 0x%08X\n", nic->phy);
1078 /* Handle National tx phys */
1079 #define NCS_PHY_MODEL_MASK 0xFFF0FFFF
1080 if((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
1081 /* Disable congestion control */
1082 cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG);
1083 cong |= NSC_CONG_TXREADY;
1084 cong &= ~NSC_CONG_ENABLE;
1085 mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
1088 if((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
1089 (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
1090 (nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled)))
1091 /* enable/disable MDI/MDI-X auto-switching */
1092 mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
1093 nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
1098 static int e100_hw_init(struct nic *nic)
1104 DPRINTK(HW, ERR, "e100_hw_init\n");
1105 if(!in_interrupt() && (err = e100_self_test(nic)))
1108 if((err = e100_phy_init(nic)))
1110 if((err = e100_exec_cmd(nic, cuc_load_base, 0)))
1112 if((err = e100_exec_cmd(nic, ruc_load_base, 0)))
1114 if((err = e100_exec_cb(nic, NULL, e100_load_ucode)))
1116 if((err = e100_exec_cb(nic, NULL, e100_configure)))
1118 if((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
1120 if((err = e100_exec_cmd(nic, cuc_dump_addr,
1121 nic->dma_addr + offsetof(struct mem, stats))))
1123 if((err = e100_exec_cmd(nic, cuc_dump_reset, 0)))
1126 e100_disable_irq(nic);
1131 static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1133 struct net_device *netdev = nic->netdev;
1134 struct dev_mc_list *list = netdev->mc_list;
1135 u16 i, count = min(netdev->mc_count, E100_MAX_MULTICAST_ADDRS);
1137 cb->command = cpu_to_le16(cb_multi);
1138 cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
1139 for(i = 0; list && i < count; i++, list = list->next)
1140 memcpy(&cb->u.multi.addr[i*ETH_ALEN], &list->dmi_addr,
1144 static void e100_set_multicast_list(struct net_device *netdev)
1146 struct nic *nic = netdev_priv(netdev);
1148 DPRINTK(HW, DEBUG, "mc_count=%d, flags=0x%04X\n",
1149 netdev->mc_count, netdev->flags);
1151 if(netdev->flags & IFF_PROMISC)
1152 nic->flags |= promiscuous;
1154 nic->flags &= ~promiscuous;
1156 if(netdev->flags & IFF_ALLMULTI ||
1157 netdev->mc_count > E100_MAX_MULTICAST_ADDRS)
1158 nic->flags |= multicast_all;
1160 nic->flags &= ~multicast_all;
1162 e100_exec_cb(nic, NULL, e100_configure);
1163 e100_exec_cb(nic, NULL, e100_multi);
1166 static void e100_update_stats(struct nic *nic)
1168 struct net_device_stats *ns = &nic->net_stats;
1169 struct stats *s = &nic->mem->stats;
1170 u32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
1171 (nic->mac < mac_82559_D101M) ? (u32 *)&s->xmt_tco_frames :
1174 /* Device's stats reporting may take several microseconds to
1175 * complete, so we're always waiting for results of the
1176 * previous command. */
1178 if(*complete == le32_to_cpu(cuc_dump_reset_complete)) {
1180 nic->tx_frames = le32_to_cpu(s->tx_good_frames);
1181 nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
1182 ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
1183 ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions);
1184 ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
1185 ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
1186 ns->collisions += nic->tx_collisions;
1187 ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
1188 le32_to_cpu(s->tx_lost_crs);
1189 ns->rx_dropped += le32_to_cpu(s->rx_resource_errors);
1190 ns->rx_length_errors += le32_to_cpu(s->rx_short_frame_errors) +
1191 nic->rx_over_length_errors;
1192 ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
1193 ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
1194 ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
1195 ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
1196 ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
1197 le32_to_cpu(s->rx_alignment_errors) +
1198 le32_to_cpu(s->rx_short_frame_errors) +
1199 le32_to_cpu(s->rx_cdt_errors);
1200 nic->tx_deferred += le32_to_cpu(s->tx_deferred);
1201 nic->tx_single_collisions +=
1202 le32_to_cpu(s->tx_single_collisions);
1203 nic->tx_multiple_collisions +=
1204 le32_to_cpu(s->tx_multiple_collisions);
1205 if(nic->mac >= mac_82558_D101_A4) {
1206 nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
1207 nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
1208 nic->rx_fc_unsupported +=
1209 le32_to_cpu(s->fc_rcv_unsupported);
1210 if(nic->mac >= mac_82559_D101M) {
1211 nic->tx_tco_frames +=
1212 le16_to_cpu(s->xmt_tco_frames);
1213 nic->rx_tco_frames +=
1214 le16_to_cpu(s->rcv_tco_frames);
1219 e100_exec_cmd(nic, cuc_dump_reset, 0);
1222 static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
1224 /* Adjust inter-frame-spacing (IFS) between two transmits if
1225 * we're getting collisions on a half-duplex connection. */
1227 if(duplex == DUPLEX_HALF) {
1228 u32 prev = nic->adaptive_ifs;
1229 u32 min_frames = (speed == SPEED_100) ? 1000 : 100;
1231 if((nic->tx_frames / 32 < nic->tx_collisions) &&
1232 (nic->tx_frames > min_frames)) {
1233 if(nic->adaptive_ifs < 60)
1234 nic->adaptive_ifs += 5;
1235 } else if (nic->tx_frames < min_frames) {
1236 if(nic->adaptive_ifs >= 5)
1237 nic->adaptive_ifs -= 5;
1239 if(nic->adaptive_ifs != prev)
1240 e100_exec_cb(nic, NULL, e100_configure);
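/* Worked example (added): at 100 Mbps half-duplex, min_frames is 1000.
 * If a stats interval shows, say, 4000 tx_frames and more than
 * 4000/32 = 125 collisions, adaptive_ifs grows by 5 (up to 60); once
 * traffic falls below 1000 frames per interval it shrinks by 5 back
 * toward 0, re-running e100_configure on every change. */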
1244 static void e100_watchdog(unsigned long data)
1246 struct nic *nic = (struct nic *)data;
1247 struct ethtool_cmd cmd;
1249 DPRINTK(TIMER, DEBUG, "right now = %ld\n", jiffies);
1251 /* mii library handles link maintenance tasks */
1253 mii_ethtool_gset(&nic->mii, &cmd);
1255 if(mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
1256 DPRINTK(LINK, INFO, "link up, %sMbps, %s-duplex\n",
1257 cmd.speed == SPEED_100 ? "100" : "10",
1258 cmd.duplex == DUPLEX_FULL ? "full" : "half");
1259 } else if(!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
1260 DPRINTK(LINK, INFO, "link down\n");
1263 mii_check_link(&nic->mii);
1265 /* Software generated interrupt to recover from (rare) Rx
1266 * allocation failure.
1267 * Unfortunately we have to use a spinlock to not re-enable interrupts
1268 * accidentally, due to hardware that shares a register between the
1269 * interrupt mask bit and the SW Interrupt generation bit */
1270 spin_lock_irq(&nic->cmd_lock);
1271 writeb(readb(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi);
1272 spin_unlock_irq(&nic->cmd_lock);
1273 e100_write_flush(nic);
1275 e100_update_stats(nic);
1276 e100_adjust_adaptive_ifs(nic, cmd.speed, cmd.duplex);
1278 if(nic->mac <= mac_82557_D100_C)
1279 /* Issue a multicast command to workaround a 557 lock up */
1280 e100_set_multicast_list(nic->netdev);
1282 if(nic->flags & ich && cmd.speed==SPEED_10 && cmd.duplex==DUPLEX_HALF)
1283 /* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */
1284 nic->flags |= ich_10h_workaround;
1286 nic->flags &= ~ich_10h_workaround;
1288 mod_timer(&nic->watchdog, jiffies + E100_WATCHDOG_PERIOD);
1291 static inline void e100_xmit_prepare(struct nic *nic, struct cb *cb,
1292 struct sk_buff *skb)
1294 cb->command = nic->tx_command;
1295 cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
1296 cb->u.tcb.tcb_byte_count = 0;
1297 cb->u.tcb.threshold = nic->tx_threshold;
1298 cb->u.tcb.tbd_count = 1;
1299 cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
1300 skb->data, skb->len, PCI_DMA_TODEVICE));
1301 cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
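	/* Added note: a single TBD describes the whole frame; Tx
	 * scatter/gather is not supported by this driver (see the
	 * feature notes in the header comment). */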
1304 static int e100_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1306 struct nic *nic = netdev_priv(netdev);
1309 if(nic->flags & ich_10h_workaround) {
1310 /* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
1311 Issue a NOP command followed by a 1us delay before
1312 issuing the Tx command. */
1313 e100_exec_cmd(nic, cuc_nop, 0);
1317 err = e100_exec_cb(nic, skb, e100_xmit_prepare);
1321 /* We queued the skb, but now we're out of space. */
1322 netif_stop_queue(netdev);
1325 /* This is a hard error - log it. */
1326 DPRINTK(TX_ERR, DEBUG, "Out of Tx resources, returning skb\n");
1327 netif_stop_queue(netdev);
1331 netdev->trans_start = jiffies;
1335 static inline int e100_tx_clean(struct nic *nic)
1340 spin_lock(&nic->cb_lock);
1342 DPRINTK(TX_DONE, DEBUG, "cb->status = 0x%04X\n",
1343 nic->cb_to_clean->status);
1345 /* Clean CBs marked complete */
1346 for(cb = nic->cb_to_clean;
1347 cb->status & cpu_to_le16(cb_complete);
1348 cb = nic->cb_to_clean = cb->next) {
1349 if(likely(cb->skb != NULL)) {
1350 nic->net_stats.tx_packets++;
1351 nic->net_stats.tx_bytes += cb->skb->len;
1353 pci_unmap_single(nic->pdev,
1354 le32_to_cpu(cb->u.tcb.tbd.buf_addr),
1355 le16_to_cpu(cb->u.tcb.tbd.size),
1357 dev_kfree_skb_any(cb->skb);
1365 spin_unlock(&nic->cb_lock);
1367 /* Recover from running out of Tx resources in xmit_frame */
1368 if(unlikely(tx_cleaned && netif_queue_stopped(nic->netdev)))
1369 netif_wake_queue(nic->netdev);
1374 static void e100_clean_cbs(struct nic *nic)
1377 while(nic->cbs_avail != nic->params.cbs.count) {
1378 struct cb *cb = nic->cb_to_clean;
1380 pci_unmap_single(nic->pdev,
1381 le32_to_cpu(cb->u.tcb.tbd.buf_addr),
1382 le16_to_cpu(cb->u.tcb.tbd.size),
1384 dev_kfree_skb(cb->skb);
1386 nic->cb_to_clean = nic->cb_to_clean->next;
1389 pci_free_consistent(nic->pdev,
1390 sizeof(struct cb) * nic->params.cbs.count,
1391 nic->cbs, nic->cbs_dma_addr);
1395 nic->cuc_cmd = cuc_start;
1396 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean =
1400 static int e100_alloc_cbs(struct nic *nic)
1403 unsigned int i, count = nic->params.cbs.count;
1405 nic->cuc_cmd = cuc_start;
1406 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
1409 nic->cbs = pci_alloc_consistent(nic->pdev,
1410 sizeof(struct cb) * count, &nic->cbs_dma_addr);
1414 for(cb = nic->cbs, i = 0; i < count; cb++, i++) {
1415 cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
1416 cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1;
1418 cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
1419 cb->link = cpu_to_le32(nic->cbs_dma_addr +
1420 ((i+1) % count) * sizeof(struct cb));
1424 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
1425 nic->cbs_avail = count;
1430 static inline void e100_start_receiver(struct nic *nic)
1432 /* (Re)start RU if suspended or idle and RFA is non-NULL */
1433 if(!nic->ru_running && nic->rx_to_clean->skb) {
1434 e100_exec_cmd(nic, ruc_start, nic->rx_to_clean->dma_addr);
1435 nic->ru_running = 1;
1439 #define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN)
1440 static inline int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
1442 unsigned int rx_offset = 2; /* u32 align protocol headers */
1444 if(!(rx->skb = dev_alloc_skb(RFD_BUF_LEN + rx_offset)))
1447 /* Align, init, and map the RFD. */
1448 rx->skb->dev = nic->netdev;
1449 skb_reserve(rx->skb, rx_offset);
1450 memcpy(rx->skb->data, &nic->blank_rfd, sizeof(struct rfd));
1451 rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
1452 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
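	/* Added note: the mapping is bidirectional because the device
	 * writes completion status into the RFD (read by the CPU) while
	 * the CPU writes the RFD link/EL fields (read by the device). */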
1454 /* Link the RFD to end of RFA by linking previous RFD to
1455 * this one, and clearing EL bit of previous. */
1457 struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
1458 put_unaligned(cpu_to_le32(rx->dma_addr),
1459 (u32 *)&prev_rfd->link);
1461 prev_rfd->command &= ~cpu_to_le16(cb_el);
1462 pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
1463 sizeof(struct rfd), PCI_DMA_TODEVICE);
1469 static inline int e100_rx_indicate(struct nic *nic, struct rx *rx,
1470 unsigned int *work_done, unsigned int work_to_do)
1472 struct sk_buff *skb = rx->skb;
1473 struct rfd *rfd = (struct rfd *)skb->data;
1474 u16 rfd_status, actual_size;
1476 if(unlikely(work_done && *work_done >= work_to_do))
1479 /* Need to sync before taking a peek at cb_complete bit */
1480 pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
1481 sizeof(struct rfd), PCI_DMA_FROMDEVICE);
1482 rfd_status = le16_to_cpu(rfd->status);
1484 DPRINTK(RX_STATUS, DEBUG, "status=0x%04X\n", rfd_status);
1486 /* If data isn't ready, nothing to indicate */
1487 if(unlikely(!(rfd_status & cb_complete)))
1490 /* Get actual data size */
1491 actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
1492 if(unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
1493 actual_size = RFD_BUF_LEN - sizeof(struct rfd);
1496 pci_unmap_single(nic->pdev, rx->dma_addr,
1497 RFD_BUF_LEN, PCI_DMA_FROMDEVICE);
1499 /* Pull off the RFD and put the actual data (minus eth hdr) */
1500 skb_reserve(skb, sizeof(struct rfd));
1501 skb_put(skb, actual_size);
1502 skb->protocol = eth_type_trans(skb, nic->netdev);
1504 if(unlikely(!(rfd_status & cb_ok))) {
1505 /* Don't indicate if hardware indicates errors */
1506 nic->net_stats.rx_dropped++;
1507 dev_kfree_skb_any(skb);
1508 } else if(actual_size > nic->netdev->mtu + VLAN_ETH_HLEN) {
1509 /* Don't indicate oversized frames */
1510 nic->rx_over_length_errors++;
1511 nic->net_stats.rx_dropped++;
1512 dev_kfree_skb_any(skb);
1514 nic->net_stats.rx_packets++;
1515 nic->net_stats.rx_bytes += actual_size;
1516 nic->netdev->last_rx = jiffies;
1517 netif_receive_skb(skb);
1527 static inline void e100_rx_clean(struct nic *nic, unsigned int *work_done,
1528 unsigned int work_to_do)
1532 /* Indicate newly arrived packets */
1533 for(rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
1534 if(e100_rx_indicate(nic, rx, work_done, work_to_do))
1535 break; /* No more to clean */
1538 /* Alloc new skbs to refill list */
1539 for(rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
1540 if(unlikely(e100_rx_alloc_skb(nic, rx)))
1541 break; /* Better luck next time (see watchdog) */
1544 e100_start_receiver(nic);
1547 static void e100_rx_clean_list(struct nic *nic)
1550 unsigned int i, count = nic->params.rfds.count;
1553 for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
1555 pci_unmap_single(nic->pdev, rx->dma_addr,
1556 RFD_BUF_LEN, PCI_DMA_FROMDEVICE);
1557 dev_kfree_skb(rx->skb);
1564 nic->rx_to_use = nic->rx_to_clean = NULL;
1565 nic->ru_running = 0;
1568 static int e100_rx_alloc_list(struct nic *nic)
1571 unsigned int i, count = nic->params.rfds.count;
1573 nic->rx_to_use = nic->rx_to_clean = NULL;
1575 if(!(nic->rxs = kmalloc(sizeof(struct rx) * count, GFP_ATOMIC)))
1577 memset(nic->rxs, 0, sizeof(struct rx) * count);
1579 for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
1580 rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
1581 rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1;
1582 if(e100_rx_alloc_skb(nic, rx)) {
1583 e100_rx_clean_list(nic);
1588 nic->rx_to_use = nic->rx_to_clean = nic->rxs;
1593 static irqreturn_t e100_intr(int irq, void *dev_id, struct pt_regs *regs)
1595 struct net_device *netdev = dev_id;
1596 struct nic *nic = netdev_priv(netdev);
1597 u8 stat_ack = readb(&nic->csr->scb.stat_ack);
1599 DPRINTK(INTR, DEBUG, "stat_ack = 0x%02X\n", stat_ack);
1601 if(stat_ack == stat_ack_not_ours || /* Not our interrupt */
1602 stat_ack == stat_ack_not_present) /* Hardware is ejected */
1605 /* Ack interrupt(s) */
1606 writeb(stat_ack, &nic->csr->scb.stat_ack);
1608 /* We hit Receive No Resource (RNR); restart RU after cleaning */
1609 if(stat_ack & stat_ack_rnr)
1610 nic->ru_running = 0;
1612 e100_disable_irq(nic);
1613 netif_rx_schedule(netdev);
1618 static int e100_poll(struct net_device *netdev, int *budget)
1620 struct nic *nic = netdev_priv(netdev);
1621 unsigned int work_to_do = min(netdev->quota, *budget);
1622 unsigned int work_done = 0;
1625 e100_rx_clean(nic, &work_done, work_to_do);
1626 tx_cleaned = e100_tx_clean(nic);
1628 /* If no Rx and Tx cleanup work was done, exit polling mode. */
1629 if((!tx_cleaned && (work_done == 0)) || !netif_running(netdev)) {
1630 netif_rx_complete(netdev);
1631 e100_enable_irq(nic);
1635 *budget -= work_done;
1636 netdev->quota -= work_done;
1641 #ifdef CONFIG_NET_POLL_CONTROLLER
1642 static void e100_netpoll(struct net_device *netdev)
1644 struct nic *nic = netdev_priv(netdev);
1645 e100_disable_irq(nic);
1646 e100_intr(nic->pdev->irq, netdev, NULL);
1647 e100_enable_irq(nic);
1651 static struct net_device_stats *e100_get_stats(struct net_device *netdev)
1653 struct nic *nic = netdev_priv(netdev);
1654 return &nic->net_stats;
1657 static int e100_set_mac_address(struct net_device *netdev, void *p)
1659 struct nic *nic = netdev_priv(netdev);
1660 struct sockaddr *addr = p;
1662 if (!is_valid_ether_addr(addr->sa_data))
1663 return -EADDRNOTAVAIL;
1665 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1666 e100_exec_cb(nic, NULL, e100_setup_iaaddr);
1671 static int e100_change_mtu(struct net_device *netdev, int new_mtu)
1673 if(new_mtu < ETH_ZLEN || new_mtu > ETH_DATA_LEN)
1675 netdev->mtu = new_mtu;
1679 static int e100_asf(struct nic *nic)
1681 /* ASF can be enabled from eeprom */
1682 return((nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
1683 (nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
1684 !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
1685 ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE));
1688 static int e100_up(struct nic *nic)
1692 if((err = e100_rx_alloc_list(nic)))
1694 if((err = e100_alloc_cbs(nic)))
1695 goto err_rx_clean_list;
1696 if((err = e100_hw_init(nic)))
1698 e100_set_multicast_list(nic->netdev);
1699 e100_start_receiver(nic);
1700 mod_timer(&nic->watchdog, jiffies);
1701 if((err = request_irq(nic->pdev->irq, e100_intr, SA_SHIRQ,
1702 nic->netdev->name, nic->netdev)))
1704 e100_enable_irq(nic);
1705 netif_wake_queue(nic->netdev);
1709 del_timer_sync(&nic->watchdog);
1711 e100_clean_cbs(nic);
1713 e100_rx_clean_list(nic);
1717 static void e100_down(struct nic *nic)
1720 free_irq(nic->pdev->irq, nic->netdev);
1721 del_timer_sync(&nic->watchdog);
1722 netif_carrier_off(nic->netdev);
1723 netif_stop_queue(nic->netdev);
1724 e100_clean_cbs(nic);
1725 e100_rx_clean_list(nic);
1728 static void e100_tx_timeout(struct net_device *netdev)
1730 struct nic *nic = netdev_priv(netdev);
1732 DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
1733 readb(&nic->csr->scb.status));
1734 e100_down(netdev_priv(netdev));
1735 e100_up(netdev_priv(netdev));
1738 static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
1741 struct sk_buff *skb;
1743 /* Use driver resources to perform internal MAC or PHY
1744 * loopback test. A single packet is prepared and transmitted
1745 * in loopback mode, and the test passes if the received
1746 * packet compares byte-for-byte to the transmitted packet. */
1748 if((err = e100_rx_alloc_list(nic)))
1750 if((err = e100_alloc_cbs(nic)))
1753 /* ICH PHY loopback is broken so do MAC loopback instead */
1754 if(nic->flags & ich && loopback_mode == lb_phy)
1755 loopback_mode = lb_mac;
1757 nic->loopback = loopback_mode;
1758 if((err = e100_hw_init(nic)))
1759 goto err_loopback_none;
1761 if(loopback_mode == lb_phy)
1762 mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
1765 e100_start_receiver(nic);
1767 if(!(skb = dev_alloc_skb(ETH_DATA_LEN))) {
1769 goto err_loopback_none;
1771 skb_put(skb, ETH_DATA_LEN);
1772 memset(skb->data, 0xFF, ETH_DATA_LEN);
1773 e100_xmit_frame(skb, nic->netdev);
1775 set_current_state(TASK_UNINTERRUPTIBLE);
1776 schedule_timeout(HZ / 100 + 1);
1778 if(memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
1779 skb->data, ETH_DATA_LEN))
1783 mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
1784 nic->loopback = lb_none;
1786 e100_clean_cbs(nic);
1788 e100_rx_clean_list(nic);
1792 #define MII_LED_CONTROL 0x1B
1793 static void e100_blink_led(unsigned long data)
1795 struct nic *nic = (struct nic *)data;
1803 nic->leds = (nic->leds & led_on) ? led_off :
1804 (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559;
1805 mdio_write(nic->netdev, nic->mii.phy_id, MII_LED_CONTROL, nic->leds);
1806 mod_timer(&nic->blink_timer, jiffies + HZ / 4);
1809 static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
1811 struct nic *nic = netdev_priv(netdev);
1812 return mii_ethtool_gset(&nic->mii, cmd);
1815 static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
1817 struct nic *nic = netdev_priv(netdev);
1820 mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
1821 err = mii_ethtool_sset(&nic->mii, cmd);
1822 e100_exec_cb(nic, NULL, e100_configure);
1827 static void e100_get_drvinfo(struct net_device *netdev,
1828 struct ethtool_drvinfo *info)
1830 struct nic *nic = netdev_priv(netdev);
1831 strcpy(info->driver, DRV_NAME);
1832 strcpy(info->version, DRV_VERSION);
1833 strcpy(info->fw_version, "N/A");
1834 strcpy(info->bus_info, pci_name(nic->pdev));
1837 static int e100_get_regs_len(struct net_device *netdev)
1839 struct nic *nic = netdev_priv(netdev);
1840 #define E100_PHY_REGS 0x1C
1841 #define E100_REGS_LEN (1 + E100_PHY_REGS + \
1842 sizeof(nic->mem->dump_buf) / sizeof(u32))
1843 return E100_REGS_LEN * sizeof(u32);
1846 static void e100_get_regs(struct net_device *netdev,
1847 struct ethtool_regs *regs, void *p)
1849 struct nic *nic = netdev_priv(netdev);
1853 regs->version = (1 << 24) | nic->rev_id;
1854 buff[0] = readb(&nic->csr->scb.cmd_hi) << 24 |
1855 readb(&nic->csr->scb.cmd_lo) << 16 |
1856 readw(&nic->csr->scb.status);
1857 for(i = E100_PHY_REGS; i >= 0; i--)
1858 buff[1 + E100_PHY_REGS - i] =
1859 mdio_read(netdev, nic->mii.phy_id, i);
1860 memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
1861 e100_exec_cb(nic, NULL, e100_dump);
1862 set_current_state(TASK_UNINTERRUPTIBLE);
1863 schedule_timeout(HZ / 100 + 1);
1864 memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
1865 sizeof(nic->mem->dump_buf));
1868 static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1870 struct nic *nic = netdev_priv(netdev);
1871 wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0;
1872 wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0;
1875 static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1877 struct nic *nic = netdev_priv(netdev);
1879 if(wol->wolopts != WAKE_MAGIC && wol->wolopts != 0)
1883 nic->flags |= wol_magic;
1885 nic->flags &= ~wol_magic;
1887 pci_enable_wake(nic->pdev, 0, nic->flags & (wol_magic | e100_asf(nic)));
1888 e100_exec_cb(nic, NULL, e100_configure);
1893 static u32 e100_get_msglevel(struct net_device *netdev)
1895 struct nic *nic = netdev_priv(netdev);
1896 return nic->msg_enable;
1899 static void e100_set_msglevel(struct net_device *netdev, u32 value)
1901 struct nic *nic = netdev_priv(netdev);
1902 nic->msg_enable = value;
1905 static int e100_nway_reset(struct net_device *netdev)
1907 struct nic *nic = netdev_priv(netdev);
1908 return mii_nway_restart(&nic->mii);
1911 static u32 e100_get_link(struct net_device *netdev)
1913 struct nic *nic = netdev_priv(netdev);
1914 return mii_link_ok(&nic->mii);
1917 static int e100_get_eeprom_len(struct net_device *netdev)
1919 struct nic *nic = netdev_priv(netdev);
1920 return nic->eeprom_wc << 1;
1923 #define E100_EEPROM_MAGIC 0x1234
1924 static int e100_get_eeprom(struct net_device *netdev,
1925 struct ethtool_eeprom *eeprom, u8 *bytes)
1927 struct nic *nic = netdev_priv(netdev);
1929 eeprom->magic = E100_EEPROM_MAGIC;
1930 memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len);
1935 static int e100_set_eeprom(struct net_device *netdev,
1936 struct ethtool_eeprom *eeprom, u8 *bytes)
1938 struct nic *nic = netdev_priv(netdev);
1940 if(eeprom->magic != E100_EEPROM_MAGIC)
1943 memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len);
1945 return e100_eeprom_save(nic, eeprom->offset >> 1,
1946 (eeprom->len >> 1) + 1);
1949 static void e100_get_ringparam(struct net_device *netdev,
1950 struct ethtool_ringparam *ring)
1952 struct nic *nic = netdev_priv(netdev);
1953 struct param_range *rfds = &nic->params.rfds;
1954 struct param_range *cbs = &nic->params.cbs;
1956 ring->rx_max_pending = rfds->max;
1957 ring->tx_max_pending = cbs->max;
1958 ring->rx_mini_max_pending = 0;
1959 ring->rx_jumbo_max_pending = 0;
1960 ring->rx_pending = rfds->count;
1961 ring->tx_pending = cbs->count;
1962 ring->rx_mini_pending = 0;
1963 ring->rx_jumbo_pending = 0;
1966 static int e100_set_ringparam(struct net_device *netdev,
1967 struct ethtool_ringparam *ring)
1969 struct nic *nic = netdev_priv(netdev);
1970 struct param_range *rfds = &nic->params.rfds;
1971 struct param_range *cbs = &nic->params.cbs;
1973 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
1976 if(netif_running(netdev))
1978 rfds->count = max(ring->rx_pending, rfds->min);
1979 rfds->count = min(rfds->count, rfds->max);
1980 cbs->count = max(ring->tx_pending, cbs->min);
1981 cbs->count = min(cbs->count, cbs->max);
1982 if(netif_running(netdev))
1988 static const char e100_gstrings_test[][ETH_GSTRING_LEN] = {
1989 "Link test (on/offline)",
1990 "Eeprom test (on/offline)",
1991 "Self test (offline)",
1992 "Mac loopback (offline)",
1993 "Phy loopback (offline)",
1995 #define E100_TEST_LEN sizeof(e100_gstrings_test) / ETH_GSTRING_LEN
1997 static int e100_diag_test_count(struct net_device *netdev)
1999 return E100_TEST_LEN;
2002 static void e100_diag_test(struct net_device *netdev,
2003 struct ethtool_test *test, u64 *data)
2005 struct ethtool_cmd cmd;
2006 struct nic *nic = netdev_priv(netdev);
2009 memset(data, 0, E100_TEST_LEN * sizeof(u64));
2010 data[0] = !mii_link_ok(&nic->mii);
2011 data[1] = e100_eeprom_load(nic);
2012 if(test->flags & ETH_TEST_FL_OFFLINE) {
2014 /* save speed, duplex & autoneg settings */
2015 err = mii_ethtool_gset(&nic->mii, &cmd);
2017 if(netif_running(netdev))
2019 data[2] = e100_self_test(nic);
2020 data[3] = e100_loopback_test(nic, lb_mac);
2021 data[4] = e100_loopback_test(nic, lb_phy);
2023 /* restore speed, duplex & autoneg settings */
2024 err = mii_ethtool_sset(&nic->mii, &cmd);
2026 if(netif_running(netdev))
2029 for(i = 0; i < E100_TEST_LEN; i++)
2030 test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;
2033 static int e100_phys_id(struct net_device *netdev, u32 data)
2035 struct nic *nic = netdev_priv(netdev);
2037 if(!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
2038 data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
2039 mod_timer(&nic->blink_timer, jiffies);
2040 set_current_state(TASK_INTERRUPTIBLE);
2041 schedule_timeout(data * HZ);
2042 del_timer_sync(&nic->blink_timer);
2043 mdio_write(netdev, nic->mii.phy_id, MII_LED_CONTROL, 0);
2048 static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
2049 "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
2050 "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
2051 "rx_length_errors", "rx_over_errors", "rx_crc_errors",
2052 "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
2053 "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
2054 "tx_heartbeat_errors", "tx_window_errors",
2055 /* device-specific stats */
2056 "tx_deferred", "tx_single_collisions", "tx_multi_collisions",
2057 "tx_flow_control_pause", "rx_flow_control_pause",
2058 "rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
2060 #define E100_NET_STATS_LEN 21
2061 #define E100_STATS_LEN sizeof(e100_gstrings_stats) / ETH_GSTRING_LEN
2063 static int e100_get_stats_count(struct net_device *netdev)
2065 return E100_STATS_LEN;
2068 static void e100_get_ethtool_stats(struct net_device *netdev,
2069 struct ethtool_stats *stats, u64 *data)
2071 struct nic *nic = netdev_priv(netdev);
2074 for(i = 0; i < E100_NET_STATS_LEN; i++)
2075 data[i] = ((unsigned long *)&nic->net_stats)[i];
2077 data[i++] = nic->tx_deferred;
2078 data[i++] = nic->tx_single_collisions;
2079 data[i++] = nic->tx_multiple_collisions;
2080 data[i++] = nic->tx_fc_pause;
2081 data[i++] = nic->rx_fc_pause;
2082 data[i++] = nic->rx_fc_unsupported;
2083 data[i++] = nic->tx_tco_frames;
2084 data[i++] = nic->rx_tco_frames;
2087 static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
2091 memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test));
2094 memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats));
2099 static struct ethtool_ops e100_ethtool_ops = {
2100 .get_settings = e100_get_settings,
2101 .set_settings = e100_set_settings,
2102 .get_drvinfo = e100_get_drvinfo,
2103 .get_regs_len = e100_get_regs_len,
2104 .get_regs = e100_get_regs,
2105 .get_wol = e100_get_wol,
2106 .set_wol = e100_set_wol,
2107 .get_msglevel = e100_get_msglevel,
2108 .set_msglevel = e100_set_msglevel,
2109 .nway_reset = e100_nway_reset,
2110 .get_link = e100_get_link,
2111 .get_eeprom_len = e100_get_eeprom_len,
2112 .get_eeprom = e100_get_eeprom,
2113 .set_eeprom = e100_set_eeprom,
2114 .get_ringparam = e100_get_ringparam,
2115 .set_ringparam = e100_set_ringparam,
2116 .self_test_count = e100_diag_test_count,
2117 .self_test = e100_diag_test,
2118 .get_strings = e100_get_strings,
2119 .phys_id = e100_phys_id,
2120 .get_stats_count = e100_get_stats_count,
2121 .get_ethtool_stats = e100_get_ethtool_stats,
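/* Added note: these ops back the usual ethtool commands, e.g.
 * "ethtool -i ethX" -> get_drvinfo, "ethtool -S ethX" ->
 * get_ethtool_stats, "ethtool -t ethX offline" -> self_test, and
 * "ethtool -p ethX" -> phys_id. */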
2124 static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2126 struct nic *nic = netdev_priv(netdev);
2128 return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL);
2131 static int e100_alloc(struct nic *nic)
2133 nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
2135 return nic->mem ? 0 : -ENOMEM;
2138 static void e100_free(struct nic *nic)
2141 pci_free_consistent(nic->pdev, sizeof(struct mem),
2142 nic->mem, nic->dma_addr);
2147 static int e100_open(struct net_device *netdev)
2149 struct nic *nic = netdev_priv(netdev);
2152 netif_carrier_off(netdev);
2153 if((err = e100_up(nic)))
2154 DPRINTK(IFUP, ERR, "Cannot open interface, aborting.\n");
2158 static int e100_close(struct net_device *netdev)
2160 e100_down(netdev_priv(netdev));
2164 static int __devinit e100_probe(struct pci_dev *pdev,
2165 const struct pci_device_id *ent)
2167 struct net_device *netdev;
2171 if(!(netdev = alloc_etherdev(sizeof(struct nic)))) {
2172 if(((1 << debug) - 1) & NETIF_MSG_PROBE)
2173 printk(KERN_ERR PFX "Etherdev alloc failed, abort.\n");
2177 netdev->open = e100_open;
2178 netdev->stop = e100_close;
2179 netdev->hard_start_xmit = e100_xmit_frame;
2180 netdev->get_stats = e100_get_stats;
2181 netdev->set_multicast_list = e100_set_multicast_list;
2182 netdev->set_mac_address = e100_set_mac_address;
2183 netdev->change_mtu = e100_change_mtu;
2184 netdev->do_ioctl = e100_do_ioctl;
2185 SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
2186 netdev->tx_timeout = e100_tx_timeout;
2187 netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
2188 netdev->poll = e100_poll;
2189 netdev->weight = E100_NAPI_WEIGHT;
2190 #ifdef CONFIG_NET_POLL_CONTROLLER
2191 netdev->poll_controller = e100_netpoll;
2194 nic = netdev_priv(netdev);
2195 nic->netdev = netdev;
2197 nic->msg_enable = (1 << debug) - 1;
2198 pci_set_drvdata(pdev, netdev);
2200 if((err = pci_enable_device(pdev))) {
2201 DPRINTK(PROBE, ERR, "Cannot enable PCI device, aborting.\n");
2202 goto err_out_free_dev;
2205 if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2206 DPRINTK(PROBE, ERR, "Cannot find proper PCI device "
2207 "base address, aborting.\n");
2209 goto err_out_disable_pdev;
2212 if((err = pci_request_regions(pdev, DRV_NAME))) {
2213 DPRINTK(PROBE, ERR, "Cannot obtain PCI resources, aborting.\n");
2214 goto err_out_disable_pdev;
2217 pci_set_master(pdev);
2219 if((err = pci_set_dma_mask(pdev, 0xFFFFFFFFULL))) {
2220 DPRINTK(PROBE, ERR, "No usable DMA configuration, aborting.\n");
2221 goto err_out_free_res;
2224 SET_MODULE_OWNER(netdev);
2225 SET_NETDEV_DEV(netdev, &pdev->dev);
2227 nic->csr = ioremap(pci_resource_start(pdev, 0), sizeof(struct csr));
2229 DPRINTK(PROBE, ERR, "Cannot map device registers, aborting.\n");
2231 goto err_out_free_res;
2234 if(ent->driver_data)
2239 spin_lock_init(&nic->cb_lock);
2240 spin_lock_init(&nic->cmd_lock);
2242 init_timer(&nic->watchdog);
2243 nic->watchdog.function = e100_watchdog;
2244 nic->watchdog.data = (unsigned long)nic;
2245 init_timer(&nic->blink_timer);
2246 nic->blink_timer.function = e100_blink_led;
2247 nic->blink_timer.data = (unsigned long)nic;
2249 if((err = e100_alloc(nic))) {
2250 DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n");
2251 goto err_out_iounmap;
2254 e100_get_defaults(nic);
2258 if((err = e100_eeprom_load(nic)))
2261 memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
2262 if(!is_valid_ether_addr(netdev->dev_addr)) {
2263 DPRINTK(PROBE, ERR, "Invalid MAC address from "
2264 "EEPROM, aborting.\n");
2269 /* WoL magic packet can be enabled from eeprom */
2270 if((nic->mac >= mac_82558_D101_A4) &&
2271 (nic->eeprom[eeprom_id] & eeprom_id_wol))
2272 nic->flags |= wol_magic;
2274 pci_enable_wake(pdev, 0, nic->flags & (wol_magic | e100_asf(nic)));
2276 if((err = register_netdev(netdev))) {
2277 DPRINTK(PROBE, ERR, "Cannot register net device, aborting.\n");
2281 DPRINTK(PROBE, INFO, "addr 0x%lx, irq %d, "
2282 "MAC addr %02X:%02X:%02X:%02X:%02X:%02X\n",
2283 pci_resource_start(pdev, 0), pdev->irq,
2284 netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
2285 netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);
2294 pci_release_regions(pdev);
2295 err_out_disable_pdev:
2296 pci_disable_device(pdev);
2298 pci_set_drvdata(pdev, NULL);
2299 free_netdev(netdev);
2303 static void __devexit e100_remove(struct pci_dev *pdev)
2305 struct net_device *netdev = pci_get_drvdata(pdev);
2308 struct nic *nic = netdev_priv(netdev);
2309 unregister_netdev(netdev);
2312 free_netdev(netdev);
2313 pci_release_regions(pdev);
2314 pci_disable_device(pdev);
2315 pci_set_drvdata(pdev, NULL);
2320 static int e100_suspend(struct pci_dev *pdev, u32 state)
2322 struct net_device *netdev = pci_get_drvdata(pdev);
2323 struct nic *nic = netdev_priv(netdev);
2325 if(netif_running(netdev))
2328 netif_device_detach(netdev);
2330 pci_save_state(pdev, nic->pm_state);
2331 pci_enable_wake(pdev, state, nic->flags & (wol_magic | e100_asf(nic)));
2332 pci_disable_device(pdev);
2333 pci_set_power_state(pdev, state);
2338 static int e100_resume(struct pci_dev *pdev)
2340 struct net_device *netdev = pci_get_drvdata(pdev);
2341 struct nic *nic = netdev_priv(netdev);
2343 pci_set_power_state(pdev, 0);
2344 pci_restore_state(pdev, nic->pm_state);
2347 netif_device_attach(netdev);
2348 if(netif_running(netdev))
2355 static struct pci_driver e100_driver = {
2357 .id_table = e100_id_table,
2358 .probe = e100_probe,
2359 .remove = __devexit_p(e100_remove),
2361 .suspend = e100_suspend,
2362 .resume = e100_resume,
2366 static int __init e100_init_module(void)
2368 if(((1 << debug) - 1) & NETIF_MSG_DRV) {
2369 printk(KERN_INFO PFX "%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
2370 printk(KERN_INFO PFX "%s\n", DRV_COPYRIGHT);
2372 return pci_module_init(&e100_driver);
2375 static void __exit e100_cleanup_module(void)
2377 pci_unregister_driver(&e100_driver);
2380 module_init(e100_init_module);
2381 module_exit(e100_cleanup_module);