1 /*******************************************************************************
4 Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
6 This program is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 2 of the License, or (at your option)
11 This program is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc., 59
18 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 The full GNU General Public License is included in this distribution in the
24 Linux NICS <linux.nics@intel.com>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *******************************************************************************/
30 #include <linux/rtnetlink.h>
35 * o set default configuration to 'NAPI disabled'. NAPI enabled driver
36 * causes kernel panic when the interface is shutdown while data is being
39 * o fixed ethtool -t implementation
41 * o fixed ethtool -e implementation
42 * o Support for ethtool ops [Stephen Hemminger (shemminger@osdl.org)]
44 * o Added support for the DPRINTK macro for enhanced error logging. Some
45 * parts of the patch were supplied by Jon Mason.
46 * o Move the register_netdevice() down in the probe routine due to a
47 * loading/unloading test issue.
48 * o Added a long RX byte count to the extra ethtool data members for BER
/* Driver identification strings, also used in log output and module info. */
53 char e1000_driver_name[] = "e1000";
54 char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
55 char e1000_driver_version[] = "5.2.52-k4";
56 char e1000_copyright[] = "Copyright (c) 1999-2004 Intel Corporation.";
58 /* e1000_pci_tbl - PCI Device ID Table
60 * Wildcard entries (PCI_ANY_ID) should come last
61 * Last entry must be all 0s
63 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
64 * Class, Class Mask, private data (not used) }
/* All entries use Intel's PCI vendor ID (0x8086); subvendor/subdevice are
 * wildcarded so any board built around these controllers binds here. */
66 static struct pci_device_id e1000_pci_tbl[] = {
67 {0x8086, 0x1000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
68 {0x8086, 0x1001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
69 {0x8086, 0x1004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
70 {0x8086, 0x1008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
71 {0x8086, 0x1009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
72 {0x8086, 0x100C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
73 {0x8086, 0x100D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
74 {0x8086, 0x100E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
75 {0x8086, 0x100F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
76 {0x8086, 0x1010, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
77 {0x8086, 0x1011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
78 {0x8086, 0x1012, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
79 {0x8086, 0x1013, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
80 {0x8086, 0x1015, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
81 {0x8086, 0x1016, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
82 {0x8086, 0x1017, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
83 {0x8086, 0x1018, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
84 {0x8086, 0x1019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
85 {0x8086, 0x101D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
86 {0x8086, 0x101E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
87 {0x8086, 0x1026, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
88 {0x8086, 0x1027, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
89 {0x8086, 0x1028, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
90 {0x8086, 0x1075, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
91 {0x8086, 0x1076, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
92 {0x8086, 0x1077, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
93 {0x8086, 0x1078, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
94 {0x8086, 0x1079, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
95 {0x8086, 0x107A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
96 {0x8086, 0x107B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
97 /* required last entry */
/* Expose the table for hotplug/module autoloading. */
101 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
/* Non-static entry points: callable from other files of this driver
 * (ethtool/param support code is built separately — see the extern
 * declaration of e1000_check_options below). */
103 int e1000_up(struct e1000_adapter *adapter);
104 void e1000_down(struct e1000_adapter *adapter);
105 void e1000_reset(struct e1000_adapter *adapter);
106 int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx);
107 int e1000_setup_tx_resources(struct e1000_adapter *adapter);
108 int e1000_setup_rx_resources(struct e1000_adapter *adapter);
109 void e1000_free_tx_resources(struct e1000_adapter *adapter);
110 void e1000_free_rx_resources(struct e1000_adapter *adapter);
111 void e1000_update_stats(struct e1000_adapter *adapter);
113 /* Local Function Prototypes */
115 static int e1000_init_module(void);
116 static void e1000_exit_module(void);
117 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
118 static void __devexit e1000_remove(struct pci_dev *pdev);
119 static int e1000_sw_init(struct e1000_adapter *adapter);
120 static int e1000_open(struct net_device *netdev);
121 static int e1000_close(struct net_device *netdev);
122 static void e1000_configure_tx(struct e1000_adapter *adapter);
123 static void e1000_configure_rx(struct e1000_adapter *adapter);
124 static void e1000_setup_rctl(struct e1000_adapter *adapter);
125 static void e1000_clean_tx_ring(struct e1000_adapter *adapter);
126 static void e1000_clean_rx_ring(struct e1000_adapter *adapter);
127 static void e1000_set_multi(struct net_device *netdev);
128 static void e1000_update_phy_info(unsigned long data);
129 static void e1000_watchdog(unsigned long data);
130 static void e1000_82547_tx_fifo_stall(unsigned long data);
131 static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
132 static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
133 static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
134 static int e1000_set_mac(struct net_device *netdev, void *p);
135 static inline void e1000_irq_disable(struct e1000_adapter *adapter);
136 static inline void e1000_irq_enable(struct e1000_adapter *adapter);
137 static irqreturn_t e1000_intr(int irq, void *data, struct pt_regs *regs);
138 static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter);
/* Rx cleanup has two signatures: a NAPI poll-budget variant and a plain
 * interrupt-context variant (presumably selected by a matching #else). */
139 #ifdef CONFIG_E1000_NAPI
140 static int e1000_clean(struct net_device *netdev, int *budget);
141 static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter,
142 int *work_done, int work_to_do);
144 static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter);
146 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter);
147 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
148 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
150 void set_ethtool_ops(struct net_device *netdev);
151 static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
152 static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
153 static inline void e1000_rx_checksum(struct e1000_adapter *adapter,
154 struct e1000_rx_desc *rx_desc,
155 struct sk_buff *skb);
156 static void e1000_tx_timeout(struct net_device *dev);
157 static void e1000_tx_timeout_task(struct net_device *dev);
158 static void e1000_smartspeed(struct e1000_adapter *adapter);
159 static inline int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
160 struct sk_buff *skb);
162 static void e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp);
163 static void e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
164 static void e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
165 static void e1000_restore_vlan(struct e1000_adapter *adapter);
167 static int e1000_notify_reboot(struct notifier_block *, unsigned long event, void *ptr);
168 static int e1000_suspend(struct pci_dev *pdev, uint32_t state);
170 static int e1000_resume(struct pci_dev *pdev);
173 #ifdef CONFIG_NET_POLL_CONTROLLER
174 /* for netdump / net console */
175 static void e1000_netpoll (struct net_device *dev);
/* Reboot notifier: routes system reboot events to e1000_notify_reboot
 * (registered in e1000_init_module). */
178 struct notifier_block e1000_notifier_reboot = {
179 .notifier_call = e1000_notify_reboot,
184 /* Exported from other modules */
186 extern void e1000_check_options(struct e1000_adapter *adapter);
/* PCI driver glue: ties the device ID table to probe/remove and the
 * power-management entry points. */
189 static struct pci_driver e1000_driver = {
190 .name = e1000_driver_name,
191 .id_table = e1000_pci_tbl,
192 .probe = e1000_probe,
193 .remove = __devexit_p(e1000_remove),
194 /* Power Management Hooks */
196 .suspend = e1000_suspend,
197 .resume = e1000_resume
201 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
202 MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
203 MODULE_LICENSE("GPL");
/* Debug verbosity; converted to a bit mask for adapter->msg_enable
 * in e1000_probe ((1 << debug) - 1). */
205 static int debug = 3;
206 module_param(debug, int, 0);
207 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
210 * e1000_init_module - Driver Registration Routine
212 * e1000_init_module is the first routine called when the driver is
213 * loaded. All it does is register with the PCI subsystem.
217 e1000_init_module(void)
/* Announce driver version and copyright, then register with the PCI core. */
220 printk(KERN_INFO "%s - version %s\n",
221 e1000_driver_string, e1000_driver_version);
223 printk(KERN_INFO "%s\n", e1000_copyright);
225 ret = pci_module_init(&e1000_driver);
/* Hook reboot notification so e1000_notify_reboot runs at shutdown. */
227 register_reboot_notifier(&e1000_notifier_reboot);
232 module_init(e1000_init_module);
235 * e1000_exit_module - Driver Exit Cleanup Routine
237 * e1000_exit_module is called just before the driver is removed
242 e1000_exit_module(void)
/* Undo e1000_init_module: drop the reboot notifier, unregister from PCI. */
244 unregister_reboot_notifier(&e1000_notifier_reboot);
245 pci_unregister_driver(&e1000_driver);
248 module_exit(e1000_exit_module);
/* e1000_up - bring the interface up after a hardware reset: reload
 * filters/VLANs, program the Tx/Rx units, refill Rx buffers, claim the
 * IRQ, start the watchdog and unmask interrupts. */
252 e1000_up(struct e1000_adapter *adapter)
254 struct net_device *netdev = adapter->netdev;
257 /* hardware has been reset, we need to reload some things */
259 e1000_set_multi(netdev);
261 e1000_restore_vlan(adapter);
263 e1000_configure_tx(adapter);
264 e1000_setup_rctl(adapter);
265 e1000_configure_rx(adapter);
266 e1000_alloc_rx_buffers(adapter);
/* shared IRQ line; SA_SAMPLE_RANDOM also feeds the entropy pool */
268 if((err = request_irq(adapter->pdev->irq, &e1000_intr,
269 SA_SHIRQ | SA_SAMPLE_RANDOM,
270 netdev->name, netdev)))
/* fire the watchdog immediately so link state is evaluated right away */
273 mod_timer(&adapter->watchdog_timer, jiffies);
274 e1000_irq_enable(adapter);
/* e1000_down - counterpart of e1000_up: mask interrupts, release the IRQ,
 * stop all timers, mark the link down, reset the MAC and free all
 * outstanding Tx/Rx buffers. */
280 e1000_down(struct e1000_adapter *adapter)
282 struct net_device *netdev = adapter->netdev;
284 e1000_irq_disable(adapter);
285 free_irq(adapter->pdev->irq, netdev);
/* del_timer_sync waits for a running handler to finish on another CPU */
286 del_timer_sync(&adapter->tx_fifo_stall_timer);
287 del_timer_sync(&adapter->watchdog_timer);
288 del_timer_sync(&adapter->phy_info_timer);
289 adapter->link_speed = 0;
290 adapter->link_duplex = 0;
291 netif_carrier_off(netdev);
292 netif_stop_queue(netdev);
294 e1000_reset(adapter);
295 e1000_clean_tx_ring(adapter);
296 e1000_clean_rx_ring(adapter);
/* e1000_reset - repartition the on-chip packet buffer (PBA), program flow
 * control thresholds, then reset and re-initialize the MAC. */
300 e1000_reset(struct e1000_adapter *adapter)
303 /* Repartition Pba for greater than 9k mtu
304 * To take effect CTRL.RST is required.
307 if(adapter->hw.mac_type < e1000_82547) {
308 if(adapter->rx_buffer_len > E1000_RXBUFFER_8192)
313 if(adapter->rx_buffer_len > E1000_RXBUFFER_8192)
/* NOTE(review): tx_fifo_* state appears tied to the 82547 Tx FIFO stall
 * workaround (see e1000_82547_tx_fifo_stall) — confirm against full file */
317 adapter->tx_fifo_head = 0;
318 adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
319 adapter->tx_fifo_size =
320 (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
321 atomic_set(&adapter->tx_fifo_stall, 0);
323 E1000_WRITE_REG(&adapter->hw, PBA, pba);
325 /* flow control settings */
326 adapter->hw.fc_high_water =
327 (pba << E1000_PBA_BYTES_SHIFT) - E1000_FC_HIGH_DIFF;
328 adapter->hw.fc_low_water =
329 (pba << E1000_PBA_BYTES_SHIFT) - E1000_FC_LOW_DIFF;
330 adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME;
331 adapter->hw.fc_send_xon = 1;
332 adapter->hw.fc = adapter->hw.original_fc;
334 e1000_reset_hw(&adapter->hw);
/* clear wake-up control on parts that have the WUC register */
335 if(adapter->hw.mac_type >= e1000_82544)
336 E1000_WRITE_REG(&adapter->hw, WUC, 0);
337 e1000_init_hw(&adapter->hw);
339 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
340 E1000_WRITE_REG(&adapter->hw, VET, ETHERNET_IEEE_VLAN_TYPE);
342 e1000_reset_adaptive(&adapter->hw);
343 e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
347 * e1000_probe - Device Initialization Routine
348 * @pdev: PCI device information struct
349 * @ent: entry in e1000_pci_tbl
351 * Returns 0 on success, negative on failure
353 * e1000_probe initializes an adapter identified by a pci_dev structure.
354 * The OS initialization, configuring of the adapter private structure,
355 * and a hardware reset occur.
359 e1000_probe(struct pci_dev *pdev,
360 const struct pci_device_id *ent)
362 struct net_device *netdev;
363 struct e1000_adapter *adapter;
364 static int cards_found = 0;
365 unsigned long mmio_start;
370 uint16_t eeprom_data;
372 if((err = pci_enable_device(pdev)))
/* prefer 64-bit DMA addressing; fall back to 32-bit or abort */
375 if(!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
378 if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
379 E1000_ERR("No usable DMA configuration, aborting\n");
385 if((err = pci_request_regions(pdev, e1000_driver_name)))
388 pci_set_master(pdev);
390 netdev = alloc_etherdev(sizeof(struct e1000_adapter));
393 goto err_alloc_etherdev;
396 SET_MODULE_OWNER(netdev);
397 SET_NETDEV_DEV(netdev, &pdev->dev);
/* wire up the adapter private structure */
399 pci_set_drvdata(pdev, netdev);
400 adapter = netdev->priv;
401 adapter->netdev = netdev;
402 adapter->pdev = pdev;
403 adapter->hw.back = adapter;
404 adapter->msg_enable = (1 << debug) - 1;
407 /* we need to set the name early since the DPRINTK macro needs it set */
408 if (dev_alloc_name(netdev, netdev->name) < 0)
409 goto err_free_unlock;
/* map BAR 0 (register space) */
411 mmio_start = pci_resource_start(pdev, BAR_0);
412 mmio_len = pci_resource_len(pdev, BAR_0);
414 adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
415 if(!adapter->hw.hw_addr) {
/* scan the remaining BARs for an I/O port region, if any */
420 for(i = BAR_1; i <= BAR_5; i++) {
421 if(pci_resource_len(pdev, i) == 0)
423 if(pci_resource_flags(pdev, i) & IORESOURCE_IO) {
424 adapter->hw.io_base = pci_resource_start(pdev, i);
/* hook up the net_device entry points */
429 netdev->open = &e1000_open;
430 netdev->stop = &e1000_close;
431 netdev->hard_start_xmit = &e1000_xmit_frame;
432 netdev->get_stats = &e1000_get_stats;
433 netdev->set_multicast_list = &e1000_set_multi;
434 netdev->set_mac_address = &e1000_set_mac;
435 netdev->change_mtu = &e1000_change_mtu;
436 netdev->do_ioctl = &e1000_ioctl;
437 set_ethtool_ops(netdev);
438 netdev->tx_timeout = &e1000_tx_timeout;
439 netdev->watchdog_timeo = 5 * HZ;
440 #ifdef CONFIG_E1000_NAPI
441 netdev->poll = &e1000_clean;
444 netdev->vlan_rx_register = e1000_vlan_rx_register;
445 netdev->vlan_rx_add_vid = e1000_vlan_rx_add_vid;
446 netdev->vlan_rx_kill_vid = e1000_vlan_rx_kill_vid;
447 #ifdef CONFIG_NET_POLL_CONTROLLER
448 netdev->poll_controller = e1000_netpoll;
451 netdev->mem_start = mmio_start;
452 netdev->mem_end = mmio_start + mmio_len;
453 netdev->base_addr = adapter->hw.io_base;
455 adapter->bd_number = cards_found;
457 /* setup the private structure */
459 if((err = e1000_sw_init(adapter)))
/* hardware offload capability depends on MAC generation */
462 if(adapter->hw.mac_type >= e1000_82543) {
463 netdev->features = NETIF_F_SG |
467 NETIF_F_HW_VLAN_FILTER;
469 netdev->features = NETIF_F_SG;
473 #ifdef BROKEN_ON_NON_IA_ARCHS
474 /* Disabled for now until root-cause is found for
475 * hangs reported against non-IA archs. TSO can be
476 * enabled using ethtool -K eth<x> tso on */
477 if((adapter->hw.mac_type >= e1000_82544) &&
478 (adapter->hw.mac_type != e1000_82547))
479 netdev->features |= NETIF_F_TSO;
484 netdev->features |= NETIF_F_HIGHDMA;
486 /* before reading the EEPROM, reset the controller to
487 * put the device in a known good starting state */
489 e1000_reset_hw(&adapter->hw);
491 /* make sure the EEPROM is good */
493 if(e1000_validate_eeprom_checksum(&adapter->hw) < 0) {
494 DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
499 /* copy the MAC address out of the EEPROM */
501 e1000_read_mac_addr(&adapter->hw);
502 memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
504 if(!is_valid_ether_addr(netdev->dev_addr)) {
509 e1000_read_part_num(&adapter->hw, &(adapter->part_num));
511 e1000_get_bus_info(&adapter->hw);
/* timers: 82547 Tx FIFO stall check, link watchdog, PHY info refresh */
513 init_timer(&adapter->tx_fifo_stall_timer);
514 adapter->tx_fifo_stall_timer.function = &e1000_82547_tx_fifo_stall;
515 adapter->tx_fifo_stall_timer.data = (unsigned long) adapter;
517 init_timer(&adapter->watchdog_timer);
518 adapter->watchdog_timer.function = &e1000_watchdog;
519 adapter->watchdog_timer.data = (unsigned long) adapter;
521 init_timer(&adapter->phy_info_timer);
522 adapter->phy_info_timer.function = &e1000_update_phy_info;
523 adapter->phy_info_timer.data = (unsigned long) adapter;
525 INIT_WORK(&adapter->tx_timeout_task,
526 (void (*)(void *))e1000_tx_timeout_task, netdev);
528 /* we're going to reset, so assume we have no link for now */
530 netif_carrier_off(netdev);
531 netif_stop_queue(netdev);
533 DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");
534 e1000_check_options(adapter);
536 /* Initial Wake on LAN setting
537 * If APM wake is enabled in the EEPROM,
538 * enable the ACPI Magic Packet filter
541 switch(adapter->hw.mac_type) {
542 case e1000_82542_rev2_0:
543 case e1000_82542_rev2_1:
547 case e1000_82546_rev_3:
/* on dual-port copper parts, function 1 reads its APM enable bit from
 * the port-B init-control word; all other cases use port A below */
548 if((E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1)
549 && (adapter->hw.media_type == e1000_media_type_copper)) {
550 e1000_read_eeprom(&adapter->hw,
551 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
556 e1000_read_eeprom(&adapter->hw,
557 EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
560 if(eeprom_data & E1000_EEPROM_APME)
561 adapter->wol |= E1000_WUFC_MAG;
563 /* reset the hardware with the new settings */
565 e1000_reset(adapter);
567 /* since we are holding the rtnl lock already, call the no-lock version */
568 if((err = register_netdevice(netdev)))
/* error unwind: release mapped registers and PCI regions */
578 iounmap(adapter->hw.hw_addr);
584 pci_release_regions(pdev);
589 * e1000_remove - Device Removal Routine
590 * @pdev: PCI device information struct
592 * e1000_remove is called by the PCI subsystem to alert the driver
593 * that it should release a PCI device. The could be caused by a
594 * Hot-Plug event, or because the driver is going to be removed from
598 static void __devexit
599 e1000_remove(struct pci_dev *pdev)
601 struct net_device *netdev = pci_get_drvdata(pdev);
602 struct e1000_adapter *adapter = netdev->priv;
605 if(adapter->hw.mac_type >= e1000_82540 &&
606 adapter->hw.media_type == e1000_media_type_copper) {
607 manc = E1000_READ_REG(&adapter->hw, MANC);
608 if(manc & E1000_MANC_SMBUS_EN) {
609 manc |= E1000_MANC_ARP_EN;
610 E1000_WRITE_REG(&adapter->hw, MANC, manc);
614 unregister_netdev(netdev);
616 e1000_phy_hw_reset(&adapter->hw);
618 iounmap(adapter->hw.hw_addr);
619 pci_release_regions(pdev);
625 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
626 * @adapter: board private structure to initialize
628 * e1000_sw_init initializes the Adapter private data structure.
629 * Fields are initialized based on PCI device information and
630 * OS network device settings (MTU size).
634 e1000_sw_init(struct e1000_adapter *adapter)
636 struct e1000_hw *hw = &adapter->hw;
637 struct net_device *netdev = adapter->netdev;
638 struct pci_dev *pdev = adapter->pdev;
640 /* PCI config space info */
642 hw->vendor_id = pdev->vendor;
643 hw->device_id = pdev->device;
644 hw->subsystem_vendor_id = pdev->subsystem_vendor;
645 hw->subsystem_id = pdev->subsystem_device;
647 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
649 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
/* frame sizing derived from the current MTU */
651 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
652 hw->max_frame_size = netdev->mtu +
653 ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
654 hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
656 /* identify the MAC */
658 if (e1000_set_mac_type(hw)) {
659 DPRINTK(PROBE, ERR, "Unknown MAC Type\n");
663 /* initialize eeprom parameters */
665 e1000_init_eeprom_params(hw);
/* NOTE(review): these parts presumably require the extra PHY init
 * sequence in the shared hw code — confirm against e1000_hw.c */
667 if((hw->mac_type == e1000_82541) ||
668 (hw->mac_type == e1000_82547) ||
669 (hw->mac_type == e1000_82541_rev_2) ||
670 (hw->mac_type == e1000_82547_rev_2))
671 hw->phy_init_script = 1;
673 e1000_set_media_type(hw);
675 if(hw->mac_type < e1000_82543)
676 hw->report_tx_early = 0;
678 hw->report_tx_early = 1;
680 hw->wait_autoneg_complete = FALSE;
681 hw->tbi_compatibility_en = TRUE;
682 hw->adaptive_ifs = TRUE;
/* copper-media PHY defaults */
686 if(hw->media_type == e1000_media_type_copper) {
687 hw->mdix = AUTO_ALL_MODES;
688 hw->disable_polarity_correction = FALSE;
689 hw->master_slave = E1000_MASTER_SLAVE;
/* irq_sem starts at 1: interrupts are considered disabled until
 * e1000_irq_enable is called */
692 atomic_set(&adapter->irq_sem, 1);
693 spin_lock_init(&adapter->stats_lock);
694 spin_lock_init(&adapter->tx_lock);
700 * e1000_open - Called when a network interface is made active
701 * @netdev: network interface device structure
703 * Returns 0 on success, negative value on failure
705 * The open entry point is called when a network interface is made
706 * active by the system (IFF_UP). At this point all resources needed
707 * for transmit and receive operations are allocated, the interrupt
708 * handler is registered with the OS, the watchdog timer is started,
709 * and the stack is notified that the interface is ready.
713 e1000_open(struct net_device *netdev)
715 struct e1000_adapter *adapter = netdev->priv;
718 /* allocate transmit descriptors */
720 if((err = e1000_setup_tx_resources(adapter)))
723 /* allocate receive descriptors */
725 if((err = e1000_setup_rx_resources(adapter)))
728 if((err = e1000_up(adapter)))
/* error unwind: free rings in reverse order, leave HW reset */
734 e1000_free_rx_resources(adapter);
736 e1000_free_tx_resources(adapter);
738 e1000_reset(adapter);
744 * e1000_close - Disables a network interface
745 * @netdev: network interface device structure
747 * Returns 0, this is not allowed to fail
749 * The close entry point is called when an interface is de-activated
750 * by the OS. The hardware is still under the drivers control, but
751 * needs to be disabled. A global MAC reset is issued to stop the
752 * hardware, and all transmit and receive resources are freed.
756 e1000_close(struct net_device *netdev)
758 struct e1000_adapter *adapter = netdev->priv;
762 e1000_free_tx_resources(adapter);
763 e1000_free_rx_resources(adapter);
769 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
770 * @adapter: board private structure
772 * Return 0 on success, negative on failure
776 e1000_setup_tx_resources(struct e1000_adapter *adapter)
778 struct e1000_desc_ring *txdr = &adapter->tx_ring;
779 struct pci_dev *pdev = adapter->pdev;
/* per-descriptor bookkeeping array (skb, dma, length, ...) */
782 size = sizeof(struct e1000_buffer) * txdr->count;
783 txdr->buffer_info = kmalloc(size, GFP_KERNEL);
784 if(!txdr->buffer_info) {
787 memset(txdr->buffer_info, 0, size);
789 /* round up to nearest 4K */
791 txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
792 E1000_ROUNDUP(txdr->size, 4096);
/* DMA-coherent descriptor ring shared with the NIC */
794 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
796 kfree(txdr->buffer_info);
799 memset(txdr->desc, 0, txdr->size);
801 txdr->next_to_use = 0;
802 txdr->next_to_clean = 0;
808 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
809 * @adapter: board private structure
811 * Configure the Tx unit of the MAC after a reset.
815 e1000_configure_tx(struct e1000_adapter *adapter)
817 uint64_t tdba = adapter->tx_ring.dma;
818 uint32_t tdlen = adapter->tx_ring.count * sizeof(struct e1000_tx_desc);
/* program ring base address (split into low/high 32 bits) and length */
821 E1000_WRITE_REG(&adapter->hw, TDBAL, (tdba & 0x00000000ffffffffULL));
822 E1000_WRITE_REG(&adapter->hw, TDBAH, (tdba >> 32));
824 E1000_WRITE_REG(&adapter->hw, TDLEN, tdlen);
826 /* Setup the HW Tx Head and Tail descriptor pointers */
828 E1000_WRITE_REG(&adapter->hw, TDH, 0);
829 E1000_WRITE_REG(&adapter->hw, TDT, 0);
831 /* Set the default values for the Tx Inter Packet Gap timer */
833 switch (adapter->hw.mac_type) {
834 case e1000_82542_rev2_0:
835 case e1000_82542_rev2_1:
836 tipg = DEFAULT_82542_TIPG_IPGT;
837 tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
838 tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
/* fiber/serdes use a different IPGT value than copper */
841 if(adapter->hw.media_type == e1000_media_type_fiber ||
842 adapter->hw.media_type == e1000_media_type_internal_serdes)
843 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
845 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
846 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
847 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
849 E1000_WRITE_REG(&adapter->hw, TIPG, tipg);
851 /* Set the Tx Interrupt Delay register */
853 E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay);
854 if(adapter->hw.mac_type >= e1000_82540)
855 E1000_WRITE_REG(&adapter->hw, TADV, adapter->tx_abs_int_delay);
857 /* Program the Transmit Control Register */
859 tctl = E1000_READ_REG(&adapter->hw, TCTL);
861 tctl &= ~E1000_TCTL_CT;
862 tctl |= E1000_TCTL_EN | E1000_TCTL_PSP |
863 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
865 E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
867 e1000_config_collision_dist(&adapter->hw);
869 /* Setup Transmit Descriptor Settings for eop descriptor */
870 adapter->txd_cmd = E1000_TXD_CMD_IDE | E1000_TXD_CMD_EOP |
/* report status either when written back (RS) or when sent (RPS) */
873 if(adapter->hw.report_tx_early == 1)
874 adapter->txd_cmd |= E1000_TXD_CMD_RS;
876 adapter->txd_cmd |= E1000_TXD_CMD_RPS;
878 /* Cache if we're 82544 running in PCI-X because we'll
879 * need this to apply a workaround later in the send path. */
880 if(adapter->hw.mac_type == e1000_82544 &&
881 adapter->hw.bus_type == e1000_bus_type_pcix)
882 adapter->pcix_82544 = 1;
886 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
887 * @adapter: board private structure
889 * Returns 0 on success, negative on failure
893 e1000_setup_rx_resources(struct e1000_adapter *adapter)
895 struct e1000_desc_ring *rxdr = &adapter->rx_ring;
896 struct pci_dev *pdev = adapter->pdev;
/* per-descriptor bookkeeping array, mirrors the Tx side */
899 size = sizeof(struct e1000_buffer) * rxdr->count;
900 rxdr->buffer_info = kmalloc(size, GFP_KERNEL);
901 if(!rxdr->buffer_info) {
904 memset(rxdr->buffer_info, 0, size);
906 /* Round up to nearest 4K */
908 rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
909 E1000_ROUNDUP(rxdr->size, 4096);
/* DMA-coherent descriptor ring shared with the NIC */
911 rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
914 kfree(rxdr->buffer_info);
917 memset(rxdr->desc, 0, rxdr->size);
919 rxdr->next_to_clean = 0;
920 rxdr->next_to_use = 0;
926 * e1000_setup_rctl - configure the receive control register
927 * @adapter: Board private structure
931 e1000_setup_rctl(struct e1000_adapter *adapter)
935 rctl = E1000_READ_REG(&adapter->hw, RCTL)
937 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
/* enable Rx, accept broadcast, no loopback, half-full Rx threshold */
939 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
940 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
941 (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
/* store bad packets only when TBI compatibility is active */
943 if(adapter->hw.tbi_compatibility_on == 1)
944 rctl |= E1000_RCTL_SBP;
946 rctl &= ~E1000_RCTL_SBP;
/* buffer size bits: BSEX scales the size field; LPE allows long packets */
948 rctl &= ~(E1000_RCTL_SZ_4096);
949 switch (adapter->rx_buffer_len) {
950 case E1000_RXBUFFER_2048:
952 rctl |= E1000_RCTL_SZ_2048;
953 rctl &= ~(E1000_RCTL_BSEX | E1000_RCTL_LPE);
955 case E1000_RXBUFFER_4096:
956 rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
958 case E1000_RXBUFFER_8192:
959 rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
961 case E1000_RXBUFFER_16384:
962 rctl |= E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
966 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
970 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
971 * @adapter: board private structure
973 * Configure the Rx unit of the MAC after a reset.
977 e1000_configure_rx(struct e1000_adapter *adapter)
979 uint64_t rdba = adapter->rx_ring.dma;
980 uint32_t rdlen = adapter->rx_ring.count * sizeof(struct e1000_rx_desc);
984 /* make sure receives are disabled while setting up the descriptors */
986 rctl = E1000_READ_REG(&adapter->hw, RCTL);
987 E1000_WRITE_REG(&adapter->hw, RCTL, rctl & ~E1000_RCTL_EN);
989 /* set the Receive Delay Timer Register */
991 E1000_WRITE_REG(&adapter->hw, RDTR, adapter->rx_int_delay);
993 if(adapter->hw.mac_type >= e1000_82540) {
994 E1000_WRITE_REG(&adapter->hw, RADV, adapter->rx_abs_int_delay);
/* interrupt throttling: itr is in interrupts/sec; NOTE(review) the
 * divisor suggests ITR register units of 256 ns — confirm vs datasheet */
996 E1000_WRITE_REG(&adapter->hw, ITR,
997 1000000000 / (adapter->itr * 256));
1000 /* Setup the Base and Length of the Rx Descriptor Ring */
1002 E1000_WRITE_REG(&adapter->hw, RDBAL, (rdba & 0x00000000ffffffffULL));
1003 E1000_WRITE_REG(&adapter->hw, RDBAH, (rdba >> 32));
1005 E1000_WRITE_REG(&adapter->hw, RDLEN, rdlen);
1007 /* Setup the HW Rx Head and Tail Descriptor Pointers */
1008 E1000_WRITE_REG(&adapter->hw, RDH, 0);
1009 E1000_WRITE_REG(&adapter->hw, RDT, 0);
1011 /* Enable 82543 Receive Checksum Offload for TCP and UDP */
1012 if((adapter->hw.mac_type >= e1000_82543) &&
1013 (adapter->rx_csum == TRUE)) {
1014 rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM);
1015 rxcsum |= E1000_RXCSUM_TUOFL;
1016 E1000_WRITE_REG(&adapter->hw, RXCSUM, rxcsum);
1019 /* Enable Receives */
1021 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
1025 * e1000_free_tx_resources - Free Tx Resources
1026 * @adapter: board private structure
1028 * Free all transmit software resources
1032 e1000_free_tx_resources(struct e1000_adapter *adapter)
1034 struct pci_dev *pdev = adapter->pdev;
/* unmap and free any in-flight skbs first, then the bookkeeping array
 * and the DMA-coherent descriptor ring */
1036 e1000_clean_tx_ring(adapter);
1038 kfree(adapter->tx_ring.buffer_info);
1039 adapter->tx_ring.buffer_info = NULL;
1041 pci_free_consistent(pdev, adapter->tx_ring.size,
1042 adapter->tx_ring.desc, adapter->tx_ring.dma);
1044 adapter->tx_ring.desc = NULL;
1048 * e1000_clean_tx_ring - Free Tx Buffers
1049 * @adapter: board private structure
1053 e1000_clean_tx_ring(struct e1000_adapter *adapter)
1055 struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
1056 struct e1000_buffer *buffer_info;
1057 struct pci_dev *pdev = adapter->pdev;
1061 /* Free all the Tx ring sk_buffs */
1063 for(i = 0; i < tx_ring->count; i++) {
1064 buffer_info = &tx_ring->buffer_info[i];
1065 if(buffer_info->skb) {
/* release the DMA mapping before freeing the skb */
1067 pci_unmap_page(pdev,
1069 buffer_info->length,
1072 dev_kfree_skb(buffer_info->skb);
1074 buffer_info->skb = NULL;
1078 size = sizeof(struct e1000_buffer) * tx_ring->count;
1079 memset(tx_ring->buffer_info, 0, size);
1081 /* Zero out the descriptor ring */
1083 memset(tx_ring->desc, 0, tx_ring->size);
1085 tx_ring->next_to_use = 0;
1086 tx_ring->next_to_clean = 0;
/* reset the hardware head/tail pointers to match the empty ring */
1088 E1000_WRITE_REG(&adapter->hw, TDH, 0);
1089 E1000_WRITE_REG(&adapter->hw, TDT, 0);
1093 * e1000_free_rx_resources - Free Rx Resources
1094 * @adapter: board private structure
1096 * Free all receive software resources
1100 e1000_free_rx_resources(struct e1000_adapter *adapter)
1102 struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
1103 struct pci_dev *pdev = adapter->pdev;
/* mirror of e1000_free_tx_resources for the receive side */
1105 e1000_clean_rx_ring(adapter);
1107 kfree(rx_ring->buffer_info);
1108 rx_ring->buffer_info = NULL;
1110 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
1112 rx_ring->desc = NULL;
1116 * e1000_clean_rx_ring - Free Rx Buffers
1117 * @adapter: board private structure
1121 e1000_clean_rx_ring(struct e1000_adapter *adapter)
1123 struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
1124 struct e1000_buffer *buffer_info;
1125 struct pci_dev *pdev = adapter->pdev;
1129 /* Free all the Rx ring sk_buffs */
1131 for(i = 0; i < rx_ring->count; i++) {
1132 buffer_info = &rx_ring->buffer_info[i];
1133 if(buffer_info->skb) {
/* release the DMA mapping before freeing the skb */
1135 pci_unmap_single(pdev,
1137 buffer_info->length,
1138 PCI_DMA_FROMDEVICE);
1140 dev_kfree_skb(buffer_info->skb);
1142 buffer_info->skb = NULL;
1146 size = sizeof(struct e1000_buffer) * rx_ring->count;
1147 memset(rx_ring->buffer_info, 0, size);
1149 /* Zero out the descriptor ring */
1151 memset(rx_ring->desc, 0, rx_ring->size);
1153 rx_ring->next_to_clean = 0;
1154 rx_ring->next_to_use = 0;
/* reset the hardware head/tail pointers to match the empty ring */
1156 E1000_WRITE_REG(&adapter->hw, RDH, 0);
1157 E1000_WRITE_REG(&adapter->hw, RDT, 0);
1160 /* The 82542 2.0 (revision 2) needs to have the receive unit in reset
1161 * and memory write and invalidate disabled for certain operations
1164 e1000_enter_82542_rst(struct e1000_adapter *adapter)
1166 struct net_device *netdev = adapter->netdev;
/* disable MWI, then hold the receive unit in reset via RCTL.RST */
1169 e1000_pci_clear_mwi(&adapter->hw);
1171 rctl = E1000_READ_REG(&adapter->hw, RCTL);
1172 rctl |= E1000_RCTL_RST;
1173 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
1174 E1000_WRITE_FLUSH(&adapter->hw);
/* Rx buffers are invalid while the receiver is in reset; drop them */
1177 if(netif_running(netdev))
1178 e1000_clean_rx_ring(adapter);
/* Counterpart of e1000_enter_82542_rst: take the receive unit out of
 * reset, restore MWI if the PCI command word had it, and rebuild the
 * Rx state if the interface is up. */
1182 e1000_leave_82542_rst(struct e1000_adapter *adapter)
1184 struct net_device *netdev = adapter->netdev;
1187 rctl = E1000_READ_REG(&adapter->hw, RCTL);
1188 rctl &= ~E1000_RCTL_RST;
1189 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
1190 E1000_WRITE_FLUSH(&adapter->hw);
1193 if(adapter->hw.pci_cmd_word & PCI_COMMAND_INVALIDATE)
1194 e1000_pci_set_mwi(&adapter->hw);
1196 if(netif_running(netdev)) {
1197 e1000_configure_rx(adapter);
1198 e1000_alloc_rx_buffers(adapter);
1203 * e1000_set_mac - Change the Ethernet Address of the NIC
1204 * @netdev: network interface device structure
1205 * @p: pointer to an address structure
1207 * Returns 0 on success, negative on failure
1211 e1000_set_mac(struct net_device *netdev, void *p)
1213 struct e1000_adapter *adapter = netdev->priv;
1214 struct sockaddr *addr = p;
/* Reject multicast/zero addresses before touching hardware state. */
1216 if(!is_valid_ether_addr(addr->sa_data))
1217 return -EADDRNOTAVAIL;
1219 /* 82542 2.0 needs to be in reset to write receive address registers */
1221 if(adapter->hw.mac_type == e1000_82542_rev2_0)
1222 e1000_enter_82542_rst(adapter);
/* Mirror the new address into both the netdev and the hw struct, then
 * program receive address register 0 (the station address). */
1224 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1225 memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
1227 e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);
1229 if(adapter->hw.mac_type == e1000_82542_rev2_0)
1230 e1000_leave_82542_rst(adapter);
1236 * e1000_set_multi - Multicast and Promiscuous mode set
1237 * @netdev: network interface device structure
1239 * The set_multi entry point is called whenever the multicast address
1240 * list or the network interface flags are updated. This routine is
1241 * responsible for configuring the hardware for proper multicast,
1242 * promiscuous mode, and all-multi behavior.
1246 e1000_set_multi(struct net_device *netdev)
1248 struct e1000_adapter *adapter = netdev->priv;
1249 struct e1000_hw *hw = &adapter->hw;
1250 struct dev_mc_list *mc_ptr;
1252 uint32_t hash_value;
1255 /* Check for Promiscuous and All Multicast modes */
1257 rctl = E1000_READ_REG(hw, RCTL);
/* UPE = unicast promiscuous, MPE = multicast promiscuous. */
1259 if(netdev->flags & IFF_PROMISC) {
1260 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1261 } else if(netdev->flags & IFF_ALLMULTI) {
1262 rctl |= E1000_RCTL_MPE;
1263 rctl &= ~E1000_RCTL_UPE;
1265 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
1268 E1000_WRITE_REG(hw, RCTL, rctl);
1270 /* 82542 2.0 needs to be in reset to write receive address registers */
1272 if(hw->mac_type == e1000_82542_rev2_0)
1273 e1000_enter_82542_rst(adapter);
1275 /* load the first 14 multicast address into the exact filters 1-14
1276 * RAR 0 is used for the station MAC adddress
1277 * if there are not 14 addresses, go ahead and clear the filters
1279 mc_ptr = netdev->mc_list;
1281 for(i = 1; i < E1000_RAR_ENTRIES; i++) {
1283 e1000_rar_set(hw, mc_ptr->dmi_addr, i);
1284 mc_ptr = mc_ptr->next;
/* Each RAR entry is a pair of 32-bit registers, hence the i << 1. */
1286 E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
1287 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
1291 /* clear the old settings from the multicast hash table */
1293 for(i = 0; i < E1000_NUM_MTA_REGISTERS; i++)
1294 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
1296 /* load any remaining addresses into the hash table */
1298 for(; mc_ptr; mc_ptr = mc_ptr->next) {
1299 hash_value = e1000_hash_mc_addr(hw, mc_ptr->dmi_addr);
1300 e1000_mta_set(hw, hash_value);
1303 if(hw->mac_type == e1000_82542_rev2_0)
1304 e1000_leave_82542_rst(adapter);
1307 /* need to wait a few seconds after link up to get diagnostic information from the phy */
/* Timer callback (phy_info_timer): snapshots PHY diagnostics into
 * adapter->phy_info.  @data is the adapter pointer cast to unsigned long. */
1310 e1000_update_phy_info(unsigned long data)
1312 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
1313 e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
1317 * e1000_82547_tx_fifo_stall - Timer Call-back
1318 * @data: pointer to adapter cast into an unsigned long
/* 82547 workaround timer: once the Tx FIFO has fully drained (descriptor
 * ring AND internal FIFO head/tail pairs are equal), briefly disables the
 * transmitter, rewinds the FIFO pointers to tx_head_addr, clears the stall
 * flag and wakes the queue; otherwise re-arms itself one jiffy later. */
1322 e1000_82547_tx_fifo_stall(unsigned long data)
1324 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
1325 struct net_device *netdev = adapter->netdev;
1328 if(atomic_read(&adapter->tx_fifo_stall)) {
1329 if((E1000_READ_REG(&adapter->hw, TDT) ==
1330 E1000_READ_REG(&adapter->hw, TDH)) &&
1331 (E1000_READ_REG(&adapter->hw, TDFT) ==
1332 E1000_READ_REG(&adapter->hw, TDFH)) &&
1333 (E1000_READ_REG(&adapter->hw, TDFTS) ==
1334 E1000_READ_REG(&adapter->hw, TDFHS))) {
/* Transmitter must be disabled while the FIFO pointers are rewritten. */
1335 tctl = E1000_READ_REG(&adapter->hw, TCTL);
1336 E1000_WRITE_REG(&adapter->hw, TCTL,
1337 tctl & ~E1000_TCTL_EN);
1338 E1000_WRITE_REG(&adapter->hw, TDFT,
1339 adapter->tx_head_addr);
1340 E1000_WRITE_REG(&adapter->hw, TDFH,
1341 adapter->tx_head_addr);
1342 E1000_WRITE_REG(&adapter->hw, TDFTS,
1343 adapter->tx_head_addr);
1344 E1000_WRITE_REG(&adapter->hw, TDFHS,
1345 adapter->tx_head_addr);
1346 E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
1347 E1000_WRITE_FLUSH(&adapter->hw);
1349 adapter->tx_fifo_head = 0;
1350 atomic_set(&adapter->tx_fifo_stall, 0);
1351 netif_wake_queue(netdev);
/* FIFO not yet drained: poll again on the next jiffy. */
1353 mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
1359 * e1000_watchdog - Timer Call-back
1360 * @data: pointer to netdev cast into an unsigned long
/* Periodic (2 s) watchdog: tracks link state transitions, refreshes
 * statistics and per-interval deltas, programs dynamic ITR, kicks off a
 * reset when Tx work is stranded without link, and checks for a hung
 * controller before re-arming itself. */
1364 e1000_watchdog(unsigned long data)
1366 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
1367 struct net_device *netdev = adapter->netdev;
1368 struct e1000_desc_ring *txdr = &adapter->tx_ring;
1372 e1000_check_for_link(&adapter->hw);
/* Forced (non-autoneg) serdes links don't report LU in STATUS; use the
 * driver-maintained serdes_link_down flag instead. */
1374 if((adapter->hw.media_type == e1000_media_type_internal_serdes) &&
1375 !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE))
1376 link = !adapter->hw.serdes_link_down;
1378 link = E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU;
1381 if(!netif_carrier_ok(netdev)) {
1382 e1000_get_speed_and_duplex(&adapter->hw,
1383 &adapter->link_speed,
1384 &adapter->link_duplex);
1386 DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s\n",
1387 adapter->link_speed,
1388 adapter->link_duplex == FULL_DUPLEX ?
1389 "Full Duplex" : "Half Duplex");
1391 netif_carrier_on(netdev);
1392 netif_wake_queue(netdev);
/* PHY diagnostics are only valid a little while after link-up. */
1393 mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ);
1394 adapter->smartspeed = 0;
1397 if(netif_carrier_ok(netdev)) {
1398 adapter->link_speed = 0;
1399 adapter->link_duplex = 0;
1400 DPRINTK(LINK, INFO, "NIC Link is Down\n");
1401 netif_carrier_off(netdev);
1402 netif_stop_queue(netdev);
1403 mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ);
1406 e1000_smartspeed(adapter);
1409 e1000_update_stats(adapter);
/* Per-interval deltas feed the adaptive-IFS logic below. */
1411 adapter->hw.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
1412 adapter->tpt_old = adapter->stats.tpt;
1413 adapter->hw.collision_delta = adapter->stats.colc - adapter->colc_old;
1414 adapter->colc_old = adapter->stats.colc;
1416 adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
1417 adapter->gorcl_old = adapter->stats.gorcl;
1418 adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
1419 adapter->gotcl_old = adapter->stats.gotcl;
1421 e1000_update_adaptive(&adapter->hw);
1423 if(!netif_carrier_ok(netdev)) {
1424 if(E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
1425 /* We've lost link, so the controller stops DMA,
1426 * but we've got queued Tx work that's never going
1427 * to get done, so reset controller to flush Tx.
1428 * (Do the reset outside of interrupt context). */
1429 schedule_work(&adapter->tx_timeout_task);
1433 /* Dynamic mode for Interrupt Throttle Rate (ITR) */
1434 if(adapter->hw.mac_type >= e1000_82540 && adapter->itr == 1) {
1435 /* Symmetric Tx/Rx gets a reduced ITR=2000; Total
1436 * asymmetrical Tx or Rx gets ITR=8000; everyone
1437 * else is between 2000-8000. */
1438 uint32_t goc = (adapter->gotcl + adapter->gorcl) / 10000;
1439 uint32_t dif = (adapter->gotcl > adapter->gorcl ?
1440 adapter->gotcl - adapter->gorcl :
1441 adapter->gorcl - adapter->gotcl) / 10000;
1442 uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
/* ITR register units are 256 ns; convert interrupts/sec to that scale. */
1443 E1000_WRITE_REG(&adapter->hw, ITR, 1000000000 / (itr * 256));
1446 /* Cause software interrupt to ensure rx ring is cleaned */
1447 E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0);
1449 /* Early detection of hung controller */
1450 i = txdr->next_to_clean;
1451 if(txdr->buffer_info[i].dma &&
1452 time_after(jiffies, txdr->buffer_info[i].time_stamp + HZ) &&
1453 !(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_TXOFF))
1454 netif_stop_queue(netdev);
1456 /* Reset the timer */
1457 mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
/* tx_flags bits passed from e1000_xmit_frame() down to e1000_tx_queue();
 * the upper 16 bits carry the 802.1Q VLAN tag when VLAN is set. */
1460 #define E1000_TX_FLAGS_CSUM 0x00000001
1461 #define E1000_TX_FLAGS_VLAN 0x00000002
1462 #define E1000_TX_FLAGS_TSO 0x00000004
1463 #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
1464 #define E1000_TX_FLAGS_VLAN_SHIFT 16
/* Sets up a TCP Segmentation Offload context descriptor for @skb when the
 * stack requested TSO (tso_size != 0).  Zeroes the IP total length /
 * checksum and seeds the TCP pseudo-header checksum so the hardware can
 * fill them per segment.  Returns TRUE if a context descriptor was queued
 * (return is in elided lines — presumed from callers; confirm). */
1466 static inline boolean_t
1467 e1000_tso(struct e1000_adapter *adapter, struct sk_buff *skb)
1470 struct e1000_context_desc *context_desc;
1472 uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
1473 uint16_t ipcse, tucse, mss;
1475 if(skb_shinfo(skb)->tso_size) {
/* Header length = IP header offset + TCP header length (doff is in
 * 32-bit words, hence << 2). */
1476 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
1477 mss = skb_shinfo(skb)->tso_size;
1478 skb->nh.iph->tot_len = 0;
1479 skb->nh.iph->check = 0;
1480 skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
/* css/cso/cse = checksum start / offset / end, as byte offsets into
 * the packet, for both the IP and TCP layers. */
1485 ipcss = skb->nh.raw - skb->data;
1486 ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data;
1487 ipcse = skb->h.raw - skb->data - 1;
1488 tucss = skb->h.raw - skb->data;
1489 tucso = (void *)&(skb->h.th->check) - (void *)skb->data;
1492 i = adapter->tx_ring.next_to_use;
1493 context_desc = E1000_CONTEXT_DESC(adapter->tx_ring, i);
1495 context_desc->lower_setup.ip_fields.ipcss = ipcss;
1496 context_desc->lower_setup.ip_fields.ipcso = ipcso;
1497 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
1498 context_desc->upper_setup.tcp_fields.tucss = tucss;
1499 context_desc->upper_setup.tcp_fields.tucso = tucso;
1500 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
1501 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
1502 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
/* Payload length for TSE = total length minus the replicated header. */
1503 context_desc->cmd_and_length = cpu_to_le32(
1504 E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
1505 E1000_TXD_CMD_IP | E1000_TXD_CMD_TCP |
1506 (skb->len - (hdr_len)));
1508 if(++i == adapter->tx_ring.count) i = 0;
1509 adapter->tx_ring.next_to_use = i;
/* Sets up a checksum-offload context descriptor when the stack computed a
 * partial checksum (CHECKSUM_HW): tells the hardware where the checksum
 * region starts (tucss) and where to store the result (tucso). */
1518 static inline boolean_t
1519 e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
1521 struct e1000_context_desc *context_desc;
1525 if(skb->ip_summed == CHECKSUM_HW) {
1526 css = skb->h.raw - skb->data;
/* skb->csum is the offset (from h.raw) where the checksum goes. */
1527 cso = (skb->h.raw + skb->csum) - skb->data;
1529 i = adapter->tx_ring.next_to_use;
1530 context_desc = E1000_CONTEXT_DESC(adapter->tx_ring, i);
1532 context_desc->upper_setup.tcp_fields.tucss = css;
1533 context_desc->upper_setup.tcp_fields.tucso = cso;
/* tucse = 0 means checksum to the end of the packet. */
1534 context_desc->upper_setup.tcp_fields.tucse = 0;
1535 context_desc->tcp_seg_setup.data = 0;
1536 context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
1538 if(++i == adapter->tx_ring.count) i = 0;
1539 adapter->tx_ring.next_to_use = i;
/* Max data per Tx descriptor: 2^12 = 4096 bytes. */
1547 #define E1000_MAX_TXD_PWR 12
1548 #define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR)
/* DMA-maps the skb's linear data and each page fragment into tx_ring
 * buffer_info entries, splitting at max_per_txd and applying the TSO
 * sentinel and 82544 PCI-X alignment workarounds.  Stores the skb on the
 * last entry and records next_to_watch on the first; the descriptor
 * count produced is consumed by e1000_tx_queue(). */
1551 e1000_tx_map(struct e1000_adapter *adapter, struct sk_buff *skb,
1552 unsigned int first, unsigned int max_per_txd,
1553 unsigned int nr_frags, unsigned int mss)
1555 struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
1556 struct e1000_buffer *buffer_info;
1557 unsigned int len = skb->len;
1558 unsigned int offset = 0, size, count = 0, i;
/* len covers only the linear part; fragments are handled below. */
1560 len -= skb->data_len;
1563 i = tx_ring->next_to_use;
1566 buffer_info = &tx_ring->buffer_info[i];
1567 size = min(len, max_per_txd);
1569 /* Workaround for premature desc write-backs
1570 * in TSO mode. Append 4-byte sentinel desc */
1571 if(mss && !nr_frags && size == len && size > 8)
1574 /* Workaround for potential 82544 hang in PCI-X. Avoid
1575 * terminating buffers within evenly-aligned dwords. */
1576 if(adapter->pcix_82544 &&
1577 !((unsigned long)(skb->data + offset + size - 1) & 4) &&
1581 buffer_info->length = size;
1583 pci_map_single(adapter->pdev,
1587 buffer_info->time_stamp = jiffies;
1592 if(++i == tx_ring->count) i = 0;
/* Map each page fragment, chunked the same way as the linear data. */
1595 for(f = 0; f < nr_frags; f++) {
1596 struct skb_frag_struct *frag;
1598 frag = &skb_shinfo(skb)->frags[f];
1600 offset = frag->page_offset;
1603 buffer_info = &tx_ring->buffer_info[i];
1604 size = min(len, max_per_txd);
1606 /* Workaround for premature desc write-backs
1607 * in TSO mode. Append 4-byte sentinel desc */
1608 if(mss && f == (nr_frags-1) && size == len && size > 8)
1611 /* Workaround for potential 82544 hang in PCI-X.
1612 * Avoid terminating buffers within evenly-aligned
1614 if(adapter->pcix_82544 &&
1615 !((unsigned long)(frag->page+offset+size-1) & 4) &&
1619 buffer_info->length = size;
1621 pci_map_page(adapter->pdev,
1626 buffer_info->time_stamp = jiffies;
1631 if(++i == tx_ring->count) i = 0;
/* Back up to the last used entry: the skb is freed from there, and the
 * first entry remembers it as the end-of-packet to watch for. */
1634 i = (i == 0) ? tx_ring->count - 1 : i - 1;
1635 tx_ring->buffer_info[i].skb = skb;
1636 tx_ring->buffer_info[first].next_to_watch = i;
/* Writes @count legacy/extended Tx descriptors for the buffers mapped by
 * e1000_tx_map(), translating tx_flags into descriptor command/option
 * bits, then advances the hardware tail (TDT) to hand them to the NIC. */
1642 e1000_tx_queue(struct e1000_adapter *adapter, int count, int tx_flags)
1644 struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
1645 struct e1000_tx_desc *tx_desc = NULL;
1646 struct e1000_buffer *buffer_info;
1647 uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
1650 if(tx_flags & E1000_TX_FLAGS_TSO) {
1651 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
1653 txd_upper |= (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8;
1656 if(tx_flags & E1000_TX_FLAGS_CSUM) {
1657 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
1658 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
1661 if(tx_flags & E1000_TX_FLAGS_VLAN) {
1662 txd_lower |= E1000_TXD_CMD_VLE;
/* Upper 16 bits of tx_flags carry the VLAN tag (see flag defines). */
1663 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
1666 i = tx_ring->next_to_use;
1669 buffer_info = &tx_ring->buffer_info[i];
1670 tx_desc = E1000_TX_DESC(*tx_ring, i);
1671 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
1672 tx_desc->lower.data =
1673 cpu_to_le32(txd_lower | buffer_info->length);
1674 tx_desc->upper.data = cpu_to_le32(txd_upper);
1675 if(++i == tx_ring->count) i = 0;
/* Final descriptor of the packet gets the EOP/RS command bits. */
1678 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
1680 /* Force memory writes to complete before letting h/w
1681 * know there are new descriptors to fetch. (Only
1682 * applicable for weak-ordered memory model archs,
1683 * such as IA-64). */
1686 tx_ring->next_to_use = i;
1687 E1000_WRITE_REG(&adapter->hw, TDT, i);
1691 * 82547 workaround to avoid controller hang in half-duplex environment.
1692 * The workaround is to avoid queuing a large packet that would span
1693 * the internal Tx FIFO ring boundary by notifying the stack to resend
1694 * the packet at a later time. This gives the Tx FIFO an opportunity to
1695 * flush all packets. When that occurs, we reset the Tx FIFO pointers
1696 * to the beginning of the Tx FIFO.
/* FIFO_HDR: per-packet header overhead inside the FIFO, and the unit the
 * FIFO length is rounded up to. */
1699 #define E1000_FIFO_HDR 0x10
1700 #define E1000_82547_PAD_LEN 0x3E0
/* Returns non-zero (elided lines presumed; confirm) when the caller must
 * stall: the packet's FIFO footprint would wrap the FIFO boundary in
 * half-duplex, or a stall is already pending.  Otherwise accounts the
 * packet against tx_fifo_head and lets it through. */
1703 e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb)
1705 uint32_t fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
1706 uint32_t skb_fifo_len = skb->len + E1000_FIFO_HDR;
1708 E1000_ROUNDUP(skb_fifo_len, E1000_FIFO_HDR);
/* The hang only occurs in half-duplex; full-duplex skips the check. */
1710 if(adapter->link_duplex != HALF_DUPLEX)
1711 goto no_fifo_stall_required;
1713 if(atomic_read(&adapter->tx_fifo_stall))
1716 if(skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
1717 atomic_set(&adapter->tx_fifo_stall, 1);
1721 no_fifo_stall_required:
1722 adapter->tx_fifo_head += skb_fifo_len;
1723 if(adapter->tx_fifo_head >= adapter->tx_fifo_size)
1724 adapter->tx_fifo_head -= adapter->tx_fifo_size;
/* Descriptors needed for S bytes at 2^X bytes per descriptor (ceiling). */
1728 #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
/* hard_start_xmit entry point: estimates the descriptor count for the skb
 * (TSO sentinel, 82544 PCI-X splits, fragments), bails out early if the
 * ring lacks count+2 free slots, applies the 82547 FIFO workaround, then
 * builds context (TSO/csum) and data descriptors and kicks the hardware. */
1730 e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1732 struct e1000_adapter *adapter = netdev->priv;
1733 unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
1734 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
1735 unsigned int tx_flags = 0;
1736 unsigned long flags;
1737 unsigned int len = skb->len;
1739 unsigned int mss = 0;
1740 unsigned int nr_frags = 0;
1742 nr_frags = skb_shinfo(skb)->nr_frags;
1743 len -= skb->data_len;
/* Zero-length frame: nothing to send, drop it. */
1745 dev_kfree_skb_any(skb);
1750 mss = skb_shinfo(skb)->tso_size;
1751 /* The controller does a simple calculation to
1752 * make sure there is enough room in the FIFO before
1753 * initiating the DMA for each buffer. The calc is:
1754 * 4 = ceil(buffer len/mss). To make sure we don't
1755 * overrun the FIFO, adjust the max buffer len if mss
1758 max_per_txd = min(mss << 2, max_per_txd);
1759 max_txd_pwr = fls(max_per_txd) - 1;
1761 if((mss) || (skb->ip_summed == CHECKSUM_HW))
1763 count++; /*for sentinel desc*/
1765 if(skb->ip_summed == CHECKSUM_HW)
1769 count += TXD_USE_COUNT(len, max_txd_pwr);
1770 if(adapter->pcix_82544)
1773 nr_frags = skb_shinfo(skb)->nr_frags;
1774 for(f = 0; f < nr_frags; f++)
1775 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
1777 if(adapter->pcix_82544)
1780 spin_lock_irqsave(&adapter->tx_lock, flags);
1781 /* need: count + 2 desc gap to keep tail from touching
1782 * head, otherwise try next time */
1783 if(E1000_DESC_UNUSED(&adapter->tx_ring) < count + 2 ) {
1784 netif_stop_queue(netdev);
1785 spin_unlock_irqrestore(&adapter->tx_lock, flags);
1788 spin_unlock_irqrestore(&adapter->tx_lock, flags);
/* 82547 half-duplex FIFO hang workaround: defer the packet and let the
 * stall timer wake the queue once the FIFO drains. */
1790 if(adapter->hw.mac_type == e1000_82547) {
1791 if(e1000_82547_fifo_workaround(adapter, skb)) {
1792 netif_stop_queue(netdev);
1793 mod_timer(&adapter->tx_fifo_stall_timer, jiffies);
1798 if(adapter->vlgrp && vlan_tx_tag_present(skb)) {
1799 tx_flags |= E1000_TX_FLAGS_VLAN;
1800 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
1803 first = adapter->tx_ring.next_to_use;
/* TSO and checksum offload are mutually exclusive context setups. */
1805 if(e1000_tso(adapter, skb))
1806 tx_flags |= E1000_TX_FLAGS_TSO;
1807 else if(e1000_tx_csum(adapter, skb))
1808 tx_flags |= E1000_TX_FLAGS_CSUM;
1810 e1000_tx_queue(adapter,
1811 e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss),
1814 netdev->trans_start = jiffies;
1820 * e1000_tx_timeout - Respond to a Tx Hang
1821 * @netdev: network interface device structure
/* Called from the stack's Tx-watchdog context; the actual reset runs in
 * process context via tx_timeout_task (e1000_tx_timeout_task()). */
1825 e1000_tx_timeout(struct net_device *netdev)
1827 struct e1000_adapter *adapter = netdev->priv;
1829 /* Do the reset outside of interrupt context */
1830 schedule_work(&adapter->tx_timeout_task);
/* Work-queue handler for Tx hangs: detaches the device so no new traffic
 * arrives, bounces the adapter down (and, in elided lines, presumably
 * back up — confirm), then reattaches it. */
1834 e1000_tx_timeout_task(struct net_device *netdev)
1836 struct e1000_adapter *adapter = netdev->priv;
1838 netif_device_detach(netdev);
1839 e1000_down(adapter);
1841 netif_device_attach(netdev);
1845 * e1000_get_stats - Get System Network Statistics
1846 * @netdev: network interface device structure
1848 * Returns the address of the device statistics structure.
1849 * The statistics are actually updated from the timer callback.
1852 static struct net_device_stats *
1853 e1000_get_stats(struct net_device *netdev)
1855 struct e1000_adapter *adapter = netdev->priv;
/* Refresh hardware counters synchronously so the snapshot is current. */
1857 e1000_update_stats(adapter);
1858 return &adapter->net_stats;
1862 * e1000_change_mtu - Change the Maximum Transfer Unit
1863 * @netdev: network interface device structure
1864 * @new_mtu: new value for maximum frame size
1866 * Returns 0 on success, negative on failure
/* Validates the resulting frame size, picks the smallest Rx buffer size
 * (2K/4K/8K/16K) that fits it, and restarts the interface if the buffer
 * size actually changed while the device was running. */
1870 e1000_change_mtu(struct net_device *netdev, int new_mtu)
1872 struct e1000_adapter *adapter = netdev->priv;
1873 int old_mtu = adapter->rx_buffer_len;
/* On-wire size = MTU + Ethernet header + FCS. */
1874 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
1876 if((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
1877 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
1878 DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
1882 if(max_frame <= MAXIMUM_ETHERNET_FRAME_SIZE) {
1883 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
/* Pre-82543 silicon (82542) cannot do jumbo frames at all. */
1885 } else if(adapter->hw.mac_type < e1000_82543) {
1886 DPRINTK(PROBE, ERR, "Jumbo Frames not supported on 82542\n");
1889 } else if(max_frame <= E1000_RXBUFFER_4096) {
1890 adapter->rx_buffer_len = E1000_RXBUFFER_4096;
1892 } else if(max_frame <= E1000_RXBUFFER_8192) {
1893 adapter->rx_buffer_len = E1000_RXBUFFER_8192;
1896 adapter->rx_buffer_len = E1000_RXBUFFER_16384;
/* Buffer size changed on a live interface: bounce it to repost buffers. */
1899 if(old_mtu != adapter->rx_buffer_len && netif_running(netdev)) {
1901 e1000_down(adapter);
1905 netdev->mtu = new_mtu;
1906 adapter->hw.max_frame_size = max_frame;
1912 * e1000_update_stats - Update the board statistics counters
1913 * @adapter: board private structure
/* Reads the hardware statistics registers (read-to-clear) and accumulates
 * them into adapter->stats, then derives the OS-visible net_stats and a
 * few PHY-level counters.  Holds stats_lock because the TBI-adjust path
 * in the interrupt handler touches the same counters. */
1917 e1000_update_stats(struct e1000_adapter *adapter)
1919 struct e1000_hw *hw = &adapter->hw;
1920 unsigned long flags;
1923 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
1925 spin_lock_irqsave(&adapter->stats_lock, flags);
1927 /* these counters are modified from e1000_adjust_tbi_stats,
1928 * called from the interrupt context, so they must only
1929 * be written while holding adapter->stats_lock
1932 adapter->stats.crcerrs += E1000_READ_REG(hw, CRCERRS);
1933 adapter->stats.gprc += E1000_READ_REG(hw, GPRC);
1934 adapter->stats.gorcl += E1000_READ_REG(hw, GORCL);
1935 adapter->stats.gorch += E1000_READ_REG(hw, GORCH);
1936 adapter->stats.bprc += E1000_READ_REG(hw, BPRC);
1937 adapter->stats.mprc += E1000_READ_REG(hw, MPRC);
1938 adapter->stats.roc += E1000_READ_REG(hw, ROC);
1939 adapter->stats.prc64 += E1000_READ_REG(hw, PRC64);
1940 adapter->stats.prc127 += E1000_READ_REG(hw, PRC127);
1941 adapter->stats.prc255 += E1000_READ_REG(hw, PRC255);
1942 adapter->stats.prc511 += E1000_READ_REG(hw, PRC511);
1943 adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023);
1944 adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522);
1946 /* the rest of the counters are only modified here */
1948 adapter->stats.symerrs += E1000_READ_REG(hw, SYMERRS);
1949 adapter->stats.mpc += E1000_READ_REG(hw, MPC);
1950 adapter->stats.scc += E1000_READ_REG(hw, SCC);
1951 adapter->stats.ecol += E1000_READ_REG(hw, ECOL);
1952 adapter->stats.mcc += E1000_READ_REG(hw, MCC);
1953 adapter->stats.latecol += E1000_READ_REG(hw, LATECOL);
1954 adapter->stats.dc += E1000_READ_REG(hw, DC);
1955 adapter->stats.sec += E1000_READ_REG(hw, SEC);
1956 adapter->stats.rlec += E1000_READ_REG(hw, RLEC);
1957 adapter->stats.xonrxc += E1000_READ_REG(hw, XONRXC);
1958 adapter->stats.xontxc += E1000_READ_REG(hw, XONTXC);
1959 adapter->stats.xoffrxc += E1000_READ_REG(hw, XOFFRXC);
1960 adapter->stats.xofftxc += E1000_READ_REG(hw, XOFFTXC);
1961 adapter->stats.fcruc += E1000_READ_REG(hw, FCRUC);
1962 adapter->stats.gptc += E1000_READ_REG(hw, GPTC);
1963 adapter->stats.gotcl += E1000_READ_REG(hw, GOTCL);
1964 adapter->stats.gotch += E1000_READ_REG(hw, GOTCH);
1965 adapter->stats.rnbc += E1000_READ_REG(hw, RNBC);
1966 adapter->stats.ruc += E1000_READ_REG(hw, RUC);
1967 adapter->stats.rfc += E1000_READ_REG(hw, RFC);
1968 adapter->stats.rjc += E1000_READ_REG(hw, RJC);
1969 adapter->stats.torl += E1000_READ_REG(hw, TORL);
1970 adapter->stats.torh += E1000_READ_REG(hw, TORH);
1971 adapter->stats.totl += E1000_READ_REG(hw, TOTL);
1972 adapter->stats.toth += E1000_READ_REG(hw, TOTH);
1973 adapter->stats.tpr += E1000_READ_REG(hw, TPR);
1974 adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64);
1975 adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127);
1976 adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255);
1977 adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511);
1978 adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023);
1979 adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522);
1980 adapter->stats.mptc += E1000_READ_REG(hw, MPTC);
1981 adapter->stats.bptc += E1000_READ_REG(hw, BPTC);
1983 /* used for adaptive IFS */
1985 hw->tx_packet_delta = E1000_READ_REG(hw, TPT);
1986 adapter->stats.tpt += hw->tx_packet_delta;
1987 hw->collision_delta = E1000_READ_REG(hw, COLC);
1988 adapter->stats.colc += hw->collision_delta;
/* These registers only exist on 82543 and newer silicon. */
1990 if(hw->mac_type >= e1000_82543) {
1991 adapter->stats.algnerrc += E1000_READ_REG(hw, ALGNERRC);
1992 adapter->stats.rxerrc += E1000_READ_REG(hw, RXERRC);
1993 adapter->stats.tncrs += E1000_READ_REG(hw, TNCRS);
1994 adapter->stats.cexterr += E1000_READ_REG(hw, CEXTERR);
1995 adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC);
1996 adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC);
1999 /* Fill out the OS statistics structure */
2001 adapter->net_stats.rx_packets = adapter->stats.gprc;
2002 adapter->net_stats.tx_packets = adapter->stats.gptc;
2003 adapter->net_stats.rx_bytes = adapter->stats.gorcl;
2004 adapter->net_stats.tx_bytes = adapter->stats.gotcl;
2005 adapter->net_stats.multicast = adapter->stats.mprc;
2006 adapter->net_stats.collisions = adapter->stats.colc;
2010 adapter->net_stats.rx_errors = adapter->stats.rxerrc +
2011 adapter->stats.crcerrs + adapter->stats.algnerrc +
2012 adapter->stats.rlec + adapter->stats.rnbc +
2013 adapter->stats.mpc + adapter->stats.cexterr;
2014 adapter->net_stats.rx_dropped = adapter->stats.rnbc;
2015 adapter->net_stats.rx_length_errors = adapter->stats.rlec;
2016 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
2017 adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
2018 adapter->net_stats.rx_fifo_errors = adapter->stats.mpc;
2019 adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
2023 adapter->net_stats.tx_errors = adapter->stats.ecol +
2024 adapter->stats.latecol;
2025 adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
2026 adapter->net_stats.tx_window_errors = adapter->stats.latecol;
2027 adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;
2029 /* Tx Dropped needs to be maintained elsewhere */
/* PHY-level counters are only meaningful on copper at gigabit speed. */
2033 if(hw->media_type == e1000_media_type_copper) {
2034 if((adapter->link_speed == SPEED_1000) &&
2035 (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
2036 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
2037 adapter->phy_stats.idle_errors += phy_tmp;
2040 if((hw->mac_type <= e1000_82546) &&
2041 (hw->phy_type == e1000_phy_m88) &&
2042 !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
2043 adapter->phy_stats.receive_errors += phy_tmp;
2046 spin_unlock_irqrestore(&adapter->stats_lock, flags);
2050 * e1000_irq_disable - Mask off interrupt generation on the NIC
2051 * @adapter: board private structure
/* irq_sem counts nested disables; e1000_irq_enable() only unmasks when
 * the count drops back to zero.  synchronize_irq() waits for any handler
 * already running on another CPU to finish. */
2055 e1000_irq_disable(struct e1000_adapter *adapter)
2057 atomic_inc(&adapter->irq_sem);
2058 E1000_WRITE_REG(&adapter->hw, IMC, ~0);
2059 E1000_WRITE_FLUSH(&adapter->hw);
2060 synchronize_irq(adapter->pdev->irq);
2064 * e1000_irq_enable - Enable default interrupt generation settings
2065 * @adapter: board private structure
/* Unmasks interrupts only when the last outstanding disable is released
 * (irq_sem reaches zero), so nested disable/enable pairs balance. */
2069 e1000_irq_enable(struct e1000_adapter *adapter)
2071 if(atomic_dec_and_test(&adapter->irq_sem)) {
2072 E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK);
2073 E1000_WRITE_FLUSH(&adapter->hw);
2078 * e1000_intr - Interrupt Handler
2079 * @irq: interrupt number
2080 * @data: pointer to a network interface device structure
2081 * @pt_regs: CPU registers structure
/* Reading ICR acknowledges and clears the pending causes.  Link-related
 * causes kick the watchdog immediately.  NAPI builds mask interrupts and
 * schedule the poll routine; non-NAPI builds clean Tx/Rx inline up to
 * E1000_MAX_INTR passes. */
2085 e1000_intr(int irq, void *data, struct pt_regs *regs)
2087 struct net_device *netdev = data;
2088 struct e1000_adapter *adapter = netdev->priv;
2089 struct e1000_hw *hw = &adapter->hw;
2090 uint32_t icr = E1000_READ_REG(&adapter->hw, ICR);
2091 #ifndef CONFIG_E1000_NAPI
2096 return IRQ_NONE; /* Not our interrupt */
2098 if(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
2099 hw->get_link_status = 1;
2100 mod_timer(&adapter->watchdog_timer, jiffies);
2103 #ifdef CONFIG_E1000_NAPI
2104 if(netif_rx_schedule_prep(netdev)) {
2106 /* Disable interrupts and register for poll. The flush
2107 of the posted write is intentionally left out.
/* Raise irq_sem by hand (mirrors e1000_irq_disable without the flush);
 * e1000_clean() re-enables via e1000_irq_enable() when polling ends. */
2110 atomic_inc(&adapter->irq_sem);
2111 E1000_WRITE_REG(hw, IMC, ~0);
2112 __netif_rx_schedule(netdev);
2115 for(i = 0; i < E1000_MAX_INTR; i++)
2116 if(!e1000_clean_rx_irq(adapter) &
2117 !e1000_clean_tx_irq(adapter))
2124 #ifdef CONFIG_E1000_NAPI
2126 * e1000_clean - NAPI Rx polling callback
2127 * @adapter: board private structure
/* Reclaims completed Tx work, then receives up to the smaller of *budget
 * and the device quota.  Returns nonzero (stay on the poll list) while
 * more work may remain; otherwise completes the poll and re-enables
 * interrupts. */
2131 e1000_clean(struct net_device *netdev, int *budget)
2133 struct e1000_adapter *adapter = netdev->priv;
2134 int work_to_do = min(*budget, netdev->quota);
2137 e1000_clean_tx_irq(adapter);
2138 e1000_clean_rx_irq(adapter, &work_done, work_to_do);
2140 *budget -= work_done;
2141 netdev->quota -= work_done;
/* Under budget (or interface going down): leave polling mode. */
2143 if(work_done < work_to_do || !netif_running(netdev)) {
2144 netif_rx_complete(netdev);
2145 e1000_irq_enable(adapter);
2149 return (work_done >= work_to_do);
2154 * e1000_clean_tx_irq - Reclaim resources after transmit completes
2155 * @adapter: board private structure
/* Walks the Tx ring from next_to_clean, freeing DMA mappings and skbs for
 * every packet whose end-of-packet descriptor has the DD (descriptor
 * done) status bit set, and wakes the queue if it was flow-stopped. */
2159 e1000_clean_tx_irq(struct e1000_adapter *adapter)
2161 struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
2162 struct net_device *netdev = adapter->netdev;
2163 struct pci_dev *pdev = adapter->pdev;
2164 struct e1000_tx_desc *tx_desc, *eop_desc;
2165 struct e1000_buffer *buffer_info;
2166 unsigned int i, eop;
2167 boolean_t cleaned = FALSE;
2170 i = tx_ring->next_to_clean;
/* next_to_watch was recorded by e1000_tx_map(): the packet's last desc. */
2171 eop = tx_ring->buffer_info[i].next_to_watch;
2172 eop_desc = E1000_TX_DESC(*tx_ring, eop);
2174 while(eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
2176 for(cleaned = FALSE; !cleaned; ) {
2177 tx_desc = E1000_TX_DESC(*tx_ring, i);
2178 buffer_info = &tx_ring->buffer_info[i];
2180 if(buffer_info->dma) {
2182 pci_unmap_page(pdev,
2184 buffer_info->length,
2187 buffer_info->dma = 0;
/* Only the packet's last buffer_info holds the skb pointer. */
2190 if(buffer_info->skb) {
2192 dev_kfree_skb_any(buffer_info->skb);
2194 buffer_info->skb = NULL;
2197 tx_desc->buffer_addr = 0;
2198 tx_desc->lower.data = 0;
2199 tx_desc->upper.data = 0;
2201 cleaned = (i == eop);
2202 if(++i == tx_ring->count) i = 0;
2205 eop = tx_ring->buffer_info[i].next_to_watch;
2206 eop_desc = E1000_TX_DESC(*tx_ring, eop);
2209 tx_ring->next_to_clean = i;
2211 spin_lock(&adapter->tx_lock);
/* Wake the stack only when we actually freed descriptors and link is up. */
2213 if(cleaned && netif_queue_stopped(netdev) && netif_carrier_ok(netdev))
2214 netif_wake_queue(netdev);
2216 spin_unlock(&adapter->tx_lock);
2222 * e1000_clean_rx_irq - Send received data up the network stack,
2223 * @adapter: board private structure
/* Walks the Rx ring from next_to_clean while descriptors carry the DD
 * status bit: unmaps the buffer, drops multi-buffer or errored frames
 * (with a TBI workaround that can rescue some), strips the FCS, runs
 * checksum offload, and hands the skb up (NAPI or legacy path, with or
 * without VLAN acceleration).  Finally reposts fresh buffers.  In NAPI
 * builds it stops once *work_done reaches work_to_do. */
2227 #ifdef CONFIG_E1000_NAPI
2228 e1000_clean_rx_irq(struct e1000_adapter *adapter, int *work_done,
2231 e1000_clean_rx_irq(struct e1000_adapter *adapter)
2234 struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
2235 struct net_device *netdev = adapter->netdev;
2236 struct pci_dev *pdev = adapter->pdev;
2237 struct e1000_rx_desc *rx_desc;
2238 struct e1000_buffer *buffer_info;
2239 struct sk_buff *skb;
2240 unsigned long flags;
2244 boolean_t cleaned = FALSE;
2246 i = rx_ring->next_to_clean;
2247 rx_desc = E1000_RX_DESC(*rx_ring, i);
2249 while(rx_desc->status & E1000_RXD_STAT_DD) {
2250 buffer_info = &rx_ring->buffer_info[i];
2252 #ifdef CONFIG_E1000_NAPI
2253 if(*work_done >= work_to_do)
2261 pci_unmap_single(pdev,
2263 buffer_info->length,
2264 PCI_DMA_FROMDEVICE);
2266 skb = buffer_info->skb;
2267 length = le16_to_cpu(rx_desc->length);
/* No EOP: frame spilled into a second buffer, which this driver does
 * not support — drop it and move on. */
2269 if(!(rx_desc->status & E1000_RXD_STAT_EOP)) {
2271 /* All receives must fit into a single buffer */
2273 E1000_DBG("%s: Receive packet consumed multiple buffers\n",
2276 dev_kfree_skb_irq(skb);
2277 rx_desc->status = 0;
2278 buffer_info->skb = NULL;
2280 if(++i == rx_ring->count) i = 0;
2282 rx_desc = E1000_RX_DESC(*rx_ring, i);
/* Hardware flagged an error.  The TBI workaround may still accept the
 * frame based on its last byte; otherwise it is dropped here. */
2286 if(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
2288 last_byte = *(skb->data + length - 1);
2290 if(TBI_ACCEPT(&adapter->hw, rx_desc->status,
2291 rx_desc->errors, length, last_byte)) {
2293 spin_lock_irqsave(&adapter->stats_lock, flags);
2295 e1000_tbi_adjust_stats(&adapter->hw,
2299 spin_unlock_irqrestore(&adapter->stats_lock,
2304 dev_kfree_skb_irq(skb);
2305 rx_desc->status = 0;
2306 buffer_info->skb = NULL;
2308 if(++i == rx_ring->count) i = 0;
2310 rx_desc = E1000_RX_DESC(*rx_ring, i);
/* Good frame: trim the trailing 4-byte FCS before handing it up. */
2316 skb_put(skb, length - ETHERNET_FCS_SIZE);
2318 /* Receive Checksum Offload */
2319 e1000_rx_checksum(adapter, rx_desc, skb);
2321 skb->protocol = eth_type_trans(skb, netdev);
2322 #ifdef CONFIG_E1000_NAPI
2323 if(adapter->vlgrp && (rx_desc->status & E1000_RXD_STAT_VP)) {
2324 vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
2325 le16_to_cpu(rx_desc->special &
2326 E1000_RXD_SPC_VLAN_MASK));
2328 netif_receive_skb(skb);
2330 #else /* CONFIG_E1000_NAPI */
2331 if(adapter->vlgrp && (rx_desc->status & E1000_RXD_STAT_VP)) {
2332 vlan_hwaccel_rx(skb, adapter->vlgrp,
2333 le16_to_cpu(rx_desc->special &
2334 E1000_RXD_SPC_VLAN_MASK));
2338 #endif /* CONFIG_E1000_NAPI */
2339 netdev->last_rx = jiffies;
2341 rx_desc->status = 0;
2342 buffer_info->skb = NULL;
2344 if(++i == rx_ring->count) i = 0;
2346 rx_desc = E1000_RX_DESC(*rx_ring, i);
2349 rx_ring->next_to_clean = i;
/* Refill the entries we just consumed. */
2351 e1000_alloc_rx_buffers(adapter);
2357 * e1000_alloc_rx_buffers - Replace used receive buffers
2358 * @adapter: address of board private structure
/* Allocates and DMA-maps a fresh sk_buff for every ring entry whose skb
 * slot is empty (starting at next_to_use), writing the DMA address into
 * the descriptor and periodically bumping the hardware tail (RDT) in
 * batches of E1000_RX_BUFFER_WRITE. */
2362 e1000_alloc_rx_buffers(struct e1000_adapter *adapter)
2364 struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
2365 struct net_device *netdev = adapter->netdev;
2366 struct pci_dev *pdev = adapter->pdev;
2367 struct e1000_rx_desc *rx_desc;
2368 struct e1000_buffer *buffer_info;
2369 struct sk_buff *skb;
2370 int reserve_len = 2;
2373 i = rx_ring->next_to_use;
2374 buffer_info = &rx_ring->buffer_info[i];
2376 while(!buffer_info->skb) {
2377 rx_desc = E1000_RX_DESC(*rx_ring, i);
2379 skb = dev_alloc_skb(adapter->rx_buffer_len + reserve_len);
2382 /* Better luck next round */
2386 /* Make buffer alignment 2 beyond a 16 byte boundary
2387 * this will result in a 16 byte aligned IP header after
2388 * the 14 byte MAC header is removed
2390 skb_reserve(skb, reserve_len);
2394 buffer_info->skb = skb;
2395 buffer_info->length = adapter->rx_buffer_len;
2397 pci_map_single(pdev,
2399 adapter->rx_buffer_len,
2400 PCI_DMA_FROMDEVICE);
2402 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
/* Only tell the hardware about new buffers every RX_BUFFER_WRITE-th
 * entry, to batch the (expensive) tail-register writes. */
2404 if((i & ~(E1000_RX_BUFFER_WRITE - 1)) == i) {
2405 /* Force memory writes to complete before letting h/w
2406 * know there are new descriptors to fetch. (Only
2407 * applicable for weak-ordered memory model archs,
2408 * such as IA-64). */
2411 E1000_WRITE_REG(&adapter->hw, RDT, i);
2414 if(++i == rx_ring->count) i = 0;
2415 buffer_info = &rx_ring->buffer_info[i];
2418 rx_ring->next_to_use = i;
2422 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
 *
 * Called periodically when gigabit link is being negotiated.  If the IGP
 * PHY reports a Master/Slave configuration fault on two consecutive reads,
 * the manual Master/Slave setting is dropped and autonegotiation restarted;
 * after E1000_SMARTSPEED_DOWNSHIFT further attempts the MS_ENABLE bit is
 * forced back on (helps with 2/3-pair cabling).  adapter->smartspeed counts
 * iterations and wraps at E1000_SMARTSPEED_MAX to restart the cycle.
 * Only applies to IGP PHYs autonegotiating 1000FD.
2427 e1000_smartspeed(struct e1000_adapter *adapter)
2429 uint16_t phy_status;
 /* Workaround is relevant only when an IGP PHY is autonegotiating
  * with 1000BASE-T full duplex advertised. */
2432 if((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg ||
2433 !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
2436 if(adapter->smartspeed == 0) {
2437 /* If Master/Slave config fault is asserted twice,
2438 * we assume back-to-back */
2439 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
2440 if(!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
2441 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
2442 if(!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
2443 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
2444 if(phy_ctrl & CR_1000T_MS_ENABLE) {
 /* Drop manual Master/Slave config and let the link
  * partners resolve it automatically. */
2445 phy_ctrl &= ~CR_1000T_MS_ENABLE;
2446 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL,
2448 adapter->smartspeed++;
2449 if(!e1000_phy_setup_autoneg(&adapter->hw) &&
2450 !e1000_read_phy_reg(&adapter->hw, PHY_CTRL,
 /* Kick off a fresh autonegotiation round. */
2452 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
2453 MII_CR_RESTART_AUTO_NEG);
2454 e1000_write_phy_reg(&adapter->hw, PHY_CTRL,
2459 } else if(adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
2460 /* If still no link, perhaps using 2/3 pair cable */
2461 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
2462 phy_ctrl |= CR_1000T_MS_ENABLE;
2463 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_ctrl);
2464 if(!e1000_phy_setup_autoneg(&adapter->hw) &&
2465 !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_ctrl)) {
2466 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
2467 MII_CR_RESTART_AUTO_NEG);
2468 e1000_write_phy_reg(&adapter->hw, PHY_CTRL, phy_ctrl);
2471 /* Restart process after E1000_SMARTSPEED_MAX iterations */
2472 if(adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
2473 adapter->smartspeed = 0;
 /* Driver ioctl entry point.  The visible path forwards MII register
  * access requests to e1000_mii_ioctl(); the cmd switch and the
  * default/-EOPNOTSUPP handling are elided in this excerpt. */
2484 e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2490 return e1000_mii_ioctl(netdev, ifr, cmd);
 /* Handle MII ioctls (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG — the cmd
  * switch labels are elided in this excerpt).  Copper media only.  Reads
  * and writes go straight to the PHY; writes to key M88 PHY registers
  * additionally resync the driver's speed/duplex state or reset the PHY
  * so software and hardware stay in agreement.  Returns E1000_SUCCESS on
  * success; error paths (-EPERM, -EFAULT, -EOPNOTSUPP) are elided. */
2504 e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2506 struct e1000_adapter *adapter = netdev->priv;
2507 struct mii_ioctl_data *data = if_mii(ifr);
 /* MII ioctls only make sense on copper links. */
2512 if(adapter->hw.media_type != e1000_media_type_copper)
2517 data->phy_id = adapter->hw.phy_addr;
 /* Register reads/writes require CAP_NET_ADMIN. */
2520 if (!capable(CAP_NET_ADMIN))
2522 if (e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
2527 if (!capable(CAP_NET_ADMIN))
 /* Only the 5-bit standard MII register space may be written. */
2529 if (data->reg_num & ~(0x1F))
2531 mii_reg = data->val_in;
2532 if (e1000_write_phy_reg(&adapter->hw, data->reg_num,
 /* For M88 PHYs, mirror relevant register writes into the
  * driver's own configuration. */
2535 if (adapter->hw.phy_type == e1000_phy_m88) {
2536 switch (data->reg_num) {
2538 if(data->val_in & MII_CR_AUTO_NEG_EN) {
 /* Autoneg enabled: advertise everything (10/100/1000, HD/FD). */
2539 adapter->hw.autoneg = 1;
2540 adapter->hw.autoneg_advertised = 0x2F;
 /* Forced mode: decode speed (bit6=1000, bit13=100) and
  * duplex (bit8=full) from the BMCR value written. */
2542 if (data->val_in & 0x40)
2543 spddplx = SPEED_1000;
2544 else if (data->val_in & 0x2000)
2545 spddplx = SPEED_100;
2548 spddplx += (data->val_in & 0x100)
2551 retval = e1000_set_spd_dplx(adapter,
 /* Apply the new link settings by bouncing the interface. */
2556 if(netif_running(adapter->netdev)) {
2557 e1000_down(adapter);
2560 e1000_reset(adapter);
 /* Writes to the PHY-specific control registers need a PHY
  * reset to take effect. */
2562 case M88E1000_PHY_SPEC_CTRL:
2563 case M88E1000_EXT_PHY_SPEC_CTRL:
2564 if (e1000_phy_reset(&adapter->hw))
2573 return E1000_SUCCESS;
2577 * e1000_rx_checksum - Receive Checksum Offload for 82543
2578 * @adapter: board private structure
2579 * @rx_desc: receive descriptor
2580 * @sk_buff: socket buffer with received data
 *
 * Translates the hardware checksum status bits in @rx_desc into
 * skb->ip_summed: CHECKSUM_UNNECESSARY when the NIC verified the TCP
 * checksum, CHECKSUM_NONE otherwise (stack re-verifies).  Also maintains
 * the hw_csum_good/hw_csum_err counters.
2584 e1000_rx_checksum(struct e1000_adapter *adapter,
2585 struct e1000_rx_desc *rx_desc,
2586 struct sk_buff *skb)
 /* Fall back to software checksumming when the hardware could not or
  * was told not to checksum this frame. */
2588 /* 82543 or newer only */
2589 if((adapter->hw.mac_type < e1000_82543) ||
2590 /* Ignore Checksum bit is set */
2591 (rx_desc->status & E1000_RXD_STAT_IXSM) ||
2592 /* TCP Checksum has not been calculated */
2593 (!(rx_desc->status & E1000_RXD_STAT_TCPCS))) {
2594 skb->ip_summed = CHECKSUM_NONE;
2598 /* At this point we know the hardware did the TCP checksum */
2599 /* now look at the TCP checksum error bit */
2600 if(rx_desc->errors & E1000_RXD_ERR_TCPE) {
2601 /* let the stack verify checksum errors */
2602 skb->ip_summed = CHECKSUM_NONE;
2603 adapter->hw_csum_err++;
2605 /* TCP checksum is good */
2606 skb->ip_summed = CHECKSUM_UNNECESSARY;
2607 adapter->hw_csum_good++;
 /* HW-layer callback: enable PCI Memory-Write-Invalidate for the device
  * backing @hw (hw->back points at the owning e1000_adapter). */
2612 e1000_pci_set_mwi(struct e1000_hw *hw)
2614 struct e1000_adapter *adapter = hw->back;
2616 pci_set_mwi(adapter->pdev);
 /* HW-layer callback: disable PCI Memory-Write-Invalidate, mirror of
  * e1000_pci_set_mwi(). */
2620 e1000_pci_clear_mwi(struct e1000_hw *hw)
2622 struct e1000_adapter *adapter = hw->back;
2624 pci_clear_mwi(adapter->pdev);
 /* HW-layer callback: read a 16-bit word from PCI config space at
  * offset @reg into *@value. */
2628 e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
2630 struct e1000_adapter *adapter = hw->back;
2632 pci_read_config_word(adapter->pdev, reg, value);
 /* HW-layer callback: write the 16-bit word *@value to PCI config space
  * at offset @reg.  @value is a pointer for API symmetry with the read
  * counterpart. */
2636 e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
2638 struct e1000_adapter *adapter = hw->back;
2640 pci_write_config_word(adapter->pdev, reg, *value);
 /* HW-layer callback: read a value from I/O @port.  Body elided in this
  * excerpt — presumably a plain inl(port); confirm against full source. */
2644 e1000_io_read(struct e1000_hw *hw, unsigned long port)
 /* HW-layer callback: write @value to I/O @port.  Body elided in this
  * excerpt — presumably a plain outl(value, port); confirm against full
  * source. */
2650 e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value)
 /* VLAN acceleration registration callback.  Stores the new vlan_group
  * and programs CTRL.VME (tag insert/strip) and RCTL.VFE (receive VLAN
  * filtering) accordingly.  The if(grp)/else branch structure is elided
  * in this excerpt: the first register sequence runs when a group is
  * registered, the second when @grp is NULL (unregister).  Interrupts
  * are disabled around the update to avoid racing the ISR. */
2656 e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
2658 struct e1000_adapter *adapter = netdev->priv;
2659 uint32_t ctrl, rctl;
2661 e1000_irq_disable(adapter);
2662 adapter->vlgrp = grp;
2665 /* enable VLAN tag insert/strip */
2667 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
2668 ctrl |= E1000_CTRL_VME;
2669 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
2671 /* enable VLAN receive filtering */
2673 rctl = E1000_READ_REG(&adapter->hw, RCTL);
2674 rctl |= E1000_RCTL_VFE;
 /* Do not filter on the Canonical Form Indicator bit. */
2675 rctl &= ~E1000_RCTL_CFIEN;
2676 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
2678 /* disable VLAN tag insert/strip */
2680 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
2681 ctrl &= ~E1000_CTRL_VME;
2682 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
2684 /* disable VLAN filtering */
2686 rctl = E1000_READ_REG(&adapter->hw, RCTL);
2687 rctl &= ~E1000_RCTL_VFE;
2688 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
2691 e1000_irq_enable(adapter);
 /* Add @vid to the hardware VLAN filter table (VFTA).  The table is an
  * array of 128 32-bit registers; bits 5..11 of the VID select the
  * register, bits 0..4 select the bit within it. */
2695 e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
2697 struct e1000_adapter *adapter = netdev->priv;
2698 uint32_t vfta, index;
2700 /* add VID to filter table */
2702 index = (vid >> 5) & 0x7F;
2703 vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
2704 vfta |= (1 << (vid & 0x1F));
2705 e1000_write_vfta(&adapter->hw, index, vfta);
 /* Remove @vid: drop the vlan_group's device entry (under disabled
  * interrupts to avoid racing RX VLAN demux in the ISR), then clear the
  * VID's bit in the hardware VLAN filter table.  Same index/bit split as
  * e1000_vlan_rx_add_vid(). */
2709 e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
2711 struct e1000_adapter *adapter = netdev->priv;
2712 uint32_t vfta, index;
2714 e1000_irq_disable(adapter);
 /* NOTE(review): the if(adapter->vlgrp) guard is elided in this excerpt. */
2717 adapter->vlgrp->vlan_devices[vid] = NULL;
2719 e1000_irq_enable(adapter);
2721 /* remove VID from filter table*/
2723 index = (vid >> 5) & 0x7F;
2724 vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
2725 vfta &= ~(1 << (vid & 0x1F));
2726 e1000_write_vfta(&adapter->hw, index, vfta);
 /* Re-apply VLAN state after a device reset: re-register the current
  * vlan_group (reprogramming CTRL/RCTL) and re-add every VID that has a
  * VLAN device, rebuilding the hardware filter table. */
2730 e1000_restore_vlan(struct e1000_adapter *adapter)
2732 e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);
2734 if(adapter->vlgrp) {
2736 for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
 /* Skip VIDs with no registered VLAN device (continue elided). */
2737 if(!adapter->vlgrp->vlan_devices[vid])
2739 e1000_vlan_rx_add_vid(adapter->netdev, vid);
 /* Translate a combined speed+duplex code (SPEED_x + DUPLEX_y) into the
  * hw forced_speed_duplex setting.  1000FD is special-cased: gigabit
  * cannot be forced, so autoneg is enabled advertising 1000FD only.
  * 1000HD is rejected (error return elided in this excerpt, along with
  * the break statements between cases). */
2745 e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx)
 /* Default to forced mode; the gigabit case re-enables autoneg below. */
2747 adapter->hw.autoneg = 0;
2750 case SPEED_10 + DUPLEX_HALF:
2751 adapter->hw.forced_speed_duplex = e1000_10_half;
2753 case SPEED_10 + DUPLEX_FULL:
2754 adapter->hw.forced_speed_duplex = e1000_10_full;
2756 case SPEED_100 + DUPLEX_HALF:
2757 adapter->hw.forced_speed_duplex = e1000_100_half;
2759 case SPEED_100 + DUPLEX_FULL:
2760 adapter->hw.forced_speed_duplex = e1000_100_full;
2762 case SPEED_1000 + DUPLEX_FULL:
2763 adapter->hw.autoneg = 1;
2764 adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
2766 case SPEED_1000 + DUPLEX_HALF: /* not supported */
 /* Reboot-notifier callback: on shutdown/reboot events (event switch
  * elided in this excerpt), walk all PCI devices and suspend every one
  * bound to this driver so WoL state is armed before the machine goes
  * down. */
2774 e1000_notify_reboot(struct notifier_block *nb, unsigned long event, void *p)
2776 struct pci_dev *pdev = NULL;
2782 while((pdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
2783 if(pci_dev_driver(pdev) == &e1000_driver)
 /* 3 == D3hot power state. */
2784 e1000_suspend(pdev, 3);
 /* Power-management suspend handler: detach and stop the interface, then
  * arm Wake-on-LAN (WUC/WUFC) and PME according to adapter->wol, keep
  * manageability (ARP offload) alive if SMBus management is enabled,
  * save PCI config state and drop the device into D3.  Some conditional
  * braces and the wufc if/else structure are elided in this excerpt. */
2791 e1000_suspend(struct pci_dev *pdev, uint32_t state)
2793 struct net_device *netdev = pci_get_drvdata(pdev);
2794 struct e1000_adapter *adapter = netdev->priv;
2795 uint32_t ctrl, ctrl_ext, rctl, manc, status;
2796 uint32_t wufc = adapter->wol;
2798 netif_device_detach(netdev);
2800 if(netif_running(netdev))
2801 e1000_down(adapter);
 /* If link is up now, don't also wake on a link-change event. */
2803 status = E1000_READ_REG(&adapter->hw, STATUS);
2804 if(status & E1000_STATUS_LU)
2805 wufc &= ~E1000_WUFC_LNKC;
 /* WoL path (the if(wufc) guard is elided): RX must stay configured
  * so wake packets can be recognized. */
2808 e1000_setup_rctl(adapter);
2809 e1000_set_multi(netdev);
2811 /* turn on all-multi mode if wake on multicast is enabled */
2812 if(adapter->wol & E1000_WUFC_MC) {
2813 rctl = E1000_READ_REG(&adapter->hw, RCTL);
2814 rctl |= E1000_RCTL_MPE;
2815 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
2818 if(adapter->hw.mac_type >= e1000_82540) {
2819 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
2820 /* advertise wake from D3Cold */
2821 #define E1000_CTRL_ADVD3WUC 0x00100000
2822 /* phy power management enable */
2823 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
2824 ctrl |= E1000_CTRL_ADVD3WUC |
2825 E1000_CTRL_EN_PHY_PWR_MGMT;
2826 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
2829 if(adapter->hw.media_type == e1000_media_type_fiber ||
2830 adapter->hw.media_type == e1000_media_type_internal_serdes) {
2831 /* keep the laser running in D3 */
2832 ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
2833 ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
2834 E1000_WRITE_REG(&adapter->hw, CTRL_EXT, ctrl_ext);
 /* Arm wake: enable PME in the Wakeup Control register and program
  * the wake filters. */
2837 E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN);
2838 E1000_WRITE_REG(&adapter->hw, WUFC, wufc);
2839 pci_enable_wake(pdev, 3, 1);
2840 pci_enable_wake(pdev, 4, 1); /* 4 == D3 cold */
 /* No-WoL path (else branch, brace elided): clear all wake state. */
2842 E1000_WRITE_REG(&adapter->hw, WUC, 0);
2843 E1000_WRITE_REG(&adapter->hw, WUFC, 0);
2844 pci_enable_wake(pdev, 3, 0);
2845 pci_enable_wake(pdev, 4, 0); /* 4 == D3 cold */
2848 pci_save_state(pdev, adapter->pci_state);
 /* Keep manageability ARP offload alive across suspend when the BMC
  * talks to the PHY over SMBus. */
2850 if(adapter->hw.mac_type >= e1000_82540 &&
2851 adapter->hw.media_type == e1000_media_type_copper) {
2852 manc = E1000_READ_REG(&adapter->hw, MANC);
2853 if(manc & E1000_MANC_SMBUS_EN) {
2854 manc |= E1000_MANC_ARP_EN;
2855 E1000_WRITE_REG(&adapter->hw, MANC, manc);
2856 pci_enable_wake(pdev, 3, 1);
2857 pci_enable_wake(pdev, 4, 1); /* 4 == D3 cold */
 /* Map any non-zero requested state to D3hot. */
2861 state = (state > 0) ? 3 : 0;
2862 pci_set_power_state(pdev, state);
 /* Power-management resume handler: return the device to D0, restore PCI
  * config state, disarm wake, reset the hardware, clear latched wake-up
  * status, bring the interface back up if it was running, and undo the
  * ARP-offload enable done in e1000_suspend(). */
2869 e1000_resume(struct pci_dev *pdev)
2871 struct net_device *netdev = pci_get_drvdata(pdev);
2872 struct e1000_adapter *adapter = netdev->priv;
2875 pci_set_power_state(pdev, 0);
2876 pci_restore_state(pdev, adapter->pci_state);
2878 pci_enable_wake(pdev, 3, 0);
2879 pci_enable_wake(pdev, 4, 0); /* 4 == D3 cold */
2881 e1000_reset(adapter);
 /* Writing all-ones clears every latched Wakeup Status bit. */
2882 E1000_WRITE_REG(&adapter->hw, WUS, ~0);
 /* NOTE(review): the e1000_up() call under this guard is elided in
  * this excerpt. */
2884 if(netif_running(netdev))
2887 netif_device_attach(netdev);
 /* Turn the suspend-time ARP offload back off now that the host OS is
  * handling traffic again. */
2889 if(adapter->hw.mac_type >= e1000_82540 &&
2890 adapter->hw.media_type == e1000_media_type_copper) {
2891 manc = E1000_READ_REG(&adapter->hw, MANC);
2892 manc &= ~(E1000_MANC_ARP_EN);
2893 E1000_WRITE_REG(&adapter->hw, MANC, manc);
2900 #ifdef CONFIG_NET_POLL_CONTROLLER
2902 * Polling 'interrupt' - used by things like netconsole to send skbs
2903 * without having to re-enable interrupts. It's not called while
2904 * the interrupt routine is executing.
2907 static void e1000_netpoll (struct net_device *dev)
2909 struct e1000_adapter *adapter = dev->priv;
2910 disable_irq(adapter->pdev->irq);
2911 e1000_intr (adapter->pdev->irq, dev, NULL);
2912 enable_irq(adapter->pdev->irq);