1 /*******************************************************************************
4 Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
6 This program is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 2 of the License, or (at your option)
11 This program is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc., 59
18 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 The full GNU General Public License is included in this distribution in the
24 Linux NICS <linux.nics@intel.com>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *******************************************************************************/
33 * - kcompat NETIF_MSG for older kernels (2.4.9) <sean.p.mcdermott@intel.com>
34 * - if_mii support and associated kcompat for older kernels
35 * - More errlogging support from Jon Mason <jonmason@us.ibm.com>
36 * - Fix TSO issues on PPC64 machines -- Jon Mason <jonmason@us.ibm.com>
39 * - Enabling NETIF_F_SG without checksum offload is illegal -
40 John Mason <jdmason@us.ibm.com>
42 * - Remove redundant initialization - Jamal Hadi
43 * - Reset buffer_info->dma in tx resource cleanup logic
45 * - Avoid filling tx_ring completely - shemminger@osdl.org
46 * - Replace schedule_timeout() with msleep()/msleep_interruptible() -
48 * - Sparse cleanup - shemminger@osdl.org
49 * - Fix tx resource cleanup logic
50 * - LLTX support - ak@suse.de and hadi@cyberus.ca
/* Driver identification strings; DRIVERNAPI tags the version string when the
 * driver is built with NAPI polling support (CONFIG_E1000_NAPI). */
53 char e1000_driver_name[] = "e1000";
54 char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
55 #ifndef CONFIG_E1000_NAPI
58 #define DRIVERNAPI "-NAPI"
60 char e1000_driver_version[] = "5.6.10.1-k2"DRIVERNAPI;
61 char e1000_copyright[] = "Copyright (c) 1999-2004 Intel Corporation.";
63 /* e1000_pci_tbl - PCI Device ID Table
65 * Last entry must be all 0s
68 * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
/* Every PCI device ID this driver binds to; all are Intel vendor parts in the
 * PRO/1000 family. The PCI core matches hot-plugged/probed devices against
 * this table via MODULE_DEVICE_TABLE below. */
70 static struct pci_device_id e1000_pci_tbl[] = {
71 	INTEL_E1000_ETHERNET_DEVICE(0x1000),
72 	INTEL_E1000_ETHERNET_DEVICE(0x1001),
73 	INTEL_E1000_ETHERNET_DEVICE(0x1004),
74 	INTEL_E1000_ETHERNET_DEVICE(0x1008),
75 	INTEL_E1000_ETHERNET_DEVICE(0x1009),
76 	INTEL_E1000_ETHERNET_DEVICE(0x100C),
77 	INTEL_E1000_ETHERNET_DEVICE(0x100D),
78 	INTEL_E1000_ETHERNET_DEVICE(0x100E),
79 	INTEL_E1000_ETHERNET_DEVICE(0x100F),
80 	INTEL_E1000_ETHERNET_DEVICE(0x1010),
81 	INTEL_E1000_ETHERNET_DEVICE(0x1011),
82 	INTEL_E1000_ETHERNET_DEVICE(0x1012),
83 	INTEL_E1000_ETHERNET_DEVICE(0x1013),
84 	INTEL_E1000_ETHERNET_DEVICE(0x1015),
85 	INTEL_E1000_ETHERNET_DEVICE(0x1016),
86 	INTEL_E1000_ETHERNET_DEVICE(0x1017),
87 	INTEL_E1000_ETHERNET_DEVICE(0x1018),
88 	INTEL_E1000_ETHERNET_DEVICE(0x1019),
89 	INTEL_E1000_ETHERNET_DEVICE(0x101D),
90 	INTEL_E1000_ETHERNET_DEVICE(0x101E),
91 	INTEL_E1000_ETHERNET_DEVICE(0x1026),
92 	INTEL_E1000_ETHERNET_DEVICE(0x1027),
93 	INTEL_E1000_ETHERNET_DEVICE(0x1028),
94 	INTEL_E1000_ETHERNET_DEVICE(0x1075),
95 	INTEL_E1000_ETHERNET_DEVICE(0x1076),
96 	INTEL_E1000_ETHERNET_DEVICE(0x1077),
97 	INTEL_E1000_ETHERNET_DEVICE(0x1078),
98 	INTEL_E1000_ETHERNET_DEVICE(0x1079),
99 	INTEL_E1000_ETHERNET_DEVICE(0x107A),
100 	INTEL_E1000_ETHERNET_DEVICE(0x107B),
101 	INTEL_E1000_ETHERNET_DEVICE(0x107C),
102 	INTEL_E1000_ETHERNET_DEVICE(0x108A),
103 	/* required last entry */
107 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
/* Functions exported to other e1000 files (ethtool, param handling). */
109 int e1000_up(struct e1000_adapter *adapter);
110 void e1000_down(struct e1000_adapter *adapter);
111 void e1000_reset(struct e1000_adapter *adapter);
112 int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx);
113 int e1000_setup_tx_resources(struct e1000_adapter *adapter);
114 int e1000_setup_rx_resources(struct e1000_adapter *adapter);
115 void e1000_free_tx_resources(struct e1000_adapter *adapter);
116 void e1000_free_rx_resources(struct e1000_adapter *adapter);
117 void e1000_update_stats(struct e1000_adapter *adapter);
119 /* Local Function Prototypes */
121 static int e1000_init_module(void);
122 static void e1000_exit_module(void);
123 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
124 static void __devexit e1000_remove(struct pci_dev *pdev);
125 static int e1000_sw_init(struct e1000_adapter *adapter);
126 static int e1000_open(struct net_device *netdev);
127 static int e1000_close(struct net_device *netdev);
128 static void e1000_configure_tx(struct e1000_adapter *adapter);
129 static void e1000_configure_rx(struct e1000_adapter *adapter);
130 static void e1000_setup_rctl(struct e1000_adapter *adapter);
131 static void e1000_clean_tx_ring(struct e1000_adapter *adapter);
132 static void e1000_clean_rx_ring(struct e1000_adapter *adapter);
133 static void e1000_set_multi(struct net_device *netdev);
134 static void e1000_update_phy_info(unsigned long data);
135 static void e1000_watchdog(unsigned long data);
136 static void e1000_82547_tx_fifo_stall(unsigned long data);
137 static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
138 static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
139 static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
140 static int e1000_set_mac(struct net_device *netdev, void *p);
141 static irqreturn_t e1000_intr(int irq, void *data, struct pt_regs *regs);
142 static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter);
/* Rx cleanup has two signatures: NAPI builds take a work budget, non-NAPI
 * builds process in interrupt context. */
143 #ifdef CONFIG_E1000_NAPI
144 static int e1000_clean(struct net_device *netdev, int *budget);
145 static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter,
146                                     int *work_done, int work_to_do);
148 static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter);
150 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter);
151 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
152 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
154 void e1000_set_ethtool_ops(struct net_device *netdev);
155 static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
156 static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
157 static void e1000_tx_timeout(struct net_device *dev);
158 static void e1000_tx_timeout_task(struct net_device *dev);
159 static void e1000_smartspeed(struct e1000_adapter *adapter);
160 static inline int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
161                                               struct sk_buff *skb);
163 static void e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp);
164 static void e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
165 static void e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
166 static void e1000_restore_vlan(struct e1000_adapter *adapter);
168 static int e1000_notify_reboot(struct notifier_block *, unsigned long event, void *ptr);
169 static int e1000_suspend(struct pci_dev *pdev, uint32_t state);
171 static int e1000_resume(struct pci_dev *pdev);
174 #ifdef CONFIG_NET_POLL_CONTROLLER
175 /* for netdump / net console */
176 static void e1000_netpoll (struct net_device *netdev);
/* Suspend the device cleanly on reboot/shutdown (see e1000_notify_reboot). */
179 struct notifier_block e1000_notifier_reboot = {
180 	.notifier_call	= e1000_notify_reboot,
185 /* Exported from other modules */
187 extern void e1000_check_options(struct e1000_adapter *adapter);
/* PCI driver glue: binds e1000_pci_tbl entries to probe/remove and the
 * power-management suspend/resume hooks. */
189 static struct pci_driver e1000_driver = {
190 	.name     = e1000_driver_name,
191 	.id_table = e1000_pci_tbl,
192 	.probe    = e1000_probe,
193 	.remove   = __devexit_p(e1000_remove),
194 	/* Power Managment Hooks */
196 	.suspend  = e1000_suspend,
197 	.resume   = e1000_resume
201 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
202 MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
203 MODULE_LICENSE("GPL");
/* msg_enable bitmask is derived from this in e1000_probe: (1 << debug) - 1. */
205 static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
206 module_param(debug, int, 0);
207 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
210 * e1000_init_module - Driver Registration Routine
212 * e1000_init_module is the first routine called when the driver is
213 * loaded. All it does is register with the PCI subsystem.
217 e1000_init_module(void)
/* Print banner, register the PCI driver, then hook reboot notification so
 * the NIC can be quiesced on shutdown. */
220 	printk(KERN_INFO "%s - version %s\n",
221 	       e1000_driver_string, e1000_driver_version);
223 	printk(KERN_INFO "%s\n", e1000_copyright);
225 	ret = pci_module_init(&e1000_driver);
227 		register_reboot_notifier(&e1000_notifier_reboot);
232 module_init(e1000_init_module);
235 * e1000_exit_module - Driver Exit Cleanup Routine
237 * e1000_exit_module is called just before the driver is removed
242 e1000_exit_module(void)
/* Undo e1000_init_module in reverse order: drop the reboot notifier first,
 * then unregister from the PCI subsystem. */
244 	unregister_reboot_notifier(&e1000_notifier_reboot);
245 	pci_unregister_driver(&e1000_driver);
248 module_exit(e1000_exit_module);
251 * e1000_irq_disable - Mask off interrupt generation on the NIC
252 * @adapter: board private structure
256 e1000_irq_disable(struct e1000_adapter *adapter)
/* irq_sem counts nested disables; e1000_irq_enable only re-enables when the
 * count drops back to zero. Writing ~0 to IMC masks every interrupt cause,
 * the flush posts the write, and synchronize_irq waits out a handler that
 * may already be running on another CPU. */
258 	atomic_inc(&adapter->irq_sem);
259 	E1000_WRITE_REG(&adapter->hw, IMC, ~0);
260 	E1000_WRITE_FLUSH(&adapter->hw);
261 	synchronize_irq(adapter->pdev->irq);
265 * e1000_irq_enable - Enable default interrupt generation settings
266 * @adapter: board private structure
270 e1000_irq_enable(struct e1000_adapter *adapter)
/* Counterpart of e1000_irq_disable: only unmask (IMS) once every nested
 * disable has been balanced, i.e. irq_sem reaches zero. */
272 	if(likely(atomic_dec_and_test(&adapter->irq_sem))) {
273 		E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK);
274 		E1000_WRITE_FLUSH(&adapter->hw);
279 e1000_up(struct e1000_adapter *adapter)
/* Bring the interface to an operational state after a hardware reset:
 * re-power the PHY if it was put to sleep by e1000_down, reprogram filters
 * and rings, hook the interrupt, then kick the watchdog and unmask IRQs.
 * Returns 0 on success or the request_irq error code. */
281 	struct net_device *netdev = adapter->netdev;
284 	/* hardware has been reset, we need to reload some things */
286 	/* Reset the PHY if it was previously powered down */
287 	if(adapter->hw.media_type == e1000_media_type_copper) {
289 		e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
290 		if(mii_reg & MII_CR_POWER_DOWN)
291 			e1000_phy_reset(&adapter->hw);
294 	e1000_set_multi(netdev);
296 	e1000_restore_vlan(adapter);
298 	e1000_configure_tx(adapter);
299 	e1000_setup_rctl(adapter);
300 	e1000_configure_rx(adapter);
301 	e1000_alloc_rx_buffers(adapter);
303 	if((err = request_irq(adapter->pdev->irq, &e1000_intr,
304 	                      SA_SHIRQ | SA_SAMPLE_RANDOM,
305 	                      netdev->name, netdev)))
308 	mod_timer(&adapter->watchdog_timer, jiffies);
309 	e1000_irq_enable(adapter);
315 e1000_down(struct e1000_adapter *adapter)
/* Tear down the operational state set up by e1000_up: mask and release the
 * IRQ, stop all driver timers, mark the link down, reset the MAC, and free
 * every queued Tx/Rx buffer. Optionally powers the PHY down so no link is
 * advertised while the interface is administratively down. */
317 	struct net_device *netdev = adapter->netdev;
319 	e1000_irq_disable(adapter);
320 	free_irq(adapter->pdev->irq, netdev);
321 	del_timer_sync(&adapter->tx_fifo_stall_timer);
322 	del_timer_sync(&adapter->watchdog_timer);
323 	del_timer_sync(&adapter->phy_info_timer);
324 	adapter->link_speed = 0;
325 	adapter->link_duplex = 0;
326 	netif_carrier_off(netdev);
327 	netif_stop_queue(netdev);
329 	e1000_reset(adapter);
330 	e1000_clean_tx_ring(adapter);
331 	e1000_clean_rx_ring(adapter);
333 	/* If WoL is not enabled
334 	 * Power down the PHY so no link is implied when interface is down */
335 	if(!adapter->wol && adapter->hw.media_type == e1000_media_type_copper) {
337 		e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
338 		mii_reg |= MII_CR_POWER_DOWN;
339 		e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg);
344 e1000_reset(struct e1000_adapter *adapter)
/* Full MAC reset and re-initialization: repartition the on-chip packet
 * buffer (PBA) between Rx and Tx according to MAC type and Rx buffer size,
 * derive flow-control watermarks from the resulting Rx space, then reset and
 * re-init the hardware. The 82547 Tx-FIFO bookkeeping fields feed the
 * tx_fifo_stall workaround elsewhere in the driver. */
348 	/* Repartition Pba for greater than 9k mtu
349 	 * To take effect CTRL.RST is required.
352 	if(adapter->hw.mac_type < e1000_82547) {
353 		if(adapter->rx_buffer_len > E1000_RXBUFFER_8192)
358 		if(adapter->rx_buffer_len > E1000_RXBUFFER_8192)
362 		adapter->tx_fifo_head = 0;
363 		adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
364 		adapter->tx_fifo_size =
365 			(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
366 		atomic_set(&adapter->tx_fifo_stall, 0);
368 	E1000_WRITE_REG(&adapter->hw, PBA, pba);
370 	/* flow control settings */
371 	adapter->hw.fc_high_water = (pba << E1000_PBA_BYTES_SHIFT) -
373 	adapter->hw.fc_low_water = (pba << E1000_PBA_BYTES_SHIFT) -
375 	adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME;
376 	adapter->hw.fc_send_xon = 1;
377 	adapter->hw.fc = adapter->hw.original_fc;
379 	e1000_reset_hw(&adapter->hw);
380 	if(adapter->hw.mac_type >= e1000_82544)
381 		E1000_WRITE_REG(&adapter->hw, WUC, 0);
382 	if(e1000_init_hw(&adapter->hw))
383 		DPRINTK(PROBE, ERR, "Hardware Error\n");
385 	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
386 	E1000_WRITE_REG(&adapter->hw, VET, ETHERNET_IEEE_VLAN_TYPE);
388 	e1000_reset_adaptive(&adapter->hw);
389 	e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
393 * e1000_probe - Device Initialization Routine
394 * @pdev: PCI device information struct
395 * @ent: entry in e1000_pci_tbl
397 * Returns 0 on success, negative on failure
399 * e1000_probe initializes an adapter identified by a pci_dev structure.
400 * The OS initialization, configuring of the adapter private structure,
401 * and a hardware reset occur.
405 e1000_probe(struct pci_dev *pdev,
406             const struct pci_device_id *ent)
408 	struct net_device *netdev;
409 	struct e1000_adapter *adapter;
410 	static int cards_found = 0;
411 	unsigned long mmio_start;
416 	uint16_t eeprom_data;
	/* Enable the device, pick the widest usable DMA mask (64-bit first,
	 * falling back to 32-bit), and claim its BAR regions. */
418 	if((err = pci_enable_device(pdev)))
421 	if(!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
424 		if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
425 			E1000_ERR("No usable DMA configuration, aborting\n");
431 	if((err = pci_request_regions(pdev, e1000_driver_name)))
434 	pci_set_master(pdev);
436 	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
439 		goto err_alloc_etherdev;
442 	SET_MODULE_OWNER(netdev);
443 	SET_NETDEV_DEV(netdev, &pdev->dev);
	/* Wire the adapter private area to the netdev/pdev and map BAR 0
	 * (register space); also find the first I/O BAR for hw.io_base. */
445 	pci_set_drvdata(pdev, netdev);
446 	adapter = netdev->priv;
447 	adapter->netdev = netdev;
448 	adapter->pdev = pdev;
449 	adapter->hw.back = adapter;
450 	adapter->msg_enable = (1 << debug) - 1;
452 	mmio_start = pci_resource_start(pdev, BAR_0);
453 	mmio_len = pci_resource_len(pdev, BAR_0);
455 	adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
456 	if(!adapter->hw.hw_addr) {
461 	for(i = BAR_1; i <= BAR_5; i++) {
462 		if(pci_resource_len(pdev, i) == 0)
464 		if(pci_resource_flags(pdev, i) & IORESOURCE_IO) {
465 			adapter->hw.io_base = pci_resource_start(pdev, i);
	/* Install the net_device method table. */
470 	netdev->open = &e1000_open;
471 	netdev->stop = &e1000_close;
472 	netdev->hard_start_xmit = &e1000_xmit_frame;
473 	netdev->get_stats = &e1000_get_stats;
474 	netdev->set_multicast_list = &e1000_set_multi;
475 	netdev->set_mac_address = &e1000_set_mac;
476 	netdev->change_mtu = &e1000_change_mtu;
477 	netdev->do_ioctl = &e1000_ioctl;
478 	e1000_set_ethtool_ops(netdev);
479 	netdev->tx_timeout = &e1000_tx_timeout;
480 	netdev->watchdog_timeo = 5 * HZ;
481 #ifdef CONFIG_E1000_NAPI
482 	netdev->poll = &e1000_clean;
485 	netdev->vlan_rx_register = e1000_vlan_rx_register;
486 	netdev->vlan_rx_add_vid = e1000_vlan_rx_add_vid;
487 	netdev->vlan_rx_kill_vid = e1000_vlan_rx_kill_vid;
488 #ifdef CONFIG_NET_POLL_CONTROLLER
489 	netdev->poll_controller = e1000_netpoll;
491 	strcpy(netdev->name, pci_name(pdev));
493 	netdev->mem_start = mmio_start;
494 	netdev->mem_end = mmio_start + mmio_len;
495 	netdev->base_addr = adapter->hw.io_base;
497 	adapter->bd_number = cards_found;
499 	/* setup the private structure */
501 	if((err = e1000_sw_init(adapter)))
	/* Feature flags: 82543+ supports SG/checksum/VLAN offload. */
504 	if(adapter->hw.mac_type >= e1000_82543) {
505 		netdev->features = NETIF_F_SG |
509 		                   NETIF_F_HW_VLAN_FILTER;
513 	/* Disabled for now until root-cause is found for
514 	 * hangs reported against non-IA archs. TSO can be
515 	 * enabled using ethtool -K eth<x> tso on */
516 	if((adapter->hw.mac_type >= e1000_82544) &&
517 	   (adapter->hw.mac_type != e1000_82547))
518 		netdev->features |= NETIF_F_TSO;
521 		netdev->features |= NETIF_F_HIGHDMA;
523 	/* hard_start_xmit is safe against parallel locking */
524 	netdev->features |= NETIF_F_LLTX;
526 	/* before reading the EEPROM, reset the controller to
527 	 * put the device in a known good starting state */
529 	e1000_reset_hw(&adapter->hw);
531 	/* make sure the EEPROM is good */
533 	if(e1000_validate_eeprom_checksum(&adapter->hw) < 0) {
534 		DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
539 	/* copy the MAC address out of the EEPROM */
541 	if (e1000_read_mac_addr(&adapter->hw))
542 		DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
543 	memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
545 	if(!is_valid_ether_addr(netdev->dev_addr)) {
546 		DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
551 	e1000_read_part_num(&adapter->hw, &(adapter->part_num));
553 	e1000_get_bus_info(&adapter->hw);
	/* Driver-private timers: 82547 Tx-FIFO stall workaround, link
	 * watchdog, and deferred PHY info collection. */
555 	init_timer(&adapter->tx_fifo_stall_timer);
556 	adapter->tx_fifo_stall_timer.function = &e1000_82547_tx_fifo_stall;
557 	adapter->tx_fifo_stall_timer.data = (unsigned long) adapter;
559 	init_timer(&adapter->watchdog_timer);
560 	adapter->watchdog_timer.function = &e1000_watchdog;
561 	adapter->watchdog_timer.data = (unsigned long) adapter;
563 	init_timer(&adapter->phy_info_timer);
564 	adapter->phy_info_timer.function = &e1000_update_phy_info;
565 	adapter->phy_info_timer.data = (unsigned long) adapter;
567 	INIT_WORK(&adapter->tx_timeout_task,
568 		(void (*)(void *))e1000_tx_timeout_task, netdev);
570 	/* we're going to reset, so assume we have no link for now */
572 	netif_carrier_off(netdev);
573 	netif_stop_queue(netdev);
575 	e1000_check_options(adapter);
577 	/* Initial Wake on LAN setting
578 	 * If APM wake is enabled in the EEPROM,
579 	 * enable the ACPI Magic Packet filter
582 	switch(adapter->hw.mac_type) {
583 	case e1000_82542_rev2_0:
584 	case e1000_82542_rev2_1:
588 	case e1000_82546_rev_3:
		/* Dual-port 82546: function 1 reads its APM bit from the
		 * port-B control word; other cases use port A. */
589 		if((E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1)
590 		   && (adapter->hw.media_type == e1000_media_type_copper)) {
591 			e1000_read_eeprom(&adapter->hw,
592 				EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
597 		e1000_read_eeprom(&adapter->hw,
598 			EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
601 	if(eeprom_data & E1000_EEPROM_APME)
602 		adapter->wol |= E1000_WUFC_MAG;
604 	/* reset the hardware with the new settings */
605 	e1000_reset(adapter);
607 	strcpy(netdev->name, "eth%d");
608 	if((err = register_netdev(netdev)))
611 	DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");
	/* error unwind labels */
619 	iounmap(adapter->hw.hw_addr);
623 	pci_release_regions(pdev);
628 * e1000_remove - Device Removal Routine
629 * @pdev: PCI device information struct
631 * e1000_remove is called by the PCI subsystem to alert the driver
632 * that it should release a PCI device. The could be caused by a
633 * Hot-Plug event, or because the driver is going to be removed from
637 static void __devexit
638 e1000_remove(struct pci_dev *pdev)
640 	struct net_device *netdev = pci_get_drvdata(pdev);
641 	struct e1000_adapter *adapter = netdev->priv;
	/* If manageability firmware is driving the part over SMBus, keep ARP
	 * offload enabled so management traffic survives driver unload. */
644 	if(adapter->hw.mac_type >= e1000_82540 &&
645 	   adapter->hw.media_type == e1000_media_type_copper) {
646 		manc = E1000_READ_REG(&adapter->hw, MANC);
647 		if(manc & E1000_MANC_SMBUS_EN) {
648 			manc |= E1000_MANC_ARP_EN;
649 			E1000_WRITE_REG(&adapter->hw, MANC, manc);
	/* Unregister from the stack, reset the PHY, then release all PCI
	 * resources claimed in e1000_probe. */
653 	unregister_netdev(netdev);
655 	e1000_phy_hw_reset(&adapter->hw);
657 	iounmap(adapter->hw.hw_addr);
658 	pci_release_regions(pdev);
662 	pci_disable_device(pdev);
666 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
667 * @adapter: board private structure to initialize
669 * e1000_sw_init initializes the Adapter private data structure.
670 * Fields are initialized based on PCI device information and
671 * OS network device settings (MTU size).
675 e1000_sw_init(struct e1000_adapter *adapter)
677 	struct e1000_hw *hw = &adapter->hw;
678 	struct net_device *netdev = adapter->netdev;
679 	struct pci_dev *pdev = adapter->pdev;
681 	/* PCI config space info */
683 	hw->vendor_id = pdev->vendor;
684 	hw->device_id = pdev->device;
685 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
686 	hw->subsystem_id = pdev->subsystem_device;
688 	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
690 	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
	/* Frame-size defaults derived from the current MTU. */
692 	adapter->rx_buffer_len = E1000_RXBUFFER_2048;
693 	hw->max_frame_size = netdev->mtu +
694 	                     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
695 	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
697 	/* identify the MAC */
699 	if(e1000_set_mac_type(hw)) {
700 		DPRINTK(PROBE, ERR, "Unknown MAC Type\n");
704 	/* initialize eeprom parameters */
706 	e1000_init_eeprom_params(hw);
708 	switch(hw->mac_type) {
713 	case e1000_82541_rev_2:
714 	case e1000_82547_rev_2:
715 		hw->phy_init_script = 1;
719 	e1000_set_media_type(hw);
	/* Driver-wide policy defaults for link negotiation and adaptive IFS. */
721 	hw->wait_autoneg_complete = FALSE;
722 	hw->tbi_compatibility_en = TRUE;
723 	hw->adaptive_ifs = TRUE;
727 	if(hw->media_type == e1000_media_type_copper) {
728 		hw->mdix = AUTO_ALL_MODES;
729 		hw->disable_polarity_correction = FALSE;
730 		hw->master_slave = E1000_MASTER_SLAVE;
	/* irq_sem starts at 1: interrupts stay masked until e1000_up's
	 * e1000_irq_enable balances it. */
733 	atomic_set(&adapter->irq_sem, 1);
734 	spin_lock_init(&adapter->stats_lock);
735 	spin_lock_init(&adapter->tx_lock);
741 * e1000_open - Called when a network interface is made active
742 * @netdev: network interface device structure
744 * Returns 0 on success, negative value on failure
746 * The open entry point is called when a network interface is made
747 * active by the system (IFF_UP). At this point all resources needed
748 * for transmit and receive operations are allocated, the interrupt
749 * handler is registered with the OS, the watchdog timer is started,
750 * and the stack is notified that the interface is ready.
754 e1000_open(struct net_device *netdev)
756 	struct e1000_adapter *adapter = netdev->priv;
759 	/* allocate transmit descriptors */
761 	if((err = e1000_setup_tx_resources(adapter)))
764 	/* allocate receive descriptors */
766 	if((err = e1000_setup_rx_resources(adapter)))
769 	if((err = e1000_up(adapter)))
772 	return E1000_SUCCESS;
	/* error unwind: free in reverse order of allocation */
775 	e1000_free_rx_resources(adapter);
777 	e1000_free_tx_resources(adapter);
779 	e1000_reset(adapter);
785 * e1000_close - Disables a network interface
786 * @netdev: network interface device structure
788 * Returns 0, this is not allowed to fail
790 * The close entry point is called when an interface is de-activated
791 * by the OS. The hardware is still under the drivers control, but
792 * needs to be disabled. A global MAC reset is issued to stop the
793 * hardware, and all transmit and receive resources are freed.
797 e1000_close(struct net_device *netdev)
799 	struct e1000_adapter *adapter = netdev->priv;
	/* Mirror of e1000_open: release descriptor rings after stopping. */
803 	e1000_free_tx_resources(adapter);
804 	e1000_free_rx_resources(adapter);
810 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
811 * @adapter: board private structure
813 * Return 0 on success, negative on failure
817 e1000_setup_tx_resources(struct e1000_adapter *adapter)
819 	struct e1000_desc_ring *txdr = &adapter->tx_ring;
820 	struct pci_dev *pdev = adapter->pdev;
	/* Per-descriptor software state (skb, dma mapping) lives in vmalloc'd
	 * buffer_info; the descriptor ring itself must be DMA-coherent. */
823 	size = sizeof(struct e1000_buffer) * txdr->count;
824 	txdr->buffer_info = vmalloc(size);
825 	if(!txdr->buffer_info) {
827 			"Unable to Allocate Memory for the Transmit descriptor ring\n");
830 	memset(txdr->buffer_info, 0, size);
832 	/* round up to nearest 4K */
834 	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
835 	E1000_ROUNDUP(txdr->size, 4096);
837 	txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
	/* on failure, release the buffer_info array allocated above */
840 			"Unable to Allocate Memory for the Transmit descriptor ring\n");
841 		vfree(txdr->buffer_info);
844 	memset(txdr->desc, 0, txdr->size);
846 	txdr->next_to_use = 0;
847 	txdr->next_to_clean = 0;
853 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
854 * @adapter: board private structure
856 * Configure the Tx unit of the MAC after a reset.
860 e1000_configure_tx(struct e1000_adapter *adapter)
862 	uint64_t tdba = adapter->tx_ring.dma;
863 	uint32_t tdlen = adapter->tx_ring.count * sizeof(struct e1000_tx_desc);
	/* Program ring base (split across TDBAL/TDBAH for 64-bit DMA),
	 * length, and head/tail pointers. */
866 	E1000_WRITE_REG(&adapter->hw, TDBAL, (tdba & 0x00000000ffffffffULL));
867 	E1000_WRITE_REG(&adapter->hw, TDBAH, (tdba >> 32));
869 	E1000_WRITE_REG(&adapter->hw, TDLEN, tdlen);
871 	/* Setup the HW Tx Head and Tail descriptor pointers */
873 	E1000_WRITE_REG(&adapter->hw, TDH, 0);
874 	E1000_WRITE_REG(&adapter->hw, TDT, 0);
876 	/* Set the default values for the Tx Inter Packet Gap timer */
878 	switch (adapter->hw.mac_type) {
879 	case e1000_82542_rev2_0:
880 	case e1000_82542_rev2_1:
881 		tipg = DEFAULT_82542_TIPG_IPGT;
882 		tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
883 		tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
		/* 82543+: IPGT differs between fiber/serdes and copper */
886 		if(adapter->hw.media_type == e1000_media_type_fiber ||
887 		   adapter->hw.media_type == e1000_media_type_internal_serdes)
888 			tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
890 			tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
891 		tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
892 		tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
894 	E1000_WRITE_REG(&adapter->hw, TIPG, tipg);
896 	/* Set the Tx Interrupt Delay register */
898 	E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay);
899 	if(adapter->hw.mac_type >= e1000_82540)
900 		E1000_WRITE_REG(&adapter->hw, TADV, adapter->tx_abs_int_delay);
902 	/* Program the Transmit Control Register */
904 	tctl = E1000_READ_REG(&adapter->hw, TCTL);
906 	tctl &= ~E1000_TCTL_CT;
907 	tctl |= E1000_TCTL_EN | E1000_TCTL_PSP |
908 		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
910 	E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
912 	e1000_config_collision_dist(&adapter->hw);
914 	/* Setup Transmit Descriptor Settings for eop descriptor */
915 	adapter->txd_cmd = E1000_TXD_CMD_IDE | E1000_TXD_CMD_EOP |
	/* Older MACs report packet-sent (RPS); newer use report-status (RS). */
918 	if(adapter->hw.mac_type < e1000_82543)
919 		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
921 		adapter->txd_cmd |= E1000_TXD_CMD_RS;
923 	/* Cache if we're 82544 running in PCI-X because we'll
924 	 * need this to apply a workaround later in the send path. */
925 	if(adapter->hw.mac_type == e1000_82544 &&
926 	   adapter->hw.bus_type == e1000_bus_type_pcix)
927 		adapter->pcix_82544 = 1;
931 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
932 * @adapter: board private structure
934 * Returns 0 on success, negative on failure
938 e1000_setup_rx_resources(struct e1000_adapter *adapter)
940 	struct e1000_desc_ring *rxdr = &adapter->rx_ring;
941 	struct pci_dev *pdev = adapter->pdev;
	/* Per-descriptor software state goes in vmalloc'd buffer_info; the
	 * descriptor ring itself must be DMA-coherent. */
944 	size = sizeof(struct e1000_buffer) * rxdr->count;
945 	rxdr->buffer_info = vmalloc(size);
946 	if(!rxdr->buffer_info) {
948 			"Unable to Allocate Memory for the Receive descriptor ring\n");
951 	memset(rxdr->buffer_info, 0, size);
953 	/* Round up to nearest 4K */
955 	rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
956 	E1000_ROUNDUP(rxdr->size, 4096);
958 	rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
	/* on failure, release the buffer_info array allocated above */
962 			"Unable to Allocate Memory for the Receive descriptor ring\n");
963 		vfree(rxdr->buffer_info);
966 	memset(rxdr->desc, 0, rxdr->size);
968 	rxdr->next_to_clean = 0;
969 	rxdr->next_to_use = 0;
975 * e1000_setup_rctl - configure the receive control register
976 * @adapter: Board private structure
980 e1000_setup_rctl(struct e1000_adapter *adapter)
984 	rctl = E1000_READ_REG(&adapter->hw, RCTL);
	/* Clear the multicast-offset field before OR-ing in the configured
	 * filter type; enable receiver, broadcast accept, no loopback, and
	 * half-threshold descriptor-minimum interrupt. */
986 	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
988 	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
989 		E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
990 		(adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
	/* Store bad packets only when TBI compatibility is active. */
992 	if(adapter->hw.tbi_compatibility_on == 1)
993 		rctl |= E1000_RCTL_SBP;
995 		rctl &= ~E1000_RCTL_SBP;
997 	/* Setup buffer sizes */
998 	rctl &= ~(E1000_RCTL_SZ_4096);
999 	rctl |= (E1000_RCTL_BSEX | E1000_RCTL_LPE);
1000 	switch (adapter->rx_buffer_len) {
1001 	case E1000_RXBUFFER_2048:
		/* 2048 is the only size not needing buffer-size extension */
1003 		rctl |= E1000_RCTL_SZ_2048;
1004 		rctl &= ~(E1000_RCTL_BSEX | E1000_RCTL_LPE);
1006 	case E1000_RXBUFFER_4096:
1007 		rctl |= E1000_RCTL_SZ_4096;
1009 	case E1000_RXBUFFER_8192:
1010 		rctl |= E1000_RCTL_SZ_8192;
1012 	case E1000_RXBUFFER_16384:
1013 		rctl |= E1000_RCTL_SZ_16384;
1017 	E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
1021 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
1022 * @adapter: board private structure
1024 * Configure the Rx unit of the MAC after a reset.
1028 e1000_configure_rx(struct e1000_adapter *adapter)
1030 	uint64_t rdba = adapter->rx_ring.dma;
1031 	uint32_t rdlen = adapter->rx_ring.count * sizeof(struct e1000_rx_desc);
1035 	/* disable receives while setting up the descriptors */
1036 	rctl = E1000_READ_REG(&adapter->hw, RCTL);
1037 	E1000_WRITE_REG(&adapter->hw, RCTL, rctl & ~E1000_RCTL_EN);
1039 	/* set the Receive Delay Timer Register */
1040 	E1000_WRITE_REG(&adapter->hw, RDTR, adapter->rx_int_delay);
	/* 82540+ also supports an absolute delay and interrupt throttling;
	 * ITR is programmed in 256 ns units. */
1042 	if(adapter->hw.mac_type >= e1000_82540) {
1043 		E1000_WRITE_REG(&adapter->hw, RADV, adapter->rx_abs_int_delay);
1044 		if(adapter->itr > 1)
1045 			E1000_WRITE_REG(&adapter->hw, ITR,
1046 				1000000000 / (adapter->itr * 256));
1049 	/* Setup the Base and Length of the Rx Descriptor Ring */
1050 	E1000_WRITE_REG(&adapter->hw, RDBAL, (rdba & 0x00000000ffffffffULL));
1051 	E1000_WRITE_REG(&adapter->hw, RDBAH, (rdba >> 32));
1053 	E1000_WRITE_REG(&adapter->hw, RDLEN, rdlen);
1055 	/* Setup the HW Rx Head and Tail Descriptor Pointers */
1056 	E1000_WRITE_REG(&adapter->hw, RDH, 0);
1057 	E1000_WRITE_REG(&adapter->hw, RDT, 0);
1059 	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
1060 	if((adapter->hw.mac_type >= e1000_82543) &&
1061 	   (adapter->rx_csum == TRUE)) {
1062 		rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM);
1063 		rxcsum |= E1000_RXCSUM_TUOFL;
1064 		E1000_WRITE_REG(&adapter->hw, RXCSUM, rxcsum);
1067 	/* Enable Receives */
1068 	E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
1072 * e1000_free_tx_resources - Free Tx Resources
1073 * @adapter: board private structure
1075 * Free all transmit software resources
1079 e1000_free_tx_resources(struct e1000_adapter *adapter)
1081 	struct pci_dev *pdev = adapter->pdev;
	/* Release queued skbs/DMA mappings first, then the buffer_info array
	 * and the coherent descriptor ring allocated in setup_tx_resources. */
1083 	e1000_clean_tx_ring(adapter);
1085 	vfree(adapter->tx_ring.buffer_info);
1086 	adapter->tx_ring.buffer_info = NULL;
1088 	pci_free_consistent(pdev, adapter->tx_ring.size,
1089 	                    adapter->tx_ring.desc, adapter->tx_ring.dma);
1091 	adapter->tx_ring.desc = NULL;
1095 e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1096 	struct e1000_buffer *buffer_info)
/* Undo a single Tx buffer's DMA mapping and free its skb, zeroing the
 * fields so the slot can be detected as empty on later cleanup passes. */
1098 	struct pci_dev *pdev = adapter->pdev;
1099 	if(buffer_info->dma) {
1100 		pci_unmap_page(pdev,
1102 		               buffer_info->length,
1104 		buffer_info->dma = 0;
1106 	if(buffer_info->skb) {
		/* _any variant: may be called from hard-irq context */
1107 		dev_kfree_skb_any(buffer_info->skb);
1108 		buffer_info->skb = NULL;
1113 * e1000_clean_tx_ring - Free Tx Buffers
1114 * @adapter: board private structure
1118 e1000_clean_tx_ring(struct e1000_adapter *adapter)
1120 	struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
1121 	struct e1000_buffer *buffer_info;
1125 	/* Free all the Tx ring sk_buffs */
1127 	for(i = 0; i < tx_ring->count; i++) {
1128 		buffer_info = &tx_ring->buffer_info[i];
1129 		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
	/* Reset software state and descriptors, then zero the hardware
	 * head/tail pointers to match the emptied ring. */
1132 	size = sizeof(struct e1000_buffer) * tx_ring->count;
1133 	memset(tx_ring->buffer_info, 0, size);
1135 	/* Zero out the descriptor ring */
1137 	memset(tx_ring->desc, 0, tx_ring->size);
1139 	tx_ring->next_to_use = 0;
1140 	tx_ring->next_to_clean = 0;
1142 	E1000_WRITE_REG(&adapter->hw, TDH, 0);
1143 	E1000_WRITE_REG(&adapter->hw, TDT, 0);
1147 * e1000_free_rx_resources - Free Rx Resources
1148 * @adapter: board private structure
1150 * Free all receive software resources
1154 e1000_free_rx_resources(struct e1000_adapter *adapter)
1156 	struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
1157 	struct pci_dev *pdev = adapter->pdev;
	/* Release queued skbs/DMA mappings first, then the buffer_info array
	 * and the coherent descriptor ring allocated in setup_rx_resources. */
1159 	e1000_clean_rx_ring(adapter);
1161 	vfree(rx_ring->buffer_info);
1162 	rx_ring->buffer_info = NULL;
1164 	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
1166 	rx_ring->desc = NULL;
1170 * e1000_clean_rx_ring - Free Rx Buffers
1171 * @adapter: board private structure
1175 e1000_clean_rx_ring(struct e1000_adapter *adapter)
1177 	struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
1178 	struct e1000_buffer *buffer_info;
1179 	struct pci_dev *pdev = adapter->pdev;
1183 	/* Free all the Rx ring sk_buffs */
1185 	for(i = 0; i < rx_ring->count; i++) {
1186 		buffer_info = &rx_ring->buffer_info[i];
1187 		if(buffer_info->skb) {
			/* unmap the receive DMA buffer before freeing the skb */
1189 			pci_unmap_single(pdev,
1191 			                 buffer_info->length,
1192 			                 PCI_DMA_FROMDEVICE);
1194 			dev_kfree_skb(buffer_info->skb);
1195 			buffer_info->skb = NULL;
	/* Reset software state and descriptors, then zero the hardware
	 * head/tail pointers to match the emptied ring. */
1199 	size = sizeof(struct e1000_buffer) * rx_ring->count;
1200 	memset(rx_ring->buffer_info, 0, size);
1202 	/* Zero out the descriptor ring */
1204 	memset(rx_ring->desc, 0, rx_ring->size);
1206 	rx_ring->next_to_clean = 0;
1207 	rx_ring->next_to_use = 0;
1209 	E1000_WRITE_REG(&adapter->hw, RDH, 0);
1210 	E1000_WRITE_REG(&adapter->hw, RDT, 0);
1213 /* The 82542 2.0 (revision 2) needs to have the receive unit in reset
1214 * and memory write and invalidate disabled for certain operations
1217 e1000_enter_82542_rst(struct e1000_adapter *adapter)
1219 struct net_device *netdev = adapter->netdev;
1222 e1000_pci_clear_mwi(&adapter->hw);
1224 rctl = E1000_READ_REG(&adapter->hw, RCTL);
1225 rctl |= E1000_RCTL_RST;
1226 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
1227 E1000_WRITE_FLUSH(&adapter->hw);
1230 if(netif_running(netdev))
1231 e1000_clean_rx_ring(adapter);
1235 e1000_leave_82542_rst(struct e1000_adapter *adapter)
1237 struct net_device *netdev = adapter->netdev;
1240 rctl = E1000_READ_REG(&adapter->hw, RCTL);
1241 rctl &= ~E1000_RCTL_RST;
1242 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
1243 E1000_WRITE_FLUSH(&adapter->hw);
1246 if(adapter->hw.pci_cmd_word & PCI_COMMAND_INVALIDATE)
1247 e1000_pci_set_mwi(&adapter->hw);
1249 if(netif_running(netdev)) {
1250 e1000_configure_rx(adapter);
1251 e1000_alloc_rx_buffers(adapter);
1256 * e1000_set_mac - Change the Ethernet Address of the NIC
1257 * @netdev: network interface device structure
1258 * @p: pointer to an address structure
1260 * Returns 0 on success, negative on failure
1264 e1000_set_mac(struct net_device *netdev, void *p)
1266 struct e1000_adapter *adapter = netdev->priv;
1267 struct sockaddr *addr = p;
1269 if(!is_valid_ether_addr(addr->sa_data))
1270 return -EADDRNOTAVAIL;
1272 /* 82542 2.0 needs to be in reset to write receive address registers */
1274 if(adapter->hw.mac_type == e1000_82542_rev2_0)
1275 e1000_enter_82542_rst(adapter);
1277 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1278 memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
1280 e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);
1282 if(adapter->hw.mac_type == e1000_82542_rev2_0)
1283 e1000_leave_82542_rst(adapter);
1289 * e1000_set_multi - Multicast and Promiscuous mode set
1290 * @netdev: network interface device structure
1292 * The set_multi entry point is called whenever the multicast address
1293 * list or the network interface flags are updated. This routine is
1294 * responsible for configuring the hardware for proper multicast,
1295 * promiscuous mode, and all-multi behavior.
1299 e1000_set_multi(struct net_device *netdev)
1301 struct e1000_adapter *adapter = netdev->priv;
1302 struct e1000_hw *hw = &adapter->hw;
1303 struct dev_mc_list *mc_ptr;
1305 uint32_t hash_value;
1307 unsigned long flags;
1309 /* Check for Promiscuous and All Multicast modes */
1311 spin_lock_irqsave(&adapter->tx_lock, flags);
1313 rctl = E1000_READ_REG(hw, RCTL);
1315 if(netdev->flags & IFF_PROMISC) {
1316 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1317 } else if(netdev->flags & IFF_ALLMULTI) {
1318 rctl |= E1000_RCTL_MPE;
1319 rctl &= ~E1000_RCTL_UPE;
1321 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
1324 E1000_WRITE_REG(hw, RCTL, rctl);
1326 /* 82542 2.0 needs to be in reset to write receive address registers */
1328 if(hw->mac_type == e1000_82542_rev2_0)
1329 e1000_enter_82542_rst(adapter);
1331 /* load the first 14 multicast address into the exact filters 1-14
1332 * RAR 0 is used for the station MAC adddress
1333 * if there are not 14 addresses, go ahead and clear the filters
1335 mc_ptr = netdev->mc_list;
1337 for(i = 1; i < E1000_RAR_ENTRIES; i++) {
1339 e1000_rar_set(hw, mc_ptr->dmi_addr, i);
1340 mc_ptr = mc_ptr->next;
1342 E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
1343 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
1347 /* clear the old settings from the multicast hash table */
1349 for(i = 0; i < E1000_NUM_MTA_REGISTERS; i++)
1350 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
1352 /* load any remaining addresses into the hash table */
1354 for(; mc_ptr; mc_ptr = mc_ptr->next) {
1355 hash_value = e1000_hash_mc_addr(hw, mc_ptr->dmi_addr);
1356 e1000_mta_set(hw, hash_value);
1359 if(hw->mac_type == e1000_82542_rev2_0)
1360 e1000_leave_82542_rst(adapter);
1362 spin_unlock_irqrestore(&adapter->tx_lock, flags);
1365 /* Need to wait a few seconds after link up to get diagnostic information from
1369 e1000_update_phy_info(unsigned long data)
1371 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
1372 e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
1376 * e1000_82547_tx_fifo_stall - Timer Call-back
1377 * @data: pointer to adapter cast into an unsigned long
1381 e1000_82547_tx_fifo_stall(unsigned long data)
1383 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
1384 struct net_device *netdev = adapter->netdev;
1387 if(atomic_read(&adapter->tx_fifo_stall)) {
1388 if((E1000_READ_REG(&adapter->hw, TDT) ==
1389 E1000_READ_REG(&adapter->hw, TDH)) &&
1390 (E1000_READ_REG(&adapter->hw, TDFT) ==
1391 E1000_READ_REG(&adapter->hw, TDFH)) &&
1392 (E1000_READ_REG(&adapter->hw, TDFTS) ==
1393 E1000_READ_REG(&adapter->hw, TDFHS))) {
1394 tctl = E1000_READ_REG(&adapter->hw, TCTL);
1395 E1000_WRITE_REG(&adapter->hw, TCTL,
1396 tctl & ~E1000_TCTL_EN);
1397 E1000_WRITE_REG(&adapter->hw, TDFT,
1398 adapter->tx_head_addr);
1399 E1000_WRITE_REG(&adapter->hw, TDFH,
1400 adapter->tx_head_addr);
1401 E1000_WRITE_REG(&adapter->hw, TDFTS,
1402 adapter->tx_head_addr);
1403 E1000_WRITE_REG(&adapter->hw, TDFHS,
1404 adapter->tx_head_addr);
1405 E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
1406 E1000_WRITE_FLUSH(&adapter->hw);
1408 adapter->tx_fifo_head = 0;
1409 atomic_set(&adapter->tx_fifo_stall, 0);
1410 netif_wake_queue(netdev);
1412 mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
1418 * e1000_watchdog - Timer Call-back
1419 * @data: pointer to netdev cast into an unsigned long
1423 e1000_watchdog(unsigned long data)
1425 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
1426 struct net_device *netdev = adapter->netdev;
1427 struct e1000_desc_ring *txdr = &adapter->tx_ring;
1431 e1000_check_for_link(&adapter->hw);
1433 if((adapter->hw.media_type == e1000_media_type_internal_serdes) &&
1434 !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE))
1435 link = !adapter->hw.serdes_link_down;
1437 link = E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU;
1440 if(!netif_carrier_ok(netdev)) {
1441 e1000_get_speed_and_duplex(&adapter->hw,
1442 &adapter->link_speed,
1443 &adapter->link_duplex);
1445 DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s\n",
1446 adapter->link_speed,
1447 adapter->link_duplex == FULL_DUPLEX ?
1448 "Full Duplex" : "Half Duplex");
1450 netif_carrier_on(netdev);
1451 netif_wake_queue(netdev);
1452 mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ);
1453 adapter->smartspeed = 0;
1456 if(netif_carrier_ok(netdev)) {
1457 adapter->link_speed = 0;
1458 adapter->link_duplex = 0;
1459 DPRINTK(LINK, INFO, "NIC Link is Down\n");
1460 netif_carrier_off(netdev);
1461 netif_stop_queue(netdev);
1462 mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ);
1465 e1000_smartspeed(adapter);
1468 e1000_update_stats(adapter);
1470 adapter->hw.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
1471 adapter->tpt_old = adapter->stats.tpt;
1472 adapter->hw.collision_delta = adapter->stats.colc - adapter->colc_old;
1473 adapter->colc_old = adapter->stats.colc;
1475 adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
1476 adapter->gorcl_old = adapter->stats.gorcl;
1477 adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
1478 adapter->gotcl_old = adapter->stats.gotcl;
1480 e1000_update_adaptive(&adapter->hw);
1482 if(!netif_carrier_ok(netdev)) {
1483 if(E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
1484 /* We've lost link, so the controller stops DMA,
1485 * but we've got queued Tx work that's never going
1486 * to get done, so reset controller to flush Tx.
1487 * (Do the reset outside of interrupt context). */
1488 schedule_work(&adapter->tx_timeout_task);
1492 /* Dynamic mode for Interrupt Throttle Rate (ITR) */
1493 if(adapter->hw.mac_type >= e1000_82540 && adapter->itr == 1) {
1494 /* Symmetric Tx/Rx gets a reduced ITR=2000; Total
1495 * asymmetrical Tx or Rx gets ITR=8000; everyone
1496 * else is between 2000-8000. */
1497 uint32_t goc = (adapter->gotcl + adapter->gorcl) / 10000;
1498 uint32_t dif = (adapter->gotcl > adapter->gorcl ?
1499 adapter->gotcl - adapter->gorcl :
1500 adapter->gorcl - adapter->gotcl) / 10000;
1501 uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
1502 E1000_WRITE_REG(&adapter->hw, ITR, 1000000000 / (itr * 256));
1505 /* Cause software interrupt to ensure rx ring is cleaned */
1506 E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0);
1508 /* Early detection of hung controller */
1509 i = txdr->next_to_clean;
1510 if(txdr->buffer_info[i].dma &&
1511 time_after(jiffies, txdr->buffer_info[i].time_stamp + HZ) &&
1512 !(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_TXOFF))
1513 netif_stop_queue(netdev);
1515 /* Reset the timer */
1516 mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
/* per-skb transmit flags passed between e1000_xmit_frame helpers */
#define E1000_TX_FLAGS_CSUM		0x00000001
#define E1000_TX_FLAGS_VLAN		0x00000002
#define E1000_TX_FLAGS_TSO		0x00000004
#define E1000_TX_FLAGS_VLAN_MASK	0xffff0000
#define E1000_TX_FLAGS_VLAN_SHIFT	16
1525 static inline boolean_t
1526 e1000_tso(struct e1000_adapter *adapter, struct sk_buff *skb)
1529 struct e1000_context_desc *context_desc;
1531 uint32_t cmd_length = 0;
1532 uint16_t ipcse, tucse, mss;
1533 uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
1535 if(skb_shinfo(skb)->tso_size) {
1536 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
1537 mss = skb_shinfo(skb)->tso_size;
1538 skb->nh.iph->tot_len = 0;
1539 skb->nh.iph->check = 0;
1540 skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
1545 ipcss = skb->nh.raw - skb->data;
1546 ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data;
1547 ipcse = skb->h.raw - skb->data - 1;
1548 tucss = skb->h.raw - skb->data;
1549 tucso = (void *)&(skb->h.th->check) - (void *)skb->data;
1552 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
1553 E1000_TXD_CMD_IP | E1000_TXD_CMD_TCP |
1554 (skb->len - (hdr_len)));
1556 i = adapter->tx_ring.next_to_use;
1557 context_desc = E1000_CONTEXT_DESC(adapter->tx_ring, i);
1559 context_desc->lower_setup.ip_fields.ipcss = ipcss;
1560 context_desc->lower_setup.ip_fields.ipcso = ipcso;
1561 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
1562 context_desc->upper_setup.tcp_fields.tucss = tucss;
1563 context_desc->upper_setup.tcp_fields.tucso = tucso;
1564 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
1565 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
1566 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
1567 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
1569 if(++i == adapter->tx_ring.count) i = 0;
1570 adapter->tx_ring.next_to_use = i;
1579 static inline boolean_t
1580 e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
1582 struct e1000_context_desc *context_desc;
1586 if(likely(skb->ip_summed == CHECKSUM_HW)) {
1587 css = skb->h.raw - skb->data;
1589 i = adapter->tx_ring.next_to_use;
1590 context_desc = E1000_CONTEXT_DESC(adapter->tx_ring, i);
1592 context_desc->upper_setup.tcp_fields.tucss = css;
1593 context_desc->upper_setup.tcp_fields.tucso = css + skb->csum;
1594 context_desc->upper_setup.tcp_fields.tucse = 0;
1595 context_desc->tcp_seg_setup.data = 0;
1596 context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
1598 if(unlikely(++i == adapter->tx_ring.count)) i = 0;
1599 adapter->tx_ring.next_to_use = i;
1607 #define E1000_MAX_TXD_PWR 12
1608 #define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR)
1611 e1000_tx_map(struct e1000_adapter *adapter, struct sk_buff *skb,
1612 unsigned int first, unsigned int max_per_txd,
1613 unsigned int nr_frags, unsigned int mss)
1615 struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
1616 struct e1000_buffer *buffer_info;
1617 unsigned int len = skb->len;
1618 unsigned int offset = 0, size, count = 0, i;
1620 len -= skb->data_len;
1622 i = tx_ring->next_to_use;
1625 buffer_info = &tx_ring->buffer_info[i];
1626 size = min(len, max_per_txd);
1628 /* Workaround for premature desc write-backs
1629 * in TSO mode. Append 4-byte sentinel desc */
1630 if(unlikely(mss && !nr_frags && size == len && size > 8))
1633 /* Workaround for potential 82544 hang in PCI-X. Avoid
1634 * terminating buffers within evenly-aligned dwords. */
1635 if(unlikely(adapter->pcix_82544 &&
1636 !((unsigned long)(skb->data + offset + size - 1) & 4) &&
1640 buffer_info->length = size;
1642 pci_map_single(adapter->pdev,
1646 buffer_info->time_stamp = jiffies;
1651 if(unlikely(++i == tx_ring->count)) i = 0;
1654 for(f = 0; f < nr_frags; f++) {
1655 struct skb_frag_struct *frag;
1657 frag = &skb_shinfo(skb)->frags[f];
1659 offset = frag->page_offset;
1662 buffer_info = &tx_ring->buffer_info[i];
1663 size = min(len, max_per_txd);
1665 /* Workaround for premature desc write-backs
1666 * in TSO mode. Append 4-byte sentinel desc */
1667 if(unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
1670 /* Workaround for potential 82544 hang in PCI-X.
1671 * Avoid terminating buffers within evenly-aligned
1673 if(unlikely(adapter->pcix_82544 &&
1674 !((unsigned long)(frag->page+offset+size-1) & 4) &&
1678 buffer_info->length = size;
1680 pci_map_page(adapter->pdev,
1685 buffer_info->time_stamp = jiffies;
1690 if(unlikely(++i == tx_ring->count)) i = 0;
1694 i = (i == 0) ? tx_ring->count - 1 : i - 1;
1695 tx_ring->buffer_info[i].skb = skb;
1696 tx_ring->buffer_info[first].next_to_watch = i;
1702 e1000_tx_queue(struct e1000_adapter *adapter, int count, int tx_flags)
1704 struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
1705 struct e1000_tx_desc *tx_desc = NULL;
1706 struct e1000_buffer *buffer_info;
1707 uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
1710 if(likely(tx_flags & E1000_TX_FLAGS_TSO)) {
1711 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
1713 txd_upper |= (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8;
1716 if(likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
1717 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
1718 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
1721 if(unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
1722 txd_lower |= E1000_TXD_CMD_VLE;
1723 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
1726 i = tx_ring->next_to_use;
1729 buffer_info = &tx_ring->buffer_info[i];
1730 tx_desc = E1000_TX_DESC(*tx_ring, i);
1731 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
1732 tx_desc->lower.data =
1733 cpu_to_le32(txd_lower | buffer_info->length);
1734 tx_desc->upper.data = cpu_to_le32(txd_upper);
1735 if(unlikely(++i == tx_ring->count)) i = 0;
1738 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
1740 /* Force memory writes to complete before letting h/w
1741 * know there are new descriptors to fetch. (Only
1742 * applicable for weak-ordered memory model archs,
1743 * such as IA-64). */
1746 tx_ring->next_to_use = i;
1747 E1000_WRITE_REG(&adapter->hw, TDT, i);
1751 * 82547 workaround to avoid controller hang in half-duplex environment.
1752 * The workaround is to avoid queuing a large packet that would span
1753 * the internal Tx FIFO ring boundary by notifying the stack to resend
1754 * the packet at a later time. This gives the Tx FIFO an opportunity to
1755 * flush all packets. When that occurs, we reset the Tx FIFO pointers
1756 * to the beginning of the Tx FIFO.
1759 #define E1000_FIFO_HDR 0x10
1760 #define E1000_82547_PAD_LEN 0x3E0
1763 e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb)
1765 uint32_t fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
1766 uint32_t skb_fifo_len = skb->len + E1000_FIFO_HDR;
1768 E1000_ROUNDUP(skb_fifo_len, E1000_FIFO_HDR);
1770 if(adapter->link_duplex != HALF_DUPLEX)
1771 goto no_fifo_stall_required;
1773 if(atomic_read(&adapter->tx_fifo_stall))
1776 if(skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
1777 atomic_set(&adapter->tx_fifo_stall, 1);
1781 no_fifo_stall_required:
1782 adapter->tx_fifo_head += skb_fifo_len;
1783 if(adapter->tx_fifo_head >= adapter->tx_fifo_size)
1784 adapter->tx_fifo_head -= adapter->tx_fifo_size;
1788 #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
1790 e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1792 struct e1000_adapter *adapter = netdev->priv;
1793 unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
1794 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
1795 unsigned int tx_flags = 0;
1796 unsigned int len = skb->len;
1797 unsigned long flags;
1798 unsigned int nr_frags = 0;
1799 unsigned int mss = 0;
1802 len -= skb->data_len;
1804 if(unlikely(skb->len <= 0)) {
1805 dev_kfree_skb_any(skb);
1806 return NETDEV_TX_OK;
1810 mss = skb_shinfo(skb)->tso_size;
1811 /* The controller does a simple calculation to
1812 * make sure there is enough room in the FIFO before
1813 * initiating the DMA for each buffer. The calc is:
1814 * 4 = ceil(buffer len/mss). To make sure we don't
1815 * overrun the FIFO, adjust the max buffer len if mss
1818 max_per_txd = min(mss << 2, max_per_txd);
1819 max_txd_pwr = fls(max_per_txd) - 1;
1822 if((mss) || (skb->ip_summed == CHECKSUM_HW))
1824 count++; /* for sentinel desc */
1826 if(skb->ip_summed == CHECKSUM_HW)
1829 count += TXD_USE_COUNT(len, max_txd_pwr);
1831 if(adapter->pcix_82544)
1834 nr_frags = skb_shinfo(skb)->nr_frags;
1835 for(f = 0; f < nr_frags; f++)
1836 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
1838 if(adapter->pcix_82544)
1841 local_irq_save(flags);
1842 if (!spin_trylock(&adapter->tx_lock)) {
1843 /* Collision - tell upper layer to requeue */
1844 local_irq_restore(flags);
1845 return NETDEV_TX_LOCKED;
1848 /* need: count + 2 desc gap to keep tail from touching
1849 * head, otherwise try next time */
1850 if(unlikely(E1000_DESC_UNUSED(&adapter->tx_ring) < count + 2)) {
1851 netif_stop_queue(netdev);
1852 spin_unlock_irqrestore(&adapter->tx_lock, flags);
1853 return NETDEV_TX_BUSY;
1856 if(unlikely(adapter->hw.mac_type == e1000_82547)) {
1857 if(unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
1858 netif_stop_queue(netdev);
1859 mod_timer(&adapter->tx_fifo_stall_timer, jiffies);
1860 spin_unlock_irqrestore(&adapter->tx_lock, flags);
1861 return NETDEV_TX_BUSY;
1865 if(unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) {
1866 tx_flags |= E1000_TX_FLAGS_VLAN;
1867 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
1870 first = adapter->tx_ring.next_to_use;
1872 if(likely(e1000_tso(adapter, skb)))
1873 tx_flags |= E1000_TX_FLAGS_TSO;
1874 else if(likely(e1000_tx_csum(adapter, skb)))
1875 tx_flags |= E1000_TX_FLAGS_CSUM;
1877 e1000_tx_queue(adapter,
1878 e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss),
1881 netdev->trans_start = jiffies;
1883 /* Make sure there is space in the ring for the next send. */
1884 if(unlikely(E1000_DESC_UNUSED(&adapter->tx_ring) < MAX_SKB_FRAGS + 2))
1885 netif_stop_queue(netdev);
1887 spin_unlock_irqrestore(&adapter->tx_lock, flags);
1888 return NETDEV_TX_OK;
1892 * e1000_tx_timeout - Respond to a Tx Hang
1893 * @netdev: network interface device structure
1897 e1000_tx_timeout(struct net_device *netdev)
1899 struct e1000_adapter *adapter = netdev->priv;
1901 /* Do the reset outside of interrupt context */
1902 schedule_work(&adapter->tx_timeout_task);
1906 e1000_tx_timeout_task(struct net_device *netdev)
1908 struct e1000_adapter *adapter = netdev->priv;
1910 e1000_down(adapter);
1915 * e1000_get_stats - Get System Network Statistics
1916 * @netdev: network interface device structure
1918 * Returns the address of the device statistics structure.
1919 * The statistics are actually updated from the timer callback.
1922 static struct net_device_stats *
1923 e1000_get_stats(struct net_device *netdev)
1925 struct e1000_adapter *adapter = netdev->priv;
1927 e1000_update_stats(adapter);
1928 return &adapter->net_stats;
1932 * e1000_change_mtu - Change the Maximum Transfer Unit
1933 * @netdev: network interface device structure
1934 * @new_mtu: new value for maximum frame size
1936 * Returns 0 on success, negative on failure
1940 e1000_change_mtu(struct net_device *netdev, int new_mtu)
1942 struct e1000_adapter *adapter = netdev->priv;
1943 int old_mtu = adapter->rx_buffer_len;
1944 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
1946 if((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
1947 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
1948 DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
1952 if(max_frame <= MAXIMUM_ETHERNET_FRAME_SIZE) {
1953 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
1955 } else if(adapter->hw.mac_type < e1000_82543) {
1956 DPRINTK(PROBE, ERR, "Jumbo Frames not supported on 82542\n");
1959 } else if(max_frame <= E1000_RXBUFFER_4096) {
1960 adapter->rx_buffer_len = E1000_RXBUFFER_4096;
1962 } else if(max_frame <= E1000_RXBUFFER_8192) {
1963 adapter->rx_buffer_len = E1000_RXBUFFER_8192;
1966 adapter->rx_buffer_len = E1000_RXBUFFER_16384;
1969 if(old_mtu != adapter->rx_buffer_len && netif_running(netdev)) {
1970 e1000_down(adapter);
1974 netdev->mtu = new_mtu;
1975 adapter->hw.max_frame_size = max_frame;
1981 * e1000_update_stats - Update the board statistics counters
1982 * @adapter: board private structure
1986 e1000_update_stats(struct e1000_adapter *adapter)
1988 struct e1000_hw *hw = &adapter->hw;
1989 unsigned long flags;
1992 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
1994 spin_lock_irqsave(&adapter->stats_lock, flags);
1996 /* these counters are modified from e1000_adjust_tbi_stats,
1997 * called from the interrupt context, so they must only
1998 * be written while holding adapter->stats_lock
2001 adapter->stats.crcerrs += E1000_READ_REG(hw, CRCERRS);
2002 adapter->stats.gprc += E1000_READ_REG(hw, GPRC);
2003 adapter->stats.gorcl += E1000_READ_REG(hw, GORCL);
2004 adapter->stats.gorch += E1000_READ_REG(hw, GORCH);
2005 adapter->stats.bprc += E1000_READ_REG(hw, BPRC);
2006 adapter->stats.mprc += E1000_READ_REG(hw, MPRC);
2007 adapter->stats.roc += E1000_READ_REG(hw, ROC);
2008 adapter->stats.prc64 += E1000_READ_REG(hw, PRC64);
2009 adapter->stats.prc127 += E1000_READ_REG(hw, PRC127);
2010 adapter->stats.prc255 += E1000_READ_REG(hw, PRC255);
2011 adapter->stats.prc511 += E1000_READ_REG(hw, PRC511);
2012 adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023);
2013 adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522);
2015 adapter->stats.symerrs += E1000_READ_REG(hw, SYMERRS);
2016 adapter->stats.mpc += E1000_READ_REG(hw, MPC);
2017 adapter->stats.scc += E1000_READ_REG(hw, SCC);
2018 adapter->stats.ecol += E1000_READ_REG(hw, ECOL);
2019 adapter->stats.mcc += E1000_READ_REG(hw, MCC);
2020 adapter->stats.latecol += E1000_READ_REG(hw, LATECOL);
2021 adapter->stats.dc += E1000_READ_REG(hw, DC);
2022 adapter->stats.sec += E1000_READ_REG(hw, SEC);
2023 adapter->stats.rlec += E1000_READ_REG(hw, RLEC);
2024 adapter->stats.xonrxc += E1000_READ_REG(hw, XONRXC);
2025 adapter->stats.xontxc += E1000_READ_REG(hw, XONTXC);
2026 adapter->stats.xoffrxc += E1000_READ_REG(hw, XOFFRXC);
2027 adapter->stats.xofftxc += E1000_READ_REG(hw, XOFFTXC);
2028 adapter->stats.fcruc += E1000_READ_REG(hw, FCRUC);
2029 adapter->stats.gptc += E1000_READ_REG(hw, GPTC);
2030 adapter->stats.gotcl += E1000_READ_REG(hw, GOTCL);
2031 adapter->stats.gotch += E1000_READ_REG(hw, GOTCH);
2032 adapter->stats.rnbc += E1000_READ_REG(hw, RNBC);
2033 adapter->stats.ruc += E1000_READ_REG(hw, RUC);
2034 adapter->stats.rfc += E1000_READ_REG(hw, RFC);
2035 adapter->stats.rjc += E1000_READ_REG(hw, RJC);
2036 adapter->stats.torl += E1000_READ_REG(hw, TORL);
2037 adapter->stats.torh += E1000_READ_REG(hw, TORH);
2038 adapter->stats.totl += E1000_READ_REG(hw, TOTL);
2039 adapter->stats.toth += E1000_READ_REG(hw, TOTH);
2040 adapter->stats.tpr += E1000_READ_REG(hw, TPR);
2041 adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64);
2042 adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127);
2043 adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255);
2044 adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511);
2045 adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023);
2046 adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522);
2047 adapter->stats.mptc += E1000_READ_REG(hw, MPTC);
2048 adapter->stats.bptc += E1000_READ_REG(hw, BPTC);
2050 /* used for adaptive IFS */
2052 hw->tx_packet_delta = E1000_READ_REG(hw, TPT);
2053 adapter->stats.tpt += hw->tx_packet_delta;
2054 hw->collision_delta = E1000_READ_REG(hw, COLC);
2055 adapter->stats.colc += hw->collision_delta;
2057 if(hw->mac_type >= e1000_82543) {
2058 adapter->stats.algnerrc += E1000_READ_REG(hw, ALGNERRC);
2059 adapter->stats.rxerrc += E1000_READ_REG(hw, RXERRC);
2060 adapter->stats.tncrs += E1000_READ_REG(hw, TNCRS);
2061 adapter->stats.cexterr += E1000_READ_REG(hw, CEXTERR);
2062 adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC);
2063 adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC);
2066 /* Fill out the OS statistics structure */
2068 adapter->net_stats.rx_packets = adapter->stats.gprc;
2069 adapter->net_stats.tx_packets = adapter->stats.gptc;
2070 adapter->net_stats.rx_bytes = adapter->stats.gorcl;
2071 adapter->net_stats.tx_bytes = adapter->stats.gotcl;
2072 adapter->net_stats.multicast = adapter->stats.mprc;
2073 adapter->net_stats.collisions = adapter->stats.colc;
2077 adapter->net_stats.rx_errors = adapter->stats.rxerrc +
2078 adapter->stats.crcerrs + adapter->stats.algnerrc +
2079 adapter->stats.rlec + adapter->stats.rnbc +
2080 adapter->stats.mpc + adapter->stats.cexterr;
2081 adapter->net_stats.rx_dropped = adapter->stats.rnbc;
2082 adapter->net_stats.rx_length_errors = adapter->stats.rlec;
2083 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
2084 adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
2085 adapter->net_stats.rx_fifo_errors = adapter->stats.mpc;
2086 adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
2090 adapter->net_stats.tx_errors = adapter->stats.ecol +
2091 adapter->stats.latecol;
2092 adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
2093 adapter->net_stats.tx_window_errors = adapter->stats.latecol;
2094 adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;
2096 /* Tx Dropped needs to be maintained elsewhere */
2100 if(hw->media_type == e1000_media_type_copper) {
2101 if((adapter->link_speed == SPEED_1000) &&
2102 (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
2103 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
2104 adapter->phy_stats.idle_errors += phy_tmp;
2107 if((hw->mac_type <= e1000_82546) &&
2108 (hw->phy_type == e1000_phy_m88) &&
2109 !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
2110 adapter->phy_stats.receive_errors += phy_tmp;
2113 spin_unlock_irqrestore(&adapter->stats_lock, flags);
2117 * e1000_intr - Interrupt Handler
2118 * @irq: interrupt number
2119 * @data: pointer to a network interface device structure
2120 * @pt_regs: CPU registers structure
2124 e1000_intr(int irq, void *data, struct pt_regs *regs)
2126 struct net_device *netdev = data;
2127 struct e1000_adapter *adapter = netdev->priv;
2128 struct e1000_hw *hw = &adapter->hw;
2129 uint32_t icr = E1000_READ_REG(hw, ICR);
2130 #ifndef CONFIG_E1000_NAPI
2135 return IRQ_NONE; /* Not our interrupt */
2137 if(unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
2138 hw->get_link_status = 1;
2139 mod_timer(&adapter->watchdog_timer, jiffies);
2142 #ifdef CONFIG_E1000_NAPI
2143 if(likely(netif_rx_schedule_prep(netdev))) {
2145 /* Disable interrupts and register for poll. The flush
2146 of the posted write is intentionally left out.
2149 atomic_inc(&adapter->irq_sem);
2150 E1000_WRITE_REG(hw, IMC, ~0);
2151 __netif_rx_schedule(netdev);
2154 for(i = 0; i < E1000_MAX_INTR; i++)
2155 if(unlikely(!e1000_clean_rx_irq(adapter) &
2156 !e1000_clean_tx_irq(adapter)))
#ifdef CONFIG_E1000_NAPI
/**
 * e1000_clean - NAPI Rx polling callback
 * @adapter: board private structure
 *
 * Reclaims Tx and processes up to the budgeted amount of Rx work;
 * returns nonzero while there is still work to do, 0 after re-enabling
 * interrupts and completing the poll.
 **/

static int
e1000_clean(struct net_device *netdev, int *budget)
{
	struct e1000_adapter *adapter = netdev->priv;
	int work_to_do = min(*budget, netdev->quota);
	int tx_cleaned;
	int work_done = 0;

	if (!netif_carrier_ok(netdev))
		goto quit_polling;

	tx_cleaned = e1000_clean_tx_irq(adapter);
	e1000_clean_rx_irq(adapter, &work_done, work_to_do);

	*budget -= work_done;
	netdev->quota -= work_done;

	/* if no Rx and Tx cleanup work was done, exit the polling mode */
	if(!tx_cleaned || (work_done < work_to_do) ||
	   !netif_running(netdev)) {
quit_polling:
		netif_rx_complete(netdev);
		e1000_irq_enable(adapter);
		return 0;
	}

	return (work_done >= work_to_do);
}
#endif
2199 * e1000_clean_tx_irq - Reclaim resources after transmit completes
2200 * @adapter: board private structure
2204 e1000_clean_tx_irq(struct e1000_adapter *adapter)
2206 struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
2207 struct net_device *netdev = adapter->netdev;
2208 struct e1000_tx_desc *tx_desc, *eop_desc;
2209 struct e1000_buffer *buffer_info;
2210 unsigned int i, eop;
2211 boolean_t cleaned = FALSE;
2213 i = tx_ring->next_to_clean;
2214 eop = tx_ring->buffer_info[i].next_to_watch;
2215 eop_desc = E1000_TX_DESC(*tx_ring, eop);
2217 while(eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
2218 for(cleaned = FALSE; !cleaned; ) {
2219 tx_desc = E1000_TX_DESC(*tx_ring, i);
2220 buffer_info = &tx_ring->buffer_info[i];
2222 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2223 tx_desc->buffer_addr = 0;
2224 tx_desc->lower.data = 0;
2225 tx_desc->upper.data = 0;
2227 cleaned = (i == eop);
2228 if(unlikely(++i == tx_ring->count)) i = 0;
2231 eop = tx_ring->buffer_info[i].next_to_watch;
2232 eop_desc = E1000_TX_DESC(*tx_ring, eop);
2235 tx_ring->next_to_clean = i;
2237 spin_lock(&adapter->tx_lock);
2239 if(unlikely(cleaned && netif_queue_stopped(netdev) &&
2240 netif_carrier_ok(netdev)))
2241 netif_wake_queue(netdev);
2243 spin_unlock(&adapter->tx_lock);
2249 * e1000_rx_checksum - Receive Checksum Offload for 82543
2250 * @adapter: board private structure
2251 * @rx_desc: receive descriptor
2252 * @sk_buff: socket buffer with received data
2256 e1000_rx_checksum(struct e1000_adapter *adapter,
2257 struct e1000_rx_desc *rx_desc,
2258 struct sk_buff *skb)
2260 /* 82543 or newer only */
2261 if(unlikely((adapter->hw.mac_type < e1000_82543) ||
2262 /* Ignore Checksum bit is set */
2263 (rx_desc->status & E1000_RXD_STAT_IXSM) ||
2264 /* TCP Checksum has not been calculated */
2265 (!(rx_desc->status & E1000_RXD_STAT_TCPCS)))) {
2266 skb->ip_summed = CHECKSUM_NONE;
2270 /* At this point we know the hardware did the TCP checksum */
2271 /* now look at the TCP checksum error bit */
2272 if(rx_desc->errors & E1000_RXD_ERR_TCPE) {
2273 /* let the stack verify checksum errors */
2274 skb->ip_summed = CHECKSUM_NONE;
2275 adapter->hw_csum_err++;
2277 /* TCP checksum is good */
2278 skb->ip_summed = CHECKSUM_UNNECESSARY;
2279 adapter->hw_csum_good++;
2284 * e1000_clean_rx_irq - Send received data up the network stack
2285 * @adapter: board private structure
/* NOTE(review): numbered listing with elided interior lines; code kept
 * verbatim.  Walks the RX descriptor ring from next_to_clean, unmaps and
 * hands each completed buffer to the stack, then refills the ring. */
2289 #ifdef CONFIG_E1000_NAPI
/* NAPI build: also honors a work budget via *work_done / work_to_do. */
2290 e1000_clean_rx_irq(struct e1000_adapter *adapter, int *work_done,
2293 e1000_clean_rx_irq(struct e1000_adapter *adapter)
2296 struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
2297 struct net_device *netdev = adapter->netdev;
2298 struct pci_dev *pdev = adapter->pdev;
2299 struct e1000_rx_desc *rx_desc;
2300 struct e1000_buffer *buffer_info;
2301 struct sk_buff *skb;
2302 unsigned long flags;
2306 boolean_t cleaned = FALSE;
2308 i = rx_ring->next_to_clean;
2309 rx_desc = E1000_RX_DESC(*rx_ring, i);
/* DD (descriptor done) set by hardware => this descriptor holds a frame. */
2311 while(rx_desc->status & E1000_RXD_STAT_DD) {
2312 buffer_info = &rx_ring->buffer_info[i];
2313 #ifdef CONFIG_E1000_NAPI
/* Stop once the NAPI budget is exhausted; remaining work is picked up
 * on the next poll. */
2314 if(*work_done >= work_to_do)
2320 pci_unmap_single(pdev,
2322 buffer_info->length,
2323 PCI_DMA_FROMDEVICE);
2325 skb = buffer_info->skb;
2326 length = le16_to_cpu(rx_desc->length);
/* No EOP bit: frame spilled across descriptors, which this path does not
 * support — drop it. */
2328 if(unlikely(!(rx_desc->status & E1000_RXD_STAT_EOP))) {
2329 /* All receives must fit into a single buffer */
2330 E1000_DBG("%s: Receive packet consumed multiple"
2331 " buffers\n", netdev->name);
2332 dev_kfree_skb_irq(skb);
/* Hardware flagged a frame error; TBI_ACCEPT decides whether the frame
 * is still usable (TBI-mode workaround) — stats are adjusted under the
 * stats_lock, otherwise the skb is dropped. */
2336 if(unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
2337 last_byte = *(skb->data + length - 1);
2338 if(TBI_ACCEPT(&adapter->hw, rx_desc->status,
2339 rx_desc->errors, length, last_byte)) {
2340 spin_lock_irqsave(&adapter->stats_lock, flags);
2341 e1000_tbi_adjust_stats(&adapter->hw,
2344 spin_unlock_irqrestore(&adapter->stats_lock,
2348 dev_kfree_skb_irq(skb);
/* Strip the trailing Ethernet FCS; hardware has already checked it. */
2354 skb_put(skb, length - ETHERNET_FCS_SIZE);
2356 /* Receive Checksum Offload */
2357 e1000_rx_checksum(adapter, rx_desc, skb);
2359 skb->protocol = eth_type_trans(skb, netdev);
2360 #ifdef CONFIG_E1000_NAPI
/* VLAN-tagged frames (VP bit) go through the hwaccel path with the tag
 * extracted from the descriptor's "special" field. */
2361 if(unlikely(adapter->vlgrp &&
2362 (rx_desc->status & E1000_RXD_STAT_VP))) {
2363 vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
2364 le16_to_cpu(rx_desc->special) &
2365 E1000_RXD_SPC_VLAN_MASK);
2367 netif_receive_skb(skb);
2369 #else /* CONFIG_E1000_NAPI */
2370 if(unlikely(adapter->vlgrp &&
2371 (rx_desc->status & E1000_RXD_STAT_VP))) {
2372 vlan_hwaccel_rx(skb, adapter->vlgrp,
2373 le16_to_cpu(rx_desc->special) &
2374 E1000_RXD_SPC_VLAN_MASK);
2378 #endif /* CONFIG_E1000_NAPI */
2379 netdev->last_rx = jiffies;
/* Return the descriptor to software ownership and advance (ring wraps
 * at rx_ring->count). */
2382 rx_desc->status = 0;
2383 buffer_info->skb = NULL;
2384 if(unlikely(++i == rx_ring->count)) i = 0;
2386 rx_desc = E1000_RX_DESC(*rx_ring, i);
2389 rx_ring->next_to_clean = i;
/* Replenish the buffers consumed above. */
2391 e1000_alloc_rx_buffers(adapter);
2397 * e1000_alloc_rx_buffers - Replace used receive buffers
2398 * @adapter: address of board private structure
/* NOTE(review): elided listing; code kept verbatim.  Allocates an skb and
 * a DMA mapping for every empty slot from next_to_use onward, then tells
 * the hardware about new descriptors by bumping RDT. */
2402 e1000_alloc_rx_buffers(struct e1000_adapter *adapter)
2404 struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
2405 struct net_device *netdev = adapter->netdev;
2406 struct pci_dev *pdev = adapter->pdev;
2407 struct e1000_rx_desc *rx_desc;
2408 struct e1000_buffer *buffer_info;
2409 struct sk_buff *skb;
2412 i = rx_ring->next_to_use;
2413 buffer_info = &rx_ring->buffer_info[i];
/* Loop until we hit a slot that still owns an skb (i.e. the ring is full). */
2415 while(!buffer_info->skb) {
2416 skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN);
/* Allocation failure is non-fatal: leave the slot empty and retry on a
 * later refill pass. */
2418 if(unlikely(!skb)) {
2419 /* Better luck next round */
2423 /* Make buffer alignment 2 beyond a 16 byte boundary
2424 * this will result in a 16 byte aligned IP header after
2425 * the 14 byte MAC header is removed
2427 skb_reserve(skb, NET_IP_ALIGN);
2431 buffer_info->skb = skb;
2432 buffer_info->length = adapter->rx_buffer_len;
2433 buffer_info->dma = pci_map_single(pdev,
2435 adapter->rx_buffer_len,
2436 PCI_DMA_FROMDEVICE);
2438 rx_desc = E1000_RX_DESC(*rx_ring, i);
2439 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
/* Only write RDT every E1000_RX_BUFFER_WRITE descriptors to batch MMIO. */
2441 if(unlikely((i & ~(E1000_RX_BUFFER_WRITE - 1)) == i)) {
2442 /* Force memory writes to complete before letting h/w
2443 * know there are new descriptors to fetch. (Only
2444 * applicable for weak-ordered memory model archs,
2445 * such as IA-64). */
2448 E1000_WRITE_REG(&adapter->hw, RDT, i);
2451 if(unlikely(++i == rx_ring->count)) i = 0;
2452 buffer_info = &rx_ring->buffer_info[i];
2455 rx_ring->next_to_use = i;
2459 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
/* NOTE(review): elided listing; code kept verbatim.  Only relevant for IGP
 * PHYs autonegotiating 1000FULL: if Master/Slave config faults persist,
 * toggle manual MS config and restart autoneg, giving up (and restoring
 * CR_1000T_MS_ENABLE) after E1000_SMARTSPEED_MAX iterations. */
2464 e1000_smartspeed(struct e1000_adapter *adapter)
2466 uint16_t phy_status;
/* Bail out unless this is an IGP PHY autonegotiating with 1000FULL
 * advertised — the workaround applies only to that configuration. */
2469 if((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg ||
2470 !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
2473 if(adapter->smartspeed == 0) {
2474 /* If Master/Slave config fault is asserted twice,
2475 * we assume back-to-back */
2476 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
2477 if(!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
2478 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
2479 if(!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
2480 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
/* Fault confirmed: drop manual Master/Slave config and restart autoneg. */
2481 if(phy_ctrl & CR_1000T_MS_ENABLE) {
2482 phy_ctrl &= ~CR_1000T_MS_ENABLE;
2483 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL,
2485 adapter->smartspeed++;
2486 if(!e1000_phy_setup_autoneg(&adapter->hw) &&
2487 !e1000_read_phy_reg(&adapter->hw, PHY_CTRL,
2489 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
2490 MII_CR_RESTART_AUTO_NEG);
2491 e1000_write_phy_reg(&adapter->hw, PHY_CTRL,
2496 } else if(adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
2497 /* If still no link, perhaps using 2/3 pair cable */
/* Downshift threshold reached: re-enable manual MS config and restart
 * autoneg once more. */
2498 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
2499 phy_ctrl |= CR_1000T_MS_ENABLE;
2500 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_ctrl);
2501 if(!e1000_phy_setup_autoneg(&adapter->hw) &&
2502 !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_ctrl)) {
2503 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
2504 MII_CR_RESTART_AUTO_NEG);
2505 e1000_write_phy_reg(&adapter->hw, PHY_CTRL, phy_ctrl);
2508 /* Restart process after E1000_SMARTSPEED_MAX iterations */
2509 if(adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
2510 adapter->smartspeed = 0;
/* Top-level ioctl entry point; the visible path forwards MII requests to
 * e1000_mii_ioctl.  NOTE(review): the dispatch (presumably a switch on
 * cmd) is elided from this listing — confirm against the full source. */
2521 e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2527 return e1000_mii_ioctl(netdev, ifr, cmd);
/* MII ioctl handler (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG).
 * NOTE(review): elided listing; code kept verbatim.  Only copper media is
 * supported; reads/writes go through e1000_read/write_phy_reg, and writes
 * to select M88 PHY registers trigger a link reconfiguration. */
2541 e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2543 struct e1000_adapter *adapter = netdev->priv;
2544 struct mii_ioctl_data *data = if_mii(ifr);
/* MII operations make sense only on copper PHYs. */
2549 if(adapter->hw.media_type != e1000_media_type_copper)
2554 data->phy_id = adapter->hw.phy_addr;
/* Register read: privileged; reg_num is masked to the 5-bit MII range. */
2557 if (!capable(CAP_NET_ADMIN))
2559 if (e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
/* Register write: privileged; out-of-range register numbers rejected. */
2564 if (!capable(CAP_NET_ADMIN))
2566 if (data->reg_num & ~(0x1F))
2568 mii_reg = data->val_in;
2569 if (e1000_write_phy_reg(&adapter->hw, data->reg_num,
/* For M88 PHYs, writes to control registers must be mirrored into the
 * driver's autoneg/speed state and followed by a reset/down-up cycle. */
2572 if (adapter->hw.phy_type == e1000_phy_m88) {
2573 switch (data->reg_num) {
2575 if(mii_reg & MII_CR_POWER_DOWN)
2577 if(mii_reg & MII_CR_AUTO_NEG_EN) {
2578 adapter->hw.autoneg = 1;
2579 adapter->hw.autoneg_advertised = 0x2F;
/* Forced mode: decode speed from the MII control bits (0x2000 = 100Mb)
 * and duplex from bit 0x100, then apply via e1000_set_spd_dplx. */
2582 spddplx = SPEED_1000;
2583 else if (mii_reg & 0x2000)
2584 spddplx = SPEED_100;
2587 spddplx += (mii_reg & 0x100)
2590 retval = e1000_set_spd_dplx(adapter,
2595 if(netif_running(adapter->netdev)) {
2596 e1000_down(adapter);
2599 e1000_reset(adapter);
2601 case M88E1000_PHY_SPEC_CTRL:
2602 case M88E1000_EXT_PHY_SPEC_CTRL:
2603 if (e1000_phy_reset(&adapter->hw))
/* Non-M88 PHYs: same power-down / restart handling, minus the
 * M88-specific register cases. */
2608 switch (data->reg_num) {
2610 if(mii_reg & MII_CR_POWER_DOWN)
2612 if(netif_running(adapter->netdev)) {
2613 e1000_down(adapter);
2616 e1000_reset(adapter);
2624 return E1000_SUCCESS;
/* HW-layer callback: enable PCI Memory-Write-Invalidate for this device. */
2628 e1000_pci_set_mwi(struct e1000_hw *hw)
2630 struct e1000_adapter *adapter = hw->back;
2633 ret = pci_set_mwi(adapter->pdev);
/* HW-layer callback: disable PCI Memory-Write-Invalidate. */
2637 e1000_pci_clear_mwi(struct e1000_hw *hw)
2639 struct e1000_adapter *adapter = hw->back;
2641 pci_clear_mwi(adapter->pdev);
/* HW-layer callback: read a 16-bit value from PCI config space at reg. */
2645 e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
2647 struct e1000_adapter *adapter = hw->back;
2649 pci_read_config_word(adapter->pdev, reg, value);
/* HW-layer callback: write a 16-bit value to PCI config space at reg.
 * Takes a pointer for symmetry with e1000_read_pci_cfg. */
2653 e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
2655 struct e1000_adapter *adapter = hw->back;
2657 pci_write_config_word(adapter->pdev, reg, *value);
/* HW-layer callback: I/O-port read.  NOTE(review): body elided from this
 * listing — presumably a thin inl()/inw() wrapper; confirm in full source. */
2661 e1000_io_read(struct e1000_hw *hw, unsigned long port)
/* HW-layer callback: I/O-port write.  NOTE(review): body elided from this
 * listing — presumably a thin outl() wrapper; confirm in full source. */
2667 e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value)
/* VLAN group registration callback.  Stores grp and programs CTRL.VME and
 * RCTL.VFE accordingly; interrupts are held off while reconfiguring.
 * NOTE(review): the branch selecting enable vs. disable (presumably
 * "if(grp)") is elided from this listing — confirm in full source. */
2673 e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
2675 struct e1000_adapter *adapter = netdev->priv;
2676 uint32_t ctrl, rctl;
2678 e1000_irq_disable(adapter);
2679 adapter->vlgrp = grp;
2682 /* enable VLAN tag insert/strip */
2683 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
2684 ctrl |= E1000_CTRL_VME;
2685 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
2687 /* enable VLAN receive filtering */
2688 rctl = E1000_READ_REG(&adapter->hw, RCTL);
2689 rctl |= E1000_RCTL_VFE;
/* CFIEN cleared so frames are not filtered on the CFI bit. */
2690 rctl &= ~E1000_RCTL_CFIEN;
2691 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
2693 /* disable VLAN tag insert/strip */
2694 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
2695 ctrl &= ~E1000_CTRL_VME;
2696 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
2698 /* disable VLAN filtering */
2699 rctl = E1000_READ_REG(&adapter->hw, RCTL);
2700 rctl &= ~E1000_RCTL_VFE;
2701 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
2704 e1000_irq_enable(adapter);
/* Add a VLAN id to the hardware VLAN filter table (VFTA).
 * The table is 128 32-bit entries: bits 5..11 of the vid select the
 * entry, bits 0..4 select the bit within it. */
2708 e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
2710 struct e1000_adapter *adapter = netdev->priv;
2711 uint32_t vfta, index;
2713 /* add VID to filter table */
2714 index = (vid >> 5) & 0x7F;
2715 vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
2716 vfta |= (1 << (vid & 0x1F));
2717 e1000_write_vfta(&adapter->hw, index, vfta);
/* Remove a VLAN id: detach the vlan device pointer (under irq-off to
 * avoid racing the RX path, which reads vlgrp) and clear the matching
 * VFTA bit using the same index/bit split as e1000_vlan_rx_add_vid. */
2721 e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
2723 struct e1000_adapter *adapter = netdev->priv;
2724 uint32_t vfta, index;
2726 e1000_irq_disable(adapter);
2729 adapter->vlgrp->vlan_devices[vid] = NULL;
2731 e1000_irq_enable(adapter);
2733 /* remove VID from filter table */
2734 index = (vid >> 5) & 0x7F;
2735 vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
2736 vfta &= ~(1 << (vid & 0x1F));
2737 e1000_write_vfta(&adapter->hw, index, vfta);
/* Re-program VLAN state after a reset: re-register the current group,
 * then re-add every vid that still has a vlan device attached. */
2741 e1000_restore_vlan(struct e1000_adapter *adapter)
2743 e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);
2745 if(adapter->vlgrp) {
2747 for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
/* Skip vids with no vlan device configured. */
2748 if(!adapter->vlgrp->vlan_devices[vid])
2750 e1000_vlan_rx_add_vid(adapter->netdev, vid);
/* Translate a SPEED_x + DUPLEX_y sum into the hw forced_speed_duplex /
 * autoneg settings.  1000FULL is only achievable via autonegotiation
 * (advertising 1000FULL only); 1000HALF is rejected as unsupported. */
2756 e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx)
2758 adapter->hw.autoneg = 0;
2761 case SPEED_10 + DUPLEX_HALF:
2762 adapter->hw.forced_speed_duplex = e1000_10_half;
2764 case SPEED_10 + DUPLEX_FULL:
2765 adapter->hw.forced_speed_duplex = e1000_10_full;
2767 case SPEED_100 + DUPLEX_HALF:
2768 adapter->hw.forced_speed_duplex = e1000_100_half;
2770 case SPEED_100 + DUPLEX_FULL:
2771 adapter->hw.forced_speed_duplex = e1000_100_full;
2773 case SPEED_1000 + DUPLEX_FULL:
2774 adapter->hw.autoneg = 1;
2775 adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
2777 case SPEED_1000 + DUPLEX_HALF: /* not supported */
2780 "Unsupported Speed/Duplexity configuration\n");
/* Reboot notifier: on shutdown/reboot, walk all PCI devices and suspend
 * (to D3) every one bound to this driver so WoL state is armed. */
2787 e1000_notify_reboot(struct notifier_block *nb, unsigned long event, void *p)
2789 struct pci_dev *pdev = NULL;
2795 while((pdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
2796 if(pci_dev_driver(pdev) == &e1000_driver)
2797 e1000_suspend(pdev, 3);
/* Power-management suspend: quiesce the interface, program Wake-on-LAN
 * filters (WUFC/WUC) per adapter->wol, arm PME, and drop the device into
 * a low-power state.  NOTE(review): elided listing; code kept verbatim. */
2804 e1000_suspend(struct pci_dev *pdev, uint32_t state)
2806 struct net_device *netdev = pci_get_drvdata(pdev);
2807 struct e1000_adapter *adapter = netdev->priv;
2808 uint32_t ctrl, ctrl_ext, rctl, manc, status;
2809 uint32_t wufc = adapter->wol;
2811 netif_device_detach(netdev);
2813 if(netif_running(netdev))
2814 e1000_down(adapter);
/* If link is already up, waking on "link change" is pointless — mask it. */
2816 status = E1000_READ_REG(&adapter->hw, STATUS);
2817 if(status & E1000_STATUS_LU)
2818 wufc &= ~E1000_WUFC_LNKC;
2821 e1000_setup_rctl(adapter);
2822 e1000_set_multi(netdev);
2824 /* turn on all-multi mode if wake on multicast is enabled */
2825 if(adapter->wol & E1000_WUFC_MC) {
2826 rctl = E1000_READ_REG(&adapter->hw, RCTL);
2827 rctl |= E1000_RCTL_MPE;
2828 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
/* 82540+ parts need extra CTRL bits for wake from D3cold / PHY PM. */
2831 if(adapter->hw.mac_type >= e1000_82540) {
2832 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
2833 /* advertise wake from D3Cold */
2834 #define E1000_CTRL_ADVD3WUC 0x00100000
2835 /* phy power management enable */
2836 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
2837 ctrl |= E1000_CTRL_ADVD3WUC |
2838 E1000_CTRL_EN_PHY_PWR_MGMT;
2839 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
2842 if(adapter->hw.media_type == e1000_media_type_fiber ||
2843 adapter->hw.media_type == e1000_media_type_internal_serdes) {
2844 /* keep the laser running in D3 */
2845 ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
2846 ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
2847 E1000_WRITE_REG(&adapter->hw, CTRL_EXT, ctrl_ext);
/* Arm wake-up (PME enable + filter control) and enable wake for D3hot
 * and D3cold; the elided else-branch disables all of it when wufc == 0. */
2850 E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN);
2851 E1000_WRITE_REG(&adapter->hw, WUFC, wufc);
2852 pci_enable_wake(pdev, 3, 1);
2853 pci_enable_wake(pdev, 4, 1); /* 4 == D3 cold */
2855 E1000_WRITE_REG(&adapter->hw, WUC, 0);
2856 E1000_WRITE_REG(&adapter->hw, WUFC, 0);
2857 pci_enable_wake(pdev, 3, 0);
2858 pci_enable_wake(pdev, 4, 0); /* 4 == D3 cold */
2861 pci_save_state(pdev);
/* Copper 82540+: if manageability (SMBus) is active, keep ARP offload
 * enabled so the BMC stays reachable, and force wake enabled. */
2863 if(adapter->hw.mac_type >= e1000_82540 &&
2864 adapter->hw.media_type == e1000_media_type_copper) {
2865 manc = E1000_READ_REG(&adapter->hw, MANC);
2866 if(manc & E1000_MANC_SMBUS_EN) {
2867 manc |= E1000_MANC_ARP_EN;
2868 E1000_WRITE_REG(&adapter->hw, MANC, manc);
2869 pci_enable_wake(pdev, 3, 1);
2870 pci_enable_wake(pdev, 4, 1); /* 4 == D3 cold */
2874 pci_disable_device(pdev);
/* Old-style numeric PM state: anything > 0 maps to D3. */
2876 state = (state > 0) ? 3 : 0;
2877 pci_set_power_state(pdev, state);
/* Power-management resume: restore D0 power/config state, clear wake
 * enables, reset the hardware, clear wake-up status, and bring the
 * interface back up if it was running. */
2884 e1000_resume(struct pci_dev *pdev)
2886 struct net_device *netdev = pci_get_drvdata(pdev);
2887 struct e1000_adapter *adapter = netdev->priv;
2890 pci_set_power_state(pdev, 0);
2891 pci_restore_state(pdev);
2892 ret = pci_enable_device(pdev);
2893 if (pdev->is_busmaster)
2894 pci_set_master(pdev);
2896 pci_enable_wake(pdev, 3, 0);
2897 pci_enable_wake(pdev, 4, 0); /* 4 == D3 cold */
2899 e1000_reset(adapter);
/* Write-1-to-clear any pending wake-up status bits. */
2900 E1000_WRITE_REG(&adapter->hw, WUS, ~0);
2902 if(netif_running(netdev))
2905 netif_device_attach(netdev);
/* Undo the suspend-time ARP-offload handover to manageability firmware. */
2907 if(adapter->hw.mac_type >= e1000_82540 &&
2908 adapter->hw.media_type == e1000_media_type_copper) {
2909 manc = E1000_READ_REG(&adapter->hw, MANC);
2910 manc &= ~(E1000_MANC_ARP_EN);
2911 E1000_WRITE_REG(&adapter->hw, MANC, manc);
2918 #ifdef CONFIG_NET_POLL_CONTROLLER
2920 * Polling 'interrupt' - used by things like netconsole to send skbs
2921 * without having to re-enable interrupts. It's not called while
2922 * the interrupt routine is executing.
2925 e1000_netpoll (struct net_device *netdev)
2927 struct e1000_adapter *adapter = netdev->priv;
2928 disable_irq(adapter->pdev->irq);
2929 e1000_intr(adapter->pdev->irq, netdev, NULL);
2930 enable_irq(adapter->pdev->irq);