[linux-2.6.git] drivers/net/e1000/e1000_main.c
1 /*******************************************************************************
2
3   
4   Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
5   
6   This program is free software; you can redistribute it and/or modify it 
7   under the terms of the GNU General Public License as published by the Free 
8   Software Foundation; either version 2 of the License, or (at your option) 
9   any later version.
10   
11   This program is distributed in the hope that it will be useful, but WITHOUT 
12   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 
13   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for 
14   more details.
15   
16   You should have received a copy of the GNU General Public License along with
17   this program; if not, write to the Free Software Foundation, Inc., 59 
18   Temple Place - Suite 330, Boston, MA  02111-1307, USA.
19   
20   The full GNU General Public License is included in this distribution in the
21   file called LICENSE.
22   
23   Contact Information:
24   Linux NICS <linux.nics@intel.com>
25   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27 *******************************************************************************/
28
29 #include "e1000.h"
30 #include <linux/rtnetlink.h>
31
32 /* Change Log
33  *
34  * 5.2.51   5/14/04
35  *   o set default configuration to 'NAPI disabled'. NAPI enabled driver
36  *     causes kernel panic when the interface is shut down while data is being
37  *     transferred.
38  * 5.2.47   5/04/04
39  *   o fixed ethtool -t implementation
40  * 5.2.45   4/29/04
41  *   o fixed ethtool -e implementation
42  *   o Support for ethtool ops [Stephen Hemminger (shemminger@osdl.org)]
43  * 5.2.42   4/26/04
44  *   o Added support for the DPRINTK macro for enhanced error logging.  Some
45  *     parts of the patch were supplied by Jon Mason.
46  *   o Move the register_netdevice() down in the probe routine due to a
47  *     loading/unloading test issue.
48  *   o Added a long RX byte count to the extra ethtool data members for BER
49  *     testing purposes.
50  * 5.2.39       3/12/04
51  */
52
53 char e1000_driver_name[] = "e1000";
54 char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
55 char e1000_driver_version[] = "5.2.52-k4";
56 char e1000_copyright[] = "Copyright (c) 1999-2004 Intel Corporation.";
57
58 /* e1000_pci_tbl - PCI Device ID Table
59  *
60  * Wildcard entries (PCI_ANY_ID) should come last
61  * Last entry must be all 0s
62  *
63  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
64  *   Class, Class Mask, private data (not used) }
65  */
66 static struct pci_device_id e1000_pci_tbl[] = {
67         {0x8086, 0x1000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
68         {0x8086, 0x1001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
69         {0x8086, 0x1004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
70         {0x8086, 0x1008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
71         {0x8086, 0x1009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
72         {0x8086, 0x100C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
73         {0x8086, 0x100D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
74         {0x8086, 0x100E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
75         {0x8086, 0x100F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
76         {0x8086, 0x1010, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
77         {0x8086, 0x1011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
78         {0x8086, 0x1012, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
79         {0x8086, 0x1013, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
80         {0x8086, 0x1015, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
81         {0x8086, 0x1016, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
82         {0x8086, 0x1017, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
83         {0x8086, 0x1018, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
84         {0x8086, 0x1019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
85         {0x8086, 0x101D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
86         {0x8086, 0x101E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
87         {0x8086, 0x1026, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
88         {0x8086, 0x1027, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
89         {0x8086, 0x1028, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
90         {0x8086, 0x1075, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
91         {0x8086, 0x1076, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
92         {0x8086, 0x1077, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
93         {0x8086, 0x1078, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
94         {0x8086, 0x1079, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
95         {0x8086, 0x107A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
96         {0x8086, 0x107B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
97         /* required last entry */
98         {0,}
99 };
100
101 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
102
103 int e1000_up(struct e1000_adapter *adapter);
104 void e1000_down(struct e1000_adapter *adapter);
105 void e1000_reset(struct e1000_adapter *adapter);
106 int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx);
107 int e1000_setup_tx_resources(struct e1000_adapter *adapter);
108 int e1000_setup_rx_resources(struct e1000_adapter *adapter);
109 void e1000_free_tx_resources(struct e1000_adapter *adapter);
110 void e1000_free_rx_resources(struct e1000_adapter *adapter);
111 void e1000_update_stats(struct e1000_adapter *adapter);
112
113 /* Local Function Prototypes */
114
115 static int e1000_init_module(void);
116 static void e1000_exit_module(void);
117 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
118 static void __devexit e1000_remove(struct pci_dev *pdev);
119 static int e1000_sw_init(struct e1000_adapter *adapter);
120 static int e1000_open(struct net_device *netdev);
121 static int e1000_close(struct net_device *netdev);
122 static void e1000_configure_tx(struct e1000_adapter *adapter);
123 static void e1000_configure_rx(struct e1000_adapter *adapter);
124 static void e1000_setup_rctl(struct e1000_adapter *adapter);
125 static void e1000_clean_tx_ring(struct e1000_adapter *adapter);
126 static void e1000_clean_rx_ring(struct e1000_adapter *adapter);
127 static void e1000_set_multi(struct net_device *netdev);
128 static void e1000_update_phy_info(unsigned long data);
129 static void e1000_watchdog(unsigned long data);
130 static void e1000_82547_tx_fifo_stall(unsigned long data);
131 static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
132 static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
133 static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
134 static int e1000_set_mac(struct net_device *netdev, void *p);
135 static inline void e1000_irq_disable(struct e1000_adapter *adapter);
136 static inline void e1000_irq_enable(struct e1000_adapter *adapter);
137 static irqreturn_t e1000_intr(int irq, void *data, struct pt_regs *regs);
138 static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter);
139 #ifdef CONFIG_E1000_NAPI
140 static int e1000_clean(struct net_device *netdev, int *budget);
141 static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter,
142                                     int *work_done, int work_to_do);
143 #else
144 static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter);
145 #endif
146 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter);
147 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
148 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
149                            int cmd);
150 void set_ethtool_ops(struct net_device *netdev);
151 static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
152 static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
153 static inline void e1000_rx_checksum(struct e1000_adapter *adapter,
154                                      struct e1000_rx_desc *rx_desc,
155                                      struct sk_buff *skb);
156 static void e1000_tx_timeout(struct net_device *dev);
157 static void e1000_tx_timeout_task(struct net_device *dev);
158 static void e1000_smartspeed(struct e1000_adapter *adapter);
159 static inline int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
160                                               struct sk_buff *skb);
161
162 static void e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp);
163 static void e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
164 static void e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
165 static void e1000_restore_vlan(struct e1000_adapter *adapter);
166
167 static int e1000_notify_reboot(struct notifier_block *, unsigned long event, void *ptr);
168 static int e1000_suspend(struct pci_dev *pdev, uint32_t state);
169 #ifdef CONFIG_PM
170 static int e1000_resume(struct pci_dev *pdev);
171 #endif
172
173 #ifdef CONFIG_NET_POLL_CONTROLLER
174 /* for netdump / net console */
175 static void e1000_netpoll (struct net_device *dev);
176 #endif
177
178 struct notifier_block e1000_notifier_reboot = {
179         .notifier_call  = e1000_notify_reboot,
180         .next           = NULL,
181         .priority       = 0
182 };
183
184 /* Exported from other modules */
185
186 extern void e1000_check_options(struct e1000_adapter *adapter);
187
188
189 static struct pci_driver e1000_driver = {
190         .name     = e1000_driver_name,
191         .id_table = e1000_pci_tbl,
192         .probe    = e1000_probe,
193         .remove   = __devexit_p(e1000_remove),
194         /* Power Management Hooks */
195 #ifdef CONFIG_PM
196         .suspend  = e1000_suspend,
197         .resume   = e1000_resume
198 #endif
199 };
200
201 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
202 MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
203 MODULE_LICENSE("GPL");
204
205 static int debug = 3;
206 module_param(debug, int, 0);
207 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
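/* For example, the message level can be raised when the module is loaded:
 *
 *   modprobe e1000 debug=8
 *
 * e1000_probe() below maps this to adapter->msg_enable = (1 << debug) - 1,
 * enabling every message category below the requested level. */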
208
209 /**
210  * e1000_init_module - Driver Registration Routine
211  *
212  * e1000_init_module is the first routine called when the driver is
213  * loaded. All it does is register with the PCI subsystem.
214  **/
215
216 static int __init
217 e1000_init_module(void)
218 {
219         int ret;
220         printk(KERN_INFO "%s - version %s\n",
221                e1000_driver_string, e1000_driver_version);
222
223         printk(KERN_INFO "%s\n", e1000_copyright);
224
225         ret = pci_module_init(&e1000_driver);
226         if(ret >= 0) {
227                 register_reboot_notifier(&e1000_notifier_reboot);
228         }
229         return ret;
230 }
231
232 module_init(e1000_init_module);
233
234 /**
235  * e1000_exit_module - Driver Exit Cleanup Routine
236  *
237  * e1000_exit_module is called just before the driver is removed
238  * from memory.
239  **/
240
241 static void __exit
242 e1000_exit_module(void)
243 {
244         unregister_reboot_notifier(&e1000_notifier_reboot);
245         pci_unregister_driver(&e1000_driver);
246 }
247
248 module_exit(e1000_exit_module);
249
250
251 int
252 e1000_up(struct e1000_adapter *adapter)
253 {
254         struct net_device *netdev = adapter->netdev;
255         int err;
256
257         /* hardware has been reset, we need to reload some things */
258
259         e1000_set_multi(netdev);
260
261         e1000_restore_vlan(adapter);
262
263         e1000_configure_tx(adapter);
264         e1000_setup_rctl(adapter);
265         e1000_configure_rx(adapter);
266         e1000_alloc_rx_buffers(adapter);
267
268         if((err = request_irq(adapter->pdev->irq, &e1000_intr,
269                               SA_SHIRQ | SA_SAMPLE_RANDOM,
270                               netdev->name, netdev)))
271                 return err;
272
273         mod_timer(&adapter->watchdog_timer, jiffies);
274         e1000_irq_enable(adapter);
275
276         return 0;
277 }
278
279 void
280 e1000_down(struct e1000_adapter *adapter)
281 {
282         struct net_device *netdev = adapter->netdev;
283
284         e1000_irq_disable(adapter);
285         free_irq(adapter->pdev->irq, netdev);
286         del_timer_sync(&adapter->tx_fifo_stall_timer);
287         del_timer_sync(&adapter->watchdog_timer);
288         del_timer_sync(&adapter->phy_info_timer);
289         adapter->link_speed = 0;
290         adapter->link_duplex = 0;
291         netif_carrier_off(netdev);
292         netif_stop_queue(netdev);
293
294         e1000_reset(adapter);
295         e1000_clean_tx_ring(adapter);
296         e1000_clean_rx_ring(adapter);
297 }
298
299 void
300 e1000_reset(struct e1000_adapter *adapter)
301 {
302         uint32_t pba;
303         /* Repartition the PBA for MTUs greater than 9k; CTRL.RST is
304          * required for the new partitioning to take effect.
305          */
306
307         if(adapter->hw.mac_type < e1000_82547) {
308                 if(adapter->rx_buffer_len > E1000_RXBUFFER_8192)
309                         pba = E1000_PBA_40K;
310                 else
311                         pba = E1000_PBA_48K;
312         } else {
313                 if(adapter->rx_buffer_len > E1000_RXBUFFER_8192)
314                         pba = E1000_PBA_22K;
315                 else
316                         pba = E1000_PBA_30K;
317                 adapter->tx_fifo_head = 0;
318                 adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
319                 adapter->tx_fifo_size =
320                         (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
321                 atomic_set(&adapter->tx_fifo_stall, 0);
322         }
323         E1000_WRITE_REG(&adapter->hw, PBA, pba);
324
325         /* flow control settings */
326         adapter->hw.fc_high_water =
327                 (pba << E1000_PBA_BYTES_SHIFT) - E1000_FC_HIGH_DIFF;
328         adapter->hw.fc_low_water =
329                 (pba << E1000_PBA_BYTES_SHIFT) - E1000_FC_LOW_DIFF;
330         adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME;
331         adapter->hw.fc_send_xon = 1;
332         adapter->hw.fc = adapter->hw.original_fc;
333
334         e1000_reset_hw(&adapter->hw);
335         if(adapter->hw.mac_type >= e1000_82544)
336                 E1000_WRITE_REG(&adapter->hw, WUC, 0);
337         e1000_init_hw(&adapter->hw);
338
339         /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
340         E1000_WRITE_REG(&adapter->hw, VET, ETHERNET_IEEE_VLAN_TYPE);
341
342         e1000_reset_adaptive(&adapter->hw);
343         e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
344 }
345
346 /**
347  * e1000_probe - Device Initialization Routine
348  * @pdev: PCI device information struct
349  * @ent: entry in e1000_pci_tbl
350  *
351  * Returns 0 on success, negative on failure
352  *
353  * e1000_probe initializes an adapter identified by a pci_dev structure.
354  * The OS initialization, configuring of the adapter private structure,
355  * and a hardware reset occur.
356  **/
357
358 static int __devinit
359 e1000_probe(struct pci_dev *pdev,
360             const struct pci_device_id *ent)
361 {
362         struct net_device *netdev;
363         struct e1000_adapter *adapter;
364         static int cards_found = 0;
365         unsigned long mmio_start;
366         int mmio_len;
367         int pci_using_dac;
368         int i;
369         int err;
370         uint16_t eeprom_data = 0; /* not read for 82542/82543 in the WoL switch below */
371
372         if((err = pci_enable_device(pdev)))
373                 return err;
374
375         if(!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
376                 pci_using_dac = 1;
377         } else {
378                 if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
379                         E1000_ERR("No usable DMA configuration, aborting\n");
380                         return err;
381                 }
382                 pci_using_dac = 0;
383         }
384
385         if((err = pci_request_regions(pdev, e1000_driver_name)))
386                 return err;
387
388         pci_set_master(pdev);
389
390         netdev = alloc_etherdev(sizeof(struct e1000_adapter));
391         if(!netdev) {
392                 err = -ENOMEM;
393                 goto err_alloc_etherdev;
394         }
395
396         SET_MODULE_OWNER(netdev);
397         SET_NETDEV_DEV(netdev, &pdev->dev);
398
399         pci_set_drvdata(pdev, netdev);
400         adapter = netdev->priv;
401         adapter->netdev = netdev;
402         adapter->pdev = pdev;
403         adapter->hw.back = adapter;
404         adapter->msg_enable = (1 << debug) - 1;
405
406         rtnl_lock();
407         /* we need to set the name early since the DPRINTK macro needs it set */
408         if((err = dev_alloc_name(netdev, netdev->name)) < 0)
409                 goto err_free_unlock;
410
411         mmio_start = pci_resource_start(pdev, BAR_0);
412         mmio_len = pci_resource_len(pdev, BAR_0);
413
414         adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
415         if(!adapter->hw.hw_addr) {
416                 err = -EIO;
417                 goto err_ioremap;
418         }
419
420         for(i = BAR_1; i <= BAR_5; i++) {
421                 if(pci_resource_len(pdev, i) == 0)
422                         continue;
423                 if(pci_resource_flags(pdev, i) & IORESOURCE_IO) {
424                         adapter->hw.io_base = pci_resource_start(pdev, i);
425                         break;
426                 }
427         }
428
429         netdev->open = &e1000_open;
430         netdev->stop = &e1000_close;
431         netdev->hard_start_xmit = &e1000_xmit_frame;
432         netdev->get_stats = &e1000_get_stats;
433         netdev->set_multicast_list = &e1000_set_multi;
434         netdev->set_mac_address = &e1000_set_mac;
435         netdev->change_mtu = &e1000_change_mtu;
436         netdev->do_ioctl = &e1000_ioctl;
437         set_ethtool_ops(netdev);
438         netdev->tx_timeout = &e1000_tx_timeout;
439         netdev->watchdog_timeo = 5 * HZ;
440 #ifdef CONFIG_E1000_NAPI
441         netdev->poll = &e1000_clean;
442         netdev->weight = 64;
443 #endif
444         netdev->vlan_rx_register = e1000_vlan_rx_register;
445         netdev->vlan_rx_add_vid = e1000_vlan_rx_add_vid;
446         netdev->vlan_rx_kill_vid = e1000_vlan_rx_kill_vid;
447 #ifdef CONFIG_NET_POLL_CONTROLLER
448         netdev->poll_controller = e1000_netpoll;
449 #endif
450
451         netdev->mem_start = mmio_start;
452         netdev->mem_end = mmio_start + mmio_len;
453         netdev->base_addr = adapter->hw.io_base;
454
455         adapter->bd_number = cards_found;
456
457         /* setup the private structure */
458
459         if((err = e1000_sw_init(adapter)))
460                 goto err_sw_init;
461
462         if(adapter->hw.mac_type >= e1000_82543) {
463                 netdev->features = NETIF_F_SG |
464                                    NETIF_F_HW_CSUM |
465                                    NETIF_F_HW_VLAN_TX |
466                                    NETIF_F_HW_VLAN_RX |
467                                    NETIF_F_HW_VLAN_FILTER;
468         } else {
469                 netdev->features = NETIF_F_SG;
470         }
471
472 #ifdef NETIF_F_TSO
473 #ifdef BROKEN_ON_NON_IA_ARCHS
474         /* Disabled for now until root-cause is found for
475          * hangs reported against non-IA archs.  TSO can be
476          * enabled using ethtool -K eth<x> tso on */
477         if((adapter->hw.mac_type >= e1000_82544) &&
478            (adapter->hw.mac_type != e1000_82547))
479                 netdev->features |= NETIF_F_TSO;
480 #endif
481 #endif
482
483         if(pci_using_dac)
484                 netdev->features |= NETIF_F_HIGHDMA;
485
486         /* before reading the EEPROM, reset the controller to 
487          * put the device in a known good starting state */
488         
489         e1000_reset_hw(&adapter->hw);
490
491         /* make sure the EEPROM is good */
492
493         if(e1000_validate_eeprom_checksum(&adapter->hw) < 0) {
494                 DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
495                 err = -EIO;
496                 goto err_eeprom;
497         }
498
499         /* copy the MAC address out of the EEPROM */
500
501         e1000_read_mac_addr(&adapter->hw);
502         memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
503
504         if(!is_valid_ether_addr(netdev->dev_addr)) {
505                 err = -EIO;
506                 goto err_eeprom;
507         }
508
509         e1000_read_part_num(&adapter->hw, &(adapter->part_num));
510
511         e1000_get_bus_info(&adapter->hw);
512
513         init_timer(&adapter->tx_fifo_stall_timer);
514         adapter->tx_fifo_stall_timer.function = &e1000_82547_tx_fifo_stall;
515         adapter->tx_fifo_stall_timer.data = (unsigned long) adapter;
516
517         init_timer(&adapter->watchdog_timer);
518         adapter->watchdog_timer.function = &e1000_watchdog;
519         adapter->watchdog_timer.data = (unsigned long) adapter;
520
521         init_timer(&adapter->phy_info_timer);
522         adapter->phy_info_timer.function = &e1000_update_phy_info;
523         adapter->phy_info_timer.data = (unsigned long) adapter;
524
525         INIT_WORK(&adapter->tx_timeout_task,
526                 (void (*)(void *))e1000_tx_timeout_task, netdev);
527
528         /* we're going to reset, so assume we have no link for now */
529
530         netif_carrier_off(netdev);
531         netif_stop_queue(netdev);
532
533         DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");
534         e1000_check_options(adapter);
535
536         /* Initial Wake on LAN setting
537          * If APM wake is enabled in the EEPROM,
538          * enable the ACPI Magic Packet filter
539          */
540
541         switch(adapter->hw.mac_type) {
542         case e1000_82542_rev2_0:
543         case e1000_82542_rev2_1:
544         case e1000_82543:
545                 break;
546         case e1000_82546:
547         case e1000_82546_rev_3:
548                 if((E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1)
549                    && (adapter->hw.media_type == e1000_media_type_copper)) {
550                         e1000_read_eeprom(&adapter->hw,
551                                 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
552                         break;
553                 }
554                 /* Fall Through */
555         default:
556                 e1000_read_eeprom(&adapter->hw,
557                         EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
558                 break;
559         }
560         if(eeprom_data & E1000_EEPROM_APME)
561                 adapter->wol |= E1000_WUFC_MAG;
562
563         /* reset the hardware with the new settings */
564
565         e1000_reset(adapter);
566
567         /* since we are holding the rtnl lock already, call the no-lock version */
568         if((err = register_netdevice(netdev)))
569                 goto err_register;
570
571         cards_found++;
572         rtnl_unlock();
573         return 0;
574
575 err_register:
576 err_sw_init:
577 err_eeprom:
578         iounmap(adapter->hw.hw_addr);
579 err_ioremap:
580 err_free_unlock:
581         rtnl_unlock();
582         free_netdev(netdev);
583 err_alloc_etherdev:
584         pci_release_regions(pdev);
585         return err;
586 }
587
588 /**
589  * e1000_remove - Device Removal Routine
590  * @pdev: PCI device information struct
591  *
592  * e1000_remove is called by the PCI subsystem to alert the driver
593  * that it should release a PCI device.  This could be caused by a
594  * Hot-Plug event, or because the driver is going to be removed from
595  * memory.
596  **/
597
598 static void __devexit
599 e1000_remove(struct pci_dev *pdev)
600 {
601         struct net_device *netdev = pci_get_drvdata(pdev);
602         struct e1000_adapter *adapter = netdev->priv;
603         uint32_t manc;
604
605         if(adapter->hw.mac_type >= e1000_82540 &&
606            adapter->hw.media_type == e1000_media_type_copper) {
607                 manc = E1000_READ_REG(&adapter->hw, MANC);
608                 if(manc & E1000_MANC_SMBUS_EN) {
609                         manc |= E1000_MANC_ARP_EN;
610                         E1000_WRITE_REG(&adapter->hw, MANC, manc);
611                 }
612         }
613
614         unregister_netdev(netdev);
615
616         e1000_phy_hw_reset(&adapter->hw);
617
618         iounmap(adapter->hw.hw_addr);
619         pci_release_regions(pdev);
620
621         free_netdev(netdev);
622 }
623
624 /**
625  * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
626  * @adapter: board private structure to initialize
627  *
628  * e1000_sw_init initializes the Adapter private data structure.
629  * Fields are initialized based on PCI device information and
630  * OS network device settings (MTU size).
631  **/
632
633 static int __devinit
634 e1000_sw_init(struct e1000_adapter *adapter)
635 {
636         struct e1000_hw *hw = &adapter->hw;
637         struct net_device *netdev = adapter->netdev;
638         struct pci_dev *pdev = adapter->pdev;
639
640         /* PCI config space info */
641
642         hw->vendor_id = pdev->vendor;
643         hw->device_id = pdev->device;
644         hw->subsystem_vendor_id = pdev->subsystem_vendor;
645         hw->subsystem_id = pdev->subsystem_device;
646
647         pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
648
649         pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
650
651         adapter->rx_buffer_len = E1000_RXBUFFER_2048;
652         hw->max_frame_size = netdev->mtu +
653                              ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
654         hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
655
656         /* identify the MAC */
657
658         if (e1000_set_mac_type(hw)) {
659                 DPRINTK(PROBE, ERR, "Unknown MAC Type\n");
660                 return -EIO;
661         }
662
663         /* initialize eeprom parameters */
664
665         e1000_init_eeprom_params(hw);
666
667         if((hw->mac_type == e1000_82541) ||
668            (hw->mac_type == e1000_82547) ||
669            (hw->mac_type == e1000_82541_rev_2) ||
670            (hw->mac_type == e1000_82547_rev_2))
671                 hw->phy_init_script = 1;
672
673         e1000_set_media_type(hw);
674
675         if(hw->mac_type < e1000_82543)
676                 hw->report_tx_early = 0;
677         else
678                 hw->report_tx_early = 1;
679
680         hw->wait_autoneg_complete = FALSE;
681         hw->tbi_compatibility_en = TRUE;
682         hw->adaptive_ifs = TRUE;
683
684         /* Copper options */
685
686         if(hw->media_type == e1000_media_type_copper) {
687                 hw->mdix = AUTO_ALL_MODES;
688                 hw->disable_polarity_correction = FALSE;
689                 hw->master_slave = E1000_MASTER_SLAVE;
690         }
691
692         atomic_set(&adapter->irq_sem, 1);
693         spin_lock_init(&adapter->stats_lock);
694         spin_lock_init(&adapter->tx_lock);
695
696         return 0;
697 }
698
699 /**
700  * e1000_open - Called when a network interface is made active
701  * @netdev: network interface device structure
702  *
703  * Returns 0 on success, negative value on failure
704  *
705  * The open entry point is called when a network interface is made
706  * active by the system (IFF_UP).  At this point all resources needed
707  * for transmit and receive operations are allocated, the interrupt
708  * handler is registered with the OS, the watchdog timer is started,
709  * and the stack is notified that the interface is ready.
710  **/
711
712 static int
713 e1000_open(struct net_device *netdev)
714 {
715         struct e1000_adapter *adapter = netdev->priv;
716         int err;
717
718         /* allocate transmit descriptors */
719
720         if((err = e1000_setup_tx_resources(adapter)))
721                 goto err_setup_tx;
722
723         /* allocate receive descriptors */
724
725         if((err = e1000_setup_rx_resources(adapter)))
726                 goto err_setup_rx;
727
728         if((err = e1000_up(adapter)))
729                 goto err_up;
730
731         return 0;
732
733 err_up:
734         e1000_free_rx_resources(adapter);
735 err_setup_rx:
736         e1000_free_tx_resources(adapter);
737 err_setup_tx:
738         e1000_reset(adapter);
739
740         return err;
741 }
742
743 /**
744  * e1000_close - Disables a network interface
745  * @netdev: network interface device structure
746  *
747  * Returns 0, this is not allowed to fail
748  *
749  * The close entry point is called when an interface is de-activated
750  * by the OS.  The hardware is still under the driver's control, but
751  * needs to be disabled.  A global MAC reset is issued to stop the
752  * hardware, and all transmit and receive resources are freed.
753  **/
754
755 static int
756 e1000_close(struct net_device *netdev)
757 {
758         struct e1000_adapter *adapter = netdev->priv;
759
760         e1000_down(adapter);
761
762         e1000_free_tx_resources(adapter);
763         e1000_free_rx_resources(adapter);
764
765         return 0;
766 }
767
768 /**
769  * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
770  * @adapter: board private structure
771  *
772  * Return 0 on success, negative on failure
773  **/
774
775 int
776 e1000_setup_tx_resources(struct e1000_adapter *adapter)
777 {
778         struct e1000_desc_ring *txdr = &adapter->tx_ring;
779         struct pci_dev *pdev = adapter->pdev;
780         int size;
781
782         size = sizeof(struct e1000_buffer) * txdr->count;
783         txdr->buffer_info = kmalloc(size, GFP_KERNEL);
784         if(!txdr->buffer_info) {
785                 return -ENOMEM;
786         }
787         memset(txdr->buffer_info, 0, size);
788
789         /* round up to nearest 4K */
790
791         txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
792         E1000_ROUNDUP(txdr->size, 4096);
793
794         txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
795         if(!txdr->desc) {
796                 kfree(txdr->buffer_info);
797                 return -ENOMEM;
798         }
799         memset(txdr->desc, 0, txdr->size);
800
801         txdr->next_to_use = 0;
802         txdr->next_to_clean = 0;
803
804         return 0;
805 }
806
807 /**
808  * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
809  * @adapter: board private structure
810  *
811  * Configure the Tx unit of the MAC after a reset.
812  **/
813
814 static void
815 e1000_configure_tx(struct e1000_adapter *adapter)
816 {
817         uint64_t tdba = adapter->tx_ring.dma;
818         uint32_t tdlen = adapter->tx_ring.count * sizeof(struct e1000_tx_desc);
819         uint32_t tctl, tipg;
820
821         E1000_WRITE_REG(&adapter->hw, TDBAL, (tdba & 0x00000000ffffffffULL));
822         E1000_WRITE_REG(&adapter->hw, TDBAH, (tdba >> 32));
823
824         E1000_WRITE_REG(&adapter->hw, TDLEN, tdlen);
825
826         /* Setup the HW Tx Head and Tail descriptor pointers */
827
828         E1000_WRITE_REG(&adapter->hw, TDH, 0);
829         E1000_WRITE_REG(&adapter->hw, TDT, 0);
830
831         /* Set the default values for the Tx Inter Packet Gap timer */
832
833         switch (adapter->hw.mac_type) {
834         case e1000_82542_rev2_0:
835         case e1000_82542_rev2_1:
836                 tipg = DEFAULT_82542_TIPG_IPGT;
837                 tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
838                 tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
839                 break;
840         default:
841                 if(adapter->hw.media_type == e1000_media_type_fiber ||
842                    adapter->hw.media_type == e1000_media_type_internal_serdes)
843                         tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
844                 else
845                         tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
846                 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
847                 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
848         }
849         E1000_WRITE_REG(&adapter->hw, TIPG, tipg);
850
851         /* Set the Tx Interrupt Delay register */
852
853         E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay);
854         if(adapter->hw.mac_type >= e1000_82540)
855                 E1000_WRITE_REG(&adapter->hw, TADV, adapter->tx_abs_int_delay);
856
857         /* Program the Transmit Control Register */
858
859         tctl = E1000_READ_REG(&adapter->hw, TCTL);
860
861         tctl &= ~E1000_TCTL_CT;
862         tctl |= E1000_TCTL_EN | E1000_TCTL_PSP |
863                 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
864
865         E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
866
867         e1000_config_collision_dist(&adapter->hw);
868
869         /* Setup Transmit Descriptor Settings for eop descriptor */
870         adapter->txd_cmd = E1000_TXD_CMD_IDE | E1000_TXD_CMD_EOP |
871                 E1000_TXD_CMD_IFCS;
872
873         if(adapter->hw.report_tx_early == 1)
874                 adapter->txd_cmd |= E1000_TXD_CMD_RS;
875         else
876                 adapter->txd_cmd |= E1000_TXD_CMD_RPS;
877
878         /* Cache if we're 82544 running in PCI-X because we'll
879          * need this to apply a workaround later in the send path. */
880         if(adapter->hw.mac_type == e1000_82544 &&
881            adapter->hw.bus_type == e1000_bus_type_pcix)
882                 adapter->pcix_82544 = 1;
883 }
884
885 /**
886  * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
887  * @adapter: board private structure
888  *
889  * Returns 0 on success, negative on failure
890  **/
891
892 int
893 e1000_setup_rx_resources(struct e1000_adapter *adapter)
894 {
895         struct e1000_desc_ring *rxdr = &adapter->rx_ring;
896         struct pci_dev *pdev = adapter->pdev;
897         int size;
898
899         size = sizeof(struct e1000_buffer) * rxdr->count;
900         rxdr->buffer_info = kmalloc(size, GFP_KERNEL);
901         if(!rxdr->buffer_info) {
902                 return -ENOMEM;
903         }
904         memset(rxdr->buffer_info, 0, size);
905
906         /* Round up to nearest 4K */
907
908         rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
909         E1000_ROUNDUP(rxdr->size, 4096);
910
911         rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
912
913         if(!rxdr->desc) {
914                 kfree(rxdr->buffer_info);
915                 return -ENOMEM;
916         }
917         memset(rxdr->desc, 0, rxdr->size);
918
919         rxdr->next_to_clean = 0;
920         rxdr->next_to_use = 0;
921
922         return 0;
923 }
924
925 /**
926  * e1000_setup_rctl - configure the receive control register
927  * @adapter: Board private structure
928  **/
929
930 static void
931 e1000_setup_rctl(struct e1000_adapter *adapter)
932 {
933         uint32_t rctl;
934
935         rctl = E1000_READ_REG(&adapter->hw, RCTL);
936
937         rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
938
939         rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
940                 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
941                 (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
942
943         if(adapter->hw.tbi_compatibility_on == 1)
944                 rctl |= E1000_RCTL_SBP;
945         else
946                 rctl &= ~E1000_RCTL_SBP;
947
948         rctl &= ~(E1000_RCTL_SZ_4096);
949         switch (adapter->rx_buffer_len) {
950         case E1000_RXBUFFER_2048:
951         default:
952                 rctl |= E1000_RCTL_SZ_2048;
953                 rctl &= ~(E1000_RCTL_BSEX | E1000_RCTL_LPE);
954                 break;
955         case E1000_RXBUFFER_4096:
956                 rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
957                 break;
958         case E1000_RXBUFFER_8192:
959                 rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
960                 break;
961         case E1000_RXBUFFER_16384:
962                 rctl |= E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
963                 break;
964         }
965
966         E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
967 }
968
969 /**
970  * e1000_configure_rx - Configure 8254x Receive Unit after Reset
971  * @adapter: board private structure
972  *
973  * Configure the Rx unit of the MAC after a reset.
974  **/
975
976 static void
977 e1000_configure_rx(struct e1000_adapter *adapter)
978 {
979         uint64_t rdba = adapter->rx_ring.dma;
980         uint32_t rdlen = adapter->rx_ring.count * sizeof(struct e1000_rx_desc);
981         uint32_t rctl;
982         uint32_t rxcsum;
983
984         /* make sure receives are disabled while setting up the descriptors */
985
986         rctl = E1000_READ_REG(&adapter->hw, RCTL);
987         E1000_WRITE_REG(&adapter->hw, RCTL, rctl & ~E1000_RCTL_EN);
988
989         /* set the Receive Delay Timer Register */
990
991         E1000_WRITE_REG(&adapter->hw, RDTR, adapter->rx_int_delay);
992
993         if(adapter->hw.mac_type >= e1000_82540) {
994                 E1000_WRITE_REG(&adapter->hw, RADV, adapter->rx_abs_int_delay);
995                 if(adapter->itr > 1)
996                         E1000_WRITE_REG(&adapter->hw, ITR,
997                                 1000000000 / (adapter->itr * 256));
998         }
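        /* Note: the ITR register holds the minimum inter-interrupt interval
         * in 256 ns increments, so 1000000000 / (itr * 256) programs the
         * adapter for roughly "itr" interrupts per second. */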
999
1000         /* Setup the Base and Length of the Rx Descriptor Ring */
1001
1002         E1000_WRITE_REG(&adapter->hw, RDBAL, (rdba & 0x00000000ffffffffULL));
1003         E1000_WRITE_REG(&adapter->hw, RDBAH, (rdba >> 32));
1004
1005         E1000_WRITE_REG(&adapter->hw, RDLEN, rdlen);
1006
1007         /* Setup the HW Rx Head and Tail Descriptor Pointers */
1008         E1000_WRITE_REG(&adapter->hw, RDH, 0);
1009         E1000_WRITE_REG(&adapter->hw, RDT, 0);
1010
1011         /* Enable 82543 Receive Checksum Offload for TCP and UDP */
1012         if((adapter->hw.mac_type >= e1000_82543) &&
1013            (adapter->rx_csum == TRUE)) {
1014                 rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM);
1015                 rxcsum |= E1000_RXCSUM_TUOFL;
1016                 E1000_WRITE_REG(&adapter->hw, RXCSUM, rxcsum);
1017         }
1018
1019         /* Enable Receives */
1020
1021         E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
1022 }
1023
1024 /**
1025  * e1000_free_tx_resources - Free Tx Resources
1026  * @adapter: board private structure
1027  *
1028  * Free all transmit software resources
1029  **/
1030
1031 void
1032 e1000_free_tx_resources(struct e1000_adapter *adapter)
1033 {
1034         struct pci_dev *pdev = adapter->pdev;
1035
1036         e1000_clean_tx_ring(adapter);
1037
1038         kfree(adapter->tx_ring.buffer_info);
1039         adapter->tx_ring.buffer_info = NULL;
1040
1041         pci_free_consistent(pdev, adapter->tx_ring.size,
1042                             adapter->tx_ring.desc, adapter->tx_ring.dma);
1043
1044         adapter->tx_ring.desc = NULL;
1045 }
1046
1047 /**
1048  * e1000_clean_tx_ring - Free Tx Buffers
1049  * @adapter: board private structure
1050  **/
1051
1052 static void
1053 e1000_clean_tx_ring(struct e1000_adapter *adapter)
1054 {
1055         struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
1056         struct e1000_buffer *buffer_info;
1057         struct pci_dev *pdev = adapter->pdev;
1058         unsigned long size;
1059         unsigned int i;
1060
1061         /* Free all the Tx ring sk_buffs */
1062
1063         for(i = 0; i < tx_ring->count; i++) {
1064                 buffer_info = &tx_ring->buffer_info[i];
1065                 if(buffer_info->skb) {
1066
1067                         pci_unmap_page(pdev,
1068                                        buffer_info->dma,
1069                                        buffer_info->length,
1070                                        PCI_DMA_TODEVICE);
1071
1072                         dev_kfree_skb(buffer_info->skb);
1073
1074                         buffer_info->skb = NULL;
1075                 }
1076         }
1077
1078         size = sizeof(struct e1000_buffer) * tx_ring->count;
1079         memset(tx_ring->buffer_info, 0, size);
1080
1081         /* Zero out the descriptor ring */
1082
1083         memset(tx_ring->desc, 0, tx_ring->size);
1084
1085         tx_ring->next_to_use = 0;
1086         tx_ring->next_to_clean = 0;
1087
1088         E1000_WRITE_REG(&adapter->hw, TDH, 0);
1089         E1000_WRITE_REG(&adapter->hw, TDT, 0);
1090 }
1091
1092 /**
1093  * e1000_free_rx_resources - Free Rx Resources
1094  * @adapter: board private structure
1095  *
1096  * Free all receive software resources
1097  **/
1098
1099 void
1100 e1000_free_rx_resources(struct e1000_adapter *adapter)
1101 {
1102         struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
1103         struct pci_dev *pdev = adapter->pdev;
1104
1105         e1000_clean_rx_ring(adapter);
1106
1107         kfree(rx_ring->buffer_info);
1108         rx_ring->buffer_info = NULL;
1109
1110         pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
1111
1112         rx_ring->desc = NULL;
1113 }
1114
1115 /**
1116  * e1000_clean_rx_ring - Free Rx Buffers
1117  * @adapter: board private structure
1118  **/
1119
1120 static void
1121 e1000_clean_rx_ring(struct e1000_adapter *adapter)
1122 {
1123         struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
1124         struct e1000_buffer *buffer_info;
1125         struct pci_dev *pdev = adapter->pdev;
1126         unsigned long size;
1127         unsigned int i;
1128
1129         /* Free all the Rx ring sk_buffs */
1130
1131         for(i = 0; i < rx_ring->count; i++) {
1132                 buffer_info = &rx_ring->buffer_info[i];
1133                 if(buffer_info->skb) {
1134
1135                         pci_unmap_single(pdev,
1136                                          buffer_info->dma,
1137                                          buffer_info->length,
1138                                          PCI_DMA_FROMDEVICE);
1139
1140                         dev_kfree_skb(buffer_info->skb);
1141
1142                         buffer_info->skb = NULL;
1143                 }
1144         }
1145
1146         size = sizeof(struct e1000_buffer) * rx_ring->count;
1147         memset(rx_ring->buffer_info, 0, size);
1148
1149         /* Zero out the descriptor ring */
1150
1151         memset(rx_ring->desc, 0, rx_ring->size);
1152
1153         rx_ring->next_to_clean = 0;
1154         rx_ring->next_to_use = 0;
1155
1156         E1000_WRITE_REG(&adapter->hw, RDH, 0);
1157         E1000_WRITE_REG(&adapter->hw, RDT, 0);
1158 }
1159
1160 /* The 82542 2.0 (revision 2) needs to have the receive unit in reset
1161  * and memory write and invalidate disabled for certain operations
1162  */
1163 static void
1164 e1000_enter_82542_rst(struct e1000_adapter *adapter)
1165 {
1166         struct net_device *netdev = adapter->netdev;
1167         uint32_t rctl;
1168
1169         e1000_pci_clear_mwi(&adapter->hw);
1170
1171         rctl = E1000_READ_REG(&adapter->hw, RCTL);
1172         rctl |= E1000_RCTL_RST;
1173         E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
1174         E1000_WRITE_FLUSH(&adapter->hw);
1175         mdelay(5);
1176
1177         if(netif_running(netdev))
1178                 e1000_clean_rx_ring(adapter);
1179 }
1180
1181 static void
1182 e1000_leave_82542_rst(struct e1000_adapter *adapter)
1183 {
1184         struct net_device *netdev = adapter->netdev;
1185         uint32_t rctl;
1186
1187         rctl = E1000_READ_REG(&adapter->hw, RCTL);
1188         rctl &= ~E1000_RCTL_RST;
1189         E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
1190         E1000_WRITE_FLUSH(&adapter->hw);
1191         mdelay(5);
1192
1193         if(adapter->hw.pci_cmd_word & PCI_COMMAND_INVALIDATE)
1194                 e1000_pci_set_mwi(&adapter->hw);
1195
1196         if(netif_running(netdev)) {
1197                 e1000_configure_rx(adapter);
1198                 e1000_alloc_rx_buffers(adapter);
1199         }
1200 }
1201
1202 /**
1203  * e1000_set_mac - Change the Ethernet Address of the NIC
1204  * @netdev: network interface device structure
1205  * @p: pointer to an address structure
1206  *
1207  * Returns 0 on success, negative on failure
1208  **/
1209
1210 static int
1211 e1000_set_mac(struct net_device *netdev, void *p)
1212 {
1213         struct e1000_adapter *adapter = netdev->priv;
1214         struct sockaddr *addr = p;
1215
1216         if(!is_valid_ether_addr(addr->sa_data))
1217                 return -EADDRNOTAVAIL;
1218
1219         /* 82542 2.0 needs to be in reset to write receive address registers */
1220
1221         if(adapter->hw.mac_type == e1000_82542_rev2_0)
1222                 e1000_enter_82542_rst(adapter);
1223
1224         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1225         memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
1226
1227         e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);
1228
1229         if(adapter->hw.mac_type == e1000_82542_rev2_0)
1230                 e1000_leave_82542_rst(adapter);
1231
1232         return 0;
1233 }
1234
1235 /**
1236  * e1000_set_multi - Multicast and Promiscuous mode set
1237  * @netdev: network interface device structure
1238  *
1239  * The set_multi entry point is called whenever the multicast address
1240  * list or the network interface flags are updated.  This routine is
1241  * responsible for configuring the hardware for proper multicast,
1242  * promiscuous mode, and all-multi behavior.
1243  **/
1244
1245 static void
1246 e1000_set_multi(struct net_device *netdev)
1247 {
1248         struct e1000_adapter *adapter = netdev->priv;
1249         struct e1000_hw *hw = &adapter->hw;
1250         struct dev_mc_list *mc_ptr;
1251         uint32_t rctl;
1252         uint32_t hash_value;
1253         int i;
1254
1255         /* Check for Promiscuous and All Multicast modes */
1256
1257         rctl = E1000_READ_REG(hw, RCTL);
1258
1259         if(netdev->flags & IFF_PROMISC) {
1260                 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1261         } else if(netdev->flags & IFF_ALLMULTI) {
1262                 rctl |= E1000_RCTL_MPE;
1263                 rctl &= ~E1000_RCTL_UPE;
1264         } else {
1265                 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
1266         }
1267
1268         E1000_WRITE_REG(hw, RCTL, rctl);
1269
1270         /* 82542 2.0 needs to be in reset to write receive address registers */
1271
1272         if(hw->mac_type == e1000_82542_rev2_0)
1273                 e1000_enter_82542_rst(adapter);
1274
1275         /* load the first 14 multicast addresses into the exact filters 1-14
1276          * RAR 0 is used for the station MAC address
1277          * if there are not 14 addresses, go ahead and clear the filters
1278          */
1279         mc_ptr = netdev->mc_list;
1280
1281         for(i = 1; i < E1000_RAR_ENTRIES; i++) {
1282                 if(mc_ptr) {
1283                         e1000_rar_set(hw, mc_ptr->dmi_addr, i);
1284                         mc_ptr = mc_ptr->next;
1285                 } else {
1286                         E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
1287                         E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
1288                 }
1289         }
1290
1291         /* clear the old settings from the multicast hash table */
1292
1293         for(i = 0; i < E1000_NUM_MTA_REGISTERS; i++)
1294                 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
1295
1296         /* load any remaining addresses into the hash table */
1297
1298         for(; mc_ptr; mc_ptr = mc_ptr->next) {
1299                 hash_value = e1000_hash_mc_addr(hw, mc_ptr->dmi_addr);
1300                 e1000_mta_set(hw, hash_value);
1301         }
1302
1303         if(hw->mac_type == e1000_82542_rev2_0)
1304                 e1000_leave_82542_rst(adapter);
1305 }
1306
1307 /* need to wait a few seconds after link up to get diagnostic information from the phy */
1308
1309 static void
1310 e1000_update_phy_info(unsigned long data)
1311 {
1312         struct e1000_adapter *adapter = (struct e1000_adapter *) data;
1313         e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
1314 }
1315
1316 /**
1317  * e1000_82547_tx_fifo_stall - Timer Call-back
1318  * @data: pointer to adapter cast into an unsigned long
1319  **/
1320
1321 static void
1322 e1000_82547_tx_fifo_stall(unsigned long data)
1323 {
1324         struct e1000_adapter *adapter = (struct e1000_adapter *) data;
1325         struct net_device *netdev = adapter->netdev;
1326         uint32_t tctl;
1327
1328         if(atomic_read(&adapter->tx_fifo_stall)) {
1329                 if((E1000_READ_REG(&adapter->hw, TDT) ==
1330                     E1000_READ_REG(&adapter->hw, TDH)) &&
1331                    (E1000_READ_REG(&adapter->hw, TDFT) ==
1332                     E1000_READ_REG(&adapter->hw, TDFH)) &&
1333                    (E1000_READ_REG(&adapter->hw, TDFTS) ==
1334                     E1000_READ_REG(&adapter->hw, TDFHS))) {
1335                         tctl = E1000_READ_REG(&adapter->hw, TCTL);
1336                         E1000_WRITE_REG(&adapter->hw, TCTL,
1337                                         tctl & ~E1000_TCTL_EN);
1338                         E1000_WRITE_REG(&adapter->hw, TDFT,
1339                                         adapter->tx_head_addr);
1340                         E1000_WRITE_REG(&adapter->hw, TDFH,
1341                                         adapter->tx_head_addr);
1342                         E1000_WRITE_REG(&adapter->hw, TDFTS,
1343                                         adapter->tx_head_addr);
1344                         E1000_WRITE_REG(&adapter->hw, TDFHS,
1345                                         adapter->tx_head_addr);
1346                         E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
1347                         E1000_WRITE_FLUSH(&adapter->hw);
1348
1349                         adapter->tx_fifo_head = 0;
1350                         atomic_set(&adapter->tx_fifo_stall, 0);
1351                         netif_wake_queue(netdev);
1352                 } else {
1353                         mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
1354                 }
1355         }
1356 }
1357
1358 /**
1359  * e1000_watchdog - Timer Call-back
1360  * @data: pointer to netdev cast into an unsigned long
1361  **/
1362
1363 static void
1364 e1000_watchdog(unsigned long data)
1365 {
1366         struct e1000_adapter *adapter = (struct e1000_adapter *) data;
1367         struct net_device *netdev = adapter->netdev;
1368         struct e1000_desc_ring *txdr = &adapter->tx_ring;
1369         unsigned int i;
1370         uint32_t link;
1371
1372         e1000_check_for_link(&adapter->hw);
1373
1374         if((adapter->hw.media_type == e1000_media_type_internal_serdes) &&
1375            !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE))
1376                 link = !adapter->hw.serdes_link_down;
1377         else
1378                 link = E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU;
1379
1380         if(link) {
1381                 if(!netif_carrier_ok(netdev)) {
1382                         e1000_get_speed_and_duplex(&adapter->hw,
1383                                                    &adapter->link_speed,
1384                                                    &adapter->link_duplex);
1385
1386                         DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s\n",
1387                                adapter->link_speed,
1388                                adapter->link_duplex == FULL_DUPLEX ?
1389                                "Full Duplex" : "Half Duplex");
1390
1391                         netif_carrier_on(netdev);
1392                         netif_wake_queue(netdev);
1393                         mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ);
1394                         adapter->smartspeed = 0;
1395                 }
1396         } else {
1397                 if(netif_carrier_ok(netdev)) {
1398                         adapter->link_speed = 0;
1399                         adapter->link_duplex = 0;
1400                         DPRINTK(LINK, INFO, "NIC Link is Down\n");
1401                         netif_carrier_off(netdev);
1402                         netif_stop_queue(netdev);
1403                         mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ);
1404                 }
1405
1406                 e1000_smartspeed(adapter);
1407         }
1408
1409         e1000_update_stats(adapter);
1410
1411         adapter->hw.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
1412         adapter->tpt_old = adapter->stats.tpt;
1413         adapter->hw.collision_delta = adapter->stats.colc - adapter->colc_old;
1414         adapter->colc_old = adapter->stats.colc;
1415         
1416         adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
1417         adapter->gorcl_old = adapter->stats.gorcl;
1418         adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
1419         adapter->gotcl_old = adapter->stats.gotcl;
1420
1421         e1000_update_adaptive(&adapter->hw);
1422
1423         if(!netif_carrier_ok(netdev)) {
1424                 if(E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
1425                         /* We've lost link, so the controller stops DMA,
1426                          * but we've got queued Tx work that's never going
1427                          * to get done, so reset controller to flush Tx.
1428                          * (Do the reset outside of interrupt context). */
1429                         schedule_work(&adapter->tx_timeout_task);
1430                 }
1431         }
1432
1433         /* Dynamic mode for Interrupt Throttle Rate (ITR) */
1434         if(adapter->hw.mac_type >= e1000_82540 && adapter->itr == 1) {
1435                 /* Symmetric Tx/Rx gets a reduced ITR=2000; Total
1436                  * asymmetrical Tx or Rx gets ITR=8000; everyone
1437                  * else is between 2000-8000. */
1438                 uint32_t goc = (adapter->gotcl + adapter->gorcl) / 10000;
1439                 uint32_t dif = (adapter->gotcl > adapter->gorcl ? 
1440                         adapter->gotcl - adapter->gorcl :
1441                         adapter->gorcl - adapter->gotcl) / 10000;
1442                 uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
1443                 E1000_WRITE_REG(&adapter->hw, ITR, 1000000000 / (itr * 256));
1444         }
1445
1446         /* Cause software interrupt to ensure rx ring is cleaned */
1447         E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0);
1448
1449         /* Early detection of hung controller */
1450         i = txdr->next_to_clean;
1451         if(txdr->buffer_info[i].dma &&
1452            time_after(jiffies, txdr->buffer_info[i].time_stamp + HZ) &&
1453            !(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_TXOFF))
1454                 netif_stop_queue(netdev);
1455
1456         /* Reset the timer */
1457         mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
1458 }
1459
1460 #define E1000_TX_FLAGS_CSUM             0x00000001
1461 #define E1000_TX_FLAGS_VLAN             0x00000002
1462 #define E1000_TX_FLAGS_TSO              0x00000004
1463 #define E1000_TX_FLAGS_VLAN_MASK        0xffff0000
1464 #define E1000_TX_FLAGS_VLAN_SHIFT       16
1465
1466 static inline boolean_t
1467 e1000_tso(struct e1000_adapter *adapter, struct sk_buff *skb)
1468 {
1469 #ifdef NETIF_F_TSO
1470         struct e1000_context_desc *context_desc;
1471         unsigned int i;
1472         uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
1473         uint16_t ipcse, tucse, mss;
1474
1475         if(skb_shinfo(skb)->tso_size) {
1476                 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
1477                 mss = skb_shinfo(skb)->tso_size;
1478                 skb->nh.iph->tot_len = 0;
1479                 skb->nh.iph->check = 0;
1480                 skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
1481                                                       skb->nh.iph->daddr,
1482                                                       0,
1483                                                       IPPROTO_TCP,
1484                                                       0);
1485                 ipcss = skb->nh.raw - skb->data;
1486                 ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data;
1487                 ipcse = skb->h.raw - skb->data - 1;
1488                 tucss = skb->h.raw - skb->data;
1489                 tucso = (void *)&(skb->h.th->check) - (void *)skb->data;
1490                 tucse = 0;
1491
1492                 i = adapter->tx_ring.next_to_use;
1493                 context_desc = E1000_CONTEXT_DESC(adapter->tx_ring, i);
1494
1495                 context_desc->lower_setup.ip_fields.ipcss  = ipcss;
1496                 context_desc->lower_setup.ip_fields.ipcso  = ipcso;
1497                 context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
1498                 context_desc->upper_setup.tcp_fields.tucss = tucss;
1499                 context_desc->upper_setup.tcp_fields.tucso = tucso;
1500                 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
1501                 context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
1502                 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
1503                 context_desc->cmd_and_length = cpu_to_le32(
1504                         E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
1505                         E1000_TXD_CMD_IP | E1000_TXD_CMD_TCP |
1506                         (skb->len - (hdr_len)));
1507
1508                 if(++i == adapter->tx_ring.count) i = 0;
1509                 adapter->tx_ring.next_to_use = i;
1510
1511                 return TRUE;
1512         }
1513 #endif
1514
1515         return FALSE;
1516 }
1517
1518 static inline boolean_t
1519 e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
1520 {
1521         struct e1000_context_desc *context_desc;
1522         unsigned int i;
1523         uint8_t css, cso;
1524
1525         if(skb->ip_summed == CHECKSUM_HW) {
1526                 css = skb->h.raw - skb->data;
1527                 cso = (skb->h.raw + skb->csum) - skb->data;
1528
1529                 i = adapter->tx_ring.next_to_use;
1530                 context_desc = E1000_CONTEXT_DESC(adapter->tx_ring, i);
1531
1532                 context_desc->upper_setup.tcp_fields.tucss = css;
1533                 context_desc->upper_setup.tcp_fields.tucso = cso;
1534                 context_desc->upper_setup.tcp_fields.tucse = 0;
1535                 context_desc->tcp_seg_setup.data = 0;
1536                 context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
1537
1538                 if(++i == adapter->tx_ring.count) i = 0;
1539                 adapter->tx_ring.next_to_use = i;
1540
1541                 return TRUE;
1542         }
1543
1544         return FALSE;
1545 }
1546
1547 #define E1000_MAX_TXD_PWR       12
1548 #define E1000_MAX_DATA_PER_TXD  (1<<E1000_MAX_TXD_PWR)
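/* i.e. at most 4096 (1 << 12) data bytes per transmit descriptor */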
1549
1550 static inline int
1551 e1000_tx_map(struct e1000_adapter *adapter, struct sk_buff *skb,
1552         unsigned int first, unsigned int max_per_txd,
1553         unsigned int nr_frags, unsigned int mss)
1554 {
1555         struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
1556         struct e1000_buffer *buffer_info;
1557         unsigned int len = skb->len;
1558         unsigned int offset = 0, size, count = 0, i;
1559         unsigned int f;
1560         len -= skb->data_len;
1561
1562
1563         i = tx_ring->next_to_use;
1564
1565         while(len) {
1566                 buffer_info = &tx_ring->buffer_info[i];
1567                 size = min(len, max_per_txd);
1568 #ifdef NETIF_F_TSO
1569                 /* Workaround for premature desc write-backs
1570                  * in TSO mode.  Append 4-byte sentinel desc */
1571                 if(mss && !nr_frags && size == len && size > 8)
1572                         size -= 4;
1573 #endif
1574                 /* Workaround for potential 82544 hang in PCI-X.  Avoid
1575                  * terminating buffers within evenly-aligned dwords. */
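                /* ("Evenly-aligned" here: the address of the buffer's last
                 * byte has bit 2 clear, as tested below.  Trimming 4 bytes
                 * pushes the end of the buffer into the following dword.) */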
1576                 if(adapter->pcix_82544 &&
1577                    !((unsigned long)(skb->data + offset + size - 1) & 4) &&
1578                    size > 4)
1579                         size -= 4;
1580
1581                 buffer_info->length = size;
1582                 buffer_info->dma =
1583                         pci_map_single(adapter->pdev,
1584                                 skb->data + offset,
1585                                 size,
1586                                 PCI_DMA_TODEVICE);
1587                 buffer_info->time_stamp = jiffies;
1588
1589                 len -= size;
1590                 offset += size;
1591                 count++;
1592                 if(++i == tx_ring->count) i = 0;
1593         }
1594
1595         for(f = 0; f < nr_frags; f++) {
1596                 struct skb_frag_struct *frag;
1597
1598                 frag = &skb_shinfo(skb)->frags[f];
1599                 len = frag->size;
1600                 offset = frag->page_offset;
1601
1602                 while(len) {
1603                         buffer_info = &tx_ring->buffer_info[i];
1604                         size = min(len, max_per_txd);
1605 #ifdef NETIF_F_TSO
1606                         /* Workaround for premature desc write-backs
1607                          * in TSO mode.  Append 4-byte sentinel desc */
1608                         if(mss && f == (nr_frags-1) && size == len && size > 8)
1609                                 size -= 4;
1610 #endif
1611                         /* Workaround for potential 82544 hang in PCI-X.
1612                          * Avoid terminating buffers within evenly-aligned
1613                          * dwords. */
1614                         if(adapter->pcix_82544 &&
1615                            !((unsigned long)(frag->page+offset+size-1) & 4) &&
1616                            size > 4)
1617                                 size -= 4;
1618
1619                         buffer_info->length = size;
1620                         buffer_info->dma =
1621                                 pci_map_page(adapter->pdev,
1622                                         frag->page,
1623                                         offset,
1624                                         size,
1625                                         PCI_DMA_TODEVICE);
1626                         buffer_info->time_stamp = jiffies;
1627
1628                         len -= size;
1629                         offset += size;
1630                         count++;
1631                         if(++i == tx_ring->count) i = 0;
1632                 }
1633         }
1634         i = (i == 0) ? tx_ring->count - 1 : i - 1;
1635         tx_ring->buffer_info[i].skb = skb;
1636         tx_ring->buffer_info[first].next_to_watch = i;
1637         
1638         return count;
1639 }
1640
1641 static inline void
1642 e1000_tx_queue(struct e1000_adapter *adapter, int count, int tx_flags)
1643 {
1644         struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
1645         struct e1000_tx_desc *tx_desc = NULL;
1646         struct e1000_buffer *buffer_info;
1647         uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
1648         unsigned int i;
1649
1650         if(tx_flags & E1000_TX_FLAGS_TSO) {
1651                 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
1652                              E1000_TXD_CMD_TSE;
1653                 txd_upper |= (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8;
1654         }
1655
1656         if(tx_flags & E1000_TX_FLAGS_CSUM) {
1657                 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
1658                 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
1659         }
1660
1661         if(tx_flags & E1000_TX_FLAGS_VLAN) {
1662                 txd_lower |= E1000_TXD_CMD_VLE;
1663                 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
1664         }
1665
1666         i = tx_ring->next_to_use;
1667
1668         while(count--) {
1669                 buffer_info = &tx_ring->buffer_info[i];
1670                 tx_desc = E1000_TX_DESC(*tx_ring, i);
1671                 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
1672                 tx_desc->lower.data =
1673                         cpu_to_le32(txd_lower | buffer_info->length);
1674                 tx_desc->upper.data = cpu_to_le32(txd_upper);
1675                 if(++i == tx_ring->count) i = 0;
1676         }
1677
1678         tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
1679
1680         /* Force memory writes to complete before letting h/w
1681          * know there are new descriptors to fetch.  (Only
1682          * applicable for weak-ordered memory model archs,
1683          * such as IA-64). */
1684         wmb();
1685
1686         tx_ring->next_to_use = i;
1687         E1000_WRITE_REG(&adapter->hw, TDT, i);
1688 }
1689
1690 /**
1691  * 82547 workaround to avoid controller hang in half-duplex environment.
1692  * The workaround is to avoid queuing a large packet that would span
1693  * the internal Tx FIFO ring boundary by notifying the stack to resend
1694  * the packet at a later time.  This gives the Tx FIFO an opportunity to
1695  * flush all packets.  When that occurs, we reset the Tx FIFO pointers
1696  * to the beginning of the Tx FIFO.
1697  **/
1698
1699 #define E1000_FIFO_HDR                  0x10
1700 #define E1000_82547_PAD_LEN             0x3E0
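/* Each packet occupies its own length plus a 16 byte (E1000_FIFO_HDR)
 * header in the internal Tx FIFO, rounded up to a 16 byte boundary by
 * E1000_ROUNDUP below. */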
1701
1702 static inline int
1703 e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb)
1704 {
1705         uint32_t fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
1706         uint32_t skb_fifo_len = skb->len + E1000_FIFO_HDR;
1707
1708         E1000_ROUNDUP(skb_fifo_len, E1000_FIFO_HDR);
1709
1710         if(adapter->link_duplex != HALF_DUPLEX)
1711                 goto no_fifo_stall_required;
1712
1713         if(atomic_read(&adapter->tx_fifo_stall))
1714                 return 1;
1715
1716         if(skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
1717                 atomic_set(&adapter->tx_fifo_stall, 1);
1718                 return 1;
1719         }
1720
1721 no_fifo_stall_required:
1722         adapter->tx_fifo_head += skb_fifo_len;
1723         if(adapter->tx_fifo_head >= adapter->tx_fifo_size)
1724                 adapter->tx_fifo_head -= adapter->tx_fifo_size;
1725         return 0;
1726 }
1727
1728 #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
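/* Worst-case descriptor count for a buffer of S bytes split into 2^X byte
 * chunks, e.g. (9000 >> 12) + 1 = 3 descriptors for a 9000 byte buffer.
 * This over-counts by one when S is an exact multiple of 2^X. */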
1729 static int
1730 e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1731 {
1732         struct e1000_adapter *adapter = netdev->priv;
1733         unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
1734         unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
1735         unsigned int tx_flags = 0;
1736         unsigned long flags;
1737         unsigned int len = skb->len;
1738         int count = 0;
1739         unsigned int mss = 0;
1740         unsigned int nr_frags = 0;
1741         unsigned int f;
1742         nr_frags = skb_shinfo(skb)->nr_frags;
1743         len -= skb->data_len;
1744         if(skb->len <= 0) {
1745                 dev_kfree_skb_any(skb);
1746                 return 0;
1747         }
1748
1749 #ifdef NETIF_F_TSO
1750         mss = skb_shinfo(skb)->tso_size;
1751         /* The controller does a simple calculation to
1752          * make sure there is enough room in the FIFO before
1753          * initiating the DMA for each buffer.  The check assumes
1754          * ceil(buffer len / mss) is at most 4, so to make sure we
1755          * don't overrun the FIFO, cap the max buffer len at 4 * mss
1756          * whenever mss drops. */
1757         if(mss) {
1758                 max_per_txd = min(mss << 2, max_per_txd);
1759                 max_txd_pwr = fls(max_per_txd) - 1;
1760         }
1761         if((mss) || (skb->ip_summed == CHECKSUM_HW))
1762                 count++;
1763         count++;        /* for sentinel desc */
1764 #else
1765         if(skb->ip_summed == CHECKSUM_HW)
1766                 count++;
1767 #endif
1768
1769         count += TXD_USE_COUNT(len, max_txd_pwr);
1770         if(adapter->pcix_82544)
1771                 count++;
1772
1773         nr_frags = skb_shinfo(skb)->nr_frags;
1774         for(f = 0; f < nr_frags; f++)
1775                 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
1776                                        max_txd_pwr);
1777         if(adapter->pcix_82544)
1778                 count += nr_frags;
1779         
1780         spin_lock_irqsave(&adapter->tx_lock, flags);
1781         /* need: count + 2 desc gap to keep tail from touching
1782          * head, otherwise try next time */
1783         if(E1000_DESC_UNUSED(&adapter->tx_ring) < count + 2 ) {
1784                 netif_stop_queue(netdev);
1785                 spin_unlock_irqrestore(&adapter->tx_lock, flags);
1786                 return 1;
1787         }
1788         spin_unlock_irqrestore(&adapter->tx_lock, flags);
1789
1790         if(adapter->hw.mac_type == e1000_82547) {
1791                 if(e1000_82547_fifo_workaround(adapter, skb)) {
1792                         netif_stop_queue(netdev);
1793                         mod_timer(&adapter->tx_fifo_stall_timer, jiffies);
1794                         return 1;
1795                 }
1796         }
1797
1798         if(adapter->vlgrp && vlan_tx_tag_present(skb)) {
1799                 tx_flags |= E1000_TX_FLAGS_VLAN;
1800                 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
1801         }
1802
1803         first = adapter->tx_ring.next_to_use;
1804         
1805         if(e1000_tso(adapter, skb))
1806                 tx_flags |= E1000_TX_FLAGS_TSO;
1807         else if(e1000_tx_csum(adapter, skb))
1808                 tx_flags |= E1000_TX_FLAGS_CSUM;
1809
1810         e1000_tx_queue(adapter, 
1811                 e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss), 
1812                 tx_flags);
1813
1814         netdev->trans_start = jiffies;
1815
1816         return 0;
1817 }
1818
1819 /**
1820  * e1000_tx_timeout - Respond to a Tx Hang
1821  * @netdev: network interface device structure
1822  **/
1823
1824 static void
1825 e1000_tx_timeout(struct net_device *netdev)
1826 {
1827         struct e1000_adapter *adapter = netdev->priv;
1828
1829         /* Do the reset outside of interrupt context */
1830         schedule_work(&adapter->tx_timeout_task);
1831 }
1832
1833 static void
1834 e1000_tx_timeout_task(struct net_device *netdev)
1835 {
1836         struct e1000_adapter *adapter = netdev->priv;
1837
1838         netif_device_detach(netdev);
1839         e1000_down(adapter);
1840         e1000_up(adapter);
1841         netif_device_attach(netdev);
1842 }
1843
1844 /**
1845  * e1000_get_stats - Get System Network Statistics
1846  * @netdev: network interface device structure
1847  *
1848  * Returns the address of the device statistics structure.
1849  * The statistics are actually updated from the timer callback.
1850  **/
1851
1852 static struct net_device_stats *
1853 e1000_get_stats(struct net_device *netdev)
1854 {
1855         struct e1000_adapter *adapter = netdev->priv;
1856
1857         e1000_update_stats(adapter);
1858         return &adapter->net_stats;
1859 }
1860
1861 /**
1862  * e1000_change_mtu - Change the Maximum Transfer Unit
1863  * @netdev: network interface device structure
1864  * @new_mtu: new value for maximum frame size
1865  *
1866  * Returns 0 on success, negative on failure
1867  **/
1868
1869 static int
1870 e1000_change_mtu(struct net_device *netdev, int new_mtu)
1871 {
1872         struct e1000_adapter *adapter = netdev->priv;
1873         int old_mtu = adapter->rx_buffer_len;
1874         int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
1875
1876         if((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
1877            (max_frame > MAX_JUMBO_FRAME_SIZE)) {
1878                 DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
1879                 return -EINVAL;
1880         }
1881
1882         if(max_frame <= MAXIMUM_ETHERNET_FRAME_SIZE) {
1883                 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
1884
1885         } else if(adapter->hw.mac_type < e1000_82543) {
1886                 DPRINTK(PROBE, ERR, "Jumbo Frames not supported on 82542\n");
1887                 return -EINVAL;
1888
1889         } else if(max_frame <= E1000_RXBUFFER_4096) {
1890                 adapter->rx_buffer_len = E1000_RXBUFFER_4096;
1891
1892         } else if(max_frame <= E1000_RXBUFFER_8192) {
1893                 adapter->rx_buffer_len = E1000_RXBUFFER_8192;
1894
1895         } else {
1896                 adapter->rx_buffer_len = E1000_RXBUFFER_16384;
1897         }
1898
1899         if(old_mtu != adapter->rx_buffer_len && netif_running(netdev)) {
1900
1901                 e1000_down(adapter);
1902                 e1000_up(adapter);
1903         }
1904
1905         netdev->mtu = new_mtu;
1906         adapter->hw.max_frame_size = max_frame;
1907
1908         return 0;
1909 }
1910
1911 /**
1912  * e1000_update_stats - Update the board statistics counters
1913  * @adapter: board private structure
1914  **/
1915
1916 void
1917 e1000_update_stats(struct e1000_adapter *adapter)
1918 {
1919         struct e1000_hw *hw = &adapter->hw;
1920         unsigned long flags;
1921         uint16_t phy_tmp;
1922
1923 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
1924
1925         spin_lock_irqsave(&adapter->stats_lock, flags);
1926
1927         /* these counters are modified from e1000_adjust_tbi_stats,
1928          * called from the interrupt context, so they must only
1929          * be written while holding adapter->stats_lock
1930          */
1931
1932         adapter->stats.crcerrs += E1000_READ_REG(hw, CRCERRS);
1933         adapter->stats.gprc += E1000_READ_REG(hw, GPRC);
1934         adapter->stats.gorcl += E1000_READ_REG(hw, GORCL);
1935         adapter->stats.gorch += E1000_READ_REG(hw, GORCH);
1936         adapter->stats.bprc += E1000_READ_REG(hw, BPRC);
1937         adapter->stats.mprc += E1000_READ_REG(hw, MPRC);
1938         adapter->stats.roc += E1000_READ_REG(hw, ROC);
1939         adapter->stats.prc64 += E1000_READ_REG(hw, PRC64);
1940         adapter->stats.prc127 += E1000_READ_REG(hw, PRC127);
1941         adapter->stats.prc255 += E1000_READ_REG(hw, PRC255);
1942         adapter->stats.prc511 += E1000_READ_REG(hw, PRC511);
1943         adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023);
1944         adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522);
1945
1946         /* the rest of the counters are only modified here */
1947
1948         adapter->stats.symerrs += E1000_READ_REG(hw, SYMERRS);
1949         adapter->stats.mpc += E1000_READ_REG(hw, MPC);
1950         adapter->stats.scc += E1000_READ_REG(hw, SCC);
1951         adapter->stats.ecol += E1000_READ_REG(hw, ECOL);
1952         adapter->stats.mcc += E1000_READ_REG(hw, MCC);
1953         adapter->stats.latecol += E1000_READ_REG(hw, LATECOL);
1954         adapter->stats.dc += E1000_READ_REG(hw, DC);
1955         adapter->stats.sec += E1000_READ_REG(hw, SEC);
1956         adapter->stats.rlec += E1000_READ_REG(hw, RLEC);
1957         adapter->stats.xonrxc += E1000_READ_REG(hw, XONRXC);
1958         adapter->stats.xontxc += E1000_READ_REG(hw, XONTXC);
1959         adapter->stats.xoffrxc += E1000_READ_REG(hw, XOFFRXC);
1960         adapter->stats.xofftxc += E1000_READ_REG(hw, XOFFTXC);
1961         adapter->stats.fcruc += E1000_READ_REG(hw, FCRUC);
1962         adapter->stats.gptc += E1000_READ_REG(hw, GPTC);
1963         adapter->stats.gotcl += E1000_READ_REG(hw, GOTCL);
1964         adapter->stats.gotch += E1000_READ_REG(hw, GOTCH);
1965         adapter->stats.rnbc += E1000_READ_REG(hw, RNBC);
1966         adapter->stats.ruc += E1000_READ_REG(hw, RUC);
1967         adapter->stats.rfc += E1000_READ_REG(hw, RFC);
1968         adapter->stats.rjc += E1000_READ_REG(hw, RJC);
1969         adapter->stats.torl += E1000_READ_REG(hw, TORL);
1970         adapter->stats.torh += E1000_READ_REG(hw, TORH);
1971         adapter->stats.totl += E1000_READ_REG(hw, TOTL);
1972         adapter->stats.toth += E1000_READ_REG(hw, TOTH);
1973         adapter->stats.tpr += E1000_READ_REG(hw, TPR);
1974         adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64);
1975         adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127);
1976         adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255);
1977         adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511);
1978         adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023);
1979         adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522);
1980         adapter->stats.mptc += E1000_READ_REG(hw, MPTC);
1981         adapter->stats.bptc += E1000_READ_REG(hw, BPTC);
1982
1983         /* used for adaptive IFS */
1984
1985         hw->tx_packet_delta = E1000_READ_REG(hw, TPT);
1986         adapter->stats.tpt += hw->tx_packet_delta;
1987         hw->collision_delta = E1000_READ_REG(hw, COLC);
1988         adapter->stats.colc += hw->collision_delta;
1989
1990         if(hw->mac_type >= e1000_82543) {
1991                 adapter->stats.algnerrc += E1000_READ_REG(hw, ALGNERRC);
1992                 adapter->stats.rxerrc += E1000_READ_REG(hw, RXERRC);
1993                 adapter->stats.tncrs += E1000_READ_REG(hw, TNCRS);
1994                 adapter->stats.cexterr += E1000_READ_REG(hw, CEXTERR);
1995                 adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC);
1996                 adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC);
1997         }
1998
1999         /* Fill out the OS statistics structure */
2000
2001         adapter->net_stats.rx_packets = adapter->stats.gprc;
2002         adapter->net_stats.tx_packets = adapter->stats.gptc;
2003         adapter->net_stats.rx_bytes = adapter->stats.gorcl;
2004         adapter->net_stats.tx_bytes = adapter->stats.gotcl;
2005         adapter->net_stats.multicast = adapter->stats.mprc;
2006         adapter->net_stats.collisions = adapter->stats.colc;
2007
2008         /* Rx Errors */
2009
2010         adapter->net_stats.rx_errors = adapter->stats.rxerrc +
2011                 adapter->stats.crcerrs + adapter->stats.algnerrc +
2012                 adapter->stats.rlec + adapter->stats.rnbc +
2013                 adapter->stats.mpc + adapter->stats.cexterr;
2014         adapter->net_stats.rx_dropped = adapter->stats.rnbc;
2015         adapter->net_stats.rx_length_errors = adapter->stats.rlec;
2016         adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
2017         adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
2018         adapter->net_stats.rx_fifo_errors = adapter->stats.mpc;
2019         adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
2020
2021         /* Tx Errors */
2022
2023         adapter->net_stats.tx_errors = adapter->stats.ecol +
2024                                        adapter->stats.latecol;
2025         adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
2026         adapter->net_stats.tx_window_errors = adapter->stats.latecol;
2027         adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;
2028
2029         /* Tx Dropped needs to be maintained elsewhere */
2030
2031         /* Phy Stats */
2032
2033         if(hw->media_type == e1000_media_type_copper) {
2034                 if((adapter->link_speed == SPEED_1000) &&
2035                    (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
2036                         phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
2037                         adapter->phy_stats.idle_errors += phy_tmp;
2038                 }
2039
2040                 if((hw->mac_type <= e1000_82546) &&
2041                    (hw->phy_type == e1000_phy_m88) &&
2042                    !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
2043                         adapter->phy_stats.receive_errors += phy_tmp;
2044         }
2045
2046         spin_unlock_irqrestore(&adapter->stats_lock, flags);
2047 }
2048
2049 /**
2050  * e1000_irq_disable - Mask off interrupt generation on the NIC
2051  * @adapter: board private structure
2052  **/
2053
2054 static inline void
2055 e1000_irq_disable(struct e1000_adapter *adapter)
2056 {
2057         atomic_inc(&adapter->irq_sem);
2058         E1000_WRITE_REG(&adapter->hw, IMC, ~0);
2059         E1000_WRITE_FLUSH(&adapter->hw);
2060         synchronize_irq(adapter->pdev->irq);
2061 }
2062
2063 /**
2064  * e1000_irq_enable - Enable default interrupt generation settings
2065  * @adapter: board private structure
2066  **/
2067
2068 static inline void
2069 e1000_irq_enable(struct e1000_adapter *adapter)
2070 {
2071         if(atomic_dec_and_test(&adapter->irq_sem)) {
2072                 E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK);
2073                 E1000_WRITE_FLUSH(&adapter->hw);
2074         }
2075 }
2076
2077 /**
2078  * e1000_intr - Interrupt Handler
2079  * @irq: interrupt number
2080  * @data: pointer to a network interface device structure
2081  * @pt_regs: CPU registers structure
2082  **/
2083
2084 static irqreturn_t
2085 e1000_intr(int irq, void *data, struct pt_regs *regs)
2086 {
2087         struct net_device *netdev = data;
2088         struct e1000_adapter *adapter = netdev->priv;
2089         struct e1000_hw *hw = &adapter->hw;
2090         uint32_t icr = E1000_READ_REG(&adapter->hw, ICR);
2091 #ifndef CONFIG_E1000_NAPI
2092         unsigned int i;
2093 #endif
2094
2095         if(!icr)
2096                 return IRQ_NONE;  /* Not our interrupt */
2097
2098         if(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
2099                 hw->get_link_status = 1;
2100                 mod_timer(&adapter->watchdog_timer, jiffies);
2101         }
2102
2103 #ifdef CONFIG_E1000_NAPI
2104         if(netif_rx_schedule_prep(netdev)) {
2105
2106                 /* Disable interrupts and register for poll.  The
2107                  * flush of the posted write is intentionally
2108                  * left out. */
2109
2110                 atomic_inc(&adapter->irq_sem);
2111                 E1000_WRITE_REG(hw, IMC, ~0);
2112                 __netif_rx_schedule(netdev);
2113         }
2114 #else
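        /* The single '&' below is presumably intentional: both cleanup
         * routines should run on every iteration, while a short-circuiting
         * '&&' would skip the Tx cleanup whenever Rx cleaned something. */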
2115         for(i = 0; i < E1000_MAX_INTR; i++)
2116                 if(!e1000_clean_rx_irq(adapter) &
2117                    !e1000_clean_tx_irq(adapter))
2118                         break;
2119 #endif
2120
2121         return IRQ_HANDLED;
2122 }
2123
2124 #ifdef CONFIG_E1000_NAPI
2125 /**
2126  * e1000_clean - NAPI Rx polling callback
2127  * @adapter: board private structure
2128  **/
2129
2130 static int
2131 e1000_clean(struct net_device *netdev, int *budget)
2132 {
2133         struct e1000_adapter *adapter = netdev->priv;
2134         int work_to_do = min(*budget, netdev->quota);
2135         int work_done = 0;
2136         
2137         e1000_clean_tx_irq(adapter);
2138         e1000_clean_rx_irq(adapter, &work_done, work_to_do);
2139
2140         *budget -= work_done;
2141         netdev->quota -= work_done;
2142         
2143         if(work_done < work_to_do || !netif_running(netdev)) {
2144                 netif_rx_complete(netdev);
2145                 e1000_irq_enable(adapter);
2146                 return 0;
2147         }
2148
2149         return (work_done >= work_to_do);
2150 }
2151 #endif
2152
2153 /**
2154  * e1000_clean_tx_irq - Reclaim resources after transmit completes
2155  * @adapter: board private structure
2156  **/
2157
2158 static boolean_t
2159 e1000_clean_tx_irq(struct e1000_adapter *adapter)
2160 {
2161         struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
2162         struct net_device *netdev = adapter->netdev;
2163         struct pci_dev *pdev = adapter->pdev;
2164         struct e1000_tx_desc *tx_desc, *eop_desc;
2165         struct e1000_buffer *buffer_info;
2166         unsigned int i, eop;
2167         boolean_t cleaned = FALSE;
2168
2169
2170         i = tx_ring->next_to_clean;
2171         eop = tx_ring->buffer_info[i].next_to_watch;
2172         eop_desc = E1000_TX_DESC(*tx_ring, eop);
2173
2174         while(eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
2175
2176                 for(cleaned = FALSE; !cleaned; ) {
2177                         tx_desc = E1000_TX_DESC(*tx_ring, i);
2178                         buffer_info = &tx_ring->buffer_info[i];
2179
2180                         if(buffer_info->dma) {
2181
2182                                 pci_unmap_page(pdev,
2183                                                buffer_info->dma,
2184                                                buffer_info->length,
2185                                                PCI_DMA_TODEVICE);
2186
2187                                 buffer_info->dma = 0;
2188                         }
2189
2190                         if(buffer_info->skb) {
2191
2192                                 dev_kfree_skb_any(buffer_info->skb);
2193
2194                                 buffer_info->skb = NULL;
2195                         }
2196
2197                         tx_desc->buffer_addr = 0;
2198                         tx_desc->lower.data = 0;
2199                         tx_desc->upper.data = 0;
2200
2201                         cleaned = (i == eop);
2202                         if(++i == tx_ring->count) i = 0;
2203                 }
2204                 
2205                 eop = tx_ring->buffer_info[i].next_to_watch;
2206                 eop_desc = E1000_TX_DESC(*tx_ring, eop);
2207         }
2208
2209         tx_ring->next_to_clean = i;
2210
2211         spin_lock(&adapter->tx_lock);
2212
2213         if(cleaned && netif_queue_stopped(netdev) && netif_carrier_ok(netdev))
2214                 netif_wake_queue(netdev);
2215
2216         spin_unlock(&adapter->tx_lock);
2217
2218         return cleaned;
2219 }
2220
2221 /**
2222  * e1000_clean_rx_irq - Send received data up the network stack
2223  * @adapter: board private structure
2224  **/
2225
2226 static boolean_t
2227 #ifdef CONFIG_E1000_NAPI
2228 e1000_clean_rx_irq(struct e1000_adapter *adapter, int *work_done,
2229                    int work_to_do)
2230 #else
2231 e1000_clean_rx_irq(struct e1000_adapter *adapter)
2232 #endif
2233 {
2234         struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
2235         struct net_device *netdev = adapter->netdev;
2236         struct pci_dev *pdev = adapter->pdev;
2237         struct e1000_rx_desc *rx_desc;
2238         struct e1000_buffer *buffer_info;
2239         struct sk_buff *skb;
2240         unsigned long flags;
2241         uint32_t length;
2242         uint8_t last_byte;
2243         unsigned int i;
2244         boolean_t cleaned = FALSE;
2245
2246         i = rx_ring->next_to_clean;
2247         rx_desc = E1000_RX_DESC(*rx_ring, i);
2248
2249         while(rx_desc->status & E1000_RXD_STAT_DD) {
2250                 buffer_info = &rx_ring->buffer_info[i];
2251
2252 #ifdef CONFIG_E1000_NAPI
2253                 if(*work_done >= work_to_do)
2254                         break;
2255
2256                 (*work_done)++;
2257 #endif
2258
2259                 cleaned = TRUE;
2260
2261                 pci_unmap_single(pdev,
2262                                  buffer_info->dma,
2263                                  buffer_info->length,
2264                                  PCI_DMA_FROMDEVICE);
2265
2266                 skb = buffer_info->skb;
2267                 length = le16_to_cpu(rx_desc->length);
2268
2269                 if(!(rx_desc->status & E1000_RXD_STAT_EOP)) {
2270
2271                         /* All receives must fit into a single buffer */
2272
2273                         E1000_DBG("%s: Receive packet consumed multiple buffers\n",
2274                                 netdev->name);
2275
2276                         dev_kfree_skb_irq(skb);
2277                         rx_desc->status = 0;
2278                         buffer_info->skb = NULL;
2279
2280                         if(++i == rx_ring->count) i = 0;
2281
2282                         rx_desc = E1000_RX_DESC(*rx_ring, i);
2283                         continue;
2284                 }
2285
2286                 if(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
2287
2288                         last_byte = *(skb->data + length - 1);
2289
2290                         if(TBI_ACCEPT(&adapter->hw, rx_desc->status,
2291                                       rx_desc->errors, length, last_byte)) {
2292
2293                                 spin_lock_irqsave(&adapter->stats_lock, flags);
2294
2295                                 e1000_tbi_adjust_stats(&adapter->hw,
2296                                                        &adapter->stats,
2297                                                        length, skb->data);
2298
2299                                 spin_unlock_irqrestore(&adapter->stats_lock,
2300                                                        flags);
2301                                 length--;
2302                         } else {
2303
2304                                 dev_kfree_skb_irq(skb);
2305                                 rx_desc->status = 0;
2306                                 buffer_info->skb = NULL;
2307
2308                                 if(++i == rx_ring->count) i = 0;
2309
2310                                 rx_desc = E1000_RX_DESC(*rx_ring, i);
2311                                 continue;
2312                         }
2313                 }
2314
2315                 /* Good Receive */
2316                 skb_put(skb, length - ETHERNET_FCS_SIZE);
2317
2318                 /* Receive Checksum Offload */
2319                 e1000_rx_checksum(adapter, rx_desc, skb);
2320
2321                 skb->protocol = eth_type_trans(skb, netdev);
2322 #ifdef CONFIG_E1000_NAPI
2323                 if(adapter->vlgrp && (rx_desc->status & E1000_RXD_STAT_VP)) {
2324                         vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
2325                                 le16_to_cpu(rx_desc->special &
2326                                         E1000_RXD_SPC_VLAN_MASK));
2327                 } else {
2328                         netif_receive_skb(skb);
2329                 }
2330 #else /* CONFIG_E1000_NAPI */
2331                 if(adapter->vlgrp && (rx_desc->status & E1000_RXD_STAT_VP)) {
2332                         vlan_hwaccel_rx(skb, adapter->vlgrp,
2333                                 le16_to_cpu(rx_desc->special &
2334                                         E1000_RXD_SPC_VLAN_MASK));
2335                 } else {
2336                         netif_rx(skb);
2337                 }
2338 #endif /* CONFIG_E1000_NAPI */
2339                 netdev->last_rx = jiffies;
2340
2341                 rx_desc->status = 0;
2342                 buffer_info->skb = NULL;
2343
2344                 if(++i == rx_ring->count) i = 0;
2345
2346                 rx_desc = E1000_RX_DESC(*rx_ring, i);
2347         }
2348
2349         rx_ring->next_to_clean = i;
2350
2351         e1000_alloc_rx_buffers(adapter);
2352
2353         return cleaned;
2354 }
2355
2356 /**
2357  * e1000_alloc_rx_buffers - Replace used receive buffers
2358  * @adapter: address of board private structure
2359  **/
2360
2361 static void
2362 e1000_alloc_rx_buffers(struct e1000_adapter *adapter)
2363 {
2364         struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
2365         struct net_device *netdev = adapter->netdev;
2366         struct pci_dev *pdev = adapter->pdev;
2367         struct e1000_rx_desc *rx_desc;
2368         struct e1000_buffer *buffer_info;
2369         struct sk_buff *skb;
2370         int reserve_len = 2;
2371         unsigned int i;
2372
2373         i = rx_ring->next_to_use;
2374         buffer_info = &rx_ring->buffer_info[i];
2375
2376         while(!buffer_info->skb) {
2377                 rx_desc = E1000_RX_DESC(*rx_ring, i);
2378
2379                 skb = dev_alloc_skb(adapter->rx_buffer_len + reserve_len);
2380
2381                 if(!skb) {
2382                         /* Better luck next round */
2383                         break;
2384                 }
2385
2386                 /* Make buffer alignment 2 beyond a 16 byte boundary;
2387                  * this will result in a 16 byte aligned IP header after
2388                  * the 14 byte MAC header is removed.
2389                  */
2390                 skb_reserve(skb, reserve_len);
2391
2392                 skb->dev = netdev;
2393
2394                 buffer_info->skb = skb;
2395                 buffer_info->length = adapter->rx_buffer_len;
2396                 buffer_info->dma =
2397                         pci_map_single(pdev,
2398                                        skb->data,
2399                                        adapter->rx_buffer_len,
2400                                        PCI_DMA_FROMDEVICE);
2401
2402                 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
2403
2404                 if((i & ~(E1000_RX_BUFFER_WRITE - 1)) == i) {
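                        /* i is an exact multiple of E1000_RX_BUFFER_WRITE
                         * here (the mask test works because it is a power
                         * of two), so the receive tail pointer is bumped
                         * once per batch rather than per descriptor. */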
2405                         /* Force memory writes to complete before letting h/w
2406                          * know there are new descriptors to fetch.  (Only
2407                          * applicable for weak-ordered memory model archs,
2408                          * such as IA-64). */
2409                         wmb();
2410
2411                         E1000_WRITE_REG(&adapter->hw, RDT, i);
2412                 }
2413
2414                 if(++i == rx_ring->count) i = 0;
2415                 buffer_info = &rx_ring->buffer_info[i];
2416         }
2417
2418         rx_ring->next_to_use = i;
2419 }
2420
2421 /**
2422  * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
2423  * @adapter: board private structure
2424  **/
2425
2426 static void
2427 e1000_smartspeed(struct e1000_adapter *adapter)
2428 {
2429         uint16_t phy_status;
2430         uint16_t phy_ctrl;
2431
2432         if((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg ||
2433            !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
2434                 return;
2435
2436         if(adapter->smartspeed == 0) {
2437                 /* If the Master/Slave config fault is asserted on two
2438                  * back-to-back reads, assume the fault is real. */
2439                 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
2440                 if(!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
2441                 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
2442                 if(!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
2443                 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
2444                 if(phy_ctrl & CR_1000T_MS_ENABLE) {
2445                         phy_ctrl &= ~CR_1000T_MS_ENABLE;
2446                         e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL,
2447                                             phy_ctrl);
2448                         adapter->smartspeed++;
2449                         if(!e1000_phy_setup_autoneg(&adapter->hw) &&
2450                            !e1000_read_phy_reg(&adapter->hw, PHY_CTRL,
2451                                                &phy_ctrl)) {
2452                                 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
2453                                              MII_CR_RESTART_AUTO_NEG);
2454                                 e1000_write_phy_reg(&adapter->hw, PHY_CTRL,
2455                                                     phy_ctrl);
2456                         }
2457                 }
2458                 return;
2459         } else if(adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
2460                 /* If still no link, perhaps using 2/3 pair cable */
2461                 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
2462                 phy_ctrl |= CR_1000T_MS_ENABLE;
2463                 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_ctrl);
2464                 if(!e1000_phy_setup_autoneg(&adapter->hw) &&
2465                    !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_ctrl)) {
2466                         phy_ctrl |= (MII_CR_AUTO_NEG_EN |
2467                                      MII_CR_RESTART_AUTO_NEG);
2468                         e1000_write_phy_reg(&adapter->hw, PHY_CTRL, phy_ctrl);
2469                 }
2470         }
2471         /* Restart process after E1000_SMARTSPEED_MAX iterations */
2472         if(adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
2473                 adapter->smartspeed = 0;
2474 }
2475
2476 /**
2477  * e1000_ioctl - handle device-specific ioctl requests
2478  * @netdev: network interface device structure
2479  * @ifr: pointer to the ioctl request data
2480  * @cmd: ioctl command to execute
2481  **/
2482
2483 static int
2484 e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2485 {
2486         switch (cmd) {
2487         case SIOCGMIIPHY:
2488         case SIOCGMIIREG:
2489         case SIOCSMIIREG:
2490                 return e1000_mii_ioctl(netdev, ifr, cmd);
2491         default:
2492                 return -EOPNOTSUPP;
2493         }
2494 }
2495
2496 /**
2497  * e1000_mii_ioctl - perform MII ioctls on the PHY
2498  * @netdev: network interface device structure
2499  * @ifr: pointer to the MII ioctl request data
2500  * @cmd: SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG
2501  **/
2502
2503 static int
2504 e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2505 {
2506         struct e1000_adapter *adapter = netdev->priv;
2507         struct mii_ioctl_data *data = if_mii(ifr);
2508         int retval;
2509         uint16_t mii_reg;
2510         uint16_t spddplx;
2511
2512         if(adapter->hw.media_type != e1000_media_type_copper)
2513                 return -EOPNOTSUPP;
2514
2515         switch (cmd) {
2516         case SIOCGMIIPHY:
2517                 data->phy_id = adapter->hw.phy_addr;
2518                 break;
2519         case SIOCGMIIREG:
2520                 if (!capable(CAP_NET_ADMIN))
2521                         return -EPERM;
2522                 if (e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
2523                                    &data->val_out))
2524                         return -EIO;
2525                 break;
2526         case SIOCSMIIREG:
2527                 if (!capable(CAP_NET_ADMIN))
2528                         return -EPERM;
2529                 if (data->reg_num & ~(0x1F))
2530                         return -EFAULT;
2531                 mii_reg = data->val_in;
2532                 if (e1000_write_phy_reg(&adapter->hw, data->reg_num,
2533                                         data->val_in))
2534                         return -EIO;
2535                 if (adapter->hw.phy_type == e1000_phy_m88) {
2536                         switch (data->reg_num) {
2537                         case PHY_CTRL:
2538                                 if(data->val_in & MII_CR_AUTO_NEG_EN) {
2539                                         adapter->hw.autoneg = 1;
2540                                         adapter->hw.autoneg_advertised = 0x2F;
2541                                 } else {
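                                        /* Forced mode: decode the standard
                                         * MII control register bits --
                                         * 0x0040 speed MSB (1000), 0x2000
                                         * speed LSB (100), 0x0100 duplex. */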
2542                                         if (data->val_in & 0x40)
2543                                                 spddplx = SPEED_1000;
2544                                         else if (data->val_in & 0x2000)
2545                                                 spddplx = SPEED_100;
2546                                         else
2547                                                 spddplx = SPEED_10;
2548                                         spddplx += (data->val_in & 0x100)
2549                                                    ? FULL_DUPLEX :
2550                                                    HALF_DUPLEX;
2551                                         retval = e1000_set_spd_dplx(adapter,
2552                                                                     spddplx);
2553                                         if(retval)
2554                                                 return retval;
2555                                 }
2556                                 if(netif_running(adapter->netdev)) {
2557                                         e1000_down(adapter);
2558                                         e1000_up(adapter);
2559                                 } else
2560                                         e1000_reset(adapter);
2561                                 break;
2562                         case M88E1000_PHY_SPEC_CTRL:
2563                         case M88E1000_EXT_PHY_SPEC_CTRL:
2564                                 if (e1000_phy_reset(&adapter->hw))
2565                                         return -EIO;
2566                                 break;
2567                         }
2568                 }
2569                 break;
2570         default:
2571                 return -EOPNOTSUPP;
2572         }
2573         return E1000_SUCCESS;
2574 }
2575
2576 /**
2577  * e1000_rx_checksum - Receive Checksum Offload for 82543
2578  * @adapter: board private structure
2579  * @rx_desc: receive descriptor
2580  * @sk_buff: socket buffer with received data
2581  **/
2582
2583 static inline void
2584 e1000_rx_checksum(struct e1000_adapter *adapter,
2585                   struct e1000_rx_desc *rx_desc,
2586                   struct sk_buff *skb)
2587 {
2588         /* 82543 or newer only */
2589         if((adapter->hw.mac_type < e1000_82543) ||
2590            /* Ignore Checksum bit is set */
2591            (rx_desc->status & E1000_RXD_STAT_IXSM) ||
2592            /* TCP Checksum has not been calculated */
2593            (!(rx_desc->status & E1000_RXD_STAT_TCPCS))) {
2594                 skb->ip_summed = CHECKSUM_NONE;
2595                 return;
2596         }
2597
2598         /* At this point we know the hardware did the TCP checksum */
2599         /* now look at the TCP checksum error bit */
2600         if(rx_desc->errors & E1000_RXD_ERR_TCPE) {
2601                 /* let the stack verify checksum errors */
2602                 skb->ip_summed = CHECKSUM_NONE;
2603                 adapter->hw_csum_err++;
2604         } else {
2605                 /* TCP checksum is good */
2606                 skb->ip_summed = CHECKSUM_UNNECESSARY;
2607                 adapter->hw_csum_good++;
2608         }
2609 }
2610
2611 void
2612 e1000_pci_set_mwi(struct e1000_hw *hw)
2613 {
2614         struct e1000_adapter *adapter = hw->back;
2615
2616         pci_set_mwi(adapter->pdev);
2617 }
2618
2619 void
2620 e1000_pci_clear_mwi(struct e1000_hw *hw)
2621 {
2622         struct e1000_adapter *adapter = hw->back;
2623
2624         pci_clear_mwi(adapter->pdev);
2625 }
2626
2627 void
2628 e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
2629 {
2630         struct e1000_adapter *adapter = hw->back;
2631
2632         pci_read_config_word(adapter->pdev, reg, value);
2633 }
2634
2635 void
2636 e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
2637 {
2638         struct e1000_adapter *adapter = hw->back;
2639
2640         pci_write_config_word(adapter->pdev, reg, *value);
2641 }
2642
2643 uint32_t
2644 e1000_io_read(struct e1000_hw *hw, unsigned long port)
2645 {
2646         return inl(port);
2647 }
2648
2649 void
2650 e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value)
2651 {
2652         outl(value, port);
2653 }
2654
2655 static void
2656 e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
2657 {
2658         struct e1000_adapter *adapter = netdev->priv;
2659         uint32_t ctrl, rctl;
2660
2661         e1000_irq_disable(adapter);
2662         adapter->vlgrp = grp;
2663
2664         if(grp) {
2665                 /* enable VLAN tag insert/strip */
2666
2667                 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
2668                 ctrl |= E1000_CTRL_VME;
2669                 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
2670
2671                 /* enable VLAN receive filtering */
2672
2673                 rctl = E1000_READ_REG(&adapter->hw, RCTL);
2674                 rctl |= E1000_RCTL_VFE;
2675                 rctl &= ~E1000_RCTL_CFIEN;
2676                 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
2677         } else {
2678                 /* disable VLAN tag insert/strip */
2679
2680                 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
2681                 ctrl &= ~E1000_CTRL_VME;
2682                 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
2683
2684                 /* disable VLAN filtering */
2685
2686                 rctl = E1000_READ_REG(&adapter->hw, RCTL);
2687                 rctl &= ~E1000_RCTL_VFE;
2688                 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
2689         }
2690
2691         e1000_irq_enable(adapter);
2692 }
2693
2694 static void
2695 e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
2696 {
2697         struct e1000_adapter *adapter = netdev->priv;
2698         uint32_t vfta, index;
2699
2700         /* add VID to filter table */
2701
2702         index = (vid >> 5) & 0x7F;
2703         vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
2704         vfta |= (1 << (vid & 0x1F));
2705         e1000_write_vfta(&adapter->hw, index, vfta);
2706 }
2707
2708 static void
2709 e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
2710 {
2711         struct e1000_adapter *adapter = netdev->priv;
2712         uint32_t vfta, index;
2713
2714         e1000_irq_disable(adapter);
2715
2716         if(adapter->vlgrp)
2717                 adapter->vlgrp->vlan_devices[vid] = NULL;
2718
2719         e1000_irq_enable(adapter);
2720
2721         /* remove VID from filter table*/
2722
2723         index = (vid >> 5) & 0x7F;
2724         vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
2725         vfta &= ~(1 << (vid & 0x1F));
2726         e1000_write_vfta(&adapter->hw, index, vfta);
2727 }
2728
2729 static void
2730 e1000_restore_vlan(struct e1000_adapter *adapter)
2731 {
2732         e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);
2733
2734         if(adapter->vlgrp) {
2735                 uint16_t vid;
2736                 for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
2737                         if(!adapter->vlgrp->vlan_devices[vid])
2738                                 continue;
2739                         e1000_vlan_rx_add_vid(adapter->netdev, vid);
2740                 }
2741         }
2742 }
2743
2744 int
2745 e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx)
2746 {
2747         adapter->hw.autoneg = 0;
2748
2749         switch(spddplx) {
2750         case SPEED_10 + DUPLEX_HALF:
2751                 adapter->hw.forced_speed_duplex = e1000_10_half;
2752                 break;
2753         case SPEED_10 + DUPLEX_FULL:
2754                 adapter->hw.forced_speed_duplex = e1000_10_full;
2755                 break;
2756         case SPEED_100 + DUPLEX_HALF:
2757                 adapter->hw.forced_speed_duplex = e1000_100_half;
2758                 break;
2759         case SPEED_100 + DUPLEX_FULL:
2760                 adapter->hw.forced_speed_duplex = e1000_100_full;
2761                 break;
2762         case SPEED_1000 + DUPLEX_FULL:
2763                 adapter->hw.autoneg = 1;
2764                 adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
2765                 break;
2766         case SPEED_1000 + DUPLEX_HALF: /* not supported */
2767         default:
2768                 return -EINVAL;
2769         }
2770         return 0;
2771 }
2772
2773 static int
2774 e1000_notify_reboot(struct notifier_block *nb, unsigned long event, void *p)
2775 {
2776         struct pci_dev *pdev = NULL;
2777
2778         switch(event) {
2779         case SYS_DOWN:
2780         case SYS_HALT:
2781         case SYS_POWER_OFF:
2782                 while((pdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
2783                         if(pci_dev_driver(pdev) == &e1000_driver)
2784                                 e1000_suspend(pdev, 3);
2785                 }
2786         }
2787         return NOTIFY_DONE;
2788 }
2789
2790 static int
2791 e1000_suspend(struct pci_dev *pdev, uint32_t state)
2792 {
2793         struct net_device *netdev = pci_get_drvdata(pdev);
2794         struct e1000_adapter *adapter = netdev->priv;
2795         uint32_t ctrl, ctrl_ext, rctl, manc, status;
2796         uint32_t wufc = adapter->wol;
2797
2798         netif_device_detach(netdev);
2799
2800         if(netif_running(netdev))
2801                 e1000_down(adapter);
2802
2803         status = E1000_READ_REG(&adapter->hw, STATUS);
2804         if(status & E1000_STATUS_LU)
2805                 wufc &= ~E1000_WUFC_LNKC;
2806
2807         if(wufc) {
2808                 e1000_setup_rctl(adapter);
2809                 e1000_set_multi(netdev);
2810
2811                 /* turn on all-multi mode if wake on multicast is enabled */
2812                 if(adapter->wol & E1000_WUFC_MC) {
2813                         rctl = E1000_READ_REG(&adapter->hw, RCTL);
2814                         rctl |= E1000_RCTL_MPE;
2815                         E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
2816                 }
2817
2818                 if(adapter->hw.mac_type >= e1000_82540) {
2819                         ctrl = E1000_READ_REG(&adapter->hw, CTRL);
2820                         /* advertise wake from D3Cold */
2821                         #define E1000_CTRL_ADVD3WUC 0x00100000
2822                         /* phy power management enable */
2823                         #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
2824                         ctrl |= E1000_CTRL_ADVD3WUC |
2825                                 E1000_CTRL_EN_PHY_PWR_MGMT;
2826                         E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
2827                 }
2828
2829                 if(adapter->hw.media_type == e1000_media_type_fiber ||
2830                    adapter->hw.media_type == e1000_media_type_internal_serdes) {
2831                         /* keep the laser running in D3 */
2832                         ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
2833                         ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
2834                         E1000_WRITE_REG(&adapter->hw, CTRL_EXT, ctrl_ext);
2835                 }
2836
2837                 E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN);
2838                 E1000_WRITE_REG(&adapter->hw, WUFC, wufc);
2839                 pci_enable_wake(pdev, 3, 1);
2840                 pci_enable_wake(pdev, 4, 1); /* 4 == D3 cold */
2841         } else {
2842                 E1000_WRITE_REG(&adapter->hw, WUC, 0);
2843                 E1000_WRITE_REG(&adapter->hw, WUFC, 0);
2844                 pci_enable_wake(pdev, 3, 0);
2845                 pci_enable_wake(pdev, 4, 0); /* 4 == D3 cold */
2846         }
2847
2848         pci_save_state(pdev, adapter->pci_state);
2849
2850         if(adapter->hw.mac_type >= e1000_82540 &&
2851            adapter->hw.media_type == e1000_media_type_copper) {
2852                 manc = E1000_READ_REG(&adapter->hw, MANC);
2853                 if(manc & E1000_MANC_SMBUS_EN) {
2854                         manc |= E1000_MANC_ARP_EN;
2855                         E1000_WRITE_REG(&adapter->hw, MANC, manc);
2856                         pci_enable_wake(pdev, 3, 1);
2857                         pci_enable_wake(pdev, 4, 1); /* 4 == D3 cold */
2858                 }
2859         }
2860
2861         state = (state > 0) ? 3 : 0;
2862         pci_set_power_state(pdev, state);
2863
2864         return 0;
2865 }
2866
2867 #ifdef CONFIG_PM
2868 static int
2869 e1000_resume(struct pci_dev *pdev)
2870 {
2871         struct net_device *netdev = pci_get_drvdata(pdev);
2872         struct e1000_adapter *adapter = netdev->priv;
2873         uint32_t manc;
2874
2875         pci_set_power_state(pdev, 0);
2876         pci_restore_state(pdev, adapter->pci_state);
2877
2878         pci_enable_wake(pdev, 3, 0);
2879         pci_enable_wake(pdev, 4, 0); /* 4 == D3 cold */
2880
2881         e1000_reset(adapter);
2882         E1000_WRITE_REG(&adapter->hw, WUS, ~0);
2883
2884         if(netif_running(netdev))
2885                 e1000_up(adapter);
2886
2887         netif_device_attach(netdev);
2888
2889         if(adapter->hw.mac_type >= e1000_82540 &&
2890            adapter->hw.media_type == e1000_media_type_copper) {
2891                 manc = E1000_READ_REG(&adapter->hw, MANC);
2892                 manc &= ~(E1000_MANC_ARP_EN);
2893                 E1000_WRITE_REG(&adapter->hw, MANC, manc);
2894         }
2895
2896         return 0;
2897 }
2898 #endif
2899
2900 #ifdef CONFIG_NET_POLL_CONTROLLER
2901 /*
2902  * Polling 'interrupt' - used by things like netconsole to send skbs
2903  * without having to re-enable interrupts. It's not called while
2904  * the interrupt routine is executing.
2905  */
2906
2907 static void e1000_netpoll (struct net_device *dev)
2908 {
2909         struct e1000_adapter *adapter = dev->priv;
2910         disable_irq(adapter->pdev->irq);
2911         e1000_intr (adapter->pdev->irq, dev, NULL);
2912         enable_irq(adapter->pdev->irq);
2913 }
2914 #endif
2915
2916 /* e1000_main.c */