vserver 1.9.5.x5
[linux-2.6.git] / drivers / net / e1000 / e1000_main.c
1 /*******************************************************************************
2
3   
4   Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
5   
6   This program is free software; you can redistribute it and/or modify it 
7   under the terms of the GNU General Public License as published by the Free 
8   Software Foundation; either version 2 of the License, or (at your option) 
9   any later version.
10   
11   This program is distributed in the hope that it will be useful, but WITHOUT 
12   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 
13   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for 
14   more details.
15   
16   You should have received a copy of the GNU General Public License along with
17   this program; if not, write to the Free Software Foundation, Inc., 59 
18   Temple Place - Suite 330, Boston, MA  02111-1307, USA.
19   
20   The full GNU General Public License is included in this distribution in the
21   file called LICENSE.
22   
23   Contact Information:
24   Linux NICS <linux.nics@intel.com>
25   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27 *******************************************************************************/
28
29 #include "e1000.h"
30
31 /* Change Log
32  * 5.3.12       6/7/04
33  * - kcompat NETIF_MSG for older kernels (2.4.9) <sean.p.mcdermott@intel.com>
34  * - if_mii support and associated kcompat for older kernels
35  * - More errlogging support from Jon Mason <jonmason@us.ibm.com>
36  * - Fix TSO issues on PPC64 machines -- Jon Mason <jonmason@us.ibm.com>
37  *
38  * 5.6.5        11/01/04
39  * - Enabling NETIF_F_SG without checksum offload is illegal - 
40      John Mason <jdmason@us.ibm.com>
41  * 5.6.3        10/26/04
42  * - Remove redundant initialization - Jamal Hadi
43  * - Reset buffer_info->dma in tx resource cleanup logic
44  * 5.6.2        10/12/04
45  * - Avoid filling tx_ring completely - shemminger@osdl.org
46  * - Replace schedule_timeout() with msleep()/msleep_interruptible() -
47  *   nacc@us.ibm.com
48  * - Sparse cleanup - shemminger@osdl.org
49  * - Fix tx resource cleanup logic
50  * - LLTX support - ak@suse.de and hadi@cyberus.ca
51  */
52
53 char e1000_driver_name[] = "e1000";
54 char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
55 #ifndef CONFIG_E1000_NAPI
56 #define DRIVERNAPI
57 #else
58 #define DRIVERNAPI "-NAPI"
59 #endif
60 char e1000_driver_version[] = "5.6.10.1-k2"DRIVERNAPI;
61 char e1000_copyright[] = "Copyright (c) 1999-2004 Intel Corporation.";
62
63 /* e1000_pci_tbl - PCI Device ID Table
64  *
65  * Last entry must be all 0s
66  *
67  * Macro expands to...
68  *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
69  */
70 static struct pci_device_id e1000_pci_tbl[] = {
71         INTEL_E1000_ETHERNET_DEVICE(0x1000),
72         INTEL_E1000_ETHERNET_DEVICE(0x1001),
73         INTEL_E1000_ETHERNET_DEVICE(0x1004),
74         INTEL_E1000_ETHERNET_DEVICE(0x1008),
75         INTEL_E1000_ETHERNET_DEVICE(0x1009),
76         INTEL_E1000_ETHERNET_DEVICE(0x100C),
77         INTEL_E1000_ETHERNET_DEVICE(0x100D),
78         INTEL_E1000_ETHERNET_DEVICE(0x100E),
79         INTEL_E1000_ETHERNET_DEVICE(0x100F),
80         INTEL_E1000_ETHERNET_DEVICE(0x1010),
81         INTEL_E1000_ETHERNET_DEVICE(0x1011),
82         INTEL_E1000_ETHERNET_DEVICE(0x1012),
83         INTEL_E1000_ETHERNET_DEVICE(0x1013),
84         INTEL_E1000_ETHERNET_DEVICE(0x1015),
85         INTEL_E1000_ETHERNET_DEVICE(0x1016),
86         INTEL_E1000_ETHERNET_DEVICE(0x1017),
87         INTEL_E1000_ETHERNET_DEVICE(0x1018),
88         INTEL_E1000_ETHERNET_DEVICE(0x1019),
89         INTEL_E1000_ETHERNET_DEVICE(0x101D),
90         INTEL_E1000_ETHERNET_DEVICE(0x101E),
91         INTEL_E1000_ETHERNET_DEVICE(0x1026),
92         INTEL_E1000_ETHERNET_DEVICE(0x1027),
93         INTEL_E1000_ETHERNET_DEVICE(0x1028),
94         INTEL_E1000_ETHERNET_DEVICE(0x1075),
95         INTEL_E1000_ETHERNET_DEVICE(0x1076),
96         INTEL_E1000_ETHERNET_DEVICE(0x1077),
97         INTEL_E1000_ETHERNET_DEVICE(0x1078),
98         INTEL_E1000_ETHERNET_DEVICE(0x1079),
99         INTEL_E1000_ETHERNET_DEVICE(0x107A),
100         INTEL_E1000_ETHERNET_DEVICE(0x107B),
101         INTEL_E1000_ETHERNET_DEVICE(0x107C),
102         INTEL_E1000_ETHERNET_DEVICE(0x108A),
103         /* required last entry */
104         {0,}
105 };
106
107 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
108
109 int e1000_up(struct e1000_adapter *adapter);
110 void e1000_down(struct e1000_adapter *adapter);
111 void e1000_reset(struct e1000_adapter *adapter);
112 int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx);
113 int e1000_setup_tx_resources(struct e1000_adapter *adapter);
114 int e1000_setup_rx_resources(struct e1000_adapter *adapter);
115 void e1000_free_tx_resources(struct e1000_adapter *adapter);
116 void e1000_free_rx_resources(struct e1000_adapter *adapter);
117 void e1000_update_stats(struct e1000_adapter *adapter);
118
119 /* Local Function Prototypes */
120
121 static int e1000_init_module(void);
122 static void e1000_exit_module(void);
123 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
124 static void __devexit e1000_remove(struct pci_dev *pdev);
125 static int e1000_sw_init(struct e1000_adapter *adapter);
126 static int e1000_open(struct net_device *netdev);
127 static int e1000_close(struct net_device *netdev);
128 static void e1000_configure_tx(struct e1000_adapter *adapter);
129 static void e1000_configure_rx(struct e1000_adapter *adapter);
130 static void e1000_setup_rctl(struct e1000_adapter *adapter);
131 static void e1000_clean_tx_ring(struct e1000_adapter *adapter);
132 static void e1000_clean_rx_ring(struct e1000_adapter *adapter);
133 static void e1000_set_multi(struct net_device *netdev);
134 static void e1000_update_phy_info(unsigned long data);
135 static void e1000_watchdog(unsigned long data);
136 static void e1000_82547_tx_fifo_stall(unsigned long data);
137 static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
138 static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
139 static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
140 static int e1000_set_mac(struct net_device *netdev, void *p);
141 static irqreturn_t e1000_intr(int irq, void *data, struct pt_regs *regs);
142 static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter);
143 #ifdef CONFIG_E1000_NAPI
144 static int e1000_clean(struct net_device *netdev, int *budget);
145 static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter,
146                                     int *work_done, int work_to_do);
147 #else
148 static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter);
149 #endif
150 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter);
151 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
152 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
153                            int cmd);
154 void e1000_set_ethtool_ops(struct net_device *netdev);
155 static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
156 static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
157 static void e1000_tx_timeout(struct net_device *dev);
158 static void e1000_tx_timeout_task(struct net_device *dev);
159 static void e1000_smartspeed(struct e1000_adapter *adapter);
160 static inline int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
161                                               struct sk_buff *skb);
162
163 static void e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp);
164 static void e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
165 static void e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
166 static void e1000_restore_vlan(struct e1000_adapter *adapter);
167
168 static int e1000_notify_reboot(struct notifier_block *, unsigned long event, void *ptr);
169 static int e1000_suspend(struct pci_dev *pdev, uint32_t state);
170 #ifdef CONFIG_PM
171 static int e1000_resume(struct pci_dev *pdev);
172 #endif
173
174 #ifdef CONFIG_NET_POLL_CONTROLLER
175 /* for netdump / net console */
176 static void e1000_netpoll (struct net_device *netdev);
177 #endif
178
179 struct notifier_block e1000_notifier_reboot = {
180         .notifier_call  = e1000_notify_reboot,
181         .next           = NULL,
182         .priority       = 0
183 };
184
185 /* Exported from other modules */
186
187 extern void e1000_check_options(struct e1000_adapter *adapter);
188
189 static struct pci_driver e1000_driver = {
190         .name     = e1000_driver_name,
191         .id_table = e1000_pci_tbl,
192         .probe    = e1000_probe,
193         .remove   = __devexit_p(e1000_remove),
194         /* Power Managment Hooks */
195 #ifdef CONFIG_PM
196         .suspend  = e1000_suspend,
197         .resume   = e1000_resume
198 #endif
199 };
200
201 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
202 MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
203 MODULE_LICENSE("GPL");
204
205 static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
206 module_param(debug, int, 0);
207 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
208
209 /**
210  * e1000_init_module - Driver Registration Routine
211  *
212  * e1000_init_module is the first routine called when the driver is
213  * loaded. All it does is register with the PCI subsystem.
214  **/
215
216 static int __init
217 e1000_init_module(void)
218 {
219         int ret;
220         printk(KERN_INFO "%s - version %s\n",
221                e1000_driver_string, e1000_driver_version);
222
223         printk(KERN_INFO "%s\n", e1000_copyright);
224
225         ret = pci_module_init(&e1000_driver);
226         if(ret >= 0) {
227                 register_reboot_notifier(&e1000_notifier_reboot);
228         }
229         return ret;
230 }
231
232 module_init(e1000_init_module);
233
234 /**
235  * e1000_exit_module - Driver Exit Cleanup Routine
236  *
237  * e1000_exit_module is called just before the driver is removed
238  * from memory.
239  **/
240
241 static void __exit
242 e1000_exit_module(void)
243 {
244         unregister_reboot_notifier(&e1000_notifier_reboot);
245         pci_unregister_driver(&e1000_driver);
246 }
247
248 module_exit(e1000_exit_module);
249
250 /**
251  * e1000_irq_disable - Mask off interrupt generation on the NIC
252  * @adapter: board private structure
253  **/
254
255 static inline void
256 e1000_irq_disable(struct e1000_adapter *adapter)
257 {
258         atomic_inc(&adapter->irq_sem);
259         E1000_WRITE_REG(&adapter->hw, IMC, ~0);
260         E1000_WRITE_FLUSH(&adapter->hw);
261         synchronize_irq(adapter->pdev->irq);
262 }
263
264 /**
265  * e1000_irq_enable - Enable default interrupt generation settings
266  * @adapter: board private structure
267  **/
268
269 static inline void
270 e1000_irq_enable(struct e1000_adapter *adapter)
271 {
272         if(likely(atomic_dec_and_test(&adapter->irq_sem))) {
273                 E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK);
274                 E1000_WRITE_FLUSH(&adapter->hw);
275         }
276 }
277
278 int
279 e1000_up(struct e1000_adapter *adapter)
280 {
281         struct net_device *netdev = adapter->netdev;
282         int err;
283
284         /* hardware has been reset, we need to reload some things */
285
286         /* Reset the PHY if it was previously powered down */
287         if(adapter->hw.media_type == e1000_media_type_copper) {
288                 uint16_t mii_reg;
289                 e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
290                 if(mii_reg & MII_CR_POWER_DOWN)
291                         e1000_phy_reset(&adapter->hw);
292         }
293
294         e1000_set_multi(netdev);
295
296         e1000_restore_vlan(adapter);
297
298         e1000_configure_tx(adapter);
299         e1000_setup_rctl(adapter);
300         e1000_configure_rx(adapter);
301         e1000_alloc_rx_buffers(adapter);
302
303         if((err = request_irq(adapter->pdev->irq, &e1000_intr,
304                               SA_SHIRQ | SA_SAMPLE_RANDOM,
305                               netdev->name, netdev)))
306                 return err;
307
308         mod_timer(&adapter->watchdog_timer, jiffies);
309         e1000_irq_enable(adapter);
310
311         return 0;
312 }
313
314 void
315 e1000_down(struct e1000_adapter *adapter)
316 {
317         struct net_device *netdev = adapter->netdev;
318
319         e1000_irq_disable(adapter);
320         free_irq(adapter->pdev->irq, netdev);
321         del_timer_sync(&adapter->tx_fifo_stall_timer);
322         del_timer_sync(&adapter->watchdog_timer);
323         del_timer_sync(&adapter->phy_info_timer);
324         adapter->link_speed = 0;
325         adapter->link_duplex = 0;
326         netif_carrier_off(netdev);
327         netif_stop_queue(netdev);
328
329         e1000_reset(adapter);
330         e1000_clean_tx_ring(adapter);
331         e1000_clean_rx_ring(adapter);
332
333         /* If WoL is not enabled
334          * Power down the PHY so no link is implied when interface is down */
335         if(!adapter->wol && adapter->hw.media_type == e1000_media_type_copper) {
336                 uint16_t mii_reg;
337                 e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
338                 mii_reg |= MII_CR_POWER_DOWN;
339                 e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg);
340         }
341 }
342
343 void
344 e1000_reset(struct e1000_adapter *adapter)
345 {
346         uint32_t pba;
347
348         /* Repartition Pba for greater than 9k mtu
349          * To take effect CTRL.RST is required.
350          */
351
352         if(adapter->hw.mac_type < e1000_82547) {
353                 if(adapter->rx_buffer_len > E1000_RXBUFFER_8192)
354                         pba = E1000_PBA_40K;
355                 else
356                         pba = E1000_PBA_48K;
357         } else {
358                 if(adapter->rx_buffer_len > E1000_RXBUFFER_8192)
359                         pba = E1000_PBA_22K;
360                 else
361                         pba = E1000_PBA_30K;
362                 adapter->tx_fifo_head = 0;
363                 adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
364                 adapter->tx_fifo_size =
365                         (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
366                 atomic_set(&adapter->tx_fifo_stall, 0);
367         }
368         E1000_WRITE_REG(&adapter->hw, PBA, pba);
369
370         /* flow control settings */
371         adapter->hw.fc_high_water = (pba << E1000_PBA_BYTES_SHIFT) -
372                                     E1000_FC_HIGH_DIFF;
373         adapter->hw.fc_low_water = (pba << E1000_PBA_BYTES_SHIFT) -
374                                    E1000_FC_LOW_DIFF;
375         adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME;
376         adapter->hw.fc_send_xon = 1;
377         adapter->hw.fc = adapter->hw.original_fc;
378
379         e1000_reset_hw(&adapter->hw);
380         if(adapter->hw.mac_type >= e1000_82544)
381                 E1000_WRITE_REG(&adapter->hw, WUC, 0);
382         if(e1000_init_hw(&adapter->hw))
383                 DPRINTK(PROBE, ERR, "Hardware Error\n");
384
385         /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
386         E1000_WRITE_REG(&adapter->hw, VET, ETHERNET_IEEE_VLAN_TYPE);
387
388         e1000_reset_adaptive(&adapter->hw);
389         e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
390 }
391
392 /**
393  * e1000_probe - Device Initialization Routine
394  * @pdev: PCI device information struct
395  * @ent: entry in e1000_pci_tbl
396  *
397  * Returns 0 on success, negative on failure
398  *
399  * e1000_probe initializes an adapter identified by a pci_dev structure.
400  * The OS initialization, configuring of the adapter private structure,
401  * and a hardware reset occur.
402  **/
403
404 static int __devinit
405 e1000_probe(struct pci_dev *pdev,
406             const struct pci_device_id *ent)
407 {
408         struct net_device *netdev;
409         struct e1000_adapter *adapter;
410         static int cards_found = 0;
411         unsigned long mmio_start;
412         int mmio_len;
413         int pci_using_dac;
414         int i;
415         int err;
416         uint16_t eeprom_data;
417
418         if((err = pci_enable_device(pdev)))
419                 return err;
420
421         if(!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
422                 pci_using_dac = 1;
423         } else {
424                 if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
425                         E1000_ERR("No usable DMA configuration, aborting\n");
426                         return err;
427                 }
428                 pci_using_dac = 0;
429         }
430
431         if((err = pci_request_regions(pdev, e1000_driver_name)))
432                 return err;
433
434         pci_set_master(pdev);
435
436         netdev = alloc_etherdev(sizeof(struct e1000_adapter));
437         if(!netdev) {
438                 err = -ENOMEM;
439                 goto err_alloc_etherdev;
440         }
441
442         SET_MODULE_OWNER(netdev);
443         SET_NETDEV_DEV(netdev, &pdev->dev);
444
445         pci_set_drvdata(pdev, netdev);
446         adapter = netdev->priv;
447         adapter->netdev = netdev;
448         adapter->pdev = pdev;
449         adapter->hw.back = adapter;
450         adapter->msg_enable = (1 << debug) - 1;
451
452         mmio_start = pci_resource_start(pdev, BAR_0);
453         mmio_len = pci_resource_len(pdev, BAR_0);
454
455         adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
456         if(!adapter->hw.hw_addr) {
457                 err = -EIO;
458                 goto err_ioremap;
459         }
460
461         for(i = BAR_1; i <= BAR_5; i++) {
462                 if(pci_resource_len(pdev, i) == 0)
463                         continue;
464                 if(pci_resource_flags(pdev, i) & IORESOURCE_IO) {
465                         adapter->hw.io_base = pci_resource_start(pdev, i);
466                         break;
467                 }
468         }
469
470         netdev->open = &e1000_open;
471         netdev->stop = &e1000_close;
472         netdev->hard_start_xmit = &e1000_xmit_frame;
473         netdev->get_stats = &e1000_get_stats;
474         netdev->set_multicast_list = &e1000_set_multi;
475         netdev->set_mac_address = &e1000_set_mac;
476         netdev->change_mtu = &e1000_change_mtu;
477         netdev->do_ioctl = &e1000_ioctl;
478         e1000_set_ethtool_ops(netdev);
479         netdev->tx_timeout = &e1000_tx_timeout;
480         netdev->watchdog_timeo = 5 * HZ;
481 #ifdef CONFIG_E1000_NAPI
482         netdev->poll = &e1000_clean;
483         netdev->weight = 64;
484 #endif
485         netdev->vlan_rx_register = e1000_vlan_rx_register;
486         netdev->vlan_rx_add_vid = e1000_vlan_rx_add_vid;
487         netdev->vlan_rx_kill_vid = e1000_vlan_rx_kill_vid;
488 #ifdef CONFIG_NET_POLL_CONTROLLER
489         netdev->poll_controller = e1000_netpoll;
490 #endif
491         strcpy(netdev->name, pci_name(pdev));
492
493         netdev->mem_start = mmio_start;
494         netdev->mem_end = mmio_start + mmio_len;
495         netdev->base_addr = adapter->hw.io_base;
496
497         adapter->bd_number = cards_found;
498
499         /* setup the private structure */
500
501         if((err = e1000_sw_init(adapter)))
502                 goto err_sw_init;
503
504         if(adapter->hw.mac_type >= e1000_82543) {
505                 netdev->features = NETIF_F_SG |
506                                    NETIF_F_HW_CSUM |
507                                    NETIF_F_HW_VLAN_TX |
508                                    NETIF_F_HW_VLAN_RX |
509                                    NETIF_F_HW_VLAN_FILTER;
510         }
511
512 #ifdef NETIF_F_TSO
513         /* Disbaled for now until root-cause is found for
514          * hangs reported against non-IA archs.  TSO can be
515          * enabled using ethtool -K eth<x> tso on */
516         if((adapter->hw.mac_type >= e1000_82544) &&
517            (adapter->hw.mac_type != e1000_82547))
518                 netdev->features |= NETIF_F_TSO;
519 #endif
520         if(pci_using_dac)
521                 netdev->features |= NETIF_F_HIGHDMA;
522
523         /* hard_start_xmit is safe against parallel locking */
524         netdev->features |= NETIF_F_LLTX; 
525  
526         /* before reading the EEPROM, reset the controller to 
527          * put the device in a known good starting state */
528         
529         e1000_reset_hw(&adapter->hw);
530
531         /* make sure the EEPROM is good */
532
533         if(e1000_validate_eeprom_checksum(&adapter->hw) < 0) {
534                 DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
535                 err = -EIO;
536                 goto err_eeprom;
537         }
538
539         /* copy the MAC address out of the EEPROM */
540
541         if (e1000_read_mac_addr(&adapter->hw))
542                 DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
543         memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
544
545         if(!is_valid_ether_addr(netdev->dev_addr)) {
546                 DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
547                 err = -EIO;
548                 goto err_eeprom;
549         }
550
551         e1000_read_part_num(&adapter->hw, &(adapter->part_num));
552
553         e1000_get_bus_info(&adapter->hw);
554
555         init_timer(&adapter->tx_fifo_stall_timer);
556         adapter->tx_fifo_stall_timer.function = &e1000_82547_tx_fifo_stall;
557         adapter->tx_fifo_stall_timer.data = (unsigned long) adapter;
558
559         init_timer(&adapter->watchdog_timer);
560         adapter->watchdog_timer.function = &e1000_watchdog;
561         adapter->watchdog_timer.data = (unsigned long) adapter;
562
563         init_timer(&adapter->phy_info_timer);
564         adapter->phy_info_timer.function = &e1000_update_phy_info;
565         adapter->phy_info_timer.data = (unsigned long) adapter;
566
567         INIT_WORK(&adapter->tx_timeout_task,
568                 (void (*)(void *))e1000_tx_timeout_task, netdev);
569
570         /* we're going to reset, so assume we have no link for now */
571
572         netif_carrier_off(netdev);
573         netif_stop_queue(netdev);
574
575         e1000_check_options(adapter);
576
577         /* Initial Wake on LAN setting
578          * If APM wake is enabled in the EEPROM,
579          * enable the ACPI Magic Packet filter
580          */
581
582         switch(adapter->hw.mac_type) {
583         case e1000_82542_rev2_0:
584         case e1000_82542_rev2_1:
585         case e1000_82543:
586                 break;
587         case e1000_82546:
588         case e1000_82546_rev_3:
589                 if((E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1)
590                    && (adapter->hw.media_type == e1000_media_type_copper)) {
591                         e1000_read_eeprom(&adapter->hw,
592                                 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
593                         break;
594                 }
595                 /* Fall Through */
596         default:
597                 e1000_read_eeprom(&adapter->hw,
598                         EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
599                 break;
600         }
601         if(eeprom_data & E1000_EEPROM_APME)
602                 adapter->wol |= E1000_WUFC_MAG;
603
604         /* reset the hardware with the new settings */
605         e1000_reset(adapter);
606
607         strcpy(netdev->name, "eth%d");
608         if((err = register_netdev(netdev)))
609                 goto err_register;
610
611         DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");
612
613         cards_found++;
614         return 0;
615
616 err_register:
617 err_sw_init:
618 err_eeprom:
619         iounmap(adapter->hw.hw_addr);
620 err_ioremap:
621         free_netdev(netdev);
622 err_alloc_etherdev:
623         pci_release_regions(pdev);
624         return err;
625 }
626
627 /**
628  * e1000_remove - Device Removal Routine
629  * @pdev: PCI device information struct
630  *
631  * e1000_remove is called by the PCI subsystem to alert the driver
632  * that it should release a PCI device.  The could be caused by a
633  * Hot-Plug event, or because the driver is going to be removed from
634  * memory.
635  **/
636
637 static void __devexit
638 e1000_remove(struct pci_dev *pdev)
639 {
640         struct net_device *netdev = pci_get_drvdata(pdev);
641         struct e1000_adapter *adapter = netdev->priv;
642         uint32_t manc;
643
644         if(adapter->hw.mac_type >= e1000_82540 &&
645            adapter->hw.media_type == e1000_media_type_copper) {
646                 manc = E1000_READ_REG(&adapter->hw, MANC);
647                 if(manc & E1000_MANC_SMBUS_EN) {
648                         manc |= E1000_MANC_ARP_EN;
649                         E1000_WRITE_REG(&adapter->hw, MANC, manc);
650                 }
651         }
652
653         unregister_netdev(netdev);
654
655         e1000_phy_hw_reset(&adapter->hw);
656
657         iounmap(adapter->hw.hw_addr);
658         pci_release_regions(pdev);
659
660         free_netdev(netdev);
661
662         pci_disable_device(pdev);
663 }
664
665 /**
666  * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
667  * @adapter: board private structure to initialize
668  *
669  * e1000_sw_init initializes the Adapter private data structure.
670  * Fields are initialized based on PCI device information and
671  * OS network device settings (MTU size).
672  **/
673
674 static int __devinit
675 e1000_sw_init(struct e1000_adapter *adapter)
676 {
677         struct e1000_hw *hw = &adapter->hw;
678         struct net_device *netdev = adapter->netdev;
679         struct pci_dev *pdev = adapter->pdev;
680
681         /* PCI config space info */
682
683         hw->vendor_id = pdev->vendor;
684         hw->device_id = pdev->device;
685         hw->subsystem_vendor_id = pdev->subsystem_vendor;
686         hw->subsystem_id = pdev->subsystem_device;
687
688         pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
689
690         pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
691
692         adapter->rx_buffer_len = E1000_RXBUFFER_2048;
693         hw->max_frame_size = netdev->mtu +
694                              ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
695         hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
696
697         /* identify the MAC */
698
699         if(e1000_set_mac_type(hw)) {
700                 DPRINTK(PROBE, ERR, "Unknown MAC Type\n");
701                 return -EIO;
702         }
703
704         /* initialize eeprom parameters */
705
706         e1000_init_eeprom_params(hw);
707
708         switch(hw->mac_type) {
709         default:
710                 break;
711         case e1000_82541:
712         case e1000_82547:
713         case e1000_82541_rev_2:
714         case e1000_82547_rev_2:
715                 hw->phy_init_script = 1;
716                 break;
717         }
718
719         e1000_set_media_type(hw);
720
721         hw->wait_autoneg_complete = FALSE;
722         hw->tbi_compatibility_en = TRUE;
723         hw->adaptive_ifs = TRUE;
724
725         /* Copper options */
726
727         if(hw->media_type == e1000_media_type_copper) {
728                 hw->mdix = AUTO_ALL_MODES;
729                 hw->disable_polarity_correction = FALSE;
730                 hw->master_slave = E1000_MASTER_SLAVE;
731         }
732
733         atomic_set(&adapter->irq_sem, 1);
734         spin_lock_init(&adapter->stats_lock);
735         spin_lock_init(&adapter->tx_lock);
736
737         return 0;
738 }
739
740 /**
741  * e1000_open - Called when a network interface is made active
742  * @netdev: network interface device structure
743  *
744  * Returns 0 on success, negative value on failure
745  *
746  * The open entry point is called when a network interface is made
747  * active by the system (IFF_UP).  At this point all resources needed
748  * for transmit and receive operations are allocated, the interrupt
749  * handler is registered with the OS, the watchdog timer is started,
750  * and the stack is notified that the interface is ready.
751  **/
752
753 static int
754 e1000_open(struct net_device *netdev)
755 {
756         struct e1000_adapter *adapter = netdev->priv;
757         int err;
758
759         /* allocate transmit descriptors */
760
761         if((err = e1000_setup_tx_resources(adapter)))
762                 goto err_setup_tx;
763
764         /* allocate receive descriptors */
765
766         if((err = e1000_setup_rx_resources(adapter)))
767                 goto err_setup_rx;
768
769         if((err = e1000_up(adapter)))
770                 goto err_up;
771
772         return E1000_SUCCESS;
773
774 err_up:
775         e1000_free_rx_resources(adapter);
776 err_setup_rx:
777         e1000_free_tx_resources(adapter);
778 err_setup_tx:
779         e1000_reset(adapter);
780
781         return err;
782 }
783
784 /**
785  * e1000_close - Disables a network interface
786  * @netdev: network interface device structure
787  *
788  * Returns 0, this is not allowed to fail
789  *
790  * The close entry point is called when an interface is de-activated
791  * by the OS.  The hardware is still under the drivers control, but
792  * needs to be disabled.  A global MAC reset is issued to stop the
793  * hardware, and all transmit and receive resources are freed.
794  **/
795
796 static int
797 e1000_close(struct net_device *netdev)
798 {
799         struct e1000_adapter *adapter = netdev->priv;
800
801         e1000_down(adapter);
802
803         e1000_free_tx_resources(adapter);
804         e1000_free_rx_resources(adapter);
805
806         return 0;
807 }
808
809 /**
810  * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
811  * @adapter: board private structure
812  *
813  * Return 0 on success, negative on failure
814  **/
815
816 int
817 e1000_setup_tx_resources(struct e1000_adapter *adapter)
818 {
819         struct e1000_desc_ring *txdr = &adapter->tx_ring;
820         struct pci_dev *pdev = adapter->pdev;
821         int size;
822
823         size = sizeof(struct e1000_buffer) * txdr->count;
824         txdr->buffer_info = vmalloc(size);
825         if(!txdr->buffer_info) {
826                 DPRINTK(PROBE, ERR, 
827                 "Unble to Allocate Memory for the Transmit descriptor ring\n");
828                 return -ENOMEM;
829         }
830         memset(txdr->buffer_info, 0, size);
831
832         /* round up to nearest 4K */
833
834         txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
835         E1000_ROUNDUP(txdr->size, 4096);
836
837         txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
838         if(!txdr->desc) {
839                 DPRINTK(PROBE, ERR, 
840                 "Unble to Allocate Memory for the Transmit descriptor ring\n");
841                 vfree(txdr->buffer_info);
842                 return -ENOMEM;
843         }
844         memset(txdr->desc, 0, txdr->size);
845
846         txdr->next_to_use = 0;
847         txdr->next_to_clean = 0;
848
849         return 0;
850 }
851
/**
 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/

static void
e1000_configure_tx(struct e1000_adapter *adapter)
{
	uint64_t tdba = adapter->tx_ring.dma;
	uint32_t tdlen = adapter->tx_ring.count * sizeof(struct e1000_tx_desc);
	uint32_t tctl, tipg;

	/* Point the hardware at the descriptor ring: the 64-bit DMA base
	 * address is split across the low/high base-address registers */
	E1000_WRITE_REG(&adapter->hw, TDBAL, (tdba & 0x00000000ffffffffULL));
	E1000_WRITE_REG(&adapter->hw, TDBAH, (tdba >> 32));

	E1000_WRITE_REG(&adapter->hw, TDLEN, tdlen);

	/* Setup the HW Tx Head and Tail descriptor pointers */

	E1000_WRITE_REG(&adapter->hw, TDH, 0);
	E1000_WRITE_REG(&adapter->hw, TDT, 0);

	/* Set the default values for the Tx Inter Packet Gap timer */

	switch (adapter->hw.mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
		/* 82542 has its own IPG defaults */
		tipg = DEFAULT_82542_TIPG_IPGT;
		tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
		tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
		break;
	default:
		/* fiber/serdes and copper media use different transmit
		 * IPG values on 82543 and later */
		if(adapter->hw.media_type == e1000_media_type_fiber ||
		   adapter->hw.media_type == e1000_media_type_internal_serdes)
			tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
		else
			tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
		tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
		tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
	}
	E1000_WRITE_REG(&adapter->hw, TIPG, tipg);

	/* Set the Tx Interrupt Delay register */

	E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay);
	/* 82540 and newer also have an absolute interrupt delay */
	if(adapter->hw.mac_type >= e1000_82540)
		E1000_WRITE_REG(&adapter->hw, TADV, adapter->tx_abs_int_delay);

	/* Program the Transmit Control Register */

	tctl = E1000_READ_REG(&adapter->hw, TCTL);

	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_EN | E1000_TCTL_PSP |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	E1000_WRITE_REG(&adapter->hw, TCTL, tctl);

	e1000_config_collision_dist(&adapter->hw);

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_TXD_CMD_IDE | E1000_TXD_CMD_EOP |
		E1000_TXD_CMD_IFCS;

	/* pre-82543 parts use report-packet-sent (RPS) status;
	 * newer parts use report-status (RS) on descriptor write-back */
	if(adapter->hw.mac_type < e1000_82543)
		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
	else
		adapter->txd_cmd |= E1000_TXD_CMD_RS;

	/* Cache if we're 82544 running in PCI-X because we'll
	 * need this to apply a workaround later in the send path. */
	if(adapter->hw.mac_type == e1000_82544 &&
	   adapter->hw.bus_type == e1000_bus_type_pcix)
		adapter->pcix_82544 = 1;
}
929
930 /**
931  * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
932  * @adapter: board private structure
933  *
934  * Returns 0 on success, negative on failure
935  **/
936
937 int
938 e1000_setup_rx_resources(struct e1000_adapter *adapter)
939 {
940         struct e1000_desc_ring *rxdr = &adapter->rx_ring;
941         struct pci_dev *pdev = adapter->pdev;
942         int size;
943
944         size = sizeof(struct e1000_buffer) * rxdr->count;
945         rxdr->buffer_info = vmalloc(size);
946         if(!rxdr->buffer_info) {
947                 DPRINTK(PROBE, ERR, 
948                 "Unble to Allocate Memory for the Recieve descriptor ring\n");
949                 return -ENOMEM;
950         }
951         memset(rxdr->buffer_info, 0, size);
952
953         /* Round up to nearest 4K */
954
955         rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
956         E1000_ROUNDUP(rxdr->size, 4096);
957
958         rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
959
960         if(!rxdr->desc) {
961                 DPRINTK(PROBE, ERR, 
962                 "Unble to Allocate Memory for the Recieve descriptor ring\n");
963                 vfree(rxdr->buffer_info);
964                 return -ENOMEM;
965         }
966         memset(rxdr->desc, 0, rxdr->size);
967
968         rxdr->next_to_clean = 0;
969         rxdr->next_to_use = 0;
970
971         return 0;
972 }
973
/**
 * e1000_setup_rctl - configure the receive control register
 * @adapter: Board private structure
 **/

static void
e1000_setup_rctl(struct e1000_adapter *adapter)
{
	uint32_t rctl;

	rctl = E1000_READ_REG(&adapter->hw, RCTL);

	/* clear the 2-bit multicast-offset field before OR-ing in the
	 * configured filter type below */
	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);

	/* enable receiver, accept broadcasts, no loopback, interrupt at
	 * half-full receive descriptor threshold */
	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
		E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
		(adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* store bad packets only while the TBI compatibility workaround
	 * is active (it needs to see errored frames) */
	if(adapter->hw.tbi_compatibility_on == 1)
		rctl |= E1000_RCTL_SBP;
	else
		rctl &= ~E1000_RCTL_SBP;

	/* Setup buffer sizes */
	rctl &= ~(E1000_RCTL_SZ_4096);
	/* start from the extended-size (BSEX) encoding with long packets
	 * enabled; the default 2048 case clears both again */
	rctl |= (E1000_RCTL_BSEX | E1000_RCTL_LPE);
	switch (adapter->rx_buffer_len) {
	case E1000_RXBUFFER_2048:
	default:
		rctl |= E1000_RCTL_SZ_2048;
		rctl &= ~(E1000_RCTL_BSEX | E1000_RCTL_LPE);
		break;
	case E1000_RXBUFFER_4096:
		rctl |= E1000_RCTL_SZ_4096;
		break;
	case E1000_RXBUFFER_8192:
		rctl |= E1000_RCTL_SZ_8192;
		break;
	case E1000_RXBUFFER_16384:
		rctl |= E1000_RCTL_SZ_16384;
		break;
	}

	E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
}
1019
/**
 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/

static void
e1000_configure_rx(struct e1000_adapter *adapter)
{
	uint64_t rdba = adapter->rx_ring.dma;
	uint32_t rdlen = adapter->rx_ring.count * sizeof(struct e1000_rx_desc);
	uint32_t rctl;
	uint32_t rxcsum;

	/* disable receives while setting up the descriptors */
	rctl = E1000_READ_REG(&adapter->hw, RCTL);
	E1000_WRITE_REG(&adapter->hw, RCTL, rctl & ~E1000_RCTL_EN);

	/* set the Receive Delay Timer Register */
	E1000_WRITE_REG(&adapter->hw, RDTR, adapter->rx_int_delay);

	if(adapter->hw.mac_type >= e1000_82540) {
		E1000_WRITE_REG(&adapter->hw, RADV, adapter->rx_abs_int_delay);
		/* itr > 1 selects a fixed interrupt throttle rate; the
		 * ITR register counts in 256 ns units, hence the scaling */
		if(adapter->itr > 1)
			E1000_WRITE_REG(&adapter->hw, ITR,
				1000000000 / (adapter->itr * 256));
	}

	/* Setup the Base and Length of the Rx Descriptor Ring */
	E1000_WRITE_REG(&adapter->hw, RDBAL, (rdba & 0x00000000ffffffffULL));
	E1000_WRITE_REG(&adapter->hw, RDBAH, (rdba >> 32));

	E1000_WRITE_REG(&adapter->hw, RDLEN, rdlen);

	/* Setup the HW Rx Head and Tail Descriptor Pointers */
	E1000_WRITE_REG(&adapter->hw, RDH, 0);
	E1000_WRITE_REG(&adapter->hw, RDT, 0);

	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if((adapter->hw.mac_type >= e1000_82543) &&
	   (adapter->rx_csum == TRUE)) {
		rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM);
		rxcsum |= E1000_RXCSUM_TUOFL;
		E1000_WRITE_REG(&adapter->hw, RXCSUM, rxcsum);
	}

	/* Enable Receives: restore the RCTL value saved above
	 * (re-enables RCTL_EN if it was set going in) */
	E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
}
1070
1071 /**
1072  * e1000_free_tx_resources - Free Tx Resources
1073  * @adapter: board private structure
1074  *
1075  * Free all transmit software resources
1076  **/
1077
1078 void
1079 e1000_free_tx_resources(struct e1000_adapter *adapter)
1080 {
1081         struct pci_dev *pdev = adapter->pdev;
1082
1083         e1000_clean_tx_ring(adapter);
1084
1085         vfree(adapter->tx_ring.buffer_info);
1086         adapter->tx_ring.buffer_info = NULL;
1087
1088         pci_free_consistent(pdev, adapter->tx_ring.size,
1089                             adapter->tx_ring.desc, adapter->tx_ring.dma);
1090
1091         adapter->tx_ring.desc = NULL;
1092 }
1093
/* Release the DMA mapping and sk_buff (if any) held by one Tx buffer slot.
 * Safe to call on already-cleaned slots (dma/skb are zeroed after release).
 * NOTE(review): buffers are mapped with pci_map_single()/pci_map_page() in
 * e1000_tx_map() but always unmapped here with pci_unmap_page() -- verify
 * this pairing is acceptable on IOMMU platforms. */
static inline void
e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
			struct e1000_buffer *buffer_info)
{
	struct pci_dev *pdev = adapter->pdev;
	if(buffer_info->dma) {
		pci_unmap_page(pdev,
			       buffer_info->dma,
			       buffer_info->length,
			       PCI_DMA_TODEVICE);
		buffer_info->dma = 0;
	}
	if(buffer_info->skb) {
		/* _any variant: callable from hard/soft irq context too */
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
}
1111
1112 /**
1113  * e1000_clean_tx_ring - Free Tx Buffers
1114  * @adapter: board private structure
1115  **/
1116
1117 static void
1118 e1000_clean_tx_ring(struct e1000_adapter *adapter)
1119 {
1120         struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
1121         struct e1000_buffer *buffer_info;
1122         unsigned long size;
1123         unsigned int i;
1124
1125         /* Free all the Tx ring sk_buffs */
1126
1127         for(i = 0; i < tx_ring->count; i++) {
1128                 buffer_info = &tx_ring->buffer_info[i];
1129                 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
1130         }
1131
1132         size = sizeof(struct e1000_buffer) * tx_ring->count;
1133         memset(tx_ring->buffer_info, 0, size);
1134
1135         /* Zero out the descriptor ring */
1136
1137         memset(tx_ring->desc, 0, tx_ring->size);
1138
1139         tx_ring->next_to_use = 0;
1140         tx_ring->next_to_clean = 0;
1141
1142         E1000_WRITE_REG(&adapter->hw, TDH, 0);
1143         E1000_WRITE_REG(&adapter->hw, TDT, 0);
1144 }
1145
1146 /**
1147  * e1000_free_rx_resources - Free Rx Resources
1148  * @adapter: board private structure
1149  *
1150  * Free all receive software resources
1151  **/
1152
1153 void
1154 e1000_free_rx_resources(struct e1000_adapter *adapter)
1155 {
1156         struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
1157         struct pci_dev *pdev = adapter->pdev;
1158
1159         e1000_clean_rx_ring(adapter);
1160
1161         vfree(rx_ring->buffer_info);
1162         rx_ring->buffer_info = NULL;
1163
1164         pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
1165
1166         rx_ring->desc = NULL;
1167 }
1168
/**
 * e1000_clean_rx_ring - Free Rx Buffers
 * @adapter: board private structure
 *
 * Unmaps and frees every sk_buff still posted on the receive ring,
 * zeroes the software state and descriptor memory, and resets the
 * software indices and hardware head/tail registers.
 **/

static void
e1000_clean_rx_ring(struct e1000_adapter *adapter)
{
	struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	/* Free all the Rx ring sk_buffs */

	for(i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if(buffer_info->skb) {
			/* unmap before freeing -- the buffer was mapped
			 * for device-to-memory DMA when it was posted */
			pci_unmap_single(pdev,
					 buffer_info->dma,
					 buffer_info->length,
					 PCI_DMA_FROMDEVICE);

			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}
	}

	size = sizeof(struct e1000_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */

	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	/* reset hardware head/tail to match the emptied software state */
	E1000_WRITE_REG(&adapter->hw, RDH, 0);
	E1000_WRITE_REG(&adapter->hw, RDT, 0);
}
1212
/* The 82542 2.0 (revision 2) needs to have the receive unit in reset
 * and memory write and invalidate disabled for certain operations
 */
static void
e1000_enter_82542_rst(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	uint32_t rctl;

	/* MWI must be off while the receive unit is held in reset */
	e1000_pci_clear_mwi(&adapter->hw);

	/* assert the receive-unit reset bit */
	rctl = E1000_READ_REG(&adapter->hw, RCTL);
	rctl |= E1000_RCTL_RST;
	E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
	E1000_WRITE_FLUSH(&adapter->hw);
	mdelay(5);	/* allow the reset to take effect */

	/* posted receive buffers are stale once the unit is in reset */
	if(netif_running(netdev))
		e1000_clean_rx_ring(adapter);
}
1233
/* Counterpart to e1000_enter_82542_rst(): take the 82542 rev 2 receive
 * unit back out of reset, restore MWI if it was originally enabled, and
 * rebuild the receive state torn down on entry. */
static void
e1000_leave_82542_rst(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	uint32_t rctl;

	/* clear the receive-unit reset bit */
	rctl = E1000_READ_REG(&adapter->hw, RCTL);
	rctl &= ~E1000_RCTL_RST;
	E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
	E1000_WRITE_FLUSH(&adapter->hw);
	mdelay(5);	/* allow the hardware to settle after leaving reset */

	/* re-enable MWI only if the saved PCI command word had it on */
	if(adapter->hw.pci_cmd_word & PCI_COMMAND_INVALIDATE)
		e1000_pci_set_mwi(&adapter->hw);

	/* rx ring was emptied by e1000_enter_82542_rst(); rebuild it */
	if(netif_running(netdev)) {
		e1000_configure_rx(adapter);
		e1000_alloc_rx_buffers(adapter);
	}
}
1254
1255 /**
1256  * e1000_set_mac - Change the Ethernet Address of the NIC
1257  * @netdev: network interface device structure
1258  * @p: pointer to an address structure
1259  *
1260  * Returns 0 on success, negative on failure
1261  **/
1262
1263 static int
1264 e1000_set_mac(struct net_device *netdev, void *p)
1265 {
1266         struct e1000_adapter *adapter = netdev->priv;
1267         struct sockaddr *addr = p;
1268
1269         if(!is_valid_ether_addr(addr->sa_data))
1270                 return -EADDRNOTAVAIL;
1271
1272         /* 82542 2.0 needs to be in reset to write receive address registers */
1273
1274         if(adapter->hw.mac_type == e1000_82542_rev2_0)
1275                 e1000_enter_82542_rst(adapter);
1276
1277         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1278         memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
1279
1280         e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);
1281
1282         if(adapter->hw.mac_type == e1000_82542_rev2_0)
1283                 e1000_leave_82542_rst(adapter);
1284
1285         return 0;
1286 }
1287
/**
 * e1000_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
 **/

static void
e1000_set_multi(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev->priv;
	struct e1000_hw *hw = &adapter->hw;
	struct dev_mc_list *mc_ptr;
	uint32_t rctl;
	uint32_t hash_value;
	int i;
	unsigned long flags;

	/* Check for Promiscuous and All Multicast modes */

	/* tx_lock guards the whole filter update, including the 82542
	 * reset dance below */
	spin_lock_irqsave(&adapter->tx_lock, flags);

	rctl = E1000_READ_REG(hw, RCTL);

	if(netdev->flags & IFF_PROMISC) {
		/* promiscuous: accept all unicast and multicast */
		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
	} else if(netdev->flags & IFF_ALLMULTI) {
		/* all-multi: accept all multicast, filter unicast */
		rctl |= E1000_RCTL_MPE;
		rctl &= ~E1000_RCTL_UPE;
	} else {
		/* normal filtering */
		rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
	}

	E1000_WRITE_REG(hw, RCTL, rctl);

	/* 82542 2.0 needs to be in reset to write receive address registers */

	if(hw->mac_type == e1000_82542_rev2_0)
		e1000_enter_82542_rst(adapter);

	/* load the first 14 multicast address into the exact filters 1-14
	 * RAR 0 is used for the station MAC adddress
	 * if there are not 14 addresses, go ahead and clear the filters
	 */
	mc_ptr = netdev->mc_list;

	for(i = 1; i < E1000_RAR_ENTRIES; i++) {
		if(mc_ptr) {
			e1000_rar_set(hw, mc_ptr->dmi_addr, i);
			mc_ptr = mc_ptr->next;
		} else {
			/* each RAR entry spans two 32-bit registers
			 * (low/high), hence the i << 1 addressing */
			E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
			E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
		}
	}

	/* clear the old settings from the multicast hash table */

	for(i = 0; i < E1000_NUM_MTA_REGISTERS; i++)
		E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);

	/* load any remaining addresses into the hash table */

	for(; mc_ptr; mc_ptr = mc_ptr->next) {
		hash_value = e1000_hash_mc_addr(hw, mc_ptr->dmi_addr);
		e1000_mta_set(hw, hash_value);
	}

	if(hw->mac_type == e1000_82542_rev2_0)
		e1000_leave_82542_rst(adapter);

	spin_unlock_irqrestore(&adapter->tx_lock, flags);
}
1364
/* Need to wait a few seconds after link up to get diagnostic information from
 * the phy */

static void
e1000_update_phy_info(unsigned long data)
{
	/* timer callback: @data is the adapter pointer cast to a long */
	struct e1000_adapter *adapter = (struct e1000_adapter *) data;
	e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
}
1374
/**
 * e1000_82547_tx_fifo_stall - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 *
 * 82547 Tx FIFO workaround: once both the descriptor ring and the
 * on-chip Tx FIFO have fully drained, rewind the FIFO pointers to the
 * head address and restart the queue; otherwise re-arm the timer and
 * poll again on the next jiffy.
 **/

static void
e1000_82547_tx_fifo_stall(unsigned long data)
{
	struct e1000_adapter *adapter = (struct e1000_adapter *) data;
	struct net_device *netdev = adapter->netdev;
	uint32_t tctl;

	if(atomic_read(&adapter->tx_fifo_stall)) {
		/* safe to rewind only when ring (TDT==TDH) and FIFO
		 * (TDFT==TDFH, TDFTS==TDFHS) are both drained */
		if((E1000_READ_REG(&adapter->hw, TDT) ==
		    E1000_READ_REG(&adapter->hw, TDH)) &&
		   (E1000_READ_REG(&adapter->hw, TDFT) ==
		    E1000_READ_REG(&adapter->hw, TDFH)) &&
		   (E1000_READ_REG(&adapter->hw, TDFTS) ==
		    E1000_READ_REG(&adapter->hw, TDFHS))) {
			/* disable transmits while rewriting FIFO pointers */
			tctl = E1000_READ_REG(&adapter->hw, TCTL);
			E1000_WRITE_REG(&adapter->hw, TCTL,
					tctl & ~E1000_TCTL_EN);
			E1000_WRITE_REG(&adapter->hw, TDFT,
					adapter->tx_head_addr);
			E1000_WRITE_REG(&adapter->hw, TDFH,
					adapter->tx_head_addr);
			E1000_WRITE_REG(&adapter->hw, TDFTS,
					adapter->tx_head_addr);
			E1000_WRITE_REG(&adapter->hw, TDFHS,
					adapter->tx_head_addr);
			E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
			E1000_WRITE_FLUSH(&adapter->hw);

			adapter->tx_fifo_head = 0;
			atomic_set(&adapter->tx_fifo_stall, 0);
			netif_wake_queue(netdev);
		} else {
			/* not drained yet -- check again next jiffy */
			mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
		}
	}
}
1416
/**
 * e1000_watchdog - Timer Call-back
 * @data: pointer to netdev cast into an unsigned long
 *
 * Periodic (2s) housekeeping: tracks link state transitions, refreshes
 * statistics and the adaptive IFS/ITR machinery, kicks a reset if Tx
 * work is stranded after link loss, and stops the queue early if the
 * controller looks hung.
 **/

static void
e1000_watchdog(unsigned long data)
{
	struct e1000_adapter *adapter = (struct e1000_adapter *) data;
	struct net_device *netdev = adapter->netdev;
	struct e1000_desc_ring *txdr = &adapter->tx_ring;
	unsigned int i;
	uint32_t link;

	e1000_check_for_link(&adapter->hw);

	/* serdes without autoneg reports link via serdes_link_down;
	 * everything else via the STATUS register's LU bit */
	if((adapter->hw.media_type == e1000_media_type_internal_serdes) &&
	   !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE))
		link = !adapter->hw.serdes_link_down;
	else
		link = E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU;

	if(link) {
		/* link-up transition: report speed/duplex, restart queue */
		if(!netif_carrier_ok(netdev)) {
			e1000_get_speed_and_duplex(&adapter->hw,
						   &adapter->link_speed,
						   &adapter->link_duplex);

			DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s\n",
			       adapter->link_speed,
			       adapter->link_duplex == FULL_DUPLEX ?
			       "Full Duplex" : "Half Duplex");

			netif_carrier_on(netdev);
			netif_wake_queue(netdev);
			/* phy diagnostics are only valid a while after
			 * link-up; refresh them in 2 seconds */
			mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ);
			adapter->smartspeed = 0;
		}
	} else {
		/* link-down transition: stop the queue */
		if(netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			DPRINTK(LINK, INFO, "NIC Link is Down\n");
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);
			mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ);
		}

		e1000_smartspeed(adapter);
	}

	e1000_update_stats(adapter);

	/* per-interval deltas feed the adaptive IFS logic below */
	adapter->hw.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
	adapter->tpt_old = adapter->stats.tpt;
	adapter->hw.collision_delta = adapter->stats.colc - adapter->colc_old;
	adapter->colc_old = adapter->stats.colc;

	adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
	adapter->gorcl_old = adapter->stats.gorcl;
	adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
	adapter->gotcl_old = adapter->stats.gotcl;

	e1000_update_adaptive(&adapter->hw);

	if(!netif_carrier_ok(netdev)) {
		if(E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context). */
			schedule_work(&adapter->tx_timeout_task);
		}
	}

	/* Dynamic mode for Interrupt Throttle Rate (ITR) */
	if(adapter->hw.mac_type >= e1000_82540 && adapter->itr == 1) {
		/* Symmetric Tx/Rx gets a reduced ITR=2000; Total
		 * asymmetrical Tx or Rx gets ITR=8000; everyone
		 * else is between 2000-8000. */
		uint32_t goc = (adapter->gotcl + adapter->gorcl) / 10000;
		uint32_t dif = (adapter->gotcl > adapter->gorcl ?
			adapter->gotcl - adapter->gorcl :
			adapter->gorcl - adapter->gotcl) / 10000;
		uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
		E1000_WRITE_REG(&adapter->hw, ITR, 1000000000 / (itr * 256));
	}

	/* Cause software interrupt to ensure rx ring is cleaned */
	E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0);

	/* Early detection of hung controller: oldest un-cleaned Tx buffer
	 * older than 1s while not flow-control paused -> stop the queue */
	i = txdr->next_to_clean;
	if(txdr->buffer_info[i].dma &&
	   time_after(jiffies, txdr->buffer_info[i].time_stamp + HZ) &&
	   !(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_TXOFF))
		netif_stop_queue(netdev);

	/* Reset the timer */
	mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
}
1518
1519 #define E1000_TX_FLAGS_CSUM             0x00000001
1520 #define E1000_TX_FLAGS_VLAN             0x00000002
1521 #define E1000_TX_FLAGS_TSO              0x00000004
1522 #define E1000_TX_FLAGS_VLAN_MASK        0xffff0000
1523 #define E1000_TX_FLAGS_VLAN_SHIFT       16
1524
/* Set up TCP large-send offload (TSO) for an skb by queueing a context
 * descriptor describing the header layout and MSS.  Returns TRUE if a
 * context descriptor was queued, FALSE if the skb needs no TSO (or the
 * kernel lacks NETIF_F_TSO). */
static inline boolean_t
e1000_tso(struct e1000_adapter *adapter, struct sk_buff *skb)
{
#ifdef NETIF_F_TSO
	struct e1000_context_desc *context_desc;
	unsigned int i;
	uint32_t cmd_length = 0;
	uint16_t ipcse, tucse, mss;
	uint8_t ipcss, ipcso, tucss, tucso, hdr_len;

	if(skb_shinfo(skb)->tso_size) {
		/* total header length = bytes up to the transport header
		 * plus the TCP header (doff counts 32-bit words) */
		hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
		mss = skb_shinfo(skb)->tso_size;
		/* the hardware regenerates these per segment; zero the IP
		 * fields and seed the TCP checksum with the pseudo-header */
		skb->nh.iph->tot_len = 0;
		skb->nh.iph->check = 0;
		skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
						      skb->nh.iph->daddr,
						      0,
						      IPPROTO_TCP,
						      0);
		/* byte offsets of the checksum start/offset/end fields,
		 * relative to skb->data, per the context descriptor format */
		ipcss = skb->nh.raw - skb->data;
		ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data;
		ipcse = skb->h.raw - skb->data - 1;
		tucss = skb->h.raw - skb->data;
		tucso = (void *)&(skb->h.th->check) - (void *)skb->data;
		tucse = 0;

		/* payload length (total minus headers) rides in the low
		 * bits of cmd_and_length alongside the command flags */
		cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
			       E1000_TXD_CMD_IP | E1000_TXD_CMD_TCP |
			       (skb->len - (hdr_len)));

		i = adapter->tx_ring.next_to_use;
		context_desc = E1000_CONTEXT_DESC(adapter->tx_ring, i);

		context_desc->lower_setup.ip_fields.ipcss  = ipcss;
		context_desc->lower_setup.ip_fields.ipcso  = ipcso;
		context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
		context_desc->upper_setup.tcp_fields.tucss = tucss;
		context_desc->upper_setup.tcp_fields.tucso = tucso;
		context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
		context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
		context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
		context_desc->cmd_and_length = cpu_to_le32(cmd_length);

		/* advance next_to_use with wrap-around */
		if(++i == adapter->tx_ring.count) i = 0;
		adapter->tx_ring.next_to_use = i;

		return TRUE;
	}
#endif

	return FALSE;
}
1578
/* Queue a checksum-offload context descriptor for a CHECKSUM_HW skb.
 * Returns TRUE if a context descriptor was consumed, FALSE otherwise. */
static inline boolean_t
e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
{
	struct e1000_context_desc *context_desc;
	unsigned int i;
	uint8_t css;

	if(likely(skb->ip_summed == CHECKSUM_HW)) {
		/* checksum start = beginning of the transport header;
		 * skb->csum here is treated as the offset of the checksum
		 * field within that header (2.6-era CHECKSUM_HW contract) */
		css = skb->h.raw - skb->data;

		i = adapter->tx_ring.next_to_use;
		context_desc = E1000_CONTEXT_DESC(adapter->tx_ring, i);

		context_desc->upper_setup.tcp_fields.tucss = css;
		context_desc->upper_setup.tcp_fields.tucso = css + skb->csum;
		context_desc->upper_setup.tcp_fields.tucse = 0;
		context_desc->tcp_seg_setup.data = 0;
		context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);

		/* advance next_to_use with wrap-around */
		if(unlikely(++i == adapter->tx_ring.count)) i = 0;
		adapter->tx_ring.next_to_use = i;

		return TRUE;
	}

	return FALSE;
}
1606
1607 #define E1000_MAX_TXD_PWR       12
1608 #define E1000_MAX_DATA_PER_TXD  (1<<E1000_MAX_TXD_PWR)
1609
/* DMA-map an skb (linear part plus page fragments) into consecutive Tx
 * buffer_info slots starting at tx_ring->next_to_use.  Applies the TSO
 * premature-write-back and 82544 PCI-X alignment errata workarounds by
 * shrinking individual mapping chunks.  Records the skb on the LAST slot
 * and links the FIRST slot's next_to_watch to it for completion handling.
 * Returns the number of slots consumed.  Note: does not advance
 * tx_ring->next_to_use itself; the caller does that. */
static inline int
e1000_tx_map(struct e1000_adapter *adapter, struct sk_buff *skb,
	unsigned int first, unsigned int max_per_txd,
	unsigned int nr_frags, unsigned int mss)
{
	struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
	struct e1000_buffer *buffer_info;
	unsigned int len = skb->len;
	unsigned int offset = 0, size, count = 0, i;
	unsigned int f;
	/* len = linear (non-paged) portion only; frags handled below */
	len -= skb->data_len;

	i = tx_ring->next_to_use;

	/* map the linear part of the skb in <= max_per_txd chunks */
	while(len) {
		buffer_info = &tx_ring->buffer_info[i];
		size = min(len, max_per_txd);
#ifdef NETIF_F_TSO
		/* Workaround for premature desc write-backs
		 * in TSO mode.  Append 4-byte sentinel desc */
		if(unlikely(mss && !nr_frags && size == len && size > 8))
			size -= 4;
#endif
		/* Workaround for potential 82544 hang in PCI-X.  Avoid
		 * terminating buffers within evenly-aligned dwords. */
		if(unlikely(adapter->pcix_82544 &&
		   !((unsigned long)(skb->data + offset + size - 1) & 4) &&
		   size > 4))
			size -= 4;

		buffer_info->length = size;
		buffer_info->dma =
			pci_map_single(adapter->pdev,
				skb->data + offset,
				size,
				PCI_DMA_TODEVICE);
		/* time_stamp lets the watchdog spot stuck transmits */
		buffer_info->time_stamp = jiffies;

		len -= size;
		offset += size;
		count++;
		if(unlikely(++i == tx_ring->count)) i = 0;
	}

	/* now map each page fragment, same chunking and workarounds */
	for(f = 0; f < nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = frag->size;
		offset = frag->page_offset;

		while(len) {
			buffer_info = &tx_ring->buffer_info[i];
			size = min(len, max_per_txd);
#ifdef NETIF_F_TSO
			/* Workaround for premature desc write-backs
			 * in TSO mode.  Append 4-byte sentinel desc */
			if(unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
				size -= 4;
#endif
			/* Workaround for potential 82544 hang in PCI-X.
			 * Avoid terminating buffers within evenly-aligned
			 * dwords. */
			if(unlikely(adapter->pcix_82544 &&
			   !((unsigned long)(frag->page+offset+size-1) & 4) &&
			   size > 4))
				size -= 4;

			buffer_info->length = size;
			buffer_info->dma =
				pci_map_page(adapter->pdev,
					frag->page,
					offset,
					size,
					PCI_DMA_TODEVICE);
			buffer_info->time_stamp = jiffies;

			len -= size;
			offset += size;
			count++;
			if(unlikely(++i == tx_ring->count)) i = 0;
		}
	}

	/* i now points one past the last used slot; step back to it.
	 * The skb is owned by (and freed from) the last slot; the first
	 * slot remembers which descriptor completes the packet. */
	i = (i == 0) ? tx_ring->count - 1 : i - 1;
	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[first].next_to_watch = i;

	return count;
}
1700
/**
 * e1000_tx_queue - write previously mapped Tx buffers as hardware descriptors
 * @adapter: board private structure
 * @count: number of buffers mapped by e1000_tx_map() for this packet
 * @tx_flags: E1000_TX_FLAGS_* bits (TSO/CSUM/VLAN) selected by the caller
 *
 * Fills in @count data descriptors starting at tx_ring->next_to_use, sets
 * the final-descriptor command bits, then advances the TDT register so the
 * hardware begins fetching.  Caller holds adapter->tx_lock.
 */
static inline void
e1000_tx_queue(struct e1000_adapter *adapter, int count, int tx_flags)
{
        struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
        struct e1000_tx_desc *tx_desc = NULL;
        struct e1000_buffer *buffer_info;
        /* IFCS: have hardware insert the Ethernet FCS on every frame */
        uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
        unsigned int i;

        /* TSO requires extended data descriptors (DEXT/DTYP_D) plus the
         * TCP-segmentation-enable bit, and both IP and TCP checksum
         * insertion in the option bits */
        if(likely(tx_flags & E1000_TX_FLAGS_TSO)) {
                txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
                             E1000_TXD_CMD_TSE;
                txd_upper |= (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8;
        }

        /* plain checksum offload: extended descriptor, TCP/UDP csum only */
        if(likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
                txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
                txd_upper |= E1000_TXD_POPTS_TXSM << 8;
        }

        /* VLAN tag insertion: the tag itself rides in the upper dword */
        if(unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
                txd_lower |= E1000_TXD_CMD_VLE;
                txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
        }

        i = tx_ring->next_to_use;

        /* one descriptor per mapped buffer; all share the same cmd bits */
        while(count--) {
                buffer_info = &tx_ring->buffer_info[i];
                tx_desc = E1000_TX_DESC(*tx_ring, i);
                tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
                tx_desc->lower.data =
                        cpu_to_le32(txd_lower | buffer_info->length);
                tx_desc->upper.data = cpu_to_le32(txd_upper);
                if(unlikely(++i == tx_ring->count)) i = 0;
        }

        /* last descriptor of the packet also gets the adapter-wide command
         * bits (EOP/RS etc. accumulated in txd_cmd) */
        tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);

        /* Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
         * applicable for weak-ordered memory model archs,
         * such as IA-64). */
        wmb();

        tx_ring->next_to_use = i;
        E1000_WRITE_REG(&adapter->hw, TDT, i);
}
1749
1750 /**
1751  * 82547 workaround to avoid controller hang in half-duplex environment.
1752  * The workaround is to avoid queuing a large packet that would span
1753  * the internal Tx FIFO ring boundary by notifying the stack to resend
1754  * the packet at a later time.  This gives the Tx FIFO an opportunity to
1755  * flush all packets.  When that occurs, we reset the Tx FIFO pointers
1756  * to the beginning of the Tx FIFO.
1757  **/
1758
#define E1000_FIFO_HDR                  0x10    /* per-packet Tx FIFO header overhead, bytes */
#define E1000_82547_PAD_LEN             0x3E0   /* 992-byte slack kept free; see workaround below */
1761
1762 static inline int
1763 e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb)
1764 {
1765         uint32_t fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
1766         uint32_t skb_fifo_len = skb->len + E1000_FIFO_HDR;
1767
1768         E1000_ROUNDUP(skb_fifo_len, E1000_FIFO_HDR);
1769
1770         if(adapter->link_duplex != HALF_DUPLEX)
1771                 goto no_fifo_stall_required;
1772
1773         if(atomic_read(&adapter->tx_fifo_stall))
1774                 return 1;
1775
1776         if(skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
1777                 atomic_set(&adapter->tx_fifo_stall, 1);
1778                 return 1;
1779         }
1780
1781 no_fifo_stall_required:
1782         adapter->tx_fifo_head += skb_fifo_len;
1783         if(adapter->tx_fifo_head >= adapter->tx_fifo_size)
1784                 adapter->tx_fifo_head -= adapter->tx_fifo_size;
1785         return 0;
1786 }
1787
/* descriptors needed for S bytes at 2^X bytes per descriptor (over-estimates
 * by one when S is an exact multiple; that slack is harmless) */
#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
/**
 * e1000_xmit_frame - hard_start_xmit entry point
 * @skb: packet handed down by the network stack
 * @netdev: network interface device structure
 *
 * Estimates the descriptor count, takes tx_lock (trylock - returns
 * NETDEV_TX_LOCKED on contention so the stack requeues), applies the
 * 82547 FIFO workaround, then maps and queues the packet.
 * Returns NETDEV_TX_OK, NETDEV_TX_BUSY or NETDEV_TX_LOCKED.
 */
static int
e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
        struct e1000_adapter *adapter = netdev->priv;
        unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
        unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
        unsigned int tx_flags = 0;
        unsigned int len = skb->len;
        unsigned long flags;
        unsigned int nr_frags = 0;
        unsigned int mss = 0;
        int count = 0;
        unsigned int f;
        /* len = linear (head) portion only; frags are counted separately */
        len -= skb->data_len;

        if(unlikely(skb->len <= 0)) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

#ifdef NETIF_F_TSO
        mss = skb_shinfo(skb)->tso_size;
        /* The controller does a simple calculation to
         * make sure there is enough room in the FIFO before
         * initiating the DMA for each buffer.  The calc is:
         * 4 = ceil(buffer len/mss).  To make sure we don't
         * overrun the FIFO, adjust the max buffer len if mss
         * drops. */
        if(mss) {
                max_per_txd = min(mss << 2, max_per_txd);
                max_txd_pwr = fls(max_per_txd) - 1;
        }

        /* one extra descriptor for the TSO/checksum context */
        if((mss) || (skb->ip_summed == CHECKSUM_HW))
                count++;
        count++;        /* for sentinel desc */
#else
        if(skb->ip_summed == CHECKSUM_HW)
                count++;
#endif
        /* descriptors for the linear data ... */
        count += TXD_USE_COUNT(len, max_txd_pwr);

        /* ... plus one per buffer that the 82544 PCI-X workaround in
         * e1000_tx_map() may split */
        if(adapter->pcix_82544)
                count++;

        /* ... plus descriptors for each page fragment */
        nr_frags = skb_shinfo(skb)->nr_frags;
        for(f = 0; f < nr_frags; f++)
                count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
                                       max_txd_pwr);
        if(adapter->pcix_82544)
                count += nr_frags;

        /* trylock rather than spin: under LLTX the stack will simply
         * requeue on NETDEV_TX_LOCKED, avoiding a deadlock-prone wait */
        local_irq_save(flags); 
        if (!spin_trylock(&adapter->tx_lock)) { 
                /* Collision - tell upper layer to requeue */ 
                local_irq_restore(flags); 
                return NETDEV_TX_LOCKED; 
        } 

        /* need: count + 2 desc gap to keep tail from touching
         * head, otherwise try next time */
        if(unlikely(E1000_DESC_UNUSED(&adapter->tx_ring) < count + 2)) {
                netif_stop_queue(netdev);
                spin_unlock_irqrestore(&adapter->tx_lock, flags);
                return NETDEV_TX_BUSY;
        }

        /* 82547 half-duplex FIFO hang workaround: defer the packet and
         * poll via tx_fifo_stall_timer until the FIFO has drained */
        if(unlikely(adapter->hw.mac_type == e1000_82547)) {
                if(unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
                        netif_stop_queue(netdev);
                        mod_timer(&adapter->tx_fifo_stall_timer, jiffies);
                        spin_unlock_irqrestore(&adapter->tx_lock, flags);
                        return NETDEV_TX_BUSY;
                }
        }

        if(unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) {
                tx_flags |= E1000_TX_FLAGS_VLAN;
                tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
        }

        /* remember where this packet starts so clean_tx_irq can watch it */
        first = adapter->tx_ring.next_to_use;
        
        if(likely(e1000_tso(adapter, skb)))
                tx_flags |= E1000_TX_FLAGS_TSO;
        else if(likely(e1000_tx_csum(adapter, skb)))
                tx_flags |= E1000_TX_FLAGS_CSUM;

        e1000_tx_queue(adapter,
                e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss),
                tx_flags);

        netdev->trans_start = jiffies;

        /* Make sure there is space in the ring for the next send. */
        if(unlikely(E1000_DESC_UNUSED(&adapter->tx_ring) < MAX_SKB_FRAGS + 2))
                netif_stop_queue(netdev);

        spin_unlock_irqrestore(&adapter->tx_lock, flags);
        return NETDEV_TX_OK;
}
1890
1891 /**
1892  * e1000_tx_timeout - Respond to a Tx Hang
1893  * @netdev: network interface device structure
1894  **/
1895
1896 static void
1897 e1000_tx_timeout(struct net_device *netdev)
1898 {
1899         struct e1000_adapter *adapter = netdev->priv;
1900
1901         /* Do the reset outside of interrupt context */
1902         schedule_work(&adapter->tx_timeout_task);
1903 }
1904
1905 static void
1906 e1000_tx_timeout_task(struct net_device *netdev)
1907 {
1908         struct e1000_adapter *adapter = netdev->priv;
1909
1910         e1000_down(adapter);
1911         e1000_up(adapter);
1912 }
1913
1914 /**
1915  * e1000_get_stats - Get System Network Statistics
1916  * @netdev: network interface device structure
1917  *
1918  * Returns the address of the device statistics structure.
1919  * The statistics are actually updated from the timer callback.
1920  **/
1921
1922 static struct net_device_stats *
1923 e1000_get_stats(struct net_device *netdev)
1924 {
1925         struct e1000_adapter *adapter = netdev->priv;
1926
1927         e1000_update_stats(adapter);
1928         return &adapter->net_stats;
1929 }
1930
1931 /**
1932  * e1000_change_mtu - Change the Maximum Transfer Unit
1933  * @netdev: network interface device structure
1934  * @new_mtu: new value for maximum frame size
1935  *
1936  * Returns 0 on success, negative on failure
1937  **/
1938
1939 static int
1940 e1000_change_mtu(struct net_device *netdev, int new_mtu)
1941 {
1942         struct e1000_adapter *adapter = netdev->priv;
1943         int old_mtu = adapter->rx_buffer_len;
1944         int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
1945
1946         if((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
1947                 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
1948                         DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
1949                         return -EINVAL;
1950         }
1951
1952         if(max_frame <= MAXIMUM_ETHERNET_FRAME_SIZE) {
1953                 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
1954
1955         } else if(adapter->hw.mac_type < e1000_82543) {
1956                 DPRINTK(PROBE, ERR, "Jumbo Frames not supported on 82542\n");
1957                 return -EINVAL;
1958
1959         } else if(max_frame <= E1000_RXBUFFER_4096) {
1960                 adapter->rx_buffer_len = E1000_RXBUFFER_4096;
1961
1962         } else if(max_frame <= E1000_RXBUFFER_8192) {
1963                 adapter->rx_buffer_len = E1000_RXBUFFER_8192;
1964
1965         } else {
1966                 adapter->rx_buffer_len = E1000_RXBUFFER_16384;
1967         }
1968
1969         if(old_mtu != adapter->rx_buffer_len && netif_running(netdev)) {
1970                 e1000_down(adapter);
1971                 e1000_up(adapter);
1972         }
1973
1974         netdev->mtu = new_mtu;
1975         adapter->hw.max_frame_size = max_frame;
1976
1977         return 0;
1978 }
1979
1980 /**
1981  * e1000_update_stats - Update the board statistics counters
1982  * @adapter: board private structure
1983  **/
1984
void
e1000_update_stats(struct e1000_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        unsigned long flags;
        uint16_t phy_tmp;

/* low byte of PHY_1000T_STATUS holds the idle-error count */
#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF

        spin_lock_irqsave(&adapter->stats_lock, flags);

        /* these counters are modified from e1000_adjust_tbi_stats,
         * called from the interrupt context, so they must only
         * be written while holding adapter->stats_lock
         */

        /* accumulate hardware statistics registers into the software
         * totals; the += suggests the registers are clear-on-read --
         * NOTE(review): confirm against the 8254x developer's manual */
        adapter->stats.crcerrs += E1000_READ_REG(hw, CRCERRS);
        adapter->stats.gprc += E1000_READ_REG(hw, GPRC);
        adapter->stats.gorcl += E1000_READ_REG(hw, GORCL);
        adapter->stats.gorch += E1000_READ_REG(hw, GORCH);
        adapter->stats.bprc += E1000_READ_REG(hw, BPRC);
        adapter->stats.mprc += E1000_READ_REG(hw, MPRC);
        adapter->stats.roc += E1000_READ_REG(hw, ROC);
        adapter->stats.prc64 += E1000_READ_REG(hw, PRC64);
        adapter->stats.prc127 += E1000_READ_REG(hw, PRC127);
        adapter->stats.prc255 += E1000_READ_REG(hw, PRC255);
        adapter->stats.prc511 += E1000_READ_REG(hw, PRC511);
        adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023);
        adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522);

        adapter->stats.symerrs += E1000_READ_REG(hw, SYMERRS);
        adapter->stats.mpc += E1000_READ_REG(hw, MPC);
        adapter->stats.scc += E1000_READ_REG(hw, SCC);
        adapter->stats.ecol += E1000_READ_REG(hw, ECOL);
        adapter->stats.mcc += E1000_READ_REG(hw, MCC);
        adapter->stats.latecol += E1000_READ_REG(hw, LATECOL);
        adapter->stats.dc += E1000_READ_REG(hw, DC);
        adapter->stats.sec += E1000_READ_REG(hw, SEC);
        adapter->stats.rlec += E1000_READ_REG(hw, RLEC);
        adapter->stats.xonrxc += E1000_READ_REG(hw, XONRXC);
        adapter->stats.xontxc += E1000_READ_REG(hw, XONTXC);
        adapter->stats.xoffrxc += E1000_READ_REG(hw, XOFFRXC);
        adapter->stats.xofftxc += E1000_READ_REG(hw, XOFFTXC);
        adapter->stats.fcruc += E1000_READ_REG(hw, FCRUC);
        adapter->stats.gptc += E1000_READ_REG(hw, GPTC);
        adapter->stats.gotcl += E1000_READ_REG(hw, GOTCL);
        adapter->stats.gotch += E1000_READ_REG(hw, GOTCH);
        adapter->stats.rnbc += E1000_READ_REG(hw, RNBC);
        adapter->stats.ruc += E1000_READ_REG(hw, RUC);
        adapter->stats.rfc += E1000_READ_REG(hw, RFC);
        adapter->stats.rjc += E1000_READ_REG(hw, RJC);
        adapter->stats.torl += E1000_READ_REG(hw, TORL);
        adapter->stats.torh += E1000_READ_REG(hw, TORH);
        adapter->stats.totl += E1000_READ_REG(hw, TOTL);
        adapter->stats.toth += E1000_READ_REG(hw, TOTH);
        adapter->stats.tpr += E1000_READ_REG(hw, TPR);
        adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64);
        adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127);
        adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255);
        adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511);
        adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023);
        adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522);
        adapter->stats.mptc += E1000_READ_REG(hw, MPTC);
        adapter->stats.bptc += E1000_READ_REG(hw, BPTC);

        /* used for adaptive IFS */

        hw->tx_packet_delta = E1000_READ_REG(hw, TPT);
        adapter->stats.tpt += hw->tx_packet_delta;
        hw->collision_delta = E1000_READ_REG(hw, COLC);
        adapter->stats.colc += hw->collision_delta;

        /* these registers only exist on 82543 and later parts */
        if(hw->mac_type >= e1000_82543) {
                adapter->stats.algnerrc += E1000_READ_REG(hw, ALGNERRC);
                adapter->stats.rxerrc += E1000_READ_REG(hw, RXERRC);
                adapter->stats.tncrs += E1000_READ_REG(hw, TNCRS);
                adapter->stats.cexterr += E1000_READ_REG(hw, CEXTERR);
                adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC);
                adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC);
        }

        /* Fill out the OS statistics structure */

        adapter->net_stats.rx_packets = adapter->stats.gprc;
        adapter->net_stats.tx_packets = adapter->stats.gptc;
        adapter->net_stats.rx_bytes = adapter->stats.gorcl;
        adapter->net_stats.tx_bytes = adapter->stats.gotcl;
        adapter->net_stats.multicast = adapter->stats.mprc;
        adapter->net_stats.collisions = adapter->stats.colc;

        /* Rx Errors */

        adapter->net_stats.rx_errors = adapter->stats.rxerrc +
                adapter->stats.crcerrs + adapter->stats.algnerrc +
                adapter->stats.rlec + adapter->stats.rnbc +
                adapter->stats.mpc + adapter->stats.cexterr;
        adapter->net_stats.rx_dropped = adapter->stats.rnbc;
        adapter->net_stats.rx_length_errors = adapter->stats.rlec;
        adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
        adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
        adapter->net_stats.rx_fifo_errors = adapter->stats.mpc;
        adapter->net_stats.rx_missed_errors = adapter->stats.mpc;

        /* Tx Errors */

        adapter->net_stats.tx_errors = adapter->stats.ecol +
                                       adapter->stats.latecol;
        adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
        adapter->net_stats.tx_window_errors = adapter->stats.latecol;
        adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;

        /* Tx Dropped needs to be maintained elsewhere */

        /* Phy Stats */

        if(hw->media_type == e1000_media_type_copper) {
                /* gigabit idle-error counter, copper gigabit links only */
                if((adapter->link_speed == SPEED_1000) &&
                   (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
                        phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
                        adapter->phy_stats.idle_errors += phy_tmp;
                }

                /* M88 PHY receive-error counter on pre-82547 parts */
                if((hw->mac_type <= e1000_82546) &&
                   (hw->phy_type == e1000_phy_m88) &&
                   !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
                        adapter->phy_stats.receive_errors += phy_tmp;
        }

        spin_unlock_irqrestore(&adapter->stats_lock, flags);
}
2115
2116 /**
2117  * e1000_intr - Interrupt Handler
2118  * @irq: interrupt number
2119  * @data: pointer to a network interface device structure
2120  * @pt_regs: CPU registers structure
2121  **/
2122
static irqreturn_t
e1000_intr(int irq, void *data, struct pt_regs *regs)
{
        struct net_device *netdev = data;
        struct e1000_adapter *adapter = netdev->priv;
        struct e1000_hw *hw = &adapter->hw;
        /* reading ICR presumably acknowledges the pending causes --
         * NOTE(review): confirm read-to-clear semantics in the manual */
        uint32_t icr = E1000_READ_REG(hw, ICR);
#ifndef CONFIG_E1000_NAPI
        unsigned int i;
#endif

        /* zero cause bits: a shared-line interrupt meant for another device */
        if(unlikely(!icr))
                return IRQ_NONE;  /* Not our interrupt */

        /* link-state change: let the watchdog re-check the link right away */
        if(unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
                hw->get_link_status = 1;
                mod_timer(&adapter->watchdog_timer, jiffies);
        }

#ifdef CONFIG_E1000_NAPI
        if(likely(netif_rx_schedule_prep(netdev))) {

                /* Disable interrupts and register for poll. The flush 
                  of the posted write is intentionally left out.
                */

                /* irq_sem balances against e1000_irq_enable() in the
                 * poll routine; IMC ~0 masks every interrupt cause */
                atomic_inc(&adapter->irq_sem);
                E1000_WRITE_REG(hw, IMC, ~0);
                __netif_rx_schedule(netdev);
        }
#else
        /* non-NAPI: bitwise & (not &&) is deliberate so BOTH cleanup
         * routines run on every iteration; stop once neither found work */
        for(i = 0; i < E1000_MAX_INTR; i++)
                if(unlikely(!e1000_clean_rx_irq(adapter) &
                   !e1000_clean_tx_irq(adapter)))
                        break;
#endif

        return IRQ_HANDLED;
}
2162
2163 #ifdef CONFIG_E1000_NAPI
2164 /**
2165  * e1000_clean - NAPI Rx polling callback
2166  * @adapter: board private structure
2167  **/
2168
static int
e1000_clean(struct net_device *netdev, int *budget)
{
        struct e1000_adapter *adapter = netdev->priv;
        /* honor both the global budget and this device's quota */
        int work_to_do = min(*budget, netdev->quota);
        int tx_cleaned;
        int work_done = 0;
        
        /* no link: nothing to receive, leave polling mode immediately */
        if (!netif_carrier_ok(netdev))
                goto quit_polling;

        tx_cleaned = e1000_clean_tx_irq(adapter);
        e1000_clean_rx_irq(adapter, &work_done, work_to_do);

        /* charge the processed packets against budget and quota */
        *budget -= work_done;
        netdev->quota -= work_done;
        
        /* if no Rx and Tx cleanup work was done, exit the polling mode */
        if(!tx_cleaned || (work_done < work_to_do) || 
                                !netif_running(netdev)) {
/* label sits inside the if so the carrier-off path above shares the
 * complete-and-reenable sequence */
quit_polling:   netif_rx_complete(netdev);
                e1000_irq_enable(adapter);
                return 0;
        }

        /* nonzero return keeps us on the poll list for another pass */
        return (work_done >= work_to_do);
}
2196
2197 #endif
2198 /**
2199  * e1000_clean_tx_irq - Reclaim resources after transmit completes
2200  * @adapter: board private structure
2201  **/
2202
static boolean_t
e1000_clean_tx_irq(struct e1000_adapter *adapter)
{
        struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
        struct net_device *netdev = adapter->netdev;
        struct e1000_tx_desc *tx_desc, *eop_desc;
        struct e1000_buffer *buffer_info;
        unsigned int i, eop;
        boolean_t cleaned = FALSE;

        /* eop (end-of-packet) is the last descriptor of the next packet,
         * recorded as next_to_watch when the packet was mapped */
        i = tx_ring->next_to_clean;
        eop = tx_ring->buffer_info[i].next_to_watch;
        eop_desc = E1000_TX_DESC(*tx_ring, eop);

        /* hardware sets DD on the eop descriptor once the whole packet
         * has been sent; reclaim packet by packet while that holds */
        while(eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
                /* free every buffer of this packet, up to and incl. eop */
                for(cleaned = FALSE; !cleaned; ) {
                        tx_desc = E1000_TX_DESC(*tx_ring, i);
                        buffer_info = &tx_ring->buffer_info[i];

                        /* unmap DMA and free the skb (if any) */
                        e1000_unmap_and_free_tx_resource(adapter, buffer_info);
                        tx_desc->buffer_addr = 0;
                        tx_desc->lower.data = 0;
                        tx_desc->upper.data = 0;

                        cleaned = (i == eop);
                        if(unlikely(++i == tx_ring->count)) i = 0;
                }
                
                eop = tx_ring->buffer_info[i].next_to_watch;
                eop_desc = E1000_TX_DESC(*tx_ring, eop);
        }

        tx_ring->next_to_clean = i;

        /* tx_lock serializes against e1000_xmit_frame's queue-stop check */
        spin_lock(&adapter->tx_lock);

        /* we freed descriptors: restart a queue stopped for lack of space */
        if(unlikely(cleaned && netif_queue_stopped(netdev) &&
                    netif_carrier_ok(netdev)))
                netif_wake_queue(netdev);

        spin_unlock(&adapter->tx_lock);

        return cleaned;
}
2247
2248 /**
2249  * e1000_rx_checksum - Receive Checksum Offload for 82543
2250  * @adapter: board private structure
2251  * @rx_desc: receive descriptor
2252  * @sk_buff: socket buffer with received data
2253  **/
2254
2255 static inline void
2256 e1000_rx_checksum(struct e1000_adapter *adapter,
2257                   struct e1000_rx_desc *rx_desc,
2258                   struct sk_buff *skb)
2259 {
2260         /* 82543 or newer only */
2261         if(unlikely((adapter->hw.mac_type < e1000_82543) ||
2262         /* Ignore Checksum bit is set */
2263         (rx_desc->status & E1000_RXD_STAT_IXSM) ||
2264         /* TCP Checksum has not been calculated */
2265         (!(rx_desc->status & E1000_RXD_STAT_TCPCS)))) {
2266                 skb->ip_summed = CHECKSUM_NONE;
2267                 return;
2268         }
2269
2270         /* At this point we know the hardware did the TCP checksum */
2271         /* now look at the TCP checksum error bit */
2272         if(rx_desc->errors & E1000_RXD_ERR_TCPE) {
2273                 /* let the stack verify checksum errors */
2274                 skb->ip_summed = CHECKSUM_NONE;
2275                 adapter->hw_csum_err++;
2276         } else {
2277                 /* TCP checksum is good */
2278                 skb->ip_summed = CHECKSUM_UNNECESSARY;
2279                 adapter->hw_csum_good++;
2280         }
2281 }
2282
2283 /**
2284  * e1000_clean_rx_irq - Send received data up the network stack
2285  * @adapter: board private structure
2286  **/
2287
static boolean_t
#ifdef CONFIG_E1000_NAPI
e1000_clean_rx_irq(struct e1000_adapter *adapter, int *work_done,
                   int work_to_do)
#else
e1000_clean_rx_irq(struct e1000_adapter *adapter)
#endif
{
        struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        struct e1000_rx_desc *rx_desc;
        struct e1000_buffer *buffer_info;
        struct sk_buff *skb;
        unsigned long flags;
        uint32_t length;
        uint8_t last_byte;
        unsigned int i;
        boolean_t cleaned = FALSE;

        i = rx_ring->next_to_clean;
        rx_desc = E1000_RX_DESC(*rx_ring, i);

        /* DD (descriptor done) set by hardware once a frame has landed */
        while(rx_desc->status & E1000_RXD_STAT_DD) {
                buffer_info = &rx_ring->buffer_info[i];
#ifdef CONFIG_E1000_NAPI
                /* respect the NAPI budget given by e1000_clean() */
                if(*work_done >= work_to_do)
                        break;
                (*work_done)++;
#endif
                cleaned = TRUE;

                pci_unmap_single(pdev,
                                 buffer_info->dma,
                                 buffer_info->length,
                                 PCI_DMA_FROMDEVICE);

                skb = buffer_info->skb;
                length = le16_to_cpu(rx_desc->length);

                /* multi-descriptor frames are dropped: the buffers are
                 * sized so any in-spec frame fits in one */
                if(unlikely(!(rx_desc->status & E1000_RXD_STAT_EOP))) {
                        /* All receives must fit into a single buffer */
                        E1000_DBG("%s: Receive packet consumed multiple"
                                        " buffers\n", netdev->name);
                        dev_kfree_skb_irq(skb);
                        goto next_desc;
                }

                /* TBI (fiber) workaround: a frame flagged with errors may
                 * still be acceptable minus a trailing carrier-extend
                 * byte; adjust stats under stats_lock and shorten it */
                if(unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
                        last_byte = *(skb->data + length - 1);
                        if(TBI_ACCEPT(&adapter->hw, rx_desc->status,
                                      rx_desc->errors, length, last_byte)) {
                                spin_lock_irqsave(&adapter->stats_lock, flags);
                                e1000_tbi_adjust_stats(&adapter->hw,
                                                       &adapter->stats,
                                                       length, skb->data);
                                spin_unlock_irqrestore(&adapter->stats_lock,
                                                       flags);
                                length--;
                        } else {
                                dev_kfree_skb_irq(skb);
                                goto next_desc;
                        }
                }

                /* Good Receive */
                /* strip the hardware-appended Ethernet FCS */
                skb_put(skb, length - ETHERNET_FCS_SIZE);

                /* Receive Checksum Offload */
                e1000_rx_checksum(adapter, rx_desc, skb);

                skb->protocol = eth_type_trans(skb, netdev);
#ifdef CONFIG_E1000_NAPI
                /* hand off via the VLAN-accelerated path when the frame
                 * carries a hardware-stripped tag */
                if(unlikely(adapter->vlgrp &&
                            (rx_desc->status & E1000_RXD_STAT_VP))) {
                        vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
                                        le16_to_cpu(rx_desc->special) &
                                        E1000_RXD_SPC_VLAN_MASK);
                } else {
                        netif_receive_skb(skb);
                }
#else /* CONFIG_E1000_NAPI */
                if(unlikely(adapter->vlgrp &&
                            (rx_desc->status & E1000_RXD_STAT_VP))) {
                        vlan_hwaccel_rx(skb, adapter->vlgrp,
                                        le16_to_cpu(rx_desc->special) &
                                        E1000_RXD_SPC_VLAN_MASK);
                } else {
                        netif_rx(skb);
                }
#endif /* CONFIG_E1000_NAPI */
                netdev->last_rx = jiffies;

next_desc:
                /* clear status so we don't see this descriptor as done
                 * again before hardware rewrites it */
                rx_desc->status = 0;
                buffer_info->skb = NULL;
                if(unlikely(++i == rx_ring->count)) i = 0;

                rx_desc = E1000_RX_DESC(*rx_ring, i);
        }

        rx_ring->next_to_clean = i;

        /* refill the descriptors we just consumed */
        e1000_alloc_rx_buffers(adapter);

        return cleaned;
}
2395
2396 /**
2397  * e1000_alloc_rx_buffers - Replace used receive buffers
2398  * @adapter: address of board private structure
2399  **/
2400
static void
e1000_alloc_rx_buffers(struct e1000_adapter *adapter)
{
        struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        struct e1000_rx_desc *rx_desc;
        struct e1000_buffer *buffer_info;
        struct sk_buff *skb;
        unsigned int i;

        i = rx_ring->next_to_use;
        buffer_info = &rx_ring->buffer_info[i];

        /* fill every slot whose skb was consumed by e1000_clean_rx_irq */
        while(!buffer_info->skb) {
                skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN);

                if(unlikely(!skb)) {
                        /* Better luck next round */
                        break;
                }

                /* Make buffer alignment 2 beyond a 16 byte boundary
                 * this will result in a 16 byte aligned IP header after
                 * the 14 byte MAC header is removed
                 */
                skb_reserve(skb, NET_IP_ALIGN);

                skb->dev = netdev;

                /* map the fresh buffer for DMA and point the descriptor
                 * at it */
                buffer_info->skb = skb;
                buffer_info->length = adapter->rx_buffer_len;
                buffer_info->dma = pci_map_single(pdev,
                                                  skb->data,
                                                  adapter->rx_buffer_len,
                                                  PCI_DMA_FROMDEVICE);

                rx_desc = E1000_RX_DESC(*rx_ring, i);
                rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

                /* batch tail updates: only bump RDT when i is a multiple
                 * of E1000_RX_BUFFER_WRITE, to cut register writes */
                if(unlikely((i & ~(E1000_RX_BUFFER_WRITE - 1)) == i)) {
                        /* Force memory writes to complete before letting h/w
                         * know there are new descriptors to fetch.  (Only
                         * applicable for weak-ordered memory model archs,
                         * such as IA-64). */
                        wmb();

                        E1000_WRITE_REG(&adapter->hw, RDT, i);
                }

                if(unlikely(++i == rx_ring->count)) i = 0;
                buffer_info = &rx_ring->buffer_info[i];
        }

        rx_ring->next_to_use = i;
}
2457
2458 /**
2459  * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
2460  * @adapter:
2461  **/
2462
static void
e1000_smartspeed(struct e1000_adapter *adapter)
{
        uint16_t phy_status;
        uint16_t phy_ctrl;

        /* Workaround only applies to IGP PHYs that are autonegotiating
         * with 1000BASE-T full duplex advertised. */
        if((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg ||
           !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
                return;

        if(adapter->smartspeed == 0) {
                /* If Master/Slave config fault is asserted twice,
                 * we assume back-to-back */
                e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
                if(!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
                e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
                if(!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
                e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
                if(phy_ctrl & CR_1000T_MS_ENABLE) {
                        /* Stop forcing the master/slave role, then restart
                         * autonegotiation so the link can resolve cleanly. */
                        phy_ctrl &= ~CR_1000T_MS_ENABLE;
                        e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL,
                                            phy_ctrl);
                        adapter->smartspeed++;
                        if(!e1000_phy_setup_autoneg(&adapter->hw) &&
                           !e1000_read_phy_reg(&adapter->hw, PHY_CTRL,
                                               &phy_ctrl)) {
                                phy_ctrl |= (MII_CR_AUTO_NEG_EN |
                                             MII_CR_RESTART_AUTO_NEG);
                                e1000_write_phy_reg(&adapter->hw, PHY_CTRL,
                                                    phy_ctrl);
                        }
                }
                return;
        } else if(adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
                /* If still no link, perhaps using 2/3 pair cable */
                /* Re-enable master/slave forcing and restart autoneg. */
                e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
                phy_ctrl |= CR_1000T_MS_ENABLE;
                e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_ctrl);
                if(!e1000_phy_setup_autoneg(&adapter->hw) &&
                   !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_ctrl)) {
                        phy_ctrl |= (MII_CR_AUTO_NEG_EN |
                                     MII_CR_RESTART_AUTO_NEG);
                        e1000_write_phy_reg(&adapter->hw, PHY_CTRL, phy_ctrl);
                }
        }
        /* Restart process after E1000_SMARTSPEED_MAX iterations */
        if(adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
                adapter->smartspeed = 0;
}
2512
2513 /**
2514  * e1000_ioctl -
2515  * @netdev:
2516  * @ifreq:
2517  * @cmd:
2518  **/
2519
2520 static int
2521 e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2522 {
2523         switch (cmd) {
2524         case SIOCGMIIPHY:
2525         case SIOCGMIIREG:
2526         case SIOCSMIIREG:
2527                 return e1000_mii_ioctl(netdev, ifr, cmd);
2528         default:
2529                 return -EOPNOTSUPP;
2530         }
2531 }
2532
2533 /**
2534  * e1000_mii_ioctl -
2535  * @netdev:
2536  * @ifreq:
2537  * @cmd:
2538  **/
2539
static int
e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
        struct e1000_adapter *adapter = netdev->priv;
        struct mii_ioctl_data *data = if_mii(ifr);
        int retval;
        uint16_t mii_reg;
        uint16_t spddplx;

        /* MII management only makes sense on copper media. */
        if(adapter->hw.media_type != e1000_media_type_copper)
                return -EOPNOTSUPP;

        switch (cmd) {
        case SIOCGMIIPHY:
                /* Report the PHY address the shared code is using. */
                data->phy_id = adapter->hw.phy_addr;
                break;
        case SIOCGMIIREG:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                /* MII register numbers are 5 bits wide; mask the rest. */
                if (e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
                                   &data->val_out))
                        return -EIO;
                break;
        case SIOCSMIIREG:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                if (data->reg_num & ~(0x1F))
                        return -EFAULT;
                mii_reg = data->val_in;
                if (e1000_write_phy_reg(&adapter->hw, data->reg_num,
                                        mii_reg))
                        return -EIO;
                /* Writes to some registers change link parameters behind
                 * the driver's back; mirror those changes into software
                 * state and reinitialize so they take effect. */
                if (adapter->hw.phy_type == e1000_phy_m88) {
                        switch (data->reg_num) {
                        case PHY_CTRL:
                                /* Nothing to mirror if the PHY was just
                                 * powered down. */
                                if(mii_reg & MII_CR_POWER_DOWN)
                                        break;
                                if(mii_reg & MII_CR_AUTO_NEG_EN) {
                                        adapter->hw.autoneg = 1;
                                        adapter->hw.autoneg_advertised = 0x2F;
                                } else {
                                        /* Decode forced speed from the two
                                         * BMCR speed-select bits (0x40 MSB,
                                         * 0x2000 LSB) and duplex from
                                         * bit 0x100, then apply it. */
                                        if (mii_reg & 0x40)
                                                spddplx = SPEED_1000;
                                        else if (mii_reg & 0x2000)
                                                spddplx = SPEED_100;
                                        else
                                                spddplx = SPEED_10;
                                        spddplx += (mii_reg & 0x100)
                                                   ? FULL_DUPLEX :
                                                   HALF_DUPLEX;
                                        retval = e1000_set_spd_dplx(adapter,
                                                                    spddplx);
                                        if(retval)
                                                return retval;
                                }
                                /* Bounce the interface (or just reset the
                                 * MAC) so the new link config is used. */
                                if(netif_running(adapter->netdev)) {
                                        e1000_down(adapter);
                                        e1000_up(adapter);
                                } else
                                        e1000_reset(adapter);
                                break;
                        case M88E1000_PHY_SPEC_CTRL:
                        case M88E1000_EXT_PHY_SPEC_CTRL:
                                /* These registers only take effect after a
                                 * PHY reset. */
                                if (e1000_phy_reset(&adapter->hw))
                                        return -EIO;
                                break;
                        }
                } else {
                        switch (data->reg_num) {
                        case PHY_CTRL:
                                if(mii_reg & MII_CR_POWER_DOWN)
                                        break;
                                if(netif_running(adapter->netdev)) {
                                        e1000_down(adapter);
                                        e1000_up(adapter);
                                } else
                                        e1000_reset(adapter);
                                break;
                        }
                }
                break;
        default:
                return -EOPNOTSUPP;
        }
        return E1000_SUCCESS;
}
2626
2627 void
2628 e1000_pci_set_mwi(struct e1000_hw *hw)
2629 {
2630         struct e1000_adapter *adapter = hw->back;
2631
2632         int ret;
2633         ret = pci_set_mwi(adapter->pdev);
2634 }
2635
2636 void
2637 e1000_pci_clear_mwi(struct e1000_hw *hw)
2638 {
2639         struct e1000_adapter *adapter = hw->back;
2640
2641         pci_clear_mwi(adapter->pdev);
2642 }
2643
2644 void
2645 e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
2646 {
2647         struct e1000_adapter *adapter = hw->back;
2648
2649         pci_read_config_word(adapter->pdev, reg, value);
2650 }
2651
2652 void
2653 e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
2654 {
2655         struct e1000_adapter *adapter = hw->back;
2656
2657         pci_write_config_word(adapter->pdev, reg, *value);
2658 }
2659
2660 uint32_t
2661 e1000_io_read(struct e1000_hw *hw, unsigned long port)
2662 {
2663         return inl(port);
2664 }
2665
2666 void
2667 e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value)
2668 {
2669         outl(value, port);
2670 }
2671
2672 static void
2673 e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
2674 {
2675         struct e1000_adapter *adapter = netdev->priv;
2676         uint32_t ctrl, rctl;
2677
2678         e1000_irq_disable(adapter);
2679         adapter->vlgrp = grp;
2680
2681         if(grp) {
2682                 /* enable VLAN tag insert/strip */
2683                 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
2684                 ctrl |= E1000_CTRL_VME;
2685                 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
2686
2687                 /* enable VLAN receive filtering */
2688                 rctl = E1000_READ_REG(&adapter->hw, RCTL);
2689                 rctl |= E1000_RCTL_VFE;
2690                 rctl &= ~E1000_RCTL_CFIEN;
2691                 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
2692         } else {
2693                 /* disable VLAN tag insert/strip */
2694                 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
2695                 ctrl &= ~E1000_CTRL_VME;
2696                 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
2697
2698                 /* disable VLAN filtering */
2699                 rctl = E1000_READ_REG(&adapter->hw, RCTL);
2700                 rctl &= ~E1000_RCTL_VFE;
2701                 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
2702         }
2703
2704         e1000_irq_enable(adapter);
2705 }
2706
2707 static void
2708 e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
2709 {
2710         struct e1000_adapter *adapter = netdev->priv;
2711         uint32_t vfta, index;
2712
2713         /* add VID to filter table */
2714         index = (vid >> 5) & 0x7F;
2715         vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
2716         vfta |= (1 << (vid & 0x1F));
2717         e1000_write_vfta(&adapter->hw, index, vfta);
2718 }
2719
2720 static void
2721 e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
2722 {
2723         struct e1000_adapter *adapter = netdev->priv;
2724         uint32_t vfta, index;
2725
2726         e1000_irq_disable(adapter);
2727
2728         if(adapter->vlgrp)
2729                 adapter->vlgrp->vlan_devices[vid] = NULL;
2730
2731         e1000_irq_enable(adapter);
2732
2733         /* remove VID from filter table */
2734         index = (vid >> 5) & 0x7F;
2735         vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
2736         vfta &= ~(1 << (vid & 0x1F));
2737         e1000_write_vfta(&adapter->hw, index, vfta);
2738 }
2739
2740 static void
2741 e1000_restore_vlan(struct e1000_adapter *adapter)
2742 {
2743         e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);
2744
2745         if(adapter->vlgrp) {
2746                 uint16_t vid;
2747                 for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
2748                         if(!adapter->vlgrp->vlan_devices[vid])
2749                                 continue;
2750                         e1000_vlan_rx_add_vid(adapter->netdev, vid);
2751                 }
2752         }
2753 }
2754
2755 int
2756 e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx)
2757 {
2758         adapter->hw.autoneg = 0;
2759
2760         switch(spddplx) {
2761         case SPEED_10 + DUPLEX_HALF:
2762                 adapter->hw.forced_speed_duplex = e1000_10_half;
2763                 break;
2764         case SPEED_10 + DUPLEX_FULL:
2765                 adapter->hw.forced_speed_duplex = e1000_10_full;
2766                 break;
2767         case SPEED_100 + DUPLEX_HALF:
2768                 adapter->hw.forced_speed_duplex = e1000_100_half;
2769                 break;
2770         case SPEED_100 + DUPLEX_FULL:
2771                 adapter->hw.forced_speed_duplex = e1000_100_full;
2772                 break;
2773         case SPEED_1000 + DUPLEX_FULL:
2774                 adapter->hw.autoneg = 1;
2775                 adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
2776                 break;
2777         case SPEED_1000 + DUPLEX_HALF: /* not supported */
2778         default:
2779                 DPRINTK(PROBE, ERR, 
2780                         "Unsupported Speed/Duplexity configuration\n");
2781                 return -EINVAL;
2782         }
2783         return 0;
2784 }
2785
2786 static int
2787 e1000_notify_reboot(struct notifier_block *nb, unsigned long event, void *p)
2788 {
2789         struct pci_dev *pdev = NULL;
2790
2791         switch(event) {
2792         case SYS_DOWN:
2793         case SYS_HALT:
2794         case SYS_POWER_OFF:
2795                 while((pdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
2796                         if(pci_dev_driver(pdev) == &e1000_driver)
2797                                 e1000_suspend(pdev, 3);
2798                 }
2799         }
2800         return NOTIFY_DONE;
2801 }
2802
static int
e1000_suspend(struct pci_dev *pdev, uint32_t state)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct e1000_adapter *adapter = netdev->priv;
        uint32_t ctrl, ctrl_ext, rctl, manc, status;
        uint32_t wufc = adapter->wol;

        /* Detach from the stack and stop the hardware before touching
         * the wake-up configuration. */
        netif_device_detach(netdev);

        if(netif_running(netdev))
                e1000_down(adapter);

        /* If link is currently up, don't arm wake-on-link-change. */
        status = E1000_READ_REG(&adapter->hw, STATUS);
        if(status & E1000_STATUS_LU)
                wufc &= ~E1000_WUFC_LNKC;

        if(wufc) {
                /* Receive path must stay configured so wake packets can
                 * be recognized while suspended. */
                e1000_setup_rctl(adapter);
                e1000_set_multi(netdev);

                /* turn on all-multi mode if wake on multicast is enabled */
                if(adapter->wol & E1000_WUFC_MC) {
                        rctl = E1000_READ_REG(&adapter->hw, RCTL);
                        rctl |= E1000_RCTL_MPE;
                        E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
                }

                if(adapter->hw.mac_type >= e1000_82540) {
                        ctrl = E1000_READ_REG(&adapter->hw, CTRL);
                        /* advertise wake from D3Cold */
                        #define E1000_CTRL_ADVD3WUC 0x00100000
                        /* phy power management enable */
                        #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
                        ctrl |= E1000_CTRL_ADVD3WUC |
                                E1000_CTRL_EN_PHY_PWR_MGMT;
                        E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
                }

                if(adapter->hw.media_type == e1000_media_type_fiber ||
                   adapter->hw.media_type == e1000_media_type_internal_serdes) {
                        /* keep the laser running in D3 */
                        ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
                        ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
                        E1000_WRITE_REG(&adapter->hw, CTRL_EXT, ctrl_ext);
                }

                /* Arm the wake-up control/filter registers and enable
                 * PME generation from D3 hot and cold. */
                E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN);
                E1000_WRITE_REG(&adapter->hw, WUFC, wufc);
                pci_enable_wake(pdev, 3, 1);
                pci_enable_wake(pdev, 4, 1); /* 4 == D3 cold */
        } else {
                /* No wake sources requested: disarm everything. */
                E1000_WRITE_REG(&adapter->hw, WUC, 0);
                E1000_WRITE_REG(&adapter->hw, WUFC, 0);
                pci_enable_wake(pdev, 3, 0);
                pci_enable_wake(pdev, 4, 0); /* 4 == D3 cold */
        }

        pci_save_state(pdev);

        /* If the management engine shares the SMBus, leave ARP offload
         * enabled and keep wake armed so manageability survives D3. */
        if(adapter->hw.mac_type >= e1000_82540 &&
           adapter->hw.media_type == e1000_media_type_copper) {
                manc = E1000_READ_REG(&adapter->hw, MANC);
                if(manc & E1000_MANC_SMBUS_EN) {
                        manc |= E1000_MANC_ARP_EN;
                        E1000_WRITE_REG(&adapter->hw, MANC, manc);
                        pci_enable_wake(pdev, 3, 1);
                        pci_enable_wake(pdev, 4, 1); /* 4 == D3 cold */
                }
        }

        pci_disable_device(pdev);

        /* Any nonzero requested state is mapped to D3. */
        state = (state > 0) ? 3 : 0;
        pci_set_power_state(pdev, state);

        return 0;
}
2881
2882 #ifdef CONFIG_PM
2883 static int
2884 e1000_resume(struct pci_dev *pdev)
2885 {
2886         struct net_device *netdev = pci_get_drvdata(pdev);
2887         struct e1000_adapter *adapter = netdev->priv;
2888         uint32_t manc, ret;
2889
2890         pci_set_power_state(pdev, 0);
2891         pci_restore_state(pdev);
2892         ret = pci_enable_device(pdev);
2893         if (pdev->is_busmaster)
2894                 pci_set_master(pdev);
2895
2896         pci_enable_wake(pdev, 3, 0);
2897         pci_enable_wake(pdev, 4, 0); /* 4 == D3 cold */
2898
2899         e1000_reset(adapter);
2900         E1000_WRITE_REG(&adapter->hw, WUS, ~0);
2901
2902         if(netif_running(netdev))
2903                 e1000_up(adapter);
2904
2905         netif_device_attach(netdev);
2906
2907         if(adapter->hw.mac_type >= e1000_82540 &&
2908            adapter->hw.media_type == e1000_media_type_copper) {
2909                 manc = E1000_READ_REG(&adapter->hw, MANC);
2910                 manc &= ~(E1000_MANC_ARP_EN);
2911                 E1000_WRITE_REG(&adapter->hw, MANC, manc);
2912         }
2913
2914         return 0;
2915 }
2916 #endif
2917
2918 #ifdef CONFIG_NET_POLL_CONTROLLER
2919 /*
2920  * Polling 'interrupt' - used by things like netconsole to send skbs
2921  * without having to re-enable interrupts. It's not called while
2922  * the interrupt routine is executing.
2923  */
2924 static void
2925 e1000_netpoll (struct net_device *netdev)
2926 {
2927         struct e1000_adapter *adapter = netdev->priv;
2928         disable_irq(adapter->pdev->irq);
2929         e1000_intr(adapter->pdev->irq, netdev, NULL);
2930         enable_irq(adapter->pdev->irq);
2931 }
2932 #endif
2933
2934 /* e1000_main.c */