/* Source: linux-2.6 drivers/net/e1000/e1000_main.c,
 * as carried in VServer 1.9.2 (patch-2.6.8.1-vs1.9.2.diff). */
1 /*******************************************************************************
2
3   
4   Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
5   
6   This program is free software; you can redistribute it and/or modify it 
7   under the terms of the GNU General Public License as published by the Free 
8   Software Foundation; either version 2 of the License, or (at your option) 
9   any later version.
10   
11   This program is distributed in the hope that it will be useful, but WITHOUT 
12   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 
13   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for 
14   more details.
15   
16   You should have received a copy of the GNU General Public License along with
17   this program; if not, write to the Free Software Foundation, Inc., 59 
18   Temple Place - Suite 330, Boston, MA  02111-1307, USA.
19   
20   The full GNU General Public License is included in this distribution in the
21   file called LICENSE.
22   
23   Contact Information:
24   Linux NICS <linux.nics@intel.com>
25   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27 *******************************************************************************/
28
29 #include "e1000.h"
30 #include <linux/rtnetlink.h>
31
32 /* Change Log
33  *
34  * 5.2.51   5/14/04
35  *   o set default configuration to 'NAPI disabled'. NAPI enabled driver
36  *     causes kernel panic when the interface is shutdown while data is being
37  *     transferred.
38  * 5.2.47   5/04/04
39  *   o fixed ethtool -t implementation
40  * 5.2.45   4/29/04
41  *   o fixed ethtool -e implementation
42  *   o Support for ethtool ops [Stephen Hemminger (shemminger@osdl.org)]
43  * 5.2.42   4/26/04
44  *   o Added support for the DPRINTK macro for enhanced error logging.  Some
45  *     parts of the patch were supplied by Jon Mason.
46  *   o Move the register_netdevice() donw in the probe routine due to a 
47  *     loading/unloading test issue.
48  *   o Added a long RX byte count the the extra ethtool data members for BER
49  *     testing purposes.
50  * 5.2.39       3/12/04
51  */
52
/* Driver identity strings: referenced by the pci_driver structure below
 * and printed at module load time by e1000_init_module(). */
char e1000_driver_name[] = "e1000";
char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
char e1000_driver_version[] = "5.2.52-k4";
char e1000_copyright[] = "Copyright (c) 1999-2004 Intel Corporation.";
57
/* e1000_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 *
 * Vendor 0x8086 is Intel; every supported PRO/1000 device ID is
 * matched regardless of subsystem IDs.
 */
static struct pci_device_id e1000_pci_tbl[] = {
	{0x8086, 0x1000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0x8086, 0x1001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0x8086, 0x1004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0x8086, 0x1008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0x8086, 0x1009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0x8086, 0x100C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0x8086, 0x100D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0x8086, 0x100E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0x8086, 0x100F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0x8086, 0x1010, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0x8086, 0x1011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0x8086, 0x1012, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0x8086, 0x1013, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0x8086, 0x1015, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0x8086, 0x1016, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0x8086, 0x1017, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0x8086, 0x1018, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0x8086, 0x1019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0x8086, 0x101D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0x8086, 0x101E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0x8086, 0x1026, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0x8086, 0x1027, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0x8086, 0x1028, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0x8086, 0x1075, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0x8086, 0x1076, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0x8086, 0x1077, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0x8086, 0x1078, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0x8086, 0x1079, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0x8086, 0x107A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0x8086, 0x107B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	/* required last entry */
	{0,}
};

/* export the table so module tools can map devices to this driver */
MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
102
103 int e1000_up(struct e1000_adapter *adapter);
104 void e1000_down(struct e1000_adapter *adapter);
105 void e1000_reset(struct e1000_adapter *adapter);
106 int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx);
107 int e1000_setup_tx_resources(struct e1000_adapter *adapter);
108 int e1000_setup_rx_resources(struct e1000_adapter *adapter);
109 void e1000_free_tx_resources(struct e1000_adapter *adapter);
110 void e1000_free_rx_resources(struct e1000_adapter *adapter);
111 void e1000_update_stats(struct e1000_adapter *adapter);
112
113 /* Local Function Prototypes */
114
115 static int e1000_init_module(void);
116 static void e1000_exit_module(void);
117 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
118 static void __devexit e1000_remove(struct pci_dev *pdev);
119 static int e1000_sw_init(struct e1000_adapter *adapter);
120 static int e1000_open(struct net_device *netdev);
121 static int e1000_close(struct net_device *netdev);
122 static void e1000_configure_tx(struct e1000_adapter *adapter);
123 static void e1000_configure_rx(struct e1000_adapter *adapter);
124 static void e1000_setup_rctl(struct e1000_adapter *adapter);
125 static void e1000_clean_tx_ring(struct e1000_adapter *adapter);
126 static void e1000_clean_rx_ring(struct e1000_adapter *adapter);
127 static void e1000_set_multi(struct net_device *netdev);
128 static void e1000_update_phy_info(unsigned long data);
129 static void e1000_watchdog(unsigned long data);
130 static void e1000_82547_tx_fifo_stall(unsigned long data);
131 static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
132 static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
133 static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
134 static int e1000_set_mac(struct net_device *netdev, void *p);
135 static inline void e1000_irq_disable(struct e1000_adapter *adapter);
136 static inline void e1000_irq_enable(struct e1000_adapter *adapter);
137 static irqreturn_t e1000_intr(int irq, void *data, struct pt_regs *regs);
138 static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter);
139 #ifdef CONFIG_E1000_NAPI
140 static int e1000_clean(struct net_device *netdev, int *budget);
141 static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter,
142                                     int *work_done, int work_to_do);
143 #else
144 static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter);
145 #endif
146 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter);
147 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
148 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
149                            int cmd);
150 void set_ethtool_ops(struct net_device *netdev);
151 static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
152 static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
153 static inline void e1000_rx_checksum(struct e1000_adapter *adapter,
154                                      struct e1000_rx_desc *rx_desc,
155                                      struct sk_buff *skb);
156 static void e1000_tx_timeout(struct net_device *dev);
157 static void e1000_tx_timeout_task(struct net_device *dev);
158 static void e1000_smartspeed(struct e1000_adapter *adapter);
159 static inline int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
160                                               struct sk_buff *skb);
161
162 static void e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp);
163 static void e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
164 static void e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
165 static void e1000_restore_vlan(struct e1000_adapter *adapter);
166
167 static int e1000_notify_reboot(struct notifier_block *, unsigned long event, void *ptr);
168 static int e1000_suspend(struct pci_dev *pdev, uint32_t state);
169 #ifdef CONFIG_PM
170 static int e1000_resume(struct pci_dev *pdev);
171 #endif
172
173 #ifdef CONFIG_NET_POLL_CONTROLLER
174 /* for netdump / net console */
175 static void e1000_netpoll (struct net_device *dev);
176 #endif
177
/* Reboot notifier: routes reboot events to e1000_notify_reboot();
 * registered in e1000_init_module(), removed in e1000_exit_module(). */
struct notifier_block e1000_notifier_reboot = {
	.notifier_call	= e1000_notify_reboot,
	.next		= NULL,
	.priority	= 0
};
183
184 /* Exported from other modules */
185
186 extern void e1000_check_options(struct e1000_adapter *adapter);
187
188
/* PCI driver glue: binds the device ID table to the probe/remove
 * entry points and, when CONFIG_PM is set, to suspend/resume. */
static struct pci_driver e1000_driver = {
	.name     = e1000_driver_name,
	.id_table = e1000_pci_tbl,
	.probe    = e1000_probe,
	.remove   = __devexit_p(e1000_remove),
	/* Power Management Hooks */
#ifdef CONFIG_PM
	.suspend  = e1000_suspend,
	.resume   = e1000_resume
#endif
};
200
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");

/* Message verbosity knob: e1000_probe() converts this into the
 * adapter->msg_enable bitmap via (1 << debug) - 1. */
static int debug = 3;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
208
209 /**
210  * e1000_init_module - Driver Registration Routine
211  *
212  * e1000_init_module is the first routine called when the driver is
213  * loaded. All it does is register with the PCI subsystem.
214  **/
215
216 static int __init
217 e1000_init_module(void)
218 {
219         int ret;
220         printk(KERN_INFO "%s - version %s\n",
221                e1000_driver_string, e1000_driver_version);
222
223         printk(KERN_INFO "%s\n", e1000_copyright);
224
225         ret = pci_module_init(&e1000_driver);
226         if(ret >= 0) {
227                 register_reboot_notifier(&e1000_notifier_reboot);
228         }
229         return ret;
230 }
231
232 module_init(e1000_init_module);
233
/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/

static void __exit
e1000_exit_module(void)
{
	/* drop the reboot hook installed by e1000_init_module() before
	 * unregistering the driver itself */
	unregister_reboot_notifier(&e1000_notifier_reboot);
	pci_unregister_driver(&e1000_driver);
}
247
248 module_exit(e1000_exit_module);
249
250
251 int
252 e1000_up(struct e1000_adapter *adapter)
253 {
254         struct net_device *netdev = adapter->netdev;
255         int err;
256
257         /* hardware has been reset, we need to reload some things */
258
259         e1000_set_multi(netdev);
260
261         e1000_restore_vlan(adapter);
262
263         e1000_configure_tx(adapter);
264         e1000_setup_rctl(adapter);
265         e1000_configure_rx(adapter);
266         e1000_alloc_rx_buffers(adapter);
267
268         if((err = request_irq(adapter->pdev->irq, &e1000_intr,
269                               SA_SHIRQ | SA_SAMPLE_RANDOM,
270                               netdev->name, netdev)))
271                 return err;
272
273         mod_timer(&adapter->watchdog_timer, jiffies);
274         e1000_irq_enable(adapter);
275
276         return 0;
277 }
278
/**
 * e1000_down - quiesce the adapter (inverse of e1000_up)
 * @adapter: board private structure
 *
 * Masks and releases the interrupt, stops all driver timers, reports
 * link-down to the stack, then resets the MAC and empties both rings.
 **/
void
e1000_down(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	/* mask interrupts before the handler is torn down */
	e1000_irq_disable(adapter);
	free_irq(adapter->pdev->irq, netdev);
	/* del_timer_sync() also waits for any running handler to finish */
	del_timer_sync(&adapter->tx_fifo_stall_timer);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);
	adapter->link_speed = 0;
	adapter->link_duplex = 0;
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	/* reset the MAC so DMA is stopped before ring buffers are freed */
	e1000_reset(adapter);
	e1000_clean_tx_ring(adapter);
	e1000_clean_rx_ring(adapter);
}
298
/**
 * e1000_reset - reset the hardware and reprogram its baseline state
 * @adapter: board private structure
 *
 * Repartitions the packet buffer (PBA), derives the flow-control
 * watermarks from it, performs the MAC reset/init sequence and
 * restores VLAN recognition and manageability settings.
 **/
void
e1000_reset(struct e1000_adapter *adapter)
{
	uint32_t pba, manc;
	/* Repartition Pba for greater than 9k mtu
	 * To take effect CTRL.RST is required.
	 */

	/* Jumbo-capable Rx buffer sizes get a different Rx/Tx split;
	 * 82547+ additionally tracks a software Tx FIFO region for its
	 * stall workaround (see e1000_82547_tx_fifo_stall). */
	if(adapter->hw.mac_type < e1000_82547) {
		if(adapter->rx_buffer_len > E1000_RXBUFFER_8192)
			pba = E1000_PBA_40K;
		else
			pba = E1000_PBA_48K;
	} else {
		if(adapter->rx_buffer_len > E1000_RXBUFFER_8192)
			pba = E1000_PBA_22K;
		else
			pba = E1000_PBA_30K;
		adapter->tx_fifo_head = 0;
		adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
		adapter->tx_fifo_size =
			(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
		atomic_set(&adapter->tx_fifo_stall, 0);
	}
	E1000_WRITE_REG(&adapter->hw, PBA, pba);

	/* flow control settings: watermarks scale with the PBA size */
	adapter->hw.fc_high_water =
		(pba << E1000_PBA_BYTES_SHIFT) - E1000_FC_HIGH_DIFF;
	adapter->hw.fc_low_water =
		(pba << E1000_PBA_BYTES_SHIFT) - E1000_FC_LOW_DIFF;
	adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME;
	adapter->hw.fc_send_xon = 1;
	adapter->hw.fc = adapter->hw.original_fc;

	e1000_reset_hw(&adapter->hw);
	if(adapter->hw.mac_type >= e1000_82544)
		E1000_WRITE_REG(&adapter->hw, WUC, 0);	/* clear wake-up state */
	e1000_init_hw(&adapter->hw);

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	E1000_WRITE_REG(&adapter->hw, VET, ETHERNET_IEEE_VLAN_TYPE);

	e1000_reset_adaptive(&adapter->hw);
	e1000_phy_get_info(&adapter->hw, &adapter->phy_info);

	/* keep ARP/management pass-through enabled across the reset */
	if(adapter->en_mng_pt) {
		manc = E1000_READ_REG(&adapter->hw, MANC);
		manc |= (E1000_MANC_ARP_EN | E1000_MANC_EN_MNG2HOST);
		E1000_WRITE_REG(&adapter->hw, MANC, manc);
	}
}
351
352 /**
353  * e1000_probe - Device Initialization Routine
354  * @pdev: PCI device information struct
355  * @ent: entry in e1000_pci_tbl
356  *
357  * Returns 0 on success, negative on failure
358  *
359  * e1000_probe initializes an adapter identified by a pci_dev structure.
360  * The OS initialization, configuring of the adapter private structure,
361  * and a hardware reset occur.
362  **/
363
364 static int __devinit
365 e1000_probe(struct pci_dev *pdev,
366             const struct pci_device_id *ent)
367 {
368         struct net_device *netdev;
369         struct e1000_adapter *adapter;
370         static int cards_found = 0;
371         unsigned long mmio_start;
372         int mmio_len;
373         int pci_using_dac;
374         int i;
375         int err;
376         uint16_t eeprom_data;
377
378         if((err = pci_enable_device(pdev)))
379                 return err;
380
381         if(!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
382                 pci_using_dac = 1;
383         } else {
384                 if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
385                         E1000_ERR("No usable DMA configuration, aborting\n");
386                         return err;
387                 }
388                 pci_using_dac = 0;
389         }
390
391         if((err = pci_request_regions(pdev, e1000_driver_name)))
392                 return err;
393
394         pci_set_master(pdev);
395
396         netdev = alloc_etherdev(sizeof(struct e1000_adapter));
397         if(!netdev) {
398                 err = -ENOMEM;
399                 goto err_alloc_etherdev;
400         }
401
402         SET_MODULE_OWNER(netdev);
403         SET_NETDEV_DEV(netdev, &pdev->dev);
404
405         pci_set_drvdata(pdev, netdev);
406         adapter = netdev->priv;
407         adapter->netdev = netdev;
408         adapter->pdev = pdev;
409         adapter->hw.back = adapter;
410         adapter->msg_enable = (1 << debug) - 1;
411
412         rtnl_lock();
413         /* we need to set the name early since the DPRINTK macro needs it set */
414         if (dev_alloc_name(netdev, netdev->name) < 0) 
415                 goto err_free_unlock;
416
417         mmio_start = pci_resource_start(pdev, BAR_0);
418         mmio_len = pci_resource_len(pdev, BAR_0);
419
420         adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
421         if(!adapter->hw.hw_addr) {
422                 err = -EIO;
423                 goto err_ioremap;
424         }
425
426         for(i = BAR_1; i <= BAR_5; i++) {
427                 if(pci_resource_len(pdev, i) == 0)
428                         continue;
429                 if(pci_resource_flags(pdev, i) & IORESOURCE_IO) {
430                         adapter->hw.io_base = pci_resource_start(pdev, i);
431                         break;
432                 }
433         }
434
435         netdev->open = &e1000_open;
436         netdev->stop = &e1000_close;
437         netdev->hard_start_xmit = &e1000_xmit_frame;
438         netdev->get_stats = &e1000_get_stats;
439         netdev->set_multicast_list = &e1000_set_multi;
440         netdev->set_mac_address = &e1000_set_mac;
441         netdev->change_mtu = &e1000_change_mtu;
442         netdev->do_ioctl = &e1000_ioctl;
443         set_ethtool_ops(netdev);
444         netdev->tx_timeout = &e1000_tx_timeout;
445         netdev->watchdog_timeo = 5 * HZ;
446 #ifdef CONFIG_E1000_NAPI
447         netdev->poll = &e1000_clean;
448         netdev->weight = 64;
449 #endif
450         netdev->vlan_rx_register = e1000_vlan_rx_register;
451         netdev->vlan_rx_add_vid = e1000_vlan_rx_add_vid;
452         netdev->vlan_rx_kill_vid = e1000_vlan_rx_kill_vid;
453 #ifdef CONFIG_NET_POLL_CONTROLLER
454         netdev->poll_controller = e1000_netpoll;
455 #endif
456
457         netdev->mem_start = mmio_start;
458         netdev->mem_end = mmio_start + mmio_len;
459         netdev->base_addr = adapter->hw.io_base;
460
461         adapter->bd_number = cards_found;
462
463         /* setup the private structure */
464
465         if((err = e1000_sw_init(adapter)))
466                 goto err_sw_init;
467
468         if(adapter->hw.mac_type >= e1000_82543) {
469                 netdev->features = NETIF_F_SG |
470                                    NETIF_F_HW_CSUM |
471                                    NETIF_F_HW_VLAN_TX |
472                                    NETIF_F_HW_VLAN_RX |
473                                    NETIF_F_HW_VLAN_FILTER;
474         } else {
475                 netdev->features = NETIF_F_SG;
476         }
477
478 #ifdef NETIF_F_TSO
479 #ifdef BROKEN_ON_NON_IA_ARCHS
480         /* Disbaled for now until root-cause is found for
481          * hangs reported against non-IA archs.  TSO can be
482          * enabled using ethtool -K eth<x> tso on */
483         if((adapter->hw.mac_type >= e1000_82544) &&
484            (adapter->hw.mac_type != e1000_82547))
485                 netdev->features |= NETIF_F_TSO;
486 #endif
487 #endif
488
489         if(pci_using_dac)
490                 netdev->features |= NETIF_F_HIGHDMA;
491
492         adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw);
493
494         /* before reading the EEPROM, reset the controller to 
495          * put the device in a known good starting state */
496         
497         e1000_reset_hw(&adapter->hw);
498
499         /* make sure the EEPROM is good */
500
501         if(e1000_validate_eeprom_checksum(&adapter->hw) < 0) {
502                 DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
503                 err = -EIO;
504                 goto err_eeprom;
505         }
506
507         /* copy the MAC address out of the EEPROM */
508
509         e1000_read_mac_addr(&adapter->hw);
510         memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
511
512         if(!is_valid_ether_addr(netdev->dev_addr)) {
513                 err = -EIO;
514                 goto err_eeprom;
515         }
516
517         e1000_read_part_num(&adapter->hw, &(adapter->part_num));
518
519         e1000_get_bus_info(&adapter->hw);
520
521         init_timer(&adapter->tx_fifo_stall_timer);
522         adapter->tx_fifo_stall_timer.function = &e1000_82547_tx_fifo_stall;
523         adapter->tx_fifo_stall_timer.data = (unsigned long) adapter;
524
525         init_timer(&adapter->watchdog_timer);
526         adapter->watchdog_timer.function = &e1000_watchdog;
527         adapter->watchdog_timer.data = (unsigned long) adapter;
528
529         init_timer(&adapter->phy_info_timer);
530         adapter->phy_info_timer.function = &e1000_update_phy_info;
531         adapter->phy_info_timer.data = (unsigned long) adapter;
532
533         INIT_WORK(&adapter->tx_timeout_task,
534                 (void (*)(void *))e1000_tx_timeout_task, netdev);
535
536         /* we're going to reset, so assume we have no link for now */
537
538         netif_carrier_off(netdev);
539         netif_stop_queue(netdev);
540
541         DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");
542         e1000_check_options(adapter);
543
544         /* Initial Wake on LAN setting
545          * If APM wake is enabled in the EEPROM,
546          * enable the ACPI Magic Packet filter
547          */
548
549         switch(adapter->hw.mac_type) {
550         case e1000_82542_rev2_0:
551         case e1000_82542_rev2_1:
552         case e1000_82543:
553                 break;
554         case e1000_82546:
555         case e1000_82546_rev_3:
556                 if((E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1)
557                    && (adapter->hw.media_type == e1000_media_type_copper)) {
558                         e1000_read_eeprom(&adapter->hw,
559                                 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
560                         break;
561                 }
562                 /* Fall Through */
563         default:
564                 e1000_read_eeprom(&adapter->hw,
565                         EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
566                 break;
567         }
568         if(eeprom_data & E1000_EEPROM_APME)
569                 adapter->wol |= E1000_WUFC_MAG;
570
571         /* reset the hardware with the new settings */
572
573         e1000_reset(adapter);
574
575         /* since we are holding the rtnl lock already, call the no-lock version */
576         if((err = register_netdevice(netdev)))
577                 goto err_register;
578
579         cards_found++;
580         rtnl_unlock();
581         return 0;
582
583 err_register:
584 err_sw_init:
585 err_eeprom:
586         iounmap(adapter->hw.hw_addr);
587 err_ioremap:
588 err_free_unlock:
589         rtnl_unlock();
590         free_netdev(netdev);
591 err_alloc_etherdev:
592         pci_release_regions(pdev);
593         return err;
594 }
595
/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/

static void __devexit
e1000_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev->priv;
	uint32_t manc;

	/* if SMBus manageability is enabled, leave ARP handling on for
	 * the management engine after the driver is gone */
	if(adapter->hw.mac_type >= e1000_82540 &&
	   adapter->hw.media_type == e1000_media_type_copper) {
		manc = E1000_READ_REG(&adapter->hw, MANC);
		if(manc & E1000_MANC_SMBUS_EN) {
			manc |= E1000_MANC_ARP_EN;
			E1000_WRITE_REG(&adapter->hw, MANC, manc);
		}
	}

	/* detach from the stack before touching hardware state */
	unregister_netdev(netdev);

	e1000_phy_hw_reset(&adapter->hw);

	iounmap(adapter->hw.hw_addr);
	pci_release_regions(pdev);

	free_netdev(netdev);
}
631
632 /**
633  * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
634  * @adapter: board private structure to initialize
635  *
636  * e1000_sw_init initializes the Adapter private data structure.
637  * Fields are initialized based on PCI device information and
638  * OS network device settings (MTU size).
639  **/
640
641 static int __devinit
642 e1000_sw_init(struct e1000_adapter *adapter)
643 {
644         struct e1000_hw *hw = &adapter->hw;
645         struct net_device *netdev = adapter->netdev;
646         struct pci_dev *pdev = adapter->pdev;
647
648         /* PCI config space info */
649
650         hw->vendor_id = pdev->vendor;
651         hw->device_id = pdev->device;
652         hw->subsystem_vendor_id = pdev->subsystem_vendor;
653         hw->subsystem_id = pdev->subsystem_device;
654
655         pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
656
657         pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
658
659         adapter->rx_buffer_len = E1000_RXBUFFER_2048;
660         hw->max_frame_size = netdev->mtu +
661                              ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
662         hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
663
664         /* identify the MAC */
665
666         if (e1000_set_mac_type(hw)) {
667                 DPRINTK(PROBE, ERR, "Unknown MAC Type\n");
668                 return -EIO;
669         }
670
671         /* initialize eeprom parameters */
672
673         e1000_init_eeprom_params(hw);
674
675         if((hw->mac_type == e1000_82541) ||
676            (hw->mac_type == e1000_82547) ||
677            (hw->mac_type == e1000_82541_rev_2) ||
678            (hw->mac_type == e1000_82547_rev_2))
679                 hw->phy_init_script = 1;
680
681         e1000_set_media_type(hw);
682
683         if(hw->mac_type < e1000_82543)
684                 hw->report_tx_early = 0;
685         else
686                 hw->report_tx_early = 1;
687
688         hw->wait_autoneg_complete = FALSE;
689         hw->tbi_compatibility_en = TRUE;
690         hw->adaptive_ifs = TRUE;
691
692         /* Copper options */
693
694         if(hw->media_type == e1000_media_type_copper) {
695                 hw->mdix = AUTO_ALL_MODES;
696                 hw->disable_polarity_correction = FALSE;
697                 hw->master_slave = E1000_MASTER_SLAVE;
698         }
699
700         atomic_set(&adapter->irq_sem, 1);
701         spin_lock_init(&adapter->stats_lock);
702         spin_lock_init(&adapter->tx_lock);
703
704         return 0;
705 }
706
707 /**
708  * e1000_open - Called when a network interface is made active
709  * @netdev: network interface device structure
710  *
711  * Returns 0 on success, negative value on failure
712  *
713  * The open entry point is called when a network interface is made
714  * active by the system (IFF_UP).  At this point all resources needed
715  * for transmit and receive operations are allocated, the interrupt
716  * handler is registered with the OS, the watchdog timer is started,
717  * and the stack is notified that the interface is ready.
718  **/
719
720 static int
721 e1000_open(struct net_device *netdev)
722 {
723         struct e1000_adapter *adapter = netdev->priv;
724         int err;
725
726         /* allocate transmit descriptors */
727
728         if((err = e1000_setup_tx_resources(adapter)))
729                 goto err_setup_tx;
730
731         /* allocate receive descriptors */
732
733         if((err = e1000_setup_rx_resources(adapter)))
734                 goto err_setup_rx;
735
736         if((err = e1000_up(adapter)))
737                 goto err_up;
738
739         return 0;
740
741 err_up:
742         e1000_free_rx_resources(adapter);
743 err_setup_rx:
744         e1000_free_tx_resources(adapter);
745 err_setup_tx:
746         e1000_reset(adapter);
747
748         return err;
749 }
750
/**
 * e1000_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the drivers control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/

static int
e1000_close(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev->priv;

	/* stop IRQ/timers and reset the MAC before freeing the rings */
	e1000_down(adapter);

	e1000_free_tx_resources(adapter);
	e1000_free_rx_resources(adapter);

	return 0;
}
775
776 /**
777  * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
778  * @adapter: board private structure
779  *
780  * Return 0 on success, negative on failure
781  **/
782
783 int
784 e1000_setup_tx_resources(struct e1000_adapter *adapter)
785 {
786         struct e1000_desc_ring *txdr = &adapter->tx_ring;
787         struct pci_dev *pdev = adapter->pdev;
788         int size;
789
790         size = sizeof(struct e1000_buffer) * txdr->count;
791         txdr->buffer_info = kmalloc(size, GFP_KERNEL);
792         if(!txdr->buffer_info) {
793                 return -ENOMEM;
794         }
795         memset(txdr->buffer_info, 0, size);
796
797         /* round up to nearest 4K */
798
799         txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
800         E1000_ROUNDUP(txdr->size, 4096);
801
802         txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
803         if(!txdr->desc) {
804                 kfree(txdr->buffer_info);
805                 return -ENOMEM;
806         }
807         memset(txdr->desc, 0, txdr->size);
808
809         txdr->next_to_use = 0;
810         txdr->next_to_clean = 0;
811
812         return 0;
813 }
814
/**
 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset: ring base/length,
 * head/tail pointers, inter-packet gap, interrupt delays, transmit
 * control, and the default per-descriptor command bits.
 **/

static void
e1000_configure_tx(struct e1000_adapter *adapter)
{
	uint64_t tdba = adapter->tx_ring.dma;
	uint32_t tdlen = adapter->tx_ring.count * sizeof(struct e1000_tx_desc);
	uint32_t tctl, tipg;

	/* program the 64-bit DMA base address of the descriptor ring */
	E1000_WRITE_REG(&adapter->hw, TDBAL, (tdba & 0x00000000ffffffffULL));
	E1000_WRITE_REG(&adapter->hw, TDBAH, (tdba >> 32));

	E1000_WRITE_REG(&adapter->hw, TDLEN, tdlen);

	/* Setup the HW Tx Head and Tail descriptor pointers */
	E1000_WRITE_REG(&adapter->hw, TDH, 0);
	E1000_WRITE_REG(&adapter->hw, TDT, 0);

	/* Set the default values for the Tx Inter Packet Gap timer */
	switch (adapter->hw.mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
		tipg = DEFAULT_82542_TIPG_IPGT;
		tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
		tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
		break;
	default:
		/* fiber/serdes and copper media use different IPGT defaults */
		if(adapter->hw.media_type == e1000_media_type_fiber ||
		   adapter->hw.media_type == e1000_media_type_internal_serdes)
			tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
		else
			tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
		tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
		tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
	}
	E1000_WRITE_REG(&adapter->hw, TIPG, tipg);

	/* Set the Tx Interrupt Delay register */
	E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay);
	if(adapter->hw.mac_type >= e1000_82540)
		E1000_WRITE_REG(&adapter->hw, TADV, adapter->tx_abs_int_delay);

	/* Program the Transmit Control Register */
	tctl = E1000_READ_REG(&adapter->hw, TCTL);

	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_EN | E1000_TCTL_PSP |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	E1000_WRITE_REG(&adapter->hw, TCTL, tctl);

	e1000_config_collision_dist(&adapter->hw);

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_TXD_CMD_IDE | E1000_TXD_CMD_EOP |
		E1000_TXD_CMD_IFCS;

	/* pick RS vs RPS based on report_tx_early set in e1000_sw_init() */
	if(adapter->hw.report_tx_early == 1)
		adapter->txd_cmd |= E1000_TXD_CMD_RS;
	else
		adapter->txd_cmd |= E1000_TXD_CMD_RPS;

	/* Cache if we're 82544 running in PCI-X because we'll
	 * need this to apply a workaround later in the send path. */
	if(adapter->hw.mac_type == e1000_82544 &&
	   adapter->hw.bus_type == e1000_bus_type_pcix)
		adapter->pcix_82544 = 1;
}
892
893 /**
894  * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
895  * @adapter: board private structure
896  *
897  * Returns 0 on success, negative on failure
898  **/
899
900 int
901 e1000_setup_rx_resources(struct e1000_adapter *adapter)
902 {
903         struct e1000_desc_ring *rxdr = &adapter->rx_ring;
904         struct pci_dev *pdev = adapter->pdev;
905         int size;
906
907         size = sizeof(struct e1000_buffer) * rxdr->count;
908         rxdr->buffer_info = kmalloc(size, GFP_KERNEL);
909         if(!rxdr->buffer_info) {
910                 return -ENOMEM;
911         }
912         memset(rxdr->buffer_info, 0, size);
913
914         /* Round up to nearest 4K */
915
916         rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
917         E1000_ROUNDUP(rxdr->size, 4096);
918
919         rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
920
921         if(!rxdr->desc) {
922                 kfree(rxdr->buffer_info);
923                 return -ENOMEM;
924         }
925         memset(rxdr->desc, 0, rxdr->size);
926
927         rxdr->next_to_clean = 0;
928         rxdr->next_to_use = 0;
929
930         return 0;
931 }
932
933 /**
934  * e1000_setup_rctl - configure the receive control register
935  * @adapter: Board private structure
936  **/
937
938 static void
939 e1000_setup_rctl(struct e1000_adapter *adapter)
940 {
941         uint32_t rctl;
942
943         rctl = E1000_READ_REG(&adapter->hw, RCTL);
944
945         rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
946
947         rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
948                 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
949                 (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
950
951         if(adapter->hw.tbi_compatibility_on == 1)
952                 rctl |= E1000_RCTL_SBP;
953         else
954                 rctl &= ~E1000_RCTL_SBP;
955
956         rctl &= ~(E1000_RCTL_SZ_4096);
957         switch (adapter->rx_buffer_len) {
958         case E1000_RXBUFFER_2048:
959         default:
960                 rctl |= E1000_RCTL_SZ_2048;
961                 rctl &= ~(E1000_RCTL_BSEX | E1000_RCTL_LPE);
962                 break;
963         case E1000_RXBUFFER_4096:
964                 rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
965                 break;
966         case E1000_RXBUFFER_8192:
967                 rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
968                 break;
969         case E1000_RXBUFFER_16384:
970                 rctl |= E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
971                 break;
972         }
973
974         E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
975 }
976
/**
 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset: interrupt delay and
 * throttle registers, descriptor ring base/length and head/tail
 * pointers, optional hardware checksum offload, then re-enable
 * receives by restoring the previously-read RCTL value.
 **/

static void
e1000_configure_rx(struct e1000_adapter *adapter)
{
        uint64_t rdba = adapter->rx_ring.dma;
        uint32_t rdlen = adapter->rx_ring.count * sizeof(struct e1000_rx_desc);
        uint32_t rctl;
        uint32_t rxcsum;

        /* make sure receives are disabled while setting up the descriptors */

        rctl = E1000_READ_REG(&adapter->hw, RCTL);
        E1000_WRITE_REG(&adapter->hw, RCTL, rctl & ~E1000_RCTL_EN);

        /* set the Receive Delay Timer Register */

        E1000_WRITE_REG(&adapter->hw, RDTR, adapter->rx_int_delay);

        /* RADV and ITR registers only exist on 82540 and newer */
        if(adapter->hw.mac_type >= e1000_82540) {
                E1000_WRITE_REG(&adapter->hw, RADV, adapter->rx_abs_int_delay);
                /* itr > 1 means a fixed interrupts/sec target; convert to
                 * the ITR register's units (NOTE(review): assumed to be
                 * 256ns granularity per the 8254x manual -- confirm) */
                if(adapter->itr > 1)
                        E1000_WRITE_REG(&adapter->hw, ITR,
                                1000000000 / (adapter->itr * 256));
        }

        /* Setup the Base and Length of the Rx Descriptor Ring */

        E1000_WRITE_REG(&adapter->hw, RDBAL, (rdba & 0x00000000ffffffffULL));
        E1000_WRITE_REG(&adapter->hw, RDBAH, (rdba >> 32));

        E1000_WRITE_REG(&adapter->hw, RDLEN, rdlen);

        /* Setup the HW Rx Head and Tail Descriptor Pointers */
        E1000_WRITE_REG(&adapter->hw, RDH, 0);
        E1000_WRITE_REG(&adapter->hw, RDT, 0);

        /* Enable 82543 Receive Checksum Offload for TCP and UDP */
        if((adapter->hw.mac_type >= e1000_82543) &&
           (adapter->rx_csum == TRUE)) {
                rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM);
                rxcsum |= E1000_RXCSUM_TUOFL;
                E1000_WRITE_REG(&adapter->hw, RXCSUM, rxcsum);
        }

        /* Enable Receives (writes back the RCTL value saved above,
         * EN bit included if it was previously set) */

        E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
}
1031
1032 /**
1033  * e1000_free_tx_resources - Free Tx Resources
1034  * @adapter: board private structure
1035  *
1036  * Free all transmit software resources
1037  **/
1038
1039 void
1040 e1000_free_tx_resources(struct e1000_adapter *adapter)
1041 {
1042         struct pci_dev *pdev = adapter->pdev;
1043
1044         e1000_clean_tx_ring(adapter);
1045
1046         kfree(adapter->tx_ring.buffer_info);
1047         adapter->tx_ring.buffer_info = NULL;
1048
1049         pci_free_consistent(pdev, adapter->tx_ring.size,
1050                             adapter->tx_ring.desc, adapter->tx_ring.dma);
1051
1052         adapter->tx_ring.desc = NULL;
1053 }
1054
/**
 * e1000_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 *
 * Frees every sk_buff still attached to the Tx ring (unmapping its
 * DMA buffer first), zeroes the software bookkeeping and the hardware
 * descriptor ring, resets the ring indices, and rewinds the hardware
 * head/tail pointers to match.
 **/

static void
e1000_clean_tx_ring(struct e1000_adapter *adapter)
{
        struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
        struct e1000_buffer *buffer_info;
        struct pci_dev *pdev = adapter->pdev;
        unsigned long size;
        unsigned int i;

        /* Free all the Tx ring sk_buffs.  NOTE(review): only entries
         * holding an skb pointer are unmapped here; e1000_tx_map()
         * stores the skb only on a packet's LAST buffer, while earlier
         * buffers of a multi-descriptor packet also carry dma/length --
         * confirm those mappings need no explicit unmap */

        for(i = 0; i < tx_ring->count; i++) {
                buffer_info = &tx_ring->buffer_info[i];
                if(buffer_info->skb) {

                        /* NOTE(review): the linear part was mapped with
                         * pci_map_single() in e1000_tx_map() but is
                         * unmapped here with pci_unmap_page() --
                         * equivalent on most platforms, worth confirming */
                        pci_unmap_page(pdev,
                                       buffer_info->dma,
                                       buffer_info->length,
                                       PCI_DMA_TODEVICE);

                        dev_kfree_skb(buffer_info->skb);

                        buffer_info->skb = NULL;
                }
        }

        size = sizeof(struct e1000_buffer) * tx_ring->count;
        memset(tx_ring->buffer_info, 0, size);

        /* Zero out the descriptor ring */

        memset(tx_ring->desc, 0, tx_ring->size);

        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;

        /* keep the hardware's head/tail consistent with the empty ring */
        E1000_WRITE_REG(&adapter->hw, TDH, 0);
        E1000_WRITE_REG(&adapter->hw, TDT, 0);
}
1099
1100 /**
1101  * e1000_free_rx_resources - Free Rx Resources
1102  * @adapter: board private structure
1103  *
1104  * Free all receive software resources
1105  **/
1106
1107 void
1108 e1000_free_rx_resources(struct e1000_adapter *adapter)
1109 {
1110         struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
1111         struct pci_dev *pdev = adapter->pdev;
1112
1113         e1000_clean_rx_ring(adapter);
1114
1115         kfree(rx_ring->buffer_info);
1116         rx_ring->buffer_info = NULL;
1117
1118         pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
1119
1120         rx_ring->desc = NULL;
1121 }
1122
1123 /**
1124  * e1000_clean_rx_ring - Free Rx Buffers
1125  * @adapter: board private structure
1126  **/
1127
1128 static void
1129 e1000_clean_rx_ring(struct e1000_adapter *adapter)
1130 {
1131         struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
1132         struct e1000_buffer *buffer_info;
1133         struct pci_dev *pdev = adapter->pdev;
1134         unsigned long size;
1135         unsigned int i;
1136
1137         /* Free all the Rx ring sk_buffs */
1138
1139         for(i = 0; i < rx_ring->count; i++) {
1140                 buffer_info = &rx_ring->buffer_info[i];
1141                 if(buffer_info->skb) {
1142
1143                         pci_unmap_single(pdev,
1144                                          buffer_info->dma,
1145                                          buffer_info->length,
1146                                          PCI_DMA_FROMDEVICE);
1147
1148                         dev_kfree_skb(buffer_info->skb);
1149
1150                         buffer_info->skb = NULL;
1151                 }
1152         }
1153
1154         size = sizeof(struct e1000_buffer) * rx_ring->count;
1155         memset(rx_ring->buffer_info, 0, size);
1156
1157         /* Zero out the descriptor ring */
1158
1159         memset(rx_ring->desc, 0, rx_ring->size);
1160
1161         rx_ring->next_to_clean = 0;
1162         rx_ring->next_to_use = 0;
1163
1164         E1000_WRITE_REG(&adapter->hw, RDH, 0);
1165         E1000_WRITE_REG(&adapter->hw, RDT, 0);
1166 }
1167
/* The 82542 2.0 (revision 2) needs to have the receive unit in reset
 * and memory write and invalidate disabled for certain operations
 */
static void
e1000_enter_82542_rst(struct e1000_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        uint32_t rctl;

        /* turn off PCI Memory Write and Invalidate for the duration;
         * restored in e1000_leave_82542_rst() */
        e1000_pci_clear_mwi(&adapter->hw);

        /* put the receive unit into reset and let it settle */
        rctl = E1000_READ_REG(&adapter->hw, RCTL);
        rctl |= E1000_RCTL_RST;
        E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
        E1000_WRITE_FLUSH(&adapter->hw);
        mdelay(5);

        /* the reset invalidates the Rx ring contents, so drop them */
        if(netif_running(netdev))
                e1000_clean_rx_ring(adapter);
}
1188
/* Undo e1000_enter_82542_rst(): take the receive unit out of reset,
 * restore MWI if it was enabled, and rebuild the Rx ring */
static void
e1000_leave_82542_rst(struct e1000_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        uint32_t rctl;

        /* clear the RST bit set on entry and let the unit settle */
        rctl = E1000_READ_REG(&adapter->hw, RCTL);
        rctl &= ~E1000_RCTL_RST;
        E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
        E1000_WRITE_FLUSH(&adapter->hw);
        mdelay(5);

        /* re-enable Memory Write and Invalidate only if the saved PCI
         * command word shows it was on before */
        if(adapter->hw.pci_cmd_word & PCI_COMMAND_INVALIDATE)
                e1000_pci_set_mwi(&adapter->hw);

        /* the Rx ring was emptied on entry; reprogram and refill it */
        if(netif_running(netdev)) {
                e1000_configure_rx(adapter);
                e1000_alloc_rx_buffers(adapter);
        }
}
1209
/**
 * e1000_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/

static int
e1000_set_mac(struct net_device *netdev, void *p)
{
        struct e1000_adapter *adapter = netdev->priv;
        struct sockaddr *addr = p;

        /* reject invalid (e.g. multicast or all-zero) addresses */
        if(!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* 82542 2.0 needs to be in reset to write receive address registers */

        if(adapter->hw.mac_type == e1000_82542_rev2_0)
                e1000_enter_82542_rst(adapter);

        /* keep both software copies (netdev and hw struct) in sync */
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);

        /* program receive address register 0 (the station address) */
        e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);

        if(adapter->hw.mac_type == e1000_82542_rev2_0)
                e1000_leave_82542_rst(adapter);

        return 0;
}
1242
/**
 * e1000_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
 **/

static void
e1000_set_multi(struct net_device *netdev)
{
        struct e1000_adapter *adapter = netdev->priv;
        struct e1000_hw *hw = &adapter->hw;
        struct dev_mc_list *mc_ptr;
        uint32_t rctl;
        uint32_t hash_value;
        int i;

        /* Check for Promiscuous and All Multicast modes */

        rctl = E1000_READ_REG(hw, RCTL);

        if(netdev->flags & IFF_PROMISC) {
                /* promiscuous: accept all unicast and all multicast */
                rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
        } else if(netdev->flags & IFF_ALLMULTI) {
                rctl |= E1000_RCTL_MPE;
                rctl &= ~E1000_RCTL_UPE;
        } else {
                rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
        }

        E1000_WRITE_REG(hw, RCTL, rctl);

        /* 82542 2.0 needs to be in reset to write receive address registers */

        if(hw->mac_type == e1000_82542_rev2_0)
                e1000_enter_82542_rst(adapter);

        /* load the first 14 multicast addresses into the exact filters 1-14
         * RAR 0 is used for the station MAC address
         * if there are not 14 addresses, go ahead and clear the filters
         */
        mc_ptr = netdev->mc_list;

        for(i = 1; i < E1000_RAR_ENTRIES; i++) {
                if(mc_ptr) {
                        e1000_rar_set(hw, mc_ptr->dmi_addr, i);
                        mc_ptr = mc_ptr->next;
                } else {
                        /* each RAR entry is a pair of 32-bit registers;
                         * clear both halves of unused entries */
                        E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
                        E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
                }
        }

        /* clear the old settings from the multicast hash table */

        for(i = 0; i < E1000_NUM_MTA_REGISTERS; i++)
                E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);

        /* load any remaining addresses into the hash table */

        for(; mc_ptr; mc_ptr = mc_ptr->next) {
                hash_value = e1000_hash_mc_addr(hw, mc_ptr->dmi_addr);
                e1000_mta_set(hw, hash_value);
        }

        if(hw->mac_type == e1000_82542_rev2_0)
                e1000_leave_82542_rst(adapter);
}
1314
/* need to wait a few seconds after link up to get diagnostic information from the phy */

/* Timer callback; @data is the adapter pointer cast to unsigned long
 * when the phy_info_timer was armed (see e1000_watchdog()) */
static void
e1000_update_phy_info(unsigned long data)
{
        struct e1000_adapter *adapter = (struct e1000_adapter *) data;
        e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
}
1323
/**
 * e1000_82547_tx_fifo_stall - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 *
 * Part of the 82547 Tx FIFO workaround: once the transmitter has
 * fully drained, rewind the FIFO pointers to the start of the Tx
 * region and restart; otherwise re-check on the next jiffy.
 **/

static void
e1000_82547_tx_fifo_stall(unsigned long data)
{
        struct e1000_adapter *adapter = (struct e1000_adapter *) data;
        struct net_device *netdev = adapter->netdev;
        uint32_t tctl;

        if(atomic_read(&adapter->tx_fifo_stall)) {
                /* drained when the descriptor ring is empty (TDT == TDH)
                 * and the FIFO tail/head and saved tail/head register
                 * pairs each agree */
                if((E1000_READ_REG(&adapter->hw, TDT) ==
                    E1000_READ_REG(&adapter->hw, TDH)) &&
                   (E1000_READ_REG(&adapter->hw, TDFT) ==
                    E1000_READ_REG(&adapter->hw, TDFH)) &&
                   (E1000_READ_REG(&adapter->hw, TDFTS) ==
                    E1000_READ_REG(&adapter->hw, TDFHS))) {
                        /* disable transmits while rewinding all four
                         * FIFO pointers to tx_head_addr, then restore
                         * the original TCTL value */
                        tctl = E1000_READ_REG(&adapter->hw, TCTL);
                        E1000_WRITE_REG(&adapter->hw, TCTL,
                                        tctl & ~E1000_TCTL_EN);
                        E1000_WRITE_REG(&adapter->hw, TDFT,
                                        adapter->tx_head_addr);
                        E1000_WRITE_REG(&adapter->hw, TDFH,
                                        adapter->tx_head_addr);
                        E1000_WRITE_REG(&adapter->hw, TDFTS,
                                        adapter->tx_head_addr);
                        E1000_WRITE_REG(&adapter->hw, TDFHS,
                                        adapter->tx_head_addr);
                        E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
                        E1000_WRITE_FLUSH(&adapter->hw);

                        /* stall resolved: reset software FIFO tracking
                         * and let the stack queue packets again */
                        adapter->tx_fifo_head = 0;
                        atomic_set(&adapter->tx_fifo_stall, 0);
                        netif_wake_queue(netdev);
                } else {
                        /* not drained yet -- poll again in one jiffy */
                        mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
                }
        }
}
1365
/**
 * e1000_watchdog - Timer Call-back
 * @data: pointer to netdev cast into an unsigned long
 *
 * Periodic (re-armed every 2 seconds) housekeeping: tracks link state
 * transitions, updates statistics, computes the dynamic interrupt
 * throttle rate, schedules a controller reset when Tx work is queued
 * with the link down, and stops the queue on a suspected Tx hang.
 **/

static void
e1000_watchdog(unsigned long data)
{
        struct e1000_adapter *adapter = (struct e1000_adapter *) data;
        struct net_device *netdev = adapter->netdev;
        struct e1000_desc_ring *txdr = &adapter->tx_ring;
        unsigned int i;
        uint32_t link;

        e1000_check_for_link(&adapter->hw);

        /* with internal serdes and autoneg disabled in TXCW, rely on
         * the driver's serdes_link_down tracking instead of STATUS.LU */
        if((adapter->hw.media_type == e1000_media_type_internal_serdes) &&
           !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE))
                link = !adapter->hw.serdes_link_down;
        else
                link = E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU;

        if(link) {
                /* down -> up transition: log speed/duplex, restart the
                 * queue, and refresh phy info in 2 seconds */
                if(!netif_carrier_ok(netdev)) {
                        e1000_get_speed_and_duplex(&adapter->hw,
                                                   &adapter->link_speed,
                                                   &adapter->link_duplex);

                        DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s\n",
                               adapter->link_speed,
                               adapter->link_duplex == FULL_DUPLEX ?
                               "Full Duplex" : "Half Duplex");

                        netif_carrier_on(netdev);
                        netif_wake_queue(netdev);
                        mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ);
                        adapter->smartspeed = 0;
                }
        } else {
                /* up -> down transition: stop the queue; smartspeed
                 * workaround runs every interval while link is down */
                if(netif_carrier_ok(netdev)) {
                        adapter->link_speed = 0;
                        adapter->link_duplex = 0;
                        DPRINTK(LINK, INFO, "NIC Link is Down\n");
                        netif_carrier_off(netdev);
                        netif_stop_queue(netdev);
                        mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ);
                }

                e1000_smartspeed(adapter);
        }

        e1000_update_stats(adapter);

        /* per-interval packet/collision deltas, stored in the hw struct
         * for e1000_update_adaptive() below */
        adapter->hw.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
        adapter->tpt_old = adapter->stats.tpt;
        adapter->hw.collision_delta = adapter->stats.colc - adapter->colc_old;
        adapter->colc_old = adapter->stats.colc;

        /* octets received/transmitted this interval (dynamic ITR input) */
        adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
        adapter->gorcl_old = adapter->stats.gorcl;
        adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
        adapter->gotcl_old = adapter->stats.gotcl;

        e1000_update_adaptive(&adapter->hw);

        if(!netif_carrier_ok(netdev)) {
                if(E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
                        /* We've lost link, so the controller stops DMA,
                         * but we've got queued Tx work that's never going
                         * to get done, so reset controller to flush Tx.
                         * (Do the reset outside of interrupt context). */
                        schedule_work(&adapter->tx_timeout_task);
                }
        }

        /* Dynamic mode for Interrupt Throttle Rate (ITR) */
        if(adapter->hw.mac_type >= e1000_82540 && adapter->itr == 1) {
                /* Symmetric Tx/Rx gets a reduced ITR=2000; Total
                 * asymmetrical Tx or Rx gets ITR=8000; everyone
                 * else is between 2000-8000. */
                uint32_t goc = (adapter->gotcl + adapter->gorcl) / 10000;
                uint32_t dif = (adapter->gotcl > adapter->gorcl ? 
                        adapter->gotcl - adapter->gorcl :
                        adapter->gorcl - adapter->gotcl) / 10000;
                uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
                /* convert interrupts/sec to the ITR register's units
                 * (NOTE(review): assumed 256ns granularity -- confirm) */
                E1000_WRITE_REG(&adapter->hw, ITR, 1000000000 / (itr * 256));
        }

        /* Cause software interrupt to ensure rx ring is cleaned */
        E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0);

        /* Early detection of hung controller: a Tx buffer still mapped
         * after 1 second, with the TXOFF status bit clear (NOTE(review):
         * TXOFF is assumed to indicate a flow-control pause -- confirm),
         * suggests a hang, so stop the queue */
        i = txdr->next_to_clean;
        if(txdr->buffer_info[i].dma &&
           time_after(jiffies, txdr->buffer_info[i].time_stamp + HZ) &&
           !(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_TXOFF))
                netif_stop_queue(netdev);

        /* Reset the timer */
        mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
}
1467
/* tx_flags bits passed to e1000_tx_queue() to select descriptor options */
#define E1000_TX_FLAGS_CSUM             0x00000001
#define E1000_TX_FLAGS_VLAN             0x00000002
#define E1000_TX_FLAGS_TSO              0x00000004
/* the upper 16 bits of tx_flags carry the VLAN tag itself */
#define E1000_TX_FLAGS_VLAN_MASK        0xffff0000
#define E1000_TX_FLAGS_VLAN_SHIFT       16
1473
/**
 * e1000_tso - queue a TSO context descriptor if the skb needs one
 * @adapter: board private structure
 * @skb: packet whose headers describe the segmentation
 *
 * Returns TRUE if a context descriptor was queued and next_to_use
 * advanced, FALSE otherwise.  Compiles to "always FALSE" on kernels
 * without NETIF_F_TSO.
 */
static inline boolean_t
e1000_tso(struct e1000_adapter *adapter, struct sk_buff *skb)
{
#ifdef NETIF_F_TSO
        struct e1000_context_desc *context_desc;
        unsigned int i;
        uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
        uint16_t ipcse, tucse, mss;

        if(skb_shinfo(skb)->tso_size) {
                /* total header length: bytes up to the TCP header plus
                 * the TCP header itself (doff counts 32-bit words) */
                hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
                mss = skb_shinfo(skb)->tso_size;
                /* zero the IP total length/checksum and seed the TCP
                 * checksum with the pseudo-header sum (len = 0); the
                 * hardware presumably fills in per-segment values */
                skb->nh.iph->tot_len = 0;
                skb->nh.iph->check = 0;
                skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
                                                      skb->nh.iph->daddr,
                                                      0,
                                                      IPPROTO_TCP,
                                                      0);
                /* checksum start/offset/end fields, all offsets from the
                 * start of packet data */
                ipcss = skb->nh.raw - skb->data;
                ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data;
                ipcse = skb->h.raw - skb->data - 1;
                tucss = skb->h.raw - skb->data;
                tucso = (void *)&(skb->h.th->check) - (void *)skb->data;
                tucse = 0;

                i = adapter->tx_ring.next_to_use;
                context_desc = E1000_CONTEXT_DESC(adapter->tx_ring, i);

                context_desc->lower_setup.ip_fields.ipcss  = ipcss;
                context_desc->lower_setup.ip_fields.ipcso  = ipcso;
                context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
                context_desc->upper_setup.tcp_fields.tucss = tucss;
                context_desc->upper_setup.tcp_fields.tucso = tucso;
                context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
                context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
                context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
                /* command word: extended desc + TSO + IPv4 + TCP, plus
                 * the payload length (total minus headers) */
                context_desc->cmd_and_length = cpu_to_le32(
                        E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
                        E1000_TXD_CMD_IP | E1000_TXD_CMD_TCP |
                        (skb->len - (hdr_len)));

                /* consume one descriptor, wrapping at the ring end */
                if(++i == adapter->tx_ring.count) i = 0;
                adapter->tx_ring.next_to_use = i;

                return TRUE;
        }
#endif

        return FALSE;
}
1525
1526 static inline boolean_t
1527 e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
1528 {
1529         struct e1000_context_desc *context_desc;
1530         unsigned int i;
1531         uint8_t css, cso;
1532
1533         if(skb->ip_summed == CHECKSUM_HW) {
1534                 css = skb->h.raw - skb->data;
1535                 cso = (skb->h.raw + skb->csum) - skb->data;
1536
1537                 i = adapter->tx_ring.next_to_use;
1538                 context_desc = E1000_CONTEXT_DESC(adapter->tx_ring, i);
1539
1540                 context_desc->upper_setup.tcp_fields.tucss = css;
1541                 context_desc->upper_setup.tcp_fields.tucso = cso;
1542                 context_desc->upper_setup.tcp_fields.tucse = 0;
1543                 context_desc->tcp_seg_setup.data = 0;
1544                 context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
1545
1546                 if(++i == adapter->tx_ring.count) i = 0;
1547                 adapter->tx_ring.next_to_use = i;
1548
1549                 return TRUE;
1550         }
1551
1552         return FALSE;
1553 }
1554
/* largest data chunk one Tx descriptor carries: 2^12 = 4096 bytes */
#define E1000_MAX_TXD_PWR       12
#define E1000_MAX_DATA_PER_TXD  (1<<E1000_MAX_TXD_PWR)
1557
/**
 * e1000_tx_map - DMA-map an skb and fill per-buffer bookkeeping
 * @adapter: board private structure
 * @skb: packet to map
 * @first: ring index of the packet's first descriptor
 * @max_per_txd: largest number of bytes one descriptor may carry
 * @nr_frags: number of page fragments in the skb
 * @mss: TSO segment size (0 when not doing TSO)
 *
 * Splits the skb's linear data and page fragments into chunks of at
 * most max_per_txd bytes (with 82544/TSO errata adjustments), maps
 * each chunk for DMA, and records length/dma/timestamp in the ring's
 * buffer_info entries starting at next_to_use.  The skb pointer goes
 * on the LAST entry, and buffer_info[first].next_to_watch records
 * that index for the cleanup path.  Returns the chunk count.
 */
static inline int
e1000_tx_map(struct e1000_adapter *adapter, struct sk_buff *skb,
        unsigned int first, unsigned int max_per_txd,
        unsigned int nr_frags, unsigned int mss)
{
        struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
        struct e1000_buffer *buffer_info;
        unsigned int len = skb->len;
        unsigned int offset = 0, size, count = 0, i;
        unsigned int f;
        /* linear portion only; page fragments are handled below */
        len -= skb->data_len;

        i = tx_ring->next_to_use;

        while(len) {
                buffer_info = &tx_ring->buffer_info[i];
                size = min(len, max_per_txd);
#ifdef NETIF_F_TSO
                /* Workaround for premature desc write-backs
                 * in TSO mode.  Append 4-byte sentinel desc */
                if(mss && !nr_frags && size == len && size > 8)
                        size -= 4;
#endif
                /* Workaround for potential 82544 hang in PCI-X.  Avoid
                 * terminating buffers within evenly-aligned dwords. */
                if(adapter->pcix_82544 &&
                   !((unsigned long)(skb->data + offset + size - 1) & 4) &&
                   size > 4)
                        size -= 4;

                buffer_info->length = size;
                buffer_info->dma =
                        pci_map_single(adapter->pdev,
                                skb->data + offset,
                                size,
                                PCI_DMA_TODEVICE);
                /* time_stamp feeds the hang detection in e1000_watchdog() */
                buffer_info->time_stamp = jiffies;

                len -= size;
                offset += size;
                count++;
                if(++i == tx_ring->count) i = 0;
        }

        for(f = 0; f < nr_frags; f++) {
                struct skb_frag_struct *frag;

                frag = &skb_shinfo(skb)->frags[f];
                len = frag->size;
                offset = frag->page_offset;

                while(len) {
                        buffer_info = &tx_ring->buffer_info[i];
                        size = min(len, max_per_txd);
#ifdef NETIF_F_TSO
                        /* Workaround for premature desc write-backs
                         * in TSO mode.  Append 4-byte sentinel desc */
                        if(mss && f == (nr_frags-1) && size == len && size > 8)
                                size -= 4;
#endif
                        /* Workaround for potential 82544 hang in PCI-X.
                         * Avoid terminating buffers within evenly-aligned
                         * dwords.
                         * NOTE(review): this test adds offset to the
                         * struct page POINTER rather than to a mapped
                         * address -- looks questionable; confirm against
                         * the 82544 erratum */
                        if(adapter->pcix_82544 &&
                           !((unsigned long)(frag->page+offset+size-1) & 4) &&
                           size > 4)
                                size -= 4;

                        buffer_info->length = size;
                        buffer_info->dma =
                                pci_map_page(adapter->pdev,
                                        frag->page,
                                        offset,
                                        size,
                                        PCI_DMA_TODEVICE);
                        buffer_info->time_stamp = jiffies;

                        len -= size;
                        offset += size;
                        count++;
                        if(++i == tx_ring->count) i = 0;
                }
        }
        /* the skb lives on the packet's final buffer; next_to_watch on
         * the first buffer tells Tx cleanup where the packet ends */
        i = (i == 0) ? tx_ring->count - 1 : i - 1;
        tx_ring->buffer_info[i].skb = skb;
        tx_ring->buffer_info[first].next_to_watch = i;

        return count;
}
1648
/**
 * e1000_tx_queue - build Tx descriptors and hand them to hardware
 * @adapter: board private structure
 * @count: number of mapped buffers (from e1000_tx_map) to queue
 * @tx_flags: E1000_TX_FLAGS_* bits selecting TSO/checksum/VLAN
 *
 * Converts the previously-mapped buffer_info entries into descriptors
 * starting at next_to_use, ORs the cached end-of-packet command bits
 * into the last descriptor, then advances TDT to start transmission.
 */
static inline void
e1000_tx_queue(struct e1000_adapter *adapter, int count, int tx_flags)
{
        struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
        struct e1000_tx_desc *tx_desc = NULL;
        struct e1000_buffer *buffer_info;
        uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
        unsigned int i;

        if(tx_flags & E1000_TX_FLAGS_TSO) {
                txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
                             E1000_TXD_CMD_TSE;
                txd_upper |= (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8;
        }

        if(tx_flags & E1000_TX_FLAGS_CSUM) {
                txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
                txd_upper |= E1000_TXD_POPTS_TXSM << 8;
        }

        if(tx_flags & E1000_TX_FLAGS_VLAN) {
                txd_lower |= E1000_TXD_CMD_VLE;
                /* the VLAN tag rides in the upper 16 bits of tx_flags */
                txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
        }

        i = tx_ring->next_to_use;

        while(count--) {
                buffer_info = &tx_ring->buffer_info[i];
                tx_desc = E1000_TX_DESC(*tx_ring, i);
                tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
                tx_desc->lower.data =
                        cpu_to_le32(txd_lower | buffer_info->length);
                tx_desc->upper.data = cpu_to_le32(txd_upper);
                if(++i == tx_ring->count) i = 0;
        }

        /* only the packet's LAST descriptor carries the EOP/report
         * command bits cached in adapter->txd_cmd */
        tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);

        /* Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
         * applicable for weak-ordered memory model archs,
         * such as IA-64). */
        wmb();

        tx_ring->next_to_use = i;
        E1000_WRITE_REG(&adapter->hw, TDT, i);
}
1697
1698 /**
1699  * 82547 workaround to avoid controller hang in half-duplex environment.
1700  * The workaround is to avoid queuing a large packet that would span
1701  * the internal Tx FIFO ring boundary by notifying the stack to resend
1702  * the packet at a later time.  This gives the Tx FIFO an opportunity to
1703  * flush all packets.  When that occurs, we reset the Tx FIFO pointers
1704  * to the beginning of the Tx FIFO.
1705  **/
1706
1707 #define E1000_FIFO_HDR                  0x10
1708 #define E1000_82547_PAD_LEN             0x3E0
1709
1710 static inline int
1711 e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb)
1712 {
1713         uint32_t fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
1714         uint32_t skb_fifo_len = skb->len + E1000_FIFO_HDR;
1715
1716         E1000_ROUNDUP(skb_fifo_len, E1000_FIFO_HDR);
1717
1718         if(adapter->link_duplex != HALF_DUPLEX)
1719                 goto no_fifo_stall_required;
1720
1721         if(atomic_read(&adapter->tx_fifo_stall))
1722                 return 1;
1723
1724         if(skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
1725                 atomic_set(&adapter->tx_fifo_stall, 1);
1726                 return 1;
1727         }
1728
1729 no_fifo_stall_required:
1730         adapter->tx_fifo_head += skb_fifo_len;
1731         if(adapter->tx_fifo_head >= adapter->tx_fifo_size)
1732                 adapter->tx_fifo_head -= adapter->tx_fifo_size;
1733         return 0;
1734 }
1735
1736 #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 ) 
1737 static int
1738 e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1739 {
1740         struct e1000_adapter *adapter = netdev->priv;
1741         unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
1742         unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
1743         unsigned int tx_flags = 0;
1744         unsigned long flags;
1745         unsigned int len = skb->len;
1746         int count = 0;
1747         unsigned int mss = 0;
1748         unsigned int nr_frags = 0;
1749         unsigned int f;
1750         nr_frags = skb_shinfo(skb)->nr_frags;
1751         len -= skb->data_len;
1752         if(skb->len <= 0) {
1753                 dev_kfree_skb_any(skb);
1754                 return 0;
1755         }
1756
1757 #ifdef NETIF_F_TSO
1758         mss = skb_shinfo(skb)->tso_size;
1759         /* The controller does a simple calculation to 
1760          * make sure there is enough room in the FIFO before
1761          * initiating the DMA for each buffer.  The calc is:
1762          * 4 = ceil(buffer len/mss).  To make sure we don't
1763          * overrun the FIFO, adjust the max buffer len if mss
1764          * drops. */
1765         if(mss) {
1766                 max_per_txd = min(mss << 2, max_per_txd);
1767                 max_txd_pwr = fls(max_per_txd) - 1;
1768         }
1769         if((mss) || (skb->ip_summed == CHECKSUM_HW))
1770                 count++;
1771         count++;        /*for sentinel desc*/
1772 #else
1773         if(skb->ip_summed == CHECKSUM_HW)
1774                 count++;
1775 #endif
1776
1777         count += TXD_USE_COUNT(len, max_txd_pwr);
1778         if(adapter->pcix_82544)
1779                 count++;
1780
1781         nr_frags = skb_shinfo(skb)->nr_frags;
1782         for(f = 0; f < nr_frags; f++)
1783                 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
1784                                        max_txd_pwr);
1785         if(adapter->pcix_82544)
1786                 count += nr_frags;
1787         
1788         spin_lock_irqsave(&adapter->tx_lock, flags);
1789         /* need: count +  2 desc gap to keep tail from touching 
1790          * head, otherwise try next time */
1791         if(E1000_DESC_UNUSED(&adapter->tx_ring) < count + 2 ) {
1792                 netif_stop_queue(netdev);
1793                 spin_unlock_irqrestore(&adapter->tx_lock, flags);
1794                 return 1;
1795         }
1796         spin_unlock_irqrestore(&adapter->tx_lock, flags);
1797
1798         if(adapter->hw.mac_type == e1000_82547) {
1799                 if(e1000_82547_fifo_workaround(adapter, skb)) {
1800                         netif_stop_queue(netdev);
1801                         mod_timer(&adapter->tx_fifo_stall_timer, jiffies);
1802                         return 1;
1803                 }
1804         }
1805
1806         if(adapter->vlgrp && vlan_tx_tag_present(skb)) {
1807                 tx_flags |= E1000_TX_FLAGS_VLAN;
1808                 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
1809         }
1810
1811         first = adapter->tx_ring.next_to_use;
1812         
1813         if(e1000_tso(adapter, skb))
1814                 tx_flags |= E1000_TX_FLAGS_TSO;
1815         else if(e1000_tx_csum(adapter, skb))
1816                 tx_flags |= E1000_TX_FLAGS_CSUM;
1817
1818         e1000_tx_queue(adapter, 
1819                 e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss), 
1820                 tx_flags);
1821
1822         netdev->trans_start = jiffies;
1823
1824         return 0;
1825 }
1826
1827 /**
1828  * e1000_tx_timeout - Respond to a Tx Hang
1829  * @netdev: network interface device structure
1830  **/
1831
1832 static void
1833 e1000_tx_timeout(struct net_device *netdev)
1834 {
1835         struct e1000_adapter *adapter = netdev->priv;
1836
1837         /* Do the reset outside of interrupt context */
1838         schedule_work(&adapter->tx_timeout_task);
1839 }
1840
1841 static void
1842 e1000_tx_timeout_task(struct net_device *netdev)
1843 {
1844         struct e1000_adapter *adapter = netdev->priv;
1845
1846         netif_device_detach(netdev);
1847         e1000_down(adapter);
1848         e1000_up(adapter);
1849         netif_device_attach(netdev);
1850 }
1851
1852 /**
1853  * e1000_get_stats - Get System Network Statistics
1854  * @netdev: network interface device structure
1855  *
1856  * Returns the address of the device statistics structure.
1857  * The statistics are actually updated from the timer callback.
1858  **/
1859
1860 static struct net_device_stats *
1861 e1000_get_stats(struct net_device *netdev)
1862 {
1863         struct e1000_adapter *adapter = netdev->priv;
1864
1865         e1000_update_stats(adapter);
1866         return &adapter->net_stats;
1867 }
1868
1869 /**
1870  * e1000_change_mtu - Change the Maximum Transfer Unit
1871  * @netdev: network interface device structure
1872  * @new_mtu: new value for maximum frame size
1873  *
1874  * Returns 0 on success, negative on failure
1875  **/
1876
1877 static int
1878 e1000_change_mtu(struct net_device *netdev, int new_mtu)
1879 {
1880         struct e1000_adapter *adapter = netdev->priv;
1881         int old_mtu = adapter->rx_buffer_len;
1882         int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
1883
1884         if((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
1885            (max_frame > MAX_JUMBO_FRAME_SIZE)) {
1886                 DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
1887                 return -EINVAL;
1888         }
1889
1890         if(max_frame <= MAXIMUM_ETHERNET_FRAME_SIZE) {
1891                 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
1892
1893         } else if(adapter->hw.mac_type < e1000_82543) {
1894                 DPRINTK(PROBE, ERR, "Jumbo Frames not supported on 82542\n");
1895                 return -EINVAL;
1896
1897         } else if(max_frame <= E1000_RXBUFFER_4096) {
1898                 adapter->rx_buffer_len = E1000_RXBUFFER_4096;
1899
1900         } else if(max_frame <= E1000_RXBUFFER_8192) {
1901                 adapter->rx_buffer_len = E1000_RXBUFFER_8192;
1902
1903         } else {
1904                 adapter->rx_buffer_len = E1000_RXBUFFER_16384;
1905         }
1906
1907         if(old_mtu != adapter->rx_buffer_len && netif_running(netdev)) {
1908
1909                 e1000_down(adapter);
1910                 e1000_up(adapter);
1911         }
1912
1913         netdev->mtu = new_mtu;
1914         adapter->hw.max_frame_size = max_frame;
1915
1916         return 0;
1917 }
1918
1919 /**
1920  * e1000_update_stats - Update the board statistics counters
1921  * @adapter: board private structure
1922  **/
1923
void
e1000_update_stats(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned long flags;
	uint16_t phy_tmp;

/* low byte of PHY_1000T_STATUS carries the idle-error count */
#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF

	spin_lock_irqsave(&adapter->stats_lock, flags);

	/* these counters are modified from e1000_adjust_tbi_stats,
	 * called from the interrupt context, so they must only
	 * be written while holding adapter->stats_lock
	 */

	/* register values are added as deltas: the hardware counters
	 * are presumably clear-on-read -- TODO confirm against the
	 * 8254x software developer's manual */
	adapter->stats.crcerrs += E1000_READ_REG(hw, CRCERRS);
	adapter->stats.gprc += E1000_READ_REG(hw, GPRC);
	adapter->stats.gorcl += E1000_READ_REG(hw, GORCL);
	adapter->stats.gorch += E1000_READ_REG(hw, GORCH);
	adapter->stats.bprc += E1000_READ_REG(hw, BPRC);
	adapter->stats.mprc += E1000_READ_REG(hw, MPRC);
	adapter->stats.roc += E1000_READ_REG(hw, ROC);
	adapter->stats.prc64 += E1000_READ_REG(hw, PRC64);
	adapter->stats.prc127 += E1000_READ_REG(hw, PRC127);
	adapter->stats.prc255 += E1000_READ_REG(hw, PRC255);
	adapter->stats.prc511 += E1000_READ_REG(hw, PRC511);
	adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023);
	adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522);

	/* the rest of the counters are only modified here */

	adapter->stats.symerrs += E1000_READ_REG(hw, SYMERRS);
	adapter->stats.mpc += E1000_READ_REG(hw, MPC);
	adapter->stats.scc += E1000_READ_REG(hw, SCC);
	adapter->stats.ecol += E1000_READ_REG(hw, ECOL);
	adapter->stats.mcc += E1000_READ_REG(hw, MCC);
	adapter->stats.latecol += E1000_READ_REG(hw, LATECOL);
	adapter->stats.dc += E1000_READ_REG(hw, DC);
	adapter->stats.sec += E1000_READ_REG(hw, SEC);
	adapter->stats.rlec += E1000_READ_REG(hw, RLEC);
	adapter->stats.xonrxc += E1000_READ_REG(hw, XONRXC);
	adapter->stats.xontxc += E1000_READ_REG(hw, XONTXC);
	adapter->stats.xoffrxc += E1000_READ_REG(hw, XOFFRXC);
	adapter->stats.xofftxc += E1000_READ_REG(hw, XOFFTXC);
	adapter->stats.fcruc += E1000_READ_REG(hw, FCRUC);
	adapter->stats.gptc += E1000_READ_REG(hw, GPTC);
	adapter->stats.gotcl += E1000_READ_REG(hw, GOTCL);
	adapter->stats.gotch += E1000_READ_REG(hw, GOTCH);
	adapter->stats.rnbc += E1000_READ_REG(hw, RNBC);
	adapter->stats.ruc += E1000_READ_REG(hw, RUC);
	adapter->stats.rfc += E1000_READ_REG(hw, RFC);
	adapter->stats.rjc += E1000_READ_REG(hw, RJC);
	adapter->stats.torl += E1000_READ_REG(hw, TORL);
	adapter->stats.torh += E1000_READ_REG(hw, TORH);
	adapter->stats.totl += E1000_READ_REG(hw, TOTL);
	adapter->stats.toth += E1000_READ_REG(hw, TOTH);
	adapter->stats.tpr += E1000_READ_REG(hw, TPR);
	adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64);
	adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127);
	adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255);
	adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511);
	adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023);
	adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522);
	adapter->stats.mptc += E1000_READ_REG(hw, MPTC);
	adapter->stats.bptc += E1000_READ_REG(hw, BPTC);

	/* used for adaptive IFS */

	hw->tx_packet_delta = E1000_READ_REG(hw, TPT);
	adapter->stats.tpt += hw->tx_packet_delta;
	hw->collision_delta = E1000_READ_REG(hw, COLC);
	adapter->stats.colc += hw->collision_delta;

	/* these registers only exist on 82543 and newer MACs */
	if(hw->mac_type >= e1000_82543) {
		adapter->stats.algnerrc += E1000_READ_REG(hw, ALGNERRC);
		adapter->stats.rxerrc += E1000_READ_REG(hw, RXERRC);
		adapter->stats.tncrs += E1000_READ_REG(hw, TNCRS);
		adapter->stats.cexterr += E1000_READ_REG(hw, CEXTERR);
		adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC);
		adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC);
	}

	/* Fill out the OS statistics structure */

	adapter->net_stats.rx_packets = adapter->stats.gprc;
	adapter->net_stats.tx_packets = adapter->stats.gptc;
	adapter->net_stats.rx_bytes = adapter->stats.gorcl;
	adapter->net_stats.tx_bytes = adapter->stats.gotcl;
	adapter->net_stats.multicast = adapter->stats.mprc;
	adapter->net_stats.collisions = adapter->stats.colc;

	/* Rx Errors */

	adapter->net_stats.rx_errors = adapter->stats.rxerrc +
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.rlec + adapter->stats.rnbc +
		adapter->stats.mpc + adapter->stats.cexterr;
	adapter->net_stats.rx_dropped = adapter->stats.rnbc;
	adapter->net_stats.rx_length_errors = adapter->stats.rlec;
	adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
	adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
	adapter->net_stats.rx_fifo_errors = adapter->stats.mpc;
	adapter->net_stats.rx_missed_errors = adapter->stats.mpc;

	/* Tx Errors */

	adapter->net_stats.tx_errors = adapter->stats.ecol +
				       adapter->stats.latecol;
	adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
	adapter->net_stats.tx_window_errors = adapter->stats.latecol;
	adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;

	/* Tx Dropped needs to be maintained elsewhere */

	/* Phy Stats */

	if(hw->media_type == e1000_media_type_copper) {
		/* idle-error counter only sampled at gigabit speed */
		if((adapter->link_speed == SPEED_1000) &&
		   (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
			adapter->phy_stats.idle_errors += phy_tmp;
		}

		/* M88 PHYs on older MACs expose a receive-error counter */
		if((hw->mac_type <= e1000_82546) &&
		   (hw->phy_type == e1000_phy_m88) &&
		   !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
			adapter->phy_stats.receive_errors += phy_tmp;
	}

	spin_unlock_irqrestore(&adapter->stats_lock, flags);
}
2056
2057 /**
2058  * e1000_irq_disable - Mask off interrupt generation on the NIC
2059  * @adapter: board private structure
2060  **/
2061
2062 static inline void
2063 e1000_irq_disable(struct e1000_adapter *adapter)
2064 {
2065         atomic_inc(&adapter->irq_sem);
2066         E1000_WRITE_REG(&adapter->hw, IMC, ~0);
2067         E1000_WRITE_FLUSH(&adapter->hw);
2068         synchronize_irq(adapter->pdev->irq);
2069 }
2070
2071 /**
2072  * e1000_irq_enable - Enable default interrupt generation settings
2073  * @adapter: board private structure
2074  **/
2075
2076 static inline void
2077 e1000_irq_enable(struct e1000_adapter *adapter)
2078 {
2079         if(atomic_dec_and_test(&adapter->irq_sem)) {
2080                 E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK);
2081                 E1000_WRITE_FLUSH(&adapter->hw);
2082         }
2083 }
2084
2085 /**
2086  * e1000_intr - Interrupt Handler
2087  * @irq: interrupt number
2088  * @data: pointer to a network interface device structure
2089  * @pt_regs: CPU registers structure
2090  **/
2091
static irqreturn_t
e1000_intr(int irq, void *data, struct pt_regs *regs)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev->priv;
	struct e1000_hw *hw = &adapter->hw;
	/* NOTE(review): reading ICR presumably acknowledges the pending
	 * causes (read-to-clear) -- confirm against the 8254x manual */
	uint32_t icr = E1000_READ_REG(&adapter->hw, ICR);
#ifndef CONFIG_E1000_NAPI
	unsigned int i;
#endif

	if(!icr)
		return IRQ_NONE;  /* Not our interrupt */

	/* link change or Rx sequence error: have the watchdog re-check
	 * the link right away */
	if(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->get_link_status = 1;
		mod_timer(&adapter->watchdog_timer, jiffies);
	}

#ifdef CONFIG_E1000_NAPI
	if(netif_rx_schedule_prep(netdev)) {

		/* Disable interrupts and register for poll. The flush 
		  of the posted write is intentionally left out.
		*/

		atomic_inc(&adapter->irq_sem);
		E1000_WRITE_REG(hw, IMC, ~0);
		__netif_rx_schedule(netdev);
	}
#else
	/* bitwise & (not &&) is deliberate: both cleaners must run on
	 * every pass; && would skip Tx cleanup whenever Rx found work */
	for(i = 0; i < E1000_MAX_INTR; i++)
		if(!e1000_clean_rx_irq(adapter) &
		   !e1000_clean_tx_irq(adapter))
			break;
#endif

	return IRQ_HANDLED;
}
2131
2132 #ifdef CONFIG_E1000_NAPI
2133 /**
2134  * e1000_clean - NAPI Rx polling callback
2135  * @adapter: board private structure
2136  **/
2137
2138 static int
2139 e1000_clean(struct net_device *netdev, int *budget)
2140 {
2141         struct e1000_adapter *adapter = netdev->priv;
2142         int work_to_do = min(*budget, netdev->quota);
2143         int work_done = 0;
2144         
2145         e1000_clean_tx_irq(adapter);
2146         e1000_clean_rx_irq(adapter, &work_done, work_to_do);
2147
2148         *budget -= work_done;
2149         netdev->quota -= work_done;
2150         
2151         if(work_done < work_to_do || !netif_running(netdev)) {
2152                 netif_rx_complete(netdev);
2153                 e1000_irq_enable(adapter);
2154                 return 0;
2155         }
2156
2157         return (work_done >= work_to_do);
2158 }
2159 #endif
2160
2161 /**
2162  * e1000_clean_tx_irq - Reclaim resources after transmit completes
2163  * @adapter: board private structure
2164  **/
2165
static boolean_t
e1000_clean_tx_irq(struct e1000_adapter *adapter)
{
	struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_tx_desc *tx_desc, *eop_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i, eop;
	boolean_t cleaned = FALSE;


	i = tx_ring->next_to_clean;
	/* next_to_watch marks the last descriptor of the packet that
	 * starts at i; hardware sets DD there when the packet is done */
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC(*tx_ring, eop);

	while(eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {

		/* release every descriptor of the completed packet,
		 * i.e. from i up to and including eop */
		for(cleaned = FALSE; !cleaned; ) {
			tx_desc = E1000_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];

			/* unmap the DMA buffer backing this descriptor */
			if(buffer_info->dma) {

				pci_unmap_page(pdev,
					       buffer_info->dma,
					       buffer_info->length,
					       PCI_DMA_TODEVICE);

				buffer_info->dma = 0;
			}

			/* the skb pointer is stored only on the packet's
			 * last buffer (set by e1000_tx_map) */
			if(buffer_info->skb) {

				dev_kfree_skb_any(buffer_info->skb);

				buffer_info->skb = NULL;
			}

			/* scrub the descriptor for reuse */
			tx_desc->buffer_addr = 0;
			tx_desc->lower.data = 0;
			tx_desc->upper.data = 0;

			cleaned = (i == eop);
			if(++i == tx_ring->count) i = 0;
		}
		
		/* advance to the next packet's end-of-packet descriptor */
		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

	spin_lock(&adapter->tx_lock);

	/* restart a queue stopped for lack of descriptors, but only if
	 * we actually freed some and the link is still up */
	if(cleaned && netif_queue_stopped(netdev) && netif_carrier_ok(netdev))
		netif_wake_queue(netdev);

	spin_unlock(&adapter->tx_lock);

	return cleaned;
}
2228
2229 /**
2230  * e1000_clean_rx_irq - Send received data up the network stack,
2231  * @adapter: board private structure
2232  **/
2233
2234 static boolean_t
2235 #ifdef CONFIG_E1000_NAPI
2236 e1000_clean_rx_irq(struct e1000_adapter *adapter, int *work_done,
2237                    int work_to_do)
2238 #else
2239 e1000_clean_rx_irq(struct e1000_adapter *adapter)
2240 #endif
2241 {
2242         struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
2243         struct net_device *netdev = adapter->netdev;
2244         struct pci_dev *pdev = adapter->pdev;
2245         struct e1000_rx_desc *rx_desc;
2246         struct e1000_buffer *buffer_info;
2247         struct sk_buff *skb;
2248         unsigned long flags;
2249         uint32_t length;
2250         uint8_t last_byte;
2251         unsigned int i;
2252         boolean_t cleaned = FALSE;
2253
2254         i = rx_ring->next_to_clean;
2255         rx_desc = E1000_RX_DESC(*rx_ring, i);
2256
2257         while(rx_desc->status & E1000_RXD_STAT_DD) {
2258                 buffer_info = &rx_ring->buffer_info[i];
2259
2260 #ifdef CONFIG_E1000_NAPI
2261                 if(*work_done >= work_to_do)
2262                         break;
2263
2264                 (*work_done)++;
2265 #endif
2266
2267                 cleaned = TRUE;
2268
2269                 pci_unmap_single(pdev,
2270                                  buffer_info->dma,
2271                                  buffer_info->length,
2272                                  PCI_DMA_FROMDEVICE);
2273
2274                 skb = buffer_info->skb;
2275                 length = le16_to_cpu(rx_desc->length);
2276
2277                 if(!(rx_desc->status & E1000_RXD_STAT_EOP)) {
2278
2279                         /* All receives must fit into a single buffer */
2280
2281                         E1000_DBG("%s: Receive packet consumed multiple buffers\n",
2282                                 netdev->name);
2283
2284                         dev_kfree_skb_irq(skb);
2285                         rx_desc->status = 0;
2286                         buffer_info->skb = NULL;
2287
2288                         if(++i == rx_ring->count) i = 0;
2289
2290                         rx_desc = E1000_RX_DESC(*rx_ring, i);
2291                         continue;
2292                 }
2293
2294                 if(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
2295
2296                         last_byte = *(skb->data + length - 1);
2297
2298                         if(TBI_ACCEPT(&adapter->hw, rx_desc->status,
2299                                       rx_desc->errors, length, last_byte)) {
2300
2301                                 spin_lock_irqsave(&adapter->stats_lock, flags);
2302
2303                                 e1000_tbi_adjust_stats(&adapter->hw,
2304                                                        &adapter->stats,
2305                                                        length, skb->data);
2306
2307                                 spin_unlock_irqrestore(&adapter->stats_lock,
2308                                                        flags);
2309                                 length--;
2310                         } else {
2311
2312                                 dev_kfree_skb_irq(skb);
2313                                 rx_desc->status = 0;
2314                                 buffer_info->skb = NULL;
2315
2316                                 if(++i == rx_ring->count) i = 0;
2317
2318                                 rx_desc = E1000_RX_DESC(*rx_ring, i);
2319                                 continue;
2320                         }
2321                 }
2322
2323                 /* Good Receive */
2324                 skb_put(skb, length - ETHERNET_FCS_SIZE);
2325
2326                 /* Receive Checksum Offload */
2327                 e1000_rx_checksum(adapter, rx_desc, skb);
2328
2329                 skb->protocol = eth_type_trans(skb, netdev);
2330 #ifdef CONFIG_E1000_NAPI
2331                 if(adapter->vlgrp && (rx_desc->status & E1000_RXD_STAT_VP)) {
2332                         vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
2333                                 le16_to_cpu(rx_desc->special &
2334                                         E1000_RXD_SPC_VLAN_MASK));
2335                 } else {
2336                         netif_receive_skb(skb);
2337                 }
2338 #else /* CONFIG_E1000_NAPI */
2339                 if(adapter->vlgrp && (rx_desc->status & E1000_RXD_STAT_VP)) {
2340                         vlan_hwaccel_rx(skb, adapter->vlgrp,
2341                                 le16_to_cpu(rx_desc->special &
2342                                         E1000_RXD_SPC_VLAN_MASK));
2343                 } else {
2344                         netif_rx(skb);
2345                 }
2346 #endif /* CONFIG_E1000_NAPI */
2347                 netdev->last_rx = jiffies;
2348
2349                 rx_desc->status = 0;
2350                 buffer_info->skb = NULL;
2351
2352                 if(++i == rx_ring->count) i = 0;
2353
2354                 rx_desc = E1000_RX_DESC(*rx_ring, i);
2355         }
2356
2357         rx_ring->next_to_clean = i;
2358
2359         e1000_alloc_rx_buffers(adapter);
2360
2361         return cleaned;
2362 }
2363
2364 /**
2365  * e1000_alloc_rx_buffers - Replace used receive buffers
2366  * @adapter: address of board private structure
2367  **/
2368
static void
e1000_alloc_rx_buffers(struct e1000_adapter *adapter)
{
	struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	/* refill every slot whose skb was handed up the stack */
	while(!buffer_info->skb) {
		rx_desc = E1000_RX_DESC(*rx_ring, i);

		skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN);

		if(!skb) {
			/* Better luck next round */
			break;
		}

		/* Make buffer alignment 2 beyond a 16 byte boundary
		 * this will result in a 16 byte aligned IP header after
		 * the 14 byte MAC header is removed
		 */
		skb_reserve(skb, NET_IP_ALIGN);

		skb->dev = netdev;

		buffer_info->skb = skb;
		buffer_info->length = adapter->rx_buffer_len;
		buffer_info->dma =
			pci_map_single(pdev,
				       skb->data,
				       adapter->rx_buffer_len,
				       PCI_DMA_FROMDEVICE);

		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		/* publish the tail only every E1000_RX_BUFFER_WRITE slots
		 * (the condition is true when i is a multiple of it) to
		 * batch the MMIO writes */
		if((i & ~(E1000_RX_BUFFER_WRITE - 1)) == i) {
			/* Force memory writes to complete before letting h/w
			 * know there are new descriptors to fetch.  (Only
			 * applicable for weak-ordered memory model archs,
			 * such as IA-64). */
			wmb();

			E1000_WRITE_REG(&adapter->hw, RDT, i);
		}

		if(++i == rx_ring->count) i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	rx_ring->next_to_use = i;
}
2427
2428 /**
2429  * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
2430  * @adapter:
2431  **/
2432
static void
e1000_smartspeed(struct e1000_adapter *adapter)
{
	uint16_t phy_status;
	uint16_t phy_ctrl;

	/* only relevant for IGP PHYs autonegotiating 1000BASE-T full */
	if((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg ||
	   !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
		return;

	if(adapter->smartspeed == 0) {
		/* If Master/Slave config fault is asserted twice,
		 * we assume back-to-back */
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
		if(!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
		if(!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
		/* drop manual master/slave config and restart autoneg */
		if(phy_ctrl & CR_1000T_MS_ENABLE) {
			phy_ctrl &= ~CR_1000T_MS_ENABLE;
			e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL,
					    phy_ctrl);
			adapter->smartspeed++;
			if(!e1000_phy_setup_autoneg(&adapter->hw) &&
			   !e1000_read_phy_reg(&adapter->hw, PHY_CTRL,
				   	       &phy_ctrl)) {
				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
					     MII_CR_RESTART_AUTO_NEG);
				e1000_write_phy_reg(&adapter->hw, PHY_CTRL,
						    phy_ctrl);
			}
		}
		return;
	} else if(adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
		/* If still no link, perhaps using 2/3 pair cable */
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
		phy_ctrl |= CR_1000T_MS_ENABLE;
		e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_ctrl);
		if(!e1000_phy_setup_autoneg(&adapter->hw) &&
		   !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_ctrl)) {
			phy_ctrl |= (MII_CR_AUTO_NEG_EN |
				     MII_CR_RESTART_AUTO_NEG);
			e1000_write_phy_reg(&adapter->hw, PHY_CTRL, phy_ctrl);
		}
	}
	/* Restart process after E1000_SMARTSPEED_MAX iterations */
	if(adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
		adapter->smartspeed = 0;
}
2482
2483 /**
2484  * e1000_ioctl - handle ioctl requests for the network device
2485  * @netdev: network interface device structure
2486  * @ifr: pointer to the ioctl request data
2487  * @cmd: ioctl command number
2488  **/
2489
2490 static int
2491 e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2492 {
2493         switch (cmd) {
2494         case SIOCGMIIPHY:
2495         case SIOCGMIIREG:
2496         case SIOCSMIIREG:
2497                 return e1000_mii_ioctl(netdev, ifr, cmd);
2498         default:
2499                 return -EOPNOTSUPP;
2500         }
2501 }
2502
2503 /**
2504  * e1000_mii_ioctl - read/write MII PHY registers on behalf of userspace
2505  * @netdev: network interface device structure
2506  * @ifr: pointer to the ioctl request containing mii_ioctl_data
2507  * @cmd: one of SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG
2508  **/
2509
2510 static int
2511 e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2512 {
2513         struct e1000_adapter *adapter = netdev->priv;
2514         struct mii_ioctl_data *data = if_mii(ifr);
2515         int retval;
2516         uint16_t mii_reg;
2517         uint16_t spddplx;
2518
2519         if(adapter->hw.media_type != e1000_media_type_copper)
2520                 return -EOPNOTSUPP;
2521
2522         switch (cmd) {
2523         case SIOCGMIIPHY:
2524                 data->phy_id = adapter->hw.phy_addr;
2525                 break;
2526         case SIOCGMIIREG:
2527                 if (!capable(CAP_NET_ADMIN))
2528                         return -EPERM;
2529                 if (e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
2530                                    &data->val_out))
2531                         return -EIO;
2532                 break;
2533         case SIOCSMIIREG:
2534                 if (!capable(CAP_NET_ADMIN))
2535                         return -EPERM;
2536                 if (data->reg_num & ~(0x1F))
2537                         return -EFAULT;
2538                 mii_reg = data->val_in;
2539                 if (e1000_write_phy_reg(&adapter->hw, data->reg_num,
2540                                         data->val_in))
2541                         return -EIO;
2542                 if (adapter->hw.phy_type == e1000_phy_m88) {
2543                         switch (data->reg_num) {
2544                         case PHY_CTRL:
2545                                 if(data->val_in & MII_CR_AUTO_NEG_EN) {
2546                                         adapter->hw.autoneg = 1;
2547                                         adapter->hw.autoneg_advertised = 0x2F;
2548                                 } else {
2549                                         if (data->val_in & 0x40)
2550                                                 spddplx = SPEED_1000;
2551                                         else if (data->val_in & 0x2000)
2552                                                 spddplx = SPEED_100;
2553                                         else
2554                                                 spddplx = SPEED_10;
2555                                         spddplx += (data->val_in & 0x100)
2556                                                    ? FULL_DUPLEX :
2557                                                    HALF_DUPLEX;
2558                                         retval = e1000_set_spd_dplx(adapter,
2559                                                                     spddplx);
2560                                         if(retval)
2561                                                 return retval;
2562                                 }
2563                                 if(netif_running(adapter->netdev)) {
2564                                         e1000_down(adapter);
2565                                         e1000_up(adapter);
2566                                 } else
2567                                         e1000_reset(adapter);
2568                                 break;
2569                         case M88E1000_PHY_SPEC_CTRL:
2570                         case M88E1000_EXT_PHY_SPEC_CTRL:
2571                                 if (e1000_phy_reset(&adapter->hw))
2572                                         return -EIO;
2573                                 break;
2574                         }
2575                 }
2576                 break;
2577         default:
2578                 return -EOPNOTSUPP;
2579         }
2580         return E1000_SUCCESS;
2581 }
2582
2583 /**
2584  * e1000_rx_checksum - Receive Checksum Offload for 82543
2585  * @adapter: board private structure
2586  * @rx_desc: receive descriptor
2587  * @sk_buff: socket buffer with received data
2588  **/
2589
2590 static inline void
2591 e1000_rx_checksum(struct e1000_adapter *adapter,
2592                   struct e1000_rx_desc *rx_desc,
2593                   struct sk_buff *skb)
2594 {
2595         /* 82543 or newer only */
2596         if((adapter->hw.mac_type < e1000_82543) ||
2597         /* Ignore Checksum bit is set */
2598         (rx_desc->status & E1000_RXD_STAT_IXSM) ||
2599         /* TCP Checksum has not been calculated */
2600         (!(rx_desc->status & E1000_RXD_STAT_TCPCS))) {
2601                 skb->ip_summed = CHECKSUM_NONE;
2602                 return;
2603         }
2604
2605         /* At this point we know the hardware did the TCP checksum */
2606         /* now look at the TCP checksum error bit */
2607         if(rx_desc->errors & E1000_RXD_ERR_TCPE) {
2608                 /* let the stack verify checksum errors */
2609                 skb->ip_summed = CHECKSUM_NONE;
2610                 adapter->hw_csum_err++;
2611         } else {
2612         /* TCP checksum is good */
2613                 skb->ip_summed = CHECKSUM_UNNECESSARY;
2614                 adapter->hw_csum_good++;
2615         }
2616 }
2617
2618 void
2619 e1000_pci_set_mwi(struct e1000_hw *hw)
2620 {
2621         struct e1000_adapter *adapter = hw->back;
2622
2623         pci_set_mwi(adapter->pdev);
2624 }
2625
2626 void
2627 e1000_pci_clear_mwi(struct e1000_hw *hw)
2628 {
2629         struct e1000_adapter *adapter = hw->back;
2630
2631         pci_clear_mwi(adapter->pdev);
2632 }
2633
2634 void
2635 e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
2636 {
2637         struct e1000_adapter *adapter = hw->back;
2638
2639         pci_read_config_word(adapter->pdev, reg, value);
2640 }
2641
2642 void
2643 e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
2644 {
2645         struct e1000_adapter *adapter = hw->back;
2646
2647         pci_write_config_word(adapter->pdev, reg, *value);
2648 }
2649
/* Read a 32-bit value from I/O port @port.  @hw is unused; it is kept
 * so this matches the shared-code hardware access callback signature. */
uint32_t
e1000_io_read(struct e1000_hw *hw, unsigned long port)
{
	return inl(port);
}
2655
/* Write the 32-bit @value to I/O port @port.  @hw is unused; it is kept
 * so this matches the shared-code hardware access callback signature. */
void
e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value)
{
	outl(value, port);
}
2661
2662 static void
2663 e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
2664 {
2665         struct e1000_adapter *adapter = netdev->priv;
2666         uint32_t ctrl, rctl;
2667
2668         e1000_irq_disable(adapter);
2669         adapter->vlgrp = grp;
2670
2671         if(grp) {
2672                 /* enable VLAN tag insert/strip */
2673
2674                 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
2675                 ctrl |= E1000_CTRL_VME;
2676                 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
2677
2678                 /* enable VLAN receive filtering */
2679
2680                 rctl = E1000_READ_REG(&adapter->hw, RCTL);
2681                 rctl |= E1000_RCTL_VFE;
2682                 rctl &= ~E1000_RCTL_CFIEN;
2683                 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
2684         } else {
2685                 /* disable VLAN tag insert/strip */
2686
2687                 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
2688                 ctrl &= ~E1000_CTRL_VME;
2689                 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
2690
2691                 /* disable VLAN filtering */
2692
2693                 rctl = E1000_READ_REG(&adapter->hw, RCTL);
2694                 rctl &= ~E1000_RCTL_VFE;
2695                 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
2696         }
2697
2698         e1000_irq_enable(adapter);
2699 }
2700
2701 static void
2702 e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
2703 {
2704         struct e1000_adapter *adapter = netdev->priv;
2705         uint32_t vfta, index;
2706
2707         /* add VID to filter table */
2708
2709         index = (vid >> 5) & 0x7F;
2710         vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
2711         vfta |= (1 << (vid & 0x1F));
2712         e1000_write_vfta(&adapter->hw, index, vfta);
2713 }
2714
2715 static void
2716 e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
2717 {
2718         struct e1000_adapter *adapter = netdev->priv;
2719         uint32_t vfta, index;
2720
2721         e1000_irq_disable(adapter);
2722
2723         if(adapter->vlgrp)
2724                 adapter->vlgrp->vlan_devices[vid] = NULL;
2725
2726         e1000_irq_enable(adapter);
2727
2728         /* remove VID from filter table*/
2729
2730         index = (vid >> 5) & 0x7F;
2731         vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
2732         vfta &= ~(1 << (vid & 0x1F));
2733         e1000_write_vfta(&adapter->hw, index, vfta);
2734 }
2735
2736 static void
2737 e1000_restore_vlan(struct e1000_adapter *adapter)
2738 {
2739         e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);
2740
2741         if(adapter->vlgrp) {
2742                 uint16_t vid;
2743                 for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
2744                         if(!adapter->vlgrp->vlan_devices[vid])
2745                                 continue;
2746                         e1000_vlan_rx_add_vid(adapter->netdev, vid);
2747                 }
2748         }
2749 }
2750
2751 int
2752 e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx)
2753 {
2754         adapter->hw.autoneg = 0;
2755
2756         switch(spddplx) {
2757         case SPEED_10 + DUPLEX_HALF:
2758                 adapter->hw.forced_speed_duplex = e1000_10_half;
2759                 break;
2760         case SPEED_10 + DUPLEX_FULL:
2761                 adapter->hw.forced_speed_duplex = e1000_10_full;
2762                 break;
2763         case SPEED_100 + DUPLEX_HALF:
2764                 adapter->hw.forced_speed_duplex = e1000_100_half;
2765                 break;
2766         case SPEED_100 + DUPLEX_FULL:
2767                 adapter->hw.forced_speed_duplex = e1000_100_full;
2768                 break;
2769         case SPEED_1000 + DUPLEX_FULL:
2770                 adapter->hw.autoneg = 1;
2771                 adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
2772                 break;
2773         case SPEED_1000 + DUPLEX_HALF: /* not supported */
2774         default:
2775                 return -EINVAL;
2776         }
2777         return 0;
2778 }
2779
2780 static int
2781 e1000_notify_reboot(struct notifier_block *nb, unsigned long event, void *p)
2782 {
2783         struct pci_dev *pdev = NULL;
2784
2785         switch(event) {
2786         case SYS_DOWN:
2787         case SYS_HALT:
2788         case SYS_POWER_OFF:
2789                 while((pdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
2790                         if(pci_dev_driver(pdev) == &e1000_driver)
2791                                 e1000_suspend(pdev, 3);
2792                 }
2793         }
2794         return NOTIFY_DONE;
2795 }
2796
static int
e1000_suspend(struct pci_dev *pdev, uint32_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev->priv;
	uint32_t ctrl, ctrl_ext, rctl, manc, status;
	uint32_t wufc = adapter->wol;	/* wake-up filter control bits */

	netif_device_detach(netdev);

	if(netif_running(netdev))
		e1000_down(adapter);

	/* If the link is currently up, a "link change" wake-up would fire
	 * immediately; drop that filter bit. */
	status = E1000_READ_REG(&adapter->hw, STATUS);
	if(status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if(wufc) {
		/* Wake-up requested: keep the receiver configured so wake
		 * packets can still be matched while suspended. */
		e1000_setup_rctl(adapter);
		e1000_set_multi(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if(adapter->wol & E1000_WUFC_MC) {
			rctl = E1000_READ_REG(&adapter->hw, RCTL);
			rctl |= E1000_RCTL_MPE;
			E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
		}

		if(adapter->hw.mac_type >= e1000_82540) {
			ctrl = E1000_READ_REG(&adapter->hw, CTRL);
			/* advertise wake from D3Cold */
			#define E1000_CTRL_ADVD3WUC 0x00100000
			/* phy power management enable */
			#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
			ctrl |= E1000_CTRL_ADVD3WUC |
				E1000_CTRL_EN_PHY_PWR_MGMT;
			E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
		}

		if(adapter->hw.media_type == e1000_media_type_fiber ||
		   adapter->hw.media_type == e1000_media_type_internal_serdes) {
			/* keep the laser running in D3 */
			ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
			E1000_WRITE_REG(&adapter->hw, CTRL_EXT, ctrl_ext);
		}

		/* arm the wake-up filters and allow PME assertion */
		E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&adapter->hw, WUFC, wufc);
		pci_enable_wake(pdev, 3, 1);
		pci_enable_wake(pdev, 4, 1); /* 4 == D3 cold */
	} else {
		/* no wake sources: disable wake-up entirely */
		E1000_WRITE_REG(&adapter->hw, WUC, 0);
		E1000_WRITE_REG(&adapter->hw, WUFC, 0);
		pci_enable_wake(pdev, 3, 0);
		pci_enable_wake(pdev, 4, 0); /* 4 == D3 cold */
	}

	pci_save_state(pdev, adapter->pci_state);

	/* If manageability firmware talks over SMBus, keep ARP offload
	 * enabled and force the wake enables so that traffic can still
	 * reach the part while suspended (undone in e1000_resume). */
	if(adapter->hw.mac_type >= e1000_82540 &&
	   adapter->hw.media_type == e1000_media_type_copper) {
		manc = E1000_READ_REG(&adapter->hw, MANC);
		if(manc & E1000_MANC_SMBUS_EN) {
			manc |= E1000_MANC_ARP_EN;
			E1000_WRITE_REG(&adapter->hw, MANC, manc);
			pci_enable_wake(pdev, 3, 1);
			pci_enable_wake(pdev, 4, 1); /* 4 == D3 cold */
		}
	}

	/* any nonzero requested state maps to D3 */
	state = (state > 0) ? 3 : 0;
	pci_set_power_state(pdev, state);

	return 0;
}
2873
2874 #ifdef CONFIG_PM
static int
e1000_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev->priv;
	uint32_t manc;

	/* Return the device to D0 and restore the PCI configuration
	 * saved by e1000_suspend(). */
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev, adapter->pci_state);

	/* wake-up is no longer needed once we are running again */
	pci_enable_wake(pdev, 3, 0);
	pci_enable_wake(pdev, 4, 0); /* 4 == D3 cold */

	e1000_reset(adapter);
	/* clear any latched wake-up status bits */
	E1000_WRITE_REG(&adapter->hw, WUS, ~0);

	if(netif_running(netdev))
		e1000_up(adapter);

	netif_device_attach(netdev);

	/* undo the ARP offload that e1000_suspend() enables for
	 * manageability while suspended */
	if(adapter->hw.mac_type >= e1000_82540 &&
	   adapter->hw.media_type == e1000_media_type_copper) {
		manc = E1000_READ_REG(&adapter->hw, MANC);
		manc &= ~(E1000_MANC_ARP_EN);
		E1000_WRITE_REG(&adapter->hw, MANC, manc);
	}

	return 0;
}
2905 #endif
2906
2907 #ifdef CONFIG_NET_POLL_CONTROLLER
2908 /*
2909  * Polling 'interrupt' - used by things like netconsole to send skbs
2910  * without having to re-enable interrupts. It's not called while
2911  * the interrupt routine is executing.
2912  */
2913
2914 static void e1000_netpoll (struct net_device *dev)
2915 {
2916         struct e1000_adapter *adapter = dev->priv;
2917         disable_irq(adapter->pdev->irq);
2918         e1000_intr (adapter->pdev->irq, dev, NULL);
2919         enable_irq(adapter->pdev->irq);
2920 }
2921 #endif
2922
2923 /* e1000_main.c */