drivers/net/ixgb/ixgb_main.c
1 /*******************************************************************************
2
3   
4   Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
5   
6   This program is free software; you can redistribute it and/or modify it 
7   under the terms of the GNU General Public License as published by the Free 
8   Software Foundation; either version 2 of the License, or (at your option) 
9   any later version.
10   
11   This program is distributed in the hope that it will be useful, but WITHOUT 
12   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 
13   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for 
14   more details.
15   
16   You should have received a copy of the GNU General Public License along with
17   this program; if not, write to the Free Software Foundation, Inc., 59 
18   Temple Place - Suite 330, Boston, MA  02111-1307, USA.
19   
20   The full GNU General Public License is included in this distribution in the
21   file called LICENSE.
22   
23   Contact Information:
24   Linux NICS <linux.nics@intel.com>
25   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27 *******************************************************************************/
28
29 #include "ixgb.h"
30
31 char ixgb_driver_name[] = "ixgb";
32 char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";
33 #define DRV_VERSION "1.0.66-k2"
34 char ixgb_driver_version[] = DRV_VERSION;
35 char ixgb_copyright[] = "Copyright (c) 2001-2004 Intel Corporation.";
36
37 /* ixgb_pci_tbl - PCI Device ID Table
38  *
39  * Wildcard entries (PCI_ANY_ID) should come last
40  * Last entry must be all 0s
41  *
42  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
43  *   Class, Class Mask, private data (not used) }
44  */
45 static struct pci_device_id ixgb_pci_tbl[] = {
46         {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX,
47          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
48         {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR,
49          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
50
51         /* required last entry */
52         {0,}
53 };
54
55 MODULE_DEVICE_TABLE(pci, ixgb_pci_tbl);
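/* MODULE_DEVICE_TABLE() exports ixgb_pci_tbl in the module image so that
 * userspace hotplug tools can match a newly discovered 82597EX against
 * this driver and autoload it. */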
56
57 /* Local Function Prototypes */
58
59 static inline void ixgb_irq_disable(struct ixgb_adapter *adapter);
60 static inline void ixgb_irq_enable(struct ixgb_adapter *adapter);
61 int ixgb_up(struct ixgb_adapter *adapter);
62 void ixgb_down(struct ixgb_adapter *adapter, boolean_t kill_watchdog);
63 void ixgb_reset(struct ixgb_adapter *adapter);
64
65 static int ixgb_init_module(void);
66 static void ixgb_exit_module(void);
67 static int ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
68 static void __devexit ixgb_remove(struct pci_dev *pdev);
69 static int ixgb_sw_init(struct ixgb_adapter *adapter);
70 static int ixgb_open(struct net_device *netdev);
71 static int ixgb_close(struct net_device *netdev);
72 static int ixgb_setup_tx_resources(struct ixgb_adapter *adapter);
73 static int ixgb_setup_rx_resources(struct ixgb_adapter *adapter);
74 static void ixgb_configure_tx(struct ixgb_adapter *adapter);
75 static void ixgb_configure_rx(struct ixgb_adapter *adapter);
76 static void ixgb_setup_rctl(struct ixgb_adapter *adapter);
77 static void ixgb_clean_tx_ring(struct ixgb_adapter *adapter);
78 static void ixgb_clean_rx_ring(struct ixgb_adapter *adapter);
79 static void ixgb_free_tx_resources(struct ixgb_adapter *adapter);
80 static void ixgb_free_rx_resources(struct ixgb_adapter *adapter);
81 static void ixgb_set_multi(struct net_device *netdev);
82 static void ixgb_watchdog(unsigned long data);
83 static int ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
84 static struct net_device_stats *ixgb_get_stats(struct net_device *netdev);
85 static int ixgb_change_mtu(struct net_device *netdev, int new_mtu);
86 static int ixgb_set_mac(struct net_device *netdev, void *p);
87 static void ixgb_update_stats(struct ixgb_adapter *adapter);
88 static irqreturn_t ixgb_intr(int irq, void *data, struct pt_regs *regs);
89 static boolean_t ixgb_clean_tx_irq(struct ixgb_adapter *adapter);
90 static inline void ixgb_rx_checksum(struct ixgb_adapter *adapter,
91                                     struct ixgb_rx_desc *rx_desc,
92                                     struct sk_buff *skb);
93 #ifdef CONFIG_IXGB_NAPI
94 static int ixgb_clean(struct net_device *netdev, int *budget);
95 static boolean_t ixgb_clean_rx_irq(struct ixgb_adapter *adapter,
96                                    int *work_done, int work_to_do);
97 #else
98 static boolean_t ixgb_clean_rx_irq(struct ixgb_adapter *adapter);
99 #endif
100 static void ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter);
101 static void ixgb_tx_timeout(struct net_device *dev);
102 static void ixgb_tx_timeout_task(struct net_device *dev);
103 static void ixgb_vlan_rx_register(struct net_device *netdev,
104                                   struct vlan_group *grp);
105 static void ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
106 static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
107 static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
108
109 static int ixgb_notify_reboot(struct notifier_block *, unsigned long event,
110                               void *ptr);
111 static int ixgb_suspend(struct pci_dev *pdev, uint32_t state);
112
113 #ifdef CONFIG_NET_POLL_CONTROLLER
114 /* for netdump / net console */
115 static void ixgb_netpoll(struct net_device *dev);
116 #endif
117
118 struct notifier_block ixgb_notifier_reboot = {
119         .notifier_call = ixgb_notify_reboot,
120         .next = NULL,
121         .priority = 0
122 };
123
124 /* Exported from other modules */
125
126 extern void ixgb_check_options(struct ixgb_adapter *adapter);
127 extern struct ethtool_ops ixgb_ethtool_ops;
128
129 static struct pci_driver ixgb_driver = {
130         .name = ixgb_driver_name,
131         .id_table = ixgb_pci_tbl,
132         .probe = ixgb_probe,
133         .remove = __devexit_p(ixgb_remove),
134         /* Power Management Hooks */
135         .suspend = NULL,
136         .resume = NULL
137 };
138
139 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
140 MODULE_DESCRIPTION("Intel(R) PRO/10GbE Network Driver");
141 MODULE_LICENSE("GPL");
142 MODULE_VERSION(DRV_VERSION);
143
144 /* some defines for controlling descriptor fetches in h/w */
145 #define RXDCTL_PTHRESH_DEFAULT 128      /* chip considers prefetch below this */
146 #define RXDCTL_HTHRESH_DEFAULT 16       /* chip will only prefetch if tail is 
147                                            pushed this many descriptors from head */
148 #define RXDCTL_WTHRESH_DEFAULT 16       /* chip writes back at this many or RXT0 */
149
150 /**
151  * ixgb_init_module - Driver Registration Routine
152  *
153  * ixgb_init_module is the first routine called when the driver is
154  * loaded. All it does is register with the PCI subsystem.
155  **/
156
157 static int __init ixgb_init_module(void)
158 {
159         int ret;
160         printk(KERN_INFO "%s - version %s\n",
161                ixgb_driver_string, ixgb_driver_version);
162
163         printk(KERN_INFO "%s\n", ixgb_copyright);
164
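        /* Note: pci_module_init() is the 2.6-era registration wrapper; it
         * returns a negative errno on failure (historically it could also
         * return a non-negative device count), hence the >= 0 success test
         * below. */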
165         ret = pci_module_init(&ixgb_driver);
166         if (ret >= 0) {
167                 register_reboot_notifier(&ixgb_notifier_reboot);
168         }
169         return ret;
170 }
171
172 module_init(ixgb_init_module);
173
174 /**
175  * ixgb_exit_module - Driver Exit Cleanup Routine
176  *
177  * ixgb_exit_module is called just before the driver is removed
178  * from memory.
179  **/
180
181 static void __exit ixgb_exit_module(void)
182 {
183         unregister_reboot_notifier(&ixgb_notifier_reboot);
184         pci_unregister_driver(&ixgb_driver);
185 }
186
187 module_exit(ixgb_exit_module);
188
189 /**
190  * ixgb_irq_disable - Mask off interrupt generation on the NIC
191  * @adapter: board private structure
192  **/
193
194 static inline void ixgb_irq_disable(struct ixgb_adapter *adapter)
195 {
196         atomic_inc(&adapter->irq_sem);
197         IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
198         IXGB_WRITE_FLUSH(&adapter->hw);
199         synchronize_irq(adapter->pdev->irq);
200 }
201
202 /**
203  * ixgb_irq_enable - Enable default interrupt generation settings
204  * @adapter: board private structure
205  **/
206
207 static inline void ixgb_irq_enable(struct ixgb_adapter *adapter)
208 {
209         if (atomic_dec_and_test(&adapter->irq_sem)) {
210                 IXGB_WRITE_REG(&adapter->hw, IMS,
211                                IXGB_INT_RXT0 | IXGB_INT_RXDMT0 | IXGB_INT_TXDW |
212                                IXGB_INT_RXO | IXGB_INT_LSC);
213                 IXGB_WRITE_FLUSH(&adapter->hw);
214         }
215 }
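/* The irq_sem counter makes disable/enable pairs nest: ixgb_irq_disable()
 * increments it and masks everything via IMC, while ixgb_irq_enable() only
 * rewrites IMS once the count drops back to zero.  IXGB_WRITE_FLUSH posts
 * the MMIO write, and synchronize_irq() waits for any handler still
 * running on another CPU. */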
216
217 int ixgb_up(struct ixgb_adapter *adapter)
218 {
219         struct net_device *netdev = adapter->netdev;
220         int err;
221         int max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
222         struct ixgb_hw *hw = &adapter->hw;
223
224         /* hardware has been reset, we need to reload some things */
225
226         ixgb_set_multi(netdev);
227
228         ixgb_restore_vlan(adapter);
229
230         ixgb_configure_tx(adapter);
231         ixgb_setup_rctl(adapter);
232         ixgb_configure_rx(adapter);
233         ixgb_alloc_rx_buffers(adapter);
234
235         if ((err = request_irq(adapter->pdev->irq, &ixgb_intr,
236                                SA_SHIRQ | SA_SAMPLE_RANDOM,
237                                netdev->name, netdev)))
238                 return err;
239
240         /* disable interrupts and get the hardware into a known state */
241         IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff);
242
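        /* Reprogram the hardware Max Frame Size if the MTU changed or the
         * MFS register disagrees with what we expect; frames beyond the
         * standard Ethernet maximum additionally need the jumbo frame
         * enable bit (CTRL0_JFE) set. */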
243         if ((hw->max_frame_size != max_frame) ||
244             (hw->max_frame_size !=
245              (IXGB_READ_REG(hw, MFS) >> IXGB_MFS_SHIFT))) {
246
247                 hw->max_frame_size = max_frame;
248
249                 IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);
250
251                 if (hw->max_frame_size >
252                     IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
253                         uint32_t ctrl0 = IXGB_READ_REG(hw, CTRL0);
254
255                         if (!(ctrl0 & IXGB_CTRL0_JFE)) {
256                                 ctrl0 |= IXGB_CTRL0_JFE;
257                                 IXGB_WRITE_REG(hw, CTRL0, ctrl0);
258                         }
259                 }
260         }
261
262         mod_timer(&adapter->watchdog_timer, jiffies);
263         ixgb_irq_enable(adapter);
264
265         return 0;
266 }
267
268 void ixgb_down(struct ixgb_adapter *adapter, boolean_t kill_watchdog)
269 {
270         struct net_device *netdev = adapter->netdev;
271
272         ixgb_irq_disable(adapter);
273         free_irq(adapter->pdev->irq, netdev);
274         if (kill_watchdog)
275                 del_timer_sync(&adapter->watchdog_timer);
276         adapter->link_speed = 0;
277         adapter->link_duplex = 0;
278         netif_carrier_off(netdev);
279         netif_stop_queue(netdev);
280
281         ixgb_reset(adapter);
282         ixgb_clean_tx_ring(adapter);
283         ixgb_clean_rx_ring(adapter);
284 }
285
286 void ixgb_reset(struct ixgb_adapter *adapter)
287 {
288
289         ixgb_adapter_stop(&adapter->hw);
290         if (!ixgb_init_hw(&adapter->hw))
291                 IXGB_DBG("ixgb_init_hw failed.\n");
292 }
293
294 /**
295  * ixgb_probe - Device Initialization Routine
296  * @pdev: PCI device information struct
297  * @ent: entry in ixgb_pci_tbl
298  *
299  * Returns 0 on success, negative on failure
300  *
301  * ixgb_probe initializes an adapter identified by a pci_dev structure.
302  * The OS initialization, configuring of the adapter private structure,
303  * and a hardware reset occur.
304  **/
305
306 static int __devinit
307 ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
308 {
309         struct net_device *netdev = NULL;
310         struct ixgb_adapter *adapter;
311         static int cards_found = 0;
312         unsigned long mmio_start;
313         int mmio_len;
314         int pci_using_dac;
315         int i;
316         int err;
317
318         if ((err = pci_enable_device(pdev)))
319                 return err;
320
321         if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
322                 pci_using_dac = 1;
323         } else {
324                 if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
325                         IXGB_ERR("No usable DMA configuration, aborting\n");
326                         return err;
327                 }
328                 pci_using_dac = 0;
329         }
330
331         if ((err = pci_request_regions(pdev, ixgb_driver_name)))
332                 return err;
333
334         pci_set_master(pdev);
335
336         netdev = alloc_etherdev(sizeof(struct ixgb_adapter));
337         if (!netdev) {
338                 err = -ENOMEM;
339                 goto err_alloc_etherdev;
340         }
341
342         SET_MODULE_OWNER(netdev);
343         SET_NETDEV_DEV(netdev, &pdev->dev);
344
345         pci_set_drvdata(pdev, netdev);
346         adapter = netdev->priv;
347         adapter->netdev = netdev;
348         adapter->pdev = pdev;
349         adapter->hw.back = adapter;
350
351         mmio_start = pci_resource_start(pdev, BAR_0);
352         mmio_len = pci_resource_len(pdev, BAR_0);
353
354         adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
355         if (!adapter->hw.hw_addr) {
356                 err = -EIO;
357                 goto err_ioremap;
358         }
359
360         for (i = BAR_1; i <= BAR_5; i++) {
361                 if (pci_resource_len(pdev, i) == 0)
362                         continue;
363                 if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
364                         adapter->hw.io_base = pci_resource_start(pdev, i);
365                         break;
366                 }
367         }
368
369         netdev->open = &ixgb_open;
370         netdev->stop = &ixgb_close;
371         netdev->hard_start_xmit = &ixgb_xmit_frame;
372         netdev->get_stats = &ixgb_get_stats;
373         netdev->set_multicast_list = &ixgb_set_multi;
374         netdev->set_mac_address = &ixgb_set_mac;
375         netdev->change_mtu = &ixgb_change_mtu;
376         netdev->tx_timeout = &ixgb_tx_timeout;
377         netdev->watchdog_timeo = HZ;
378         SET_ETHTOOL_OPS(netdev, &ixgb_ethtool_ops);
379 #ifdef CONFIG_IXGB_NAPI
380         netdev->poll = &ixgb_clean;
381         netdev->weight = 64;
382 #endif
383         netdev->vlan_rx_register = ixgb_vlan_rx_register;
384         netdev->vlan_rx_add_vid = ixgb_vlan_rx_add_vid;
385         netdev->vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid;
386 #ifdef CONFIG_NET_POLL_CONTROLLER
387         netdev->poll_controller = ixgb_netpoll;
388 #endif
389
390         netdev->mem_start = mmio_start;
391         netdev->mem_end = mmio_start + mmio_len;
392         netdev->base_addr = adapter->hw.io_base;
393
394         adapter->bd_number = cards_found;
395         adapter->link_speed = 0;
396         adapter->link_duplex = 0;
397
398         /* setup the private structure */
399
400         if ((err = ixgb_sw_init(adapter)))
401                 goto err_sw_init;
402
403         netdev->features = NETIF_F_SG |
404             NETIF_F_HW_CSUM |
405             NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
406 #ifdef NETIF_F_TSO
407         netdev->features |= NETIF_F_TSO;
408 #endif
409
410         if (pci_using_dac)
411                 netdev->features |= NETIF_F_HIGHDMA;
412
413         /* make sure the EEPROM is good */
414
415         if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
416                 printk(KERN_ERR "The EEPROM Checksum Is Not Valid\n");
417                 err = -EIO;
418                 goto err_eeprom;
419         }
420
421         ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
422
423         if (!is_valid_ether_addr(netdev->dev_addr)) {
424                 err = -EIO;
425                 goto err_eeprom;
426         }
427
428         adapter->part_num = ixgb_get_ee_pba_number(&adapter->hw);
429
430         init_timer(&adapter->watchdog_timer);
431         adapter->watchdog_timer.function = &ixgb_watchdog;
432         adapter->watchdog_timer.data = (unsigned long)adapter;
433
434         INIT_WORK(&adapter->tx_timeout_task,
435                   (void (*)(void *))ixgb_tx_timeout_task, netdev);
436
437         if ((err = register_netdev(netdev)))
438                 goto err_register;
439
440         /* we're going to reset, so assume we have no link for now */
441
442         netif_carrier_off(netdev);
443         netif_stop_queue(netdev);
444
445         printk(KERN_INFO "%s: Intel(R) PRO/10GbE Network Connection\n",
446                netdev->name);
447         ixgb_check_options(adapter);
448         /* reset the hardware with the new settings */
449
450         ixgb_reset(adapter);
451
452         cards_found++;
453         return 0;
454
455       err_register:
456       err_sw_init:
457       err_eeprom:
458         iounmap(adapter->hw.hw_addr);
459       err_ioremap:
460         free_netdev(netdev);
461       err_alloc_etherdev:
462         pci_release_regions(pdev);
463         return err;
464 }
465
466 /**
467  * ixgb_remove - Device Removal Routine
468  * @pdev: PCI device information struct
469  *
470  * ixgb_remove is called by the PCI subsystem to alert the driver
471  * that it should release a PCI device.  This could be caused by a
472  * Hot-Plug event, or because the driver is going to be removed from
473  * memory.
474  **/
475
476 static void __devexit ixgb_remove(struct pci_dev *pdev)
477 {
478         struct net_device *netdev = pci_get_drvdata(pdev);
479         struct ixgb_adapter *adapter = netdev->priv;
480
481         unregister_netdev(netdev);
482
483         iounmap(adapter->hw.hw_addr);
484         pci_release_regions(pdev);
485
486         free_netdev(netdev);
487 }
488
489 /**
490  * ixgb_sw_init - Initialize general software structures (struct ixgb_adapter)
491  * @adapter: board private structure to initialize
492  *
493  * ixgb_sw_init initializes the Adapter private data structure.
494  * Fields are initialized based on PCI device information and
495  * OS network device settings (MTU size).
496  **/
497
498 static int __devinit ixgb_sw_init(struct ixgb_adapter *adapter)
499 {
500         struct ixgb_hw *hw = &adapter->hw;
501         struct net_device *netdev = adapter->netdev;
502         struct pci_dev *pdev = adapter->pdev;
503
504         /* PCI config space info */
505
506         hw->vendor_id = pdev->vendor;
507         hw->device_id = pdev->device;
508         hw->subsystem_vendor_id = pdev->subsystem_vendor;
509         hw->subsystem_id = pdev->subsystem_device;
510
511         adapter->rx_buffer_len = IXGB_RXBUFFER_2048;
512
513         hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
514
515         if ((hw->device_id == IXGB_DEVICE_ID_82597EX)
516             || (hw->device_id == IXGB_DEVICE_ID_82597EX_SR))
517                 hw->mac_type = ixgb_82597;
518         else {
519                 /* should never have loaded on this device */
520                 printk(KERN_ERR "ixgb: unsupported device id\n");
521         }
522
523         /* enable flow control to be programmed */
524         hw->fc.send_xon = 1;
525
526         atomic_set(&adapter->irq_sem, 1);
527         spin_lock_init(&adapter->tx_lock);
528
529         return 0;
530 }
531
532 /**
533  * ixgb_open - Called when a network interface is made active
534  * @netdev: network interface device structure
535  *
536  * Returns 0 on success, negative value on failure
537  *
538  * The open entry point is called when a network interface is made
539  * active by the system (IFF_UP).  At this point all resources needed
540  * for transmit and receive operations are allocated, the interrupt
541  * handler is registered with the OS, the watchdog timer is started,
542  * and the stack is notified that the interface is ready.
543  **/
544
545 static int ixgb_open(struct net_device *netdev)
546 {
547         struct ixgb_adapter *adapter = netdev->priv;
548         int err;
549
550         /* allocate transmit descriptors */
551
552         if ((err = ixgb_setup_tx_resources(adapter)))
553                 goto err_setup_tx;
554
555         /* allocate receive descriptors */
556
557         if ((err = ixgb_setup_rx_resources(adapter)))
558                 goto err_setup_rx;
559
560         if ((err = ixgb_up(adapter)))
561                 goto err_up;
562
563         return 0;
564
565       err_up:
566         ixgb_free_rx_resources(adapter);
567       err_setup_rx:
568         ixgb_free_tx_resources(adapter);
569       err_setup_tx:
570         ixgb_reset(adapter);
571
572         return err;
573 }
574
575 /**
576  * ixgb_close - Disables a network interface
577  * @netdev: network interface device structure
578  *
579  * Returns 0, this is not allowed to fail
580  *
581  * The close entry point is called when an interface is de-activated
582  * by the OS.  The hardware is still under the drivers control, but
583  * needs to be disabled.  A global MAC reset is issued to stop the
584  * hardware, and all transmit and receive resources are freed.
585  **/
586
587 static int ixgb_close(struct net_device *netdev)
588 {
589         struct ixgb_adapter *adapter = netdev->priv;
590
591         ixgb_down(adapter, TRUE);
592
593         ixgb_free_tx_resources(adapter);
594         ixgb_free_rx_resources(adapter);
595
596         return 0;
597 }
598
599 /**
600  * ixgb_setup_tx_resources - allocate Tx resources (Descriptors)
601  * @adapter: board private structure
602  *
603  * Return 0 on success, negative on failure
604  **/
605
606 static int ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
607 {
608         struct ixgb_desc_ring *txdr = &adapter->tx_ring;
609         struct pci_dev *pdev = adapter->pdev;
610         int size;
611
612         size = sizeof(struct ixgb_buffer) * txdr->count;
613         txdr->buffer_info = kmalloc(size, GFP_KERNEL);
614         if (!txdr->buffer_info) {
615                 return -ENOMEM;
616         }
617         memset(txdr->buffer_info, 0, size);
618
619         /* round up to nearest 4K */
620
621         txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
622         IXGB_ROUNDUP(txdr->size, 4096);
623
624         txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
625         if (!txdr->desc) {
626                 kfree(txdr->buffer_info);
627                 return -ENOMEM;
628         }
629         memset(txdr->desc, 0, txdr->size);
630
631         txdr->next_to_use = 0;
632         txdr->next_to_clean = 0;
633
634         return 0;
635 }
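/* Sizing note: with the 16-byte ixgb_tx_desc layout implied by the fields
 * used elsewhere in this file (8-byte buff_addr, 4-byte cmd_type_len,
 * status, popts, vlan), a 256-descriptor ring is exactly 4096 bytes, so
 * IXGB_ROUNDUP to 4K often changes nothing; other counts get padded up to
 * the next page multiple. */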
636
637 /**
638  * ixgb_configure_tx - Configure 82597 Transmit Unit after Reset.
639  * @adapter: board private structure
640  *
641  * Configure the Tx unit of the MAC after a reset.
642  **/
643
644 static void ixgb_configure_tx(struct ixgb_adapter *adapter)
645 {
646         uint64_t tdba = adapter->tx_ring.dma;
647         uint32_t tdlen = adapter->tx_ring.count * sizeof(struct ixgb_tx_desc);
648         uint32_t tctl;
649         struct ixgb_hw *hw = &adapter->hw;
650
651         /* Setup the Base and Length of the Tx Descriptor Ring 
652          * tx_ring.dma can be either a 32 or 64 bit value 
653          */
654
655         IXGB_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
656         IXGB_WRITE_REG(hw, TDBAH, (tdba >> 32));
657
658         IXGB_WRITE_REG(hw, TDLEN, tdlen);
659
660         /* Setup the HW Tx Head and Tail descriptor pointers */
661
662         IXGB_WRITE_REG(hw, TDH, 0);
663         IXGB_WRITE_REG(hw, TDT, 0);
664
665         /* don't set up txdctl, it induces performance problems if
666          * configured incorrectly
667          txdctl  = TXDCTL_PTHRESH_DEFAULT; // prefetch txds below this threshold
668          txdctl |= (TXDCTL_HTHRESH_DEFAULT // only prefetch if there are this many ready
669          << IXGB_TXDCTL_HTHRESH_SHIFT);
670          IXGB_WRITE_REG (hw, TXDCTL, txdctl);
671          */
672
673         /* Set the Tx Interrupt Delay register */
674
675         IXGB_WRITE_REG(hw, TIDV, adapter->tx_int_delay);
676
677         /* Program the Transmit Control Register */
678
679         tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
680         IXGB_WRITE_REG(hw, TCTL, tctl);
681
682         /* Setup Transmit Descriptor Settings for this adapter */
683         adapter->tx_cmd_type =
684             IXGB_TX_DESC_TYPE
685             | (adapter->tx_int_delay_enable ? IXGB_TX_DESC_CMD_IDE : 0);
686 }
687
688 /**
689  * ixgb_setup_rx_resources - allocate Rx resources (Descriptors)
690  * @adapter: board private structure
691  *
692  * Returns 0 on success, negative on failure
693  **/
694
695 static int ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
696 {
697         struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
698         struct pci_dev *pdev = adapter->pdev;
699         int size;
700
701         size = sizeof(struct ixgb_buffer) * rxdr->count;
702         rxdr->buffer_info = kmalloc(size, GFP_KERNEL);
703         if (!rxdr->buffer_info) {
704                 return -ENOMEM;
705         }
706         memset(rxdr->buffer_info, 0, size);
707
708         /* Round up to nearest 4K */
709
710         rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
711         IXGB_ROUNDUP(rxdr->size, 4096);
712
713         rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
714
715         if (!rxdr->desc) {
716                 kfree(rxdr->buffer_info);
717                 return -ENOMEM;
718         }
719         memset(rxdr->desc, 0, rxdr->size);
720
721         rxdr->next_to_clean = 0;
722         rxdr->next_to_use = 0;
723
724         return 0;
725 }
726
727 /**
728  * ixgb_setup_rctl - configure the receive control register
729  * @adapter: board private structure
730  **/
731
732 static void ixgb_setup_rctl(struct ixgb_adapter *adapter)
733 {
734         uint32_t rctl;
735
736         rctl = IXGB_READ_REG(&adapter->hw, RCTL);
737
738         rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);
739
740         rctl |=
741             IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 |
742             IXGB_RCTL_RXEN | IXGB_RCTL_CFF |
743             (adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);
744
745         rctl |= IXGB_RCTL_SECRC;
746
747         switch (adapter->rx_buffer_len) {
748         case IXGB_RXBUFFER_2048:
749         default:
750                 rctl |= IXGB_RCTL_BSIZE_2048;
751                 break;
752         case IXGB_RXBUFFER_4096:
753                 rctl |= IXGB_RCTL_BSIZE_4096;
754                 break;
755         case IXGB_RXBUFFER_8192:
756                 rctl |= IXGB_RCTL_BSIZE_8192;
757                 break;
758         case IXGB_RXBUFFER_16384:
759                 rctl |= IXGB_RCTL_BSIZE_16384;
760                 break;
761         }
762
763         IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
764 }
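/* RCTL notes: IXGB_RCTL_SECRC presumably strips the Ethernet CRC before
 * DMA (as the analogous bit does on other Intel MACs), and the BSIZE bits
 * must agree with the rx_buffer_len chosen in ixgb_sw_init() and
 * ixgb_change_mtu(), which is why both switch over the same
 * IXGB_RXBUFFER_* values. */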
765
766 /**
767  * ixgb_configure_rx - Configure 82597 Receive Unit after Reset.
768  * @adapter: board private structure
769  *
770  * Configure the Rx unit of the MAC after a reset.
771  **/
772
773 static void ixgb_configure_rx(struct ixgb_adapter *adapter)
774 {
775         uint64_t rdba = adapter->rx_ring.dma;
776         uint32_t rdlen = adapter->rx_ring.count * sizeof(struct ixgb_rx_desc);
777         struct ixgb_hw *hw = &adapter->hw;
778         uint32_t rctl;
779         uint32_t rxcsum;
780         uint32_t rxdctl;
781
782         /* make sure receives are disabled while setting up the descriptors */
783
784         rctl = IXGB_READ_REG(hw, RCTL);
785         IXGB_WRITE_REG(hw, RCTL, rctl & ~IXGB_RCTL_RXEN);
786
787         /* set the Receive Delay Timer Register */
788
789         IXGB_WRITE_REG(hw, RDTR, adapter->rx_int_delay);
790
791         /* Setup the Base and Length of the Rx Descriptor Ring */
792
793         IXGB_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
794         IXGB_WRITE_REG(hw, RDBAH, (rdba >> 32));
795
796         IXGB_WRITE_REG(hw, RDLEN, rdlen);
797
798         /* Setup the HW Rx Head and Tail Descriptor Pointers */
799         IXGB_WRITE_REG(hw, RDH, 0);
800         IXGB_WRITE_REG(hw, RDT, 0);
801
802         /* burst 16 or burst when RXT0 */
803         rxdctl = RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT
804             | RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT
805             | RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT;
806         IXGB_WRITE_REG(hw, RXDCTL, rxdctl);
807
808         if (adapter->raidc) {
809                 uint32_t raidc;
810                 uint8_t poll_threshold;
811
812                 /* Poll every rx_int_delay period while a receive
813                  * backlog exists.  Receive Backlog Detection (RBD)
814                  * triggers at <threshold> Rx descriptors; the max
815                  * threshold is 0x3F (poll when 504 descriptors
816                  * remain), the min is 0. */
817
818                 /* polling times:   1 == 0.8192 us
819                  *                  2 == 1.6384 us
820                  *                  3 == 3.2768 us, etc.
821                  *                ...
822                  *                511 == 418 us
823                  */
824 #define IXGB_RAIDC_POLL_DEFAULT 122     /* set to poll every ~100 us under load,
825                                            i.e. roughly 10000 interrupts / sec */
826
827                 /* divide this by 2^3 (8) to get a register size count */
828                 poll_threshold = ((adapter->rx_ring.count - 1) >> 3);
829                 /* poll at half of that size */
830                 poll_threshold >>= 1;
831                 /* make sure it's not bigger than our max */
832                 poll_threshold &= 0x3F;
833
834                 raidc = IXGB_RAIDC_EN | /* turn on raidc style moderation */
835                     IXGB_RAIDC_RXT_GATE |       /* don't interrupt with rxt0 while
836                                                    in RBD mode (polling) */
837                     (IXGB_RAIDC_POLL_DEFAULT << IXGB_RAIDC_POLL_SHIFT) |
838                     /* this sets the regular "min interrupt delay" */
839                     (adapter->rx_int_delay << IXGB_RAIDC_DELAY_SHIFT) |
840                     poll_threshold;
841
842                 IXGB_WRITE_REG(hw, RAIDC, raidc);
843         }
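        /* Worked example (illustrative ring size): a 512-entry Rx ring
         * gives (511 >> 3) = 63, halved to 31, i.e. poll when roughly
         * 31 * 8 = 248 descriptors are backlogged; the 0x3F mask caps the
         * field at 504 descriptors.  The default poll period checks out
         * too: 122 * 0.8192 us ~= 100 us. */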
844
845         /* Enable Receive Checksum Offload for TCP and UDP */
846         if (adapter->rx_csum == TRUE) {
847                 rxcsum = IXGB_READ_REG(hw, RXCSUM);
848                 rxcsum |= IXGB_RXCSUM_TUOFL;
849                 IXGB_WRITE_REG(hw, RXCSUM, rxcsum);
850         }
851
852         /* Enable Receives */
853
854         IXGB_WRITE_REG(hw, RCTL, rctl);
855 }
856
857 /**
858  * ixgb_free_tx_resources - Free Tx Resources
859  * @adapter: board private structure
860  *
861  * Free all transmit software resources
862  **/
863
864 static void ixgb_free_tx_resources(struct ixgb_adapter *adapter)
865 {
866         struct pci_dev *pdev = adapter->pdev;
867
868         ixgb_clean_tx_ring(adapter);
869
870         kfree(adapter->tx_ring.buffer_info);
871         adapter->tx_ring.buffer_info = NULL;
872
873         pci_free_consistent(pdev, adapter->tx_ring.size,
874                             adapter->tx_ring.desc, adapter->tx_ring.dma);
875
876         adapter->tx_ring.desc = NULL;
877 }
878
879 /**
880  * ixgb_clean_tx_ring - Free Tx Buffers
881  * @adapter: board private structure
882  **/
883
884 static void ixgb_clean_tx_ring(struct ixgb_adapter *adapter)
885 {
886         struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
887         struct ixgb_buffer *buffer_info;
888         struct pci_dev *pdev = adapter->pdev;
889         unsigned long size;
890         unsigned int i;
891
892         /* Free all the Tx ring sk_buffs */
893
894         for (i = 0; i < tx_ring->count; i++) {
895                 buffer_info = &tx_ring->buffer_info[i];
896                 if (buffer_info->skb) {
897
898                         pci_unmap_page(pdev,
899                                        buffer_info->dma,
900                                        buffer_info->length, PCI_DMA_TODEVICE);
901
902                         dev_kfree_skb(buffer_info->skb);
903
904                         buffer_info->skb = NULL;
905                 }
906         }
907
908         size = sizeof(struct ixgb_buffer) * tx_ring->count;
909         memset(tx_ring->buffer_info, 0, size);
910
911         /* Zero out the descriptor ring */
912
913         memset(tx_ring->desc, 0, tx_ring->size);
914
915         tx_ring->next_to_use = 0;
916         tx_ring->next_to_clean = 0;
917
918         IXGB_WRITE_REG(&adapter->hw, TDH, 0);
919         IXGB_WRITE_REG(&adapter->hw, TDT, 0);
920 }
921
922 /**
923  * ixgb_free_rx_resources - Free Rx Resources
924  * @adapter: board private structure
925  *
926  * Free all receive software resources
927  **/
928
929 static void ixgb_free_rx_resources(struct ixgb_adapter *adapter)
930 {
931         struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
932         struct pci_dev *pdev = adapter->pdev;
933
934         ixgb_clean_rx_ring(adapter);
935
936         kfree(rx_ring->buffer_info);
937         rx_ring->buffer_info = NULL;
938
939         pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
940
941         rx_ring->desc = NULL;
942 }
943
944 /**
945  * ixgb_clean_rx_ring - Free Rx Buffers
946  * @adapter: board private structure
947  **/
948
949 static void ixgb_clean_rx_ring(struct ixgb_adapter *adapter)
950 {
951         struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
952         struct ixgb_buffer *buffer_info;
953         struct pci_dev *pdev = adapter->pdev;
954         unsigned long size;
955         unsigned int i;
956
957         /* Free all the Rx ring sk_buffs */
958
959         for (i = 0; i < rx_ring->count; i++) {
960                 buffer_info = &rx_ring->buffer_info[i];
961                 if (buffer_info->skb) {
962
963                         pci_unmap_single(pdev,
964                                          buffer_info->dma,
965                                          buffer_info->length,
966                                          PCI_DMA_FROMDEVICE);
967
968                         dev_kfree_skb(buffer_info->skb);
969
970                         buffer_info->skb = NULL;
971                 }
972         }
973
974         size = sizeof(struct ixgb_buffer) * rx_ring->count;
975         memset(rx_ring->buffer_info, 0, size);
976
977         /* Zero out the descriptor ring */
978
979         memset(rx_ring->desc, 0, rx_ring->size);
980
981         rx_ring->next_to_clean = 0;
982         rx_ring->next_to_use = 0;
983
984         IXGB_WRITE_REG(&adapter->hw, RDH, 0);
985         IXGB_WRITE_REG(&adapter->hw, RDT, 0);
986 }
987
988 /**
989  * ixgb_set_mac - Change the Ethernet Address of the NIC
990  * @netdev: network interface device structure
991  * @p: pointer to an address structure
992  *
993  * Returns 0 on success, negative on failure
994  **/
995
996 static int ixgb_set_mac(struct net_device *netdev, void *p)
997 {
998         struct ixgb_adapter *adapter = netdev->priv;
999         struct sockaddr *addr = p;
1000
1001         if (!is_valid_ether_addr(addr->sa_data))
1002                 return -EADDRNOTAVAIL;
1003
1004         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1005
1006         ixgb_rar_set(&adapter->hw, addr->sa_data, 0);
1007
1008         return 0;
1009 }
1010
1011 /**
1012  * ixgb_set_multi - Multicast and Promiscuous mode set
1013  * @netdev: network interface device structure
1014  *
1015  * The set_multi entry point is called whenever the multicast address
1016  * list or the network interface flags are updated.  This routine is
1017  * responsible for configuring the hardware for proper multicast,
1018  * promiscuous mode, and all-multi behavior.
1019  **/
1020
1021 static void ixgb_set_multi(struct net_device *netdev)
1022 {
1023         struct ixgb_adapter *adapter = netdev->priv;
1024         struct ixgb_hw *hw = &adapter->hw;
1025         struct dev_mc_list *mc_ptr;
1026         uint32_t rctl;
1027         int i;
1028
1029         /* Check for Promiscuous and All Multicast modes */
1030
1031         rctl = IXGB_READ_REG(hw, RCTL);
1032
1033         if (netdev->flags & IFF_PROMISC) {
1034                 rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
1035         } else if (netdev->flags & IFF_ALLMULTI) {
1036                 rctl |= IXGB_RCTL_MPE;
1037                 rctl &= ~IXGB_RCTL_UPE;
1038         } else {
1039                 rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE);
1040         }
1041
1042         if (netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
1043                 rctl |= IXGB_RCTL_MPE;
1044                 IXGB_WRITE_REG(hw, RCTL, rctl);
1045         } else {
1046                 uint8_t mta[netdev->mc_count * IXGB_ETH_LENGTH_OF_ADDRESS];
1047
1048                 IXGB_WRITE_REG(hw, RCTL, rctl);
1049
1050                 for (i = 0, mc_ptr = netdev->mc_list; mc_ptr;
1051                      i++, mc_ptr = mc_ptr->next)
1052                         memcpy(&mta[i * IXGB_ETH_LENGTH_OF_ADDRESS],
1053                                mc_ptr->dmi_addr, IXGB_ETH_LENGTH_OF_ADDRESS);
1054
1055                 ixgb_mc_addr_list_update(hw, mta, netdev->mc_count, 0);
1056         }
1057 }
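/* Note on the mta[] variable-length array above: it lives on the kernel
 * stack, but the branch using it is only taken when mc_count is at most
 * IXGB_MAX_NUM_MULTICAST_ADDRESSES, which bounds the allocation. */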
1058
1059 /**
1060  * ixgb_watchdog - Timer Call-back
1061  * @data: pointer to netdev cast into an unsigned long
1062  **/
1063
1064 static void ixgb_watchdog(unsigned long data)
1065 {
1066         struct ixgb_adapter *adapter = (struct ixgb_adapter *)data;
1067         struct net_device *netdev = adapter->netdev;
1068         struct ixgb_desc_ring *txdr = &adapter->tx_ring;
1069         unsigned int i;
1070
1071         ixgb_check_for_link(&adapter->hw);
1072
1073         if (ixgb_check_for_bad_link(&adapter->hw)) {
1074                 /* force the reset path */
1075                 netif_stop_queue(netdev);
1076         }
1077
1078         if (adapter->hw.link_up) {
1079                 if (!netif_carrier_ok(netdev)) {
1080                         printk(KERN_INFO "ixgb: %s NIC Link is Up %d Mbps %s\n",
1081                                netdev->name, 10000, "Full Duplex");
1082                         adapter->link_speed = 10000;
1083                         adapter->link_duplex = FULL_DUPLEX;
1084                         netif_carrier_on(netdev);
1085                         netif_wake_queue(netdev);
1086                 }
1087         } else {
1088                 if (netif_carrier_ok(netdev)) {
1089                         adapter->link_speed = 0;
1090                         adapter->link_duplex = 0;
1091                         printk(KERN_INFO
1092                                "ixgb: %s NIC Link is Down\n", netdev->name);
1093                         netif_carrier_off(netdev);
1094                         netif_stop_queue(netdev);
1095
1096                 }
1097         }
1098
1099         ixgb_update_stats(adapter);
1100
1101         if (!netif_carrier_ok(netdev)) {
1102                 if (IXGB_DESC_UNUSED(txdr) + 1 < txdr->count) {
1103                         /* We've lost link, so the controller stops DMA,
1104                          * but we've got queued Tx work that's never going
1105                          * to get done, so reset controller to flush Tx.
1106                          * (Do the reset outside of interrupt context). */
1107                         schedule_work(&adapter->tx_timeout_task);
1108                 }
1109         }
1110
1111         /* Early detection of hung controller */
1112         i = txdr->next_to_clean;
1113         if (txdr->buffer_info[i].dma &&
1114             time_after(jiffies, txdr->buffer_info[i].time_stamp + HZ) &&
1115             !(IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_TXOFF))
1116                 netif_stop_queue(netdev);
1117
1118         /* generate an interrupt to force clean up of any stragglers */
1119         IXGB_WRITE_REG(&adapter->hw, ICS, IXGB_INT_TXDW);
1120
1121         /* Reset the timer */
1122         mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
1123 }
1124
1125 #define IXGB_TX_FLAGS_CSUM              0x00000001
1126 #define IXGB_TX_FLAGS_VLAN              0x00000002
1127 #define IXGB_TX_FLAGS_TSO               0x00000004
1128
1129 static inline boolean_t
1130 ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
1131 {
1132 #ifdef NETIF_F_TSO
1133         struct ixgb_context_desc *context_desc;
1134         unsigned int i;
1135         uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
1136         uint16_t ipcse, tucse, mss;
1137
1138         if (likely(skb_shinfo(skb)->tso_size)) {
1139                 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
1140                 mss = skb_shinfo(skb)->tso_size;
1141                 skb->nh.iph->tot_len = 0;
1142                 skb->nh.iph->check = 0;
1143                 skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
1144                                                       skb->nh.iph->daddr,
1145                                                       0, IPPROTO_TCP, 0);
1146                 ipcss = skb->nh.raw - skb->data;
1147                 ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data;
1148                 ipcse = skb->h.raw - skb->data - 1;
1149                 tucss = skb->h.raw - skb->data;
1150                 tucso = (void *)&(skb->h.th->check) - (void *)skb->data;
1151                 tucse = 0;
1152
1153                 i = adapter->tx_ring.next_to_use;
1154                 context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
1155
1156                 context_desc->ipcss = ipcss;
1157                 context_desc->ipcso = ipcso;
1158                 context_desc->ipcse = cpu_to_le16(ipcse);
1159                 context_desc->tucss = tucss;
1160                 context_desc->tucso = tucso;
1161                 context_desc->tucse = cpu_to_le16(tucse);
1162                 context_desc->mss = cpu_to_le16(mss);
1163                 context_desc->hdr_len = hdr_len;
1164                 context_desc->status = 0;
1165                 context_desc->cmd_type_len = cpu_to_le32(
1166                         IXGB_CONTEXT_DESC_TYPE | IXGB_CONTEXT_DESC_CMD_TSE |
1167                         IXGB_CONTEXT_DESC_CMD_IP | IXGB_CONTEXT_DESC_CMD_TCP |
1168                         IXGB_CONTEXT_DESC_CMD_RS | IXGB_CONTEXT_DESC_CMD_IDE |
1169                         (skb->len - hdr_len));
1178
1179                 if (++i == adapter->tx_ring.count)
1180                         i = 0;
1181                 adapter->tx_ring.next_to_use = i;
1182
1183                 return TRUE;
1184         }
1185 #endif
1186
1187         return FALSE;
1188 }
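/* TSO context math: hdr_len is the transport-header offset plus the TCP
 * header length (doff is in 32-bit words, hence << 2).  tot_len and the
 * IP checksum are zeroed, and the TCP checksum is seeded with the
 * pseudo-header sum (length 0) via csum_tcpudp_magic() so the hardware
 * can fill in per-segment values. */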
1189
1190 static inline boolean_t
1191 ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
1192 {
1193         struct ixgb_context_desc *context_desc;
1194         unsigned int i;
1195         uint8_t css, cso;
1196
1197         if (likely(skb->ip_summed == CHECKSUM_HW)) {
1198                 css = skb->h.raw - skb->data;
1199                 cso = (skb->h.raw + skb->csum) - skb->data;
1200
1201                 i = adapter->tx_ring.next_to_use;
1202                 context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
1203
1204                 context_desc->tucss = css;
1205                 context_desc->tucso = cso;
1206                 context_desc->tucse = 0;
1207                 /* zero out any previously existing data in one instruction */
1208                 *(uint32_t *)&(context_desc->ipcss) = 0;
1209                 context_desc->status = 0;
1210                 context_desc->hdr_len = 0;
1211                 context_desc->mss = 0;
1212                 context_desc->cmd_type_len =
1213                     cpu_to_le32(IXGB_CONTEXT_DESC_TYPE
1214                                 | IXGB_TX_DESC_CMD_RS | IXGB_TX_DESC_CMD_IDE);
1215
1216                 if (++i == adapter->tx_ring.count)
1217                         i = 0;
1218                 adapter->tx_ring.next_to_use = i;
1219
1220                 return TRUE;
1221         }
1222
1223         return FALSE;
1224 }
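/* For CHECKSUM_HW skbs the stack defers checksumming to the NIC: css is
 * the offset where checksumming starts (the transport header) and cso is
 * where the result is stored, skb->csum bytes into that header. */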
1225
1226 #define IXGB_MAX_TXD_PWR        14
1227 #define IXGB_MAX_DATA_PER_TXD   (1<<IXGB_MAX_TXD_PWR)
1228
1229 static inline int
1230 ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
1231             unsigned int first)
1232 {
1233         struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
1234         struct ixgb_buffer *buffer_info;
1235         int len = skb->len;
1236         unsigned int offset = 0, size, count = 0, i;
1237
1238         unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
1239         unsigned int f;
1240         len -= skb->data_len;
1241
1242         i = tx_ring->next_to_use;
1243
1244         while (len) {
1245                 buffer_info = &tx_ring->buffer_info[i];
1246                 size = min(len, IXGB_MAX_JUMBO_FRAME_SIZE);
1247                 buffer_info->length = size;
1248                 buffer_info->dma =
1249                     pci_map_single(adapter->pdev,
1250                                    skb->data + offset, size, PCI_DMA_TODEVICE);
1251                 buffer_info->time_stamp = jiffies;
1252
1253                 len -= size;
1254                 offset += size;
1255                 count++;
1256                 if (++i == tx_ring->count)
1257                         i = 0;
1258         }
1259
1260         for (f = 0; f < nr_frags; f++) {
1261                 struct skb_frag_struct *frag;
1262
1263                 frag = &skb_shinfo(skb)->frags[f];
1264                 len = frag->size;
1265                 offset = 0;
1266
1267                 while (len) {
1268                         buffer_info = &tx_ring->buffer_info[i];
1269                         size = min(len, IXGB_MAX_JUMBO_FRAME_SIZE);
1270                         buffer_info->length = size;
1271                         buffer_info->dma =
1272                             pci_map_page(adapter->pdev,
1273                                          frag->page,
1274                                          frag->page_offset + offset,
1275                                          size, PCI_DMA_TODEVICE);
1276                         buffer_info->time_stamp = jiffies;
1277
1278                         len -= size;
1279                         offset += size;
1280                         count++;
1281                         if (++i == tx_ring->count)
1282                                 i = 0;
1283                 }
1284         }
1285         i = (i == 0) ? tx_ring->count - 1 : i - 1;
1286         tx_ring->buffer_info[i].skb = skb;
1287         tx_ring->buffer_info[first].next_to_watch = i;
1288
1289         return count;
1290 }
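/* Mapping convention: the skb pointer is attached to the *last* descriptor
 * of the frame, and buffer_info[first].next_to_watch records that last
 * index, so the Tx-clean path can free the whole frame once the hardware
 * has written back its final descriptor. */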
1291
1292 static inline void
1293 ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,
1294               int tx_flags)
1295 {
1296         struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
1297         struct ixgb_tx_desc *tx_desc = NULL;
1298         struct ixgb_buffer *buffer_info;
1299         uint32_t cmd_type_len = adapter->tx_cmd_type;
1300         uint8_t status = 0;
1301         uint8_t popts = 0;
1302         unsigned int i;
1303
1304         if (tx_flags & IXGB_TX_FLAGS_TSO) {
1305                 cmd_type_len |= IXGB_TX_DESC_CMD_TSE;
1306                 popts |= (IXGB_TX_DESC_POPTS_IXSM | IXGB_TX_DESC_POPTS_TXSM);
1307         }
1308
1309         if (tx_flags & IXGB_TX_FLAGS_CSUM)
1310                 popts |= IXGB_TX_DESC_POPTS_TXSM;
1311
1312         if (tx_flags & IXGB_TX_FLAGS_VLAN) {
1313                 cmd_type_len |= IXGB_TX_DESC_CMD_VLE;
1314         }
1315
1316         i = tx_ring->next_to_use;
1317
1318         while (count--) {
1319                 buffer_info = &tx_ring->buffer_info[i];
1320                 tx_desc = IXGB_TX_DESC(*tx_ring, i);
1321                 tx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
1322                 tx_desc->cmd_type_len =
1323                     cpu_to_le32(cmd_type_len | buffer_info->length);
1324                 tx_desc->status = status;
1325                 tx_desc->popts = popts;
1326                 tx_desc->vlan = cpu_to_le16(vlan_id);
1327
1328                 if (++i == tx_ring->count)
1329                         i = 0;
1330         }
1331
1332         tx_desc->cmd_type_len |= cpu_to_le32(IXGB_TX_DESC_CMD_EOP
1333                                              | IXGB_TX_DESC_CMD_RS);
1334
1335         /* Force memory writes to complete before letting h/w
1336          * know there are new descriptors to fetch.  (Only
1337          * applicable for weak-ordered memory model archs,
1338          * such as IA-64). */
1339         wmb();
1340
1341         tx_ring->next_to_use = i;
1342         IXGB_WRITE_REG(&adapter->hw, TDT, i);
1343 }
1344
1345 /* Tx Descriptors needed, worst case */
1346 #define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \
1347                          (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
1348 #define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) + \
1349         MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1
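/* Worked example: TXD_USE_COUNT(S) is ceil(S / 16384), since
 * IXGB_MAX_DATA_PER_TXD is 1 << 14.  On a 4K-page system each page-sized
 * fragment needs one descriptor, so DESC_NEEDED = 1 + MAX_SKB_FRAGS * 1 + 1:
 * one for the head, one per fragment, plus one spare. */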
1350
1351 static int ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1352 {
1353         struct ixgb_adapter *adapter = netdev->priv;
1354         unsigned int first;
1355         unsigned int tx_flags = 0;
1356         unsigned long flags;
1357         int vlan_id = 0;
1358
1359         if (skb->len <= 0) {
1360                 dev_kfree_skb_any(skb);
1361                 return 0;
1362         }
1363
1364         spin_lock_irqsave(&adapter->tx_lock, flags);
1365         if (unlikely(IXGB_DESC_UNUSED(&adapter->tx_ring) < DESC_NEEDED)) {
1366                 netif_stop_queue(netdev);
1367                 spin_unlock_irqrestore(&adapter->tx_lock, flags);
1368                 return 1;
1369         }
1370         spin_unlock_irqrestore(&adapter->tx_lock, flags);
1371
1372         if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
1373                 tx_flags |= IXGB_TX_FLAGS_VLAN;
1374                 vlan_id = vlan_tx_tag_get(skb);
1375         }
1376
1377         first = adapter->tx_ring.next_to_use;
1378
1379         if (ixgb_tso(adapter, skb))
1380                 tx_flags |= IXGB_TX_FLAGS_TSO;
1381         else if (ixgb_tx_csum(adapter, skb))
1382                 tx_flags |= IXGB_TX_FLAGS_CSUM;
1383
1384         ixgb_tx_queue(adapter, ixgb_tx_map(adapter, skb, first), vlan_id,
1385                       tx_flags);
1386
1387         netdev->trans_start = jiffies;
1388
1389         return 0;
1390 }
1391
1392 /**
1393  * ixgb_tx_timeout - Respond to a Tx Hang
1394  * @netdev: network interface device structure
1395  **/
1396
1397 static void ixgb_tx_timeout(struct net_device *netdev)
1398 {
1399         struct ixgb_adapter *adapter = netdev->priv;
1400
1401         /* Do the reset outside of interrupt context */
1402         schedule_work(&adapter->tx_timeout_task);
1403 }
1404
1405 static void ixgb_tx_timeout_task(struct net_device *netdev)
1406 {
1407         struct ixgb_adapter *adapter = netdev->priv;
1408
1409         netif_device_detach(netdev);
1410         ixgb_down(adapter, TRUE);
1411         ixgb_up(adapter);
1412         netif_device_attach(netdev);
1413 }
1414
1415 /**
1416  * ixgb_get_stats - Get System Network Statistics
1417  * @netdev: network interface device structure
1418  *
1419  * Returns the address of the device statistics structure.
1420  * The statistics are actually updated from the timer callback.
1421  **/
1422
1423 static struct net_device_stats *ixgb_get_stats(struct net_device *netdev)
1424 {
1425         struct ixgb_adapter *adapter = netdev->priv;
1426
1427         return &adapter->net_stats;
1428 }
1429
1430 /**
1431  * ixgb_change_mtu - Change the Maximum Transfer Unit
1432  * @netdev: network interface device structure
1433  * @new_mtu: new value for maximum frame size
1434  *
1435  * Returns 0 on success, negative on failure
1436  **/
1437
1438 static int ixgb_change_mtu(struct net_device *netdev, int new_mtu)
1439 {
1440         struct ixgb_adapter *adapter = netdev->priv;
1441         uint32_t old_mtu = adapter->rx_buffer_len;
1442         int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
1443
1444         if ((max_frame < IXGB_MIN_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH)
1445             || (max_frame > IXGB_MAX_JUMBO_FRAME_SIZE + ENET_FCS_LENGTH)) {
1446                 IXGB_ERR("Invalid MTU setting\n");
1447                 return -EINVAL;
1448         }
1449
1450         if ((max_frame <=
1451              IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH)
1452             || (max_frame <= IXGB_RXBUFFER_2048)) {
1453                 adapter->rx_buffer_len = IXGB_RXBUFFER_2048;
1454
1455         } else if (max_frame <= IXGB_RXBUFFER_4096) {
1456                 adapter->rx_buffer_len = IXGB_RXBUFFER_4096;
1457
1458         } else if (max_frame <= IXGB_RXBUFFER_8192) {
1459                 adapter->rx_buffer_len = IXGB_RXBUFFER_8192;
1460
1461         } else {
1462                 adapter->rx_buffer_len = IXGB_RXBUFFER_16384;
1463         }
1464
1465         netdev->mtu = new_mtu;
1466
1467         if (old_mtu != adapter->rx_buffer_len && netif_running(netdev)) {
1468
1469                 ixgb_down(adapter, TRUE);
1470                 ixgb_up(adapter);
1471         }
1472
1473         return 0;
1474 }
1475
1476 /**
1477  * ixgb_update_stats - Update the board statistics counters.
1478  * @adapter: board private structure
1479  **/
1480
1481 static void ixgb_update_stats(struct ixgb_adapter *adapter)
1482 {
1483         adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
1484         adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
1485         adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
1486         adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
1487         adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
1488         adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
1489         adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
1490         adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
1491         adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
1492         adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
1493         adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
1494         adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH);
1495         adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL);
1496         adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH);
1497         adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL);
1498         adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH);
1499         adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL);
1500         adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH);
1501         adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC);
1502         adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC);
1503         adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC);
1504         adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC);
1505         adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS);
1506         adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC);
1507         adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC);
1508         adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC);
1509         adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL);
1510         adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH);
1511         adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL);
1512         adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH);
1513         adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL);
1514         adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH);
1515         adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL);
1516         adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH);
1517         adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL);
1518         adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH);
1519         adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL);
1520         adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH);
1521         adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL);
1522         adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH);
1523         adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL);
1524         adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH);
1525         adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL);
1526         adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH);
1527         adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC);
1528         adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C);
1529         adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC);
1530         adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC);
1531         adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC);
1532         adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC);
1533         adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC);
1534         adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC);
1535         adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC);
1536         adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC);
1537         adapter->stats.mcftc += IXGB_READ_REG(&adapter->hw, MCFTC);
1538         adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC);
1539         adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC);
1540         adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC);
1541         adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC);
1542         adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC);
1543
1544         /* Fill out the OS statistics structure */
1545
1546         adapter->net_stats.rx_packets = adapter->stats.gprcl;
1547         adapter->net_stats.tx_packets = adapter->stats.gptcl;
1548         adapter->net_stats.rx_bytes = adapter->stats.gorcl;
1549         adapter->net_stats.tx_bytes = adapter->stats.gotcl;
1550         adapter->net_stats.multicast = adapter->stats.mprcl;
1551         adapter->net_stats.collisions = 0;
1552
	/* ignore RLEC as it reports errors for padded (< 64 byte) frames
	 * with a length in the type/len field */
1555         adapter->net_stats.rx_errors =
1556             /* adapter->stats.rnbc + */ adapter->stats.crcerrs +
1557             adapter->stats.ruc +
1558             adapter->stats.roc /*+ adapter->stats.rlec */  +
1559             adapter->stats.icbc +
1560             adapter->stats.ecbc + adapter->stats.mpc;
1561
1562         adapter->net_stats.rx_dropped = adapter->stats.mpc;
1563
1564         /* see above
1565          * adapter->net_stats.rx_length_errors = adapter->stats.rlec;
1566          */
1567
1568         adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
1569         adapter->net_stats.rx_fifo_errors = adapter->stats.mpc;
1570         adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
1571         adapter->net_stats.rx_over_errors = adapter->stats.mpc;
1572
1573         adapter->net_stats.tx_errors = 0;
1574         adapter->net_stats.rx_frame_errors = 0;
1575         adapter->net_stats.tx_aborted_errors = 0;
1576         adapter->net_stats.tx_carrier_errors = 0;
1577         adapter->net_stats.tx_fifo_errors = 0;
1578         adapter->net_stats.tx_heartbeat_errors = 0;
1579         adapter->net_stats.tx_window_errors = 0;
1580 }
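/* The hardware splits every statistics counter into a 32-bit low/high
 * register pair (e.g. GORCL/GORCH above); net_stats is filled from the
 * low halves only.  A hypothetical helper that assembles one full
 * 64-bit count could look like this (sketch only, not part of the
 * driver):
 *
 *	static uint64_t ixgb_read_rx_octets(struct ixgb_hw *hw)
 *	{
 *		uint64_t lo = IXGB_READ_REG(hw, GORCL);
 *		uint64_t hi = IXGB_READ_REG(hw, GORCH);
 *
 *		return (hi << 32) | lo;
 *	}
 */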
1581
1582 #define IXGB_MAX_INTR 10
1583 /**
1584  * ixgb_intr - Interrupt Handler
1585  * @irq: interrupt number
1586  * @data: pointer to a network interface device structure
 * @regs: CPU registers structure (unused by this handler)
1588  **/
1589
1590 static irqreturn_t ixgb_intr(int irq, void *data, struct pt_regs *regs)
1591 {
1592         struct net_device *netdev = data;
1593         struct ixgb_adapter *adapter = netdev->priv;
1594         struct ixgb_hw *hw = &adapter->hw;
	uint32_t icr = IXGB_READ_REG(hw, ICR);
1596 #ifndef CONFIG_IXGB_NAPI
1597         unsigned int i;
1598 #endif
1599
1600         if (unlikely(!icr))
1601                 return IRQ_NONE;        /* Not our interrupt */
1602
1603         if (unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC))) {
1604                 mod_timer(&adapter->watchdog_timer, jiffies);
1605         }
1606 #ifdef CONFIG_IXGB_NAPI
1607         if (netif_rx_schedule_prep(netdev)) {
1608
		/* Disable interrupts and register for poll.  The flush
		 * of the posted write is intentionally left out. */
1612
1613                 atomic_inc(&adapter->irq_sem);
1614                 IXGB_WRITE_REG(hw, IMC, ~0);
1615                 __netif_rx_schedule(netdev);
1616         }
1617 #else
	/* yes, that is actually a bitwise & below: it makes sure that
	 * every pass through this loop checks both the receive and
	 * transmit queues for completed descriptors, which avoids
	 * starvation and helps tx/rx fairness. */
1622         for(i = 0; i < IXGB_MAX_INTR; i++)
1623                 if(!ixgb_clean_rx_irq(adapter) &
1624                    !ixgb_clean_tx_irq(adapter))
1625                         break;
1626         /* if RAIDC:EN == 1 and ICR:RXDMT0 == 1, we need to
1627          * set IMS:RXDMT0 to 1 to restart the RBD timer (POLL)
1628          */
1629         if ((icr & IXGB_INT_RXDMT0) && adapter->raidc) {
1630                 /* ready the timer by writing the clear reg */
1631                 IXGB_WRITE_REG(hw, IMC, IXGB_INT_RXDMT0);
		/* now restart it; h/w will decide if it's necessary */
1633                 IXGB_WRITE_REG(hw, IMS, IXGB_INT_RXDMT0);
1634         }
1635 #endif
1636         return IRQ_HANDLED;
1637 }
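/* For reference, this handler is attached when the interface comes up,
 * presumably via request_irq() in ixgb_up().  A minimal sketch of that
 * registration, with era-typical flags shown for illustration only:
 *
 *	err = request_irq(adapter->pdev->irq, &ixgb_intr, SA_SHIRQ,
 *			  netdev->name, netdev);
 *	if (err)
 *		return err;
 */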
1638
1639 #ifdef CONFIG_IXGB_NAPI
1640 /**
1641  * ixgb_clean - NAPI Rx polling callback
 * @netdev: network interface device structure
 * @budget: pointer to the amount of work the driver may do
1643  **/
1644
1645 static int ixgb_clean(struct net_device *netdev, int *budget)
1646 {
1647         struct ixgb_adapter *adapter = netdev->priv;
1648         int work_to_do = min(*budget, netdev->quota);
1649         int work_done = 0;
1650
1651         ixgb_clean_tx_irq(adapter);
1652         ixgb_clean_rx_irq(adapter, &work_done, work_to_do);
1653
1654         *budget -= work_done;
1655         netdev->quota -= work_done;
1656
1657         if (work_done < work_to_do || !netif_running(netdev)) {
1658                 netif_rx_complete(netdev);
1659                 /* RAIDC will be automatically restarted by irq_enable */
1660                 ixgb_irq_enable(adapter);
1661         }
1662
1663         return (work_done >= work_to_do);
1664 }
1665 #endif
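/* Under CONFIG_IXGB_NAPI the callback above is hooked into the
 * net_device at probe time, in the style of the 2.6 NAPI API.  A
 * sketch, assuming the usual default weight of 64:
 *
 *	netdev->poll = &ixgb_clean;
 *	netdev->weight = 64;
 */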
1666
1667 /**
1668  * ixgb_clean_tx_irq - Reclaim resources after transmit completes
1669  * @adapter: board private structure
1670  **/
1671
1672 static boolean_t ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
1673 {
1674         struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
1675         struct net_device *netdev = adapter->netdev;
1676         struct pci_dev *pdev = adapter->pdev;
1677         struct ixgb_tx_desc *tx_desc, *eop_desc;
1678         struct ixgb_buffer *buffer_info;
1679         unsigned int i, eop;
1680         boolean_t cleaned = FALSE;
1681
1682         i = tx_ring->next_to_clean;
1683         eop = tx_ring->buffer_info[i].next_to_watch;
1684         eop_desc = IXGB_TX_DESC(*tx_ring, eop);
1685
1686         while (eop_desc->status & IXGB_TX_DESC_STATUS_DD) {
1687
1688                 for (cleaned = FALSE; !cleaned;) {
1689                         tx_desc = IXGB_TX_DESC(*tx_ring, i);
1690                         buffer_info = &tx_ring->buffer_info[i];
1691
1692                         if (tx_desc->popts
1693                             & (IXGB_TX_DESC_POPTS_TXSM |
1694                                IXGB_TX_DESC_POPTS_IXSM))
1695                                 adapter->hw_csum_tx_good++;
1696
1697                         if (buffer_info->dma) {
1698
1699                                 pci_unmap_page(pdev,
1700                                                buffer_info->dma,
1701                                                buffer_info->length,
1702                                                PCI_DMA_TODEVICE);
1703
1704                                 buffer_info->dma = 0;
1705                         }
1706
1707                         if (buffer_info->skb) {
1708
1709                                 dev_kfree_skb_any(buffer_info->skb);
1710
1711                                 buffer_info->skb = NULL;
1712                         }
1713
			/* clear status, popts and vlan in a single write */
			*(uint32_t *)&(tx_desc->status) = 0;
1715
1716                         cleaned = (i == eop);
1717                         if (++i == tx_ring->count)
1718                                 i = 0;
1719                 }
1720
1721                 eop = tx_ring->buffer_info[i].next_to_watch;
1722                 eop_desc = IXGB_TX_DESC(*tx_ring, eop);
1723         }
1724
1725         tx_ring->next_to_clean = i;
1726
1727         spin_lock(&adapter->tx_lock);
1728         if (cleaned && netif_queue_stopped(netdev) && netif_carrier_ok(netdev)
1729             && (IXGB_DESC_UNUSED(tx_ring) > IXGB_TX_QUEUE_WAKE)) {
1730
1731                 netif_wake_queue(netdev);
1732         }
1733         spin_unlock(&adapter->tx_lock);
1734
1735         return cleaned;
1736 }
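/* IXGB_DESC_UNUSED, used in the wake test above, counts the free slots
 * in the ring.  For this style of producer/consumer ring it is commonly
 * defined along these lines (sketch; the real macro lives in ixgb.h),
 * always leaving one descriptor unused so full and empty rings can be
 * told apart:
 *
 *	#define IXGB_DESC_UNUSED(R)					\
 *		((((R)->next_to_clean > (R)->next_to_use) ? 0 :		\
 *		  (R)->count) + (R)->next_to_clean - (R)->next_to_use - 1)
 */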
1737
1738 /**
1739  * ixgb_rx_checksum - Receive Checksum Offload for 82597.
1740  * @adapter: board private structure
1741  * @rx_desc: receive descriptor
 * @skb: socket buffer with received data
1743  **/
1744
1745 static inline void
1746 ixgb_rx_checksum(struct ixgb_adapter *adapter,
1747                  struct ixgb_rx_desc *rx_desc, struct sk_buff *skb)
1748 {
1749         /* Ignore Checksum bit is set OR
1750          * TCP Checksum has not been calculated
1751          */
1752         if ((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) ||
1753             (!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) {
1754                 skb->ip_summed = CHECKSUM_NONE;
1755                 return;
1756         }
1757
1758         /* At this point we know the hardware did the TCP checksum */
1759         /* now look at the TCP checksum error bit */
1760         if (rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) {
1761                 /* let the stack verify checksum errors */
1762                 skb->ip_summed = CHECKSUM_NONE;
1763                 adapter->hw_csum_rx_error++;
1764         } else {
1765                 /* TCP checksum is good */
1766                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1767                 adapter->hw_csum_rx_good++;
1768         }
1769 }
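/* Summary of the decision above:
 *   IXSM set or TCPCS clear -> CHECKSUM_NONE (stack verifies)
 *   TCPCS set, TCPE set     -> CHECKSUM_NONE, hw_csum_rx_error++
 *   TCPCS set, TCPE clear   -> CHECKSUM_UNNECESSARY, hw_csum_rx_good++
 */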
1770
1771 /**
 * ixgb_clean_rx_irq - Send received data up the network stack
1773  * @adapter: board private structure
1774  **/
1775
1776 static boolean_t
1777 #ifdef CONFIG_IXGB_NAPI
1778 ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
1779 #else
1780 ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
1781 #endif
1782 {
1783         struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
1784         struct net_device *netdev = adapter->netdev;
1785         struct pci_dev *pdev = adapter->pdev;
1786         struct ixgb_rx_desc *rx_desc, *next_rxd;
1787         struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer;
1788         struct sk_buff *skb, *next_skb;
1789         uint32_t length;
1790         unsigned int i, j;
1791         boolean_t cleaned = FALSE;
1792
1793         i = rx_ring->next_to_clean;
1794         rx_desc = IXGB_RX_DESC(*rx_ring, i);
1795         buffer_info = &rx_ring->buffer_info[i];
1796
1797         while (rx_desc->status & IXGB_RX_DESC_STATUS_DD) {
1798
1799                 skb = buffer_info->skb;
1800                 prefetch(skb->data);
1801
1802                 if (++i == rx_ring->count)
1803                         i = 0;
1804                 next_rxd = IXGB_RX_DESC(*rx_ring, i);
1805                 prefetch(next_rxd);
1806
1807                 if ((j = i + 1) == rx_ring->count)
1808                         j = 0;
1809                 next2_buffer = &rx_ring->buffer_info[j];
1810                 prefetch(next2_buffer);
1811
1812                 next_buffer = &rx_ring->buffer_info[i];
1813                 next_skb = next_buffer->skb;
1814                 prefetch(next_skb);
1815
1816 #ifdef CONFIG_IXGB_NAPI
1817                 if (*work_done >= work_to_do)
1818                         break;
1819
1820                 (*work_done)++;
1821 #endif
1822
1823                 cleaned = TRUE;
1824
1825                 pci_unmap_single(pdev,
1826                                  buffer_info->dma,
1827                                  buffer_info->length, PCI_DMA_FROMDEVICE);
1828
1829                 length = le16_to_cpu(rx_desc->length);
1830
1831                 if (unlikely(!(rx_desc->status & IXGB_RX_DESC_STATUS_EOP))) {
1832
1833                         /* All receives must fit into a single buffer */
1834
1835                         IXGB_DBG("Receive packet consumed multiple buffers "
1836                                  "length<%x>\n", length);
1837
1838                         dev_kfree_skb_irq(skb);
1839                         rx_desc->status = 0;
1840                         buffer_info->skb = NULL;
1841
1842                         rx_desc = next_rxd;
1843                         buffer_info = next_buffer;
1844                         continue;
1845                 }
1846
1847                 if (unlikely(rx_desc->errors
1848                              & (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE
1849                                 | IXGB_RX_DESC_ERRORS_P |
1850                                 IXGB_RX_DESC_ERRORS_RXE))) {
1851
1852                         dev_kfree_skb_irq(skb);
1853                         rx_desc->status = 0;
1854                         buffer_info->skb = NULL;
1855
1856                         rx_desc = next_rxd;
1857                         buffer_info = next_buffer;
1858                         continue;
1859                 }
1860
1861                 /* Good Receive */
1862                 skb_put(skb, length);
1863
1864                 /* Receive Checksum Offload */
1865                 ixgb_rx_checksum(adapter, rx_desc, skb);
1866
1867                 skb->protocol = eth_type_trans(skb, netdev);
1868 #ifdef CONFIG_IXGB_NAPI
1869                 if (adapter->vlgrp
1870                     && (rx_desc->status & IXGB_RX_DESC_STATUS_VP)) {
			/* byte-swap the little-endian descriptor field
			 * before masking out the VLAN id */
			vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
						 le16_to_cpu(rx_desc->special) &
						 IXGB_RX_DESC_SPECIAL_VLAN_MASK);
1875                 } else {
1876                         netif_receive_skb(skb);
1877                 }
1878 #else                           /* CONFIG_IXGB_NAPI */
1879                 if (adapter->vlgrp
1880                     && (rx_desc->status & IXGB_RX_DESC_STATUS_VP)) {
			/* as above: swap first, then mask */
			vlan_hwaccel_rx(skb, adapter->vlgrp,
					le16_to_cpu(rx_desc->special) &
					IXGB_RX_DESC_SPECIAL_VLAN_MASK);
1885                 } else {
1886                         netif_rx(skb);
1887                 }
1888 #endif                          /* CONFIG_IXGB_NAPI */
1889                 netdev->last_rx = jiffies;
1890
1891                 rx_desc->status = 0;
1892                 buffer_info->skb = NULL;
1893
1894                 rx_desc = next_rxd;
1895                 buffer_info = next_buffer;
1896         }
1897
1898         rx_ring->next_to_clean = i;
1899
1900         ixgb_alloc_rx_buffers(adapter);
1901
1902         return cleaned;
1903 }
1904
1905 /**
1906  * ixgb_alloc_rx_buffers - Replace used receive buffers
1907  * @adapter: address of board private structure
1908  **/
1909
1910 static void ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
1911 {
1912         struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
1913         struct net_device *netdev = adapter->netdev;
1914         struct pci_dev *pdev = adapter->pdev;
1915         struct ixgb_rx_desc *rx_desc;
1916         struct ixgb_buffer *buffer_info;
1917         struct sk_buff *skb;
1918         unsigned int i;
1919         int num_group_tail_writes;
1920         long cleancount;
1921
1922         i = rx_ring->next_to_use;
1923         buffer_info = &rx_ring->buffer_info[i];
1924         cleancount = IXGB_DESC_UNUSED(rx_ring);
1925
	/* lessen this to 4 if we're in the midst of raidc and rbd is
	 * occurring, because we don't want to delay returning buffers
	 * when supplies are low */
1930         num_group_tail_writes = adapter->raidc ? 4 : IXGB_RX_BUFFER_WRITE;
1931
1932         /* leave one descriptor unused */
1933         while (--cleancount > 0) {
1934                 rx_desc = IXGB_RX_DESC(*rx_ring, i);
1935
1936                 skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN);
1937
1938                 if (unlikely(!skb)) {
1939                         /* Better luck next round */
1940                         break;
1941                 }
1942
		/* Make buffer alignment 2 beyond a 16 byte boundary; this
		 * results in a 16 byte aligned IP header after the 14 byte
		 * MAC header is removed */
1947                 skb_reserve(skb, NET_IP_ALIGN);
1948
1949                 skb->dev = netdev;
1950
1951                 buffer_info->skb = skb;
1952                 buffer_info->length = adapter->rx_buffer_len;
1953                 buffer_info->dma =
1954                     pci_map_single(pdev,
1955                                    skb->data,
1956                                    adapter->rx_buffer_len, PCI_DMA_FROMDEVICE);
1957
1958                 rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
1959
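		/* Write the tail register only once per
		 * num_group_tail_writes descriptors.  Assuming the batch
		 * size is a power of two, the mask test below is
		 * equivalent to (i % num_group_tail_writes) == 0. */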
1960                 if ((i & ~(num_group_tail_writes - 1)) == i) {
1961                         /* Force memory writes to complete before letting h/w
1962                          * know there are new descriptors to fetch.  (Only
1963                          * applicable for weak-ordered memory model archs,
1964                          * such as IA-64). */
1965                         wmb();
1966
1967                         IXGB_WRITE_REG(&adapter->hw, RDT, i);
1968                 }
1969
1970                 if (++i == rx_ring->count)
1971                         i = 0;
1972                 buffer_info = &rx_ring->buffer_info[i];
1973         }
1974
1975         rx_ring->next_to_use = i;
1976 }
1977
1978 /**
1979  * ixgb_vlan_rx_register - enables or disables vlan tagging/stripping.
 * @netdev: network interface device structure
 * @grp: VLAN group to register, or NULL to disable tagging/stripping
1983  **/
1984 static void
1985 ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
1986 {
1987         struct ixgb_adapter *adapter = netdev->priv;
1988         uint32_t ctrl, rctl;
1989
1990         ixgb_irq_disable(adapter);
1991         adapter->vlgrp = grp;
1992
1993         if (grp) {
1994                 /* enable VLAN tag insert/strip */
1995                 ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
1996                 ctrl |= IXGB_CTRL0_VME;
1997                 IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
1998
1999                 /* enable VLAN receive filtering */
2000
2001                 rctl = IXGB_READ_REG(&adapter->hw, RCTL);
2002                 rctl |= IXGB_RCTL_VFE;
2003                 rctl &= ~IXGB_RCTL_CFIEN;
2004                 IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
2005         } else {
2006                 /* disable VLAN tag insert/strip */
2007
2008                 ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
2009                 ctrl &= ~IXGB_CTRL0_VME;
2010                 IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
2011
2012                 /* disable VLAN filtering */
2013
2014                 rctl = IXGB_READ_REG(&adapter->hw, RCTL);
2015                 rctl &= ~IXGB_RCTL_VFE;
2016                 IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
2017         }
2018
2019         ixgb_irq_enable(adapter);
2020 }
2021
2022 static void ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
2023 {
2024         struct ixgb_adapter *adapter = netdev->priv;
2025         uint32_t vfta, index;
2026
2027         /* add VID to filter table */
2028
2029         index = (vid >> 5) & 0x7F;
2030         vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
2031         vfta |= (1 << (vid & 0x1F));
2032         ixgb_write_vfta(&adapter->hw, index, vfta);
2033 }
2034
2035 static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
2036 {
2037         struct ixgb_adapter *adapter = netdev->priv;
2038         uint32_t vfta, index;
2039
2040         ixgb_irq_disable(adapter);
2041
2042         if (adapter->vlgrp)
2043                 adapter->vlgrp->vlan_devices[vid] = NULL;
2044
2045         ixgb_irq_enable(adapter);
2046
2047         /* remove VID from filter table */
2048
2049         index = (vid >> 5) & 0x7F;
2050         vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
2051         vfta &= ~(1 << (vid & 0x1F));
2052         ixgb_write_vfta(&adapter->hw, index, vfta);
2053 }
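/* VFTA indexing, worked through (illustrative): the 4096 possible VLAN
 * IDs map onto 128 32-bit filter registers.  For vid = 291 (0x123),
 * index = (291 >> 5) & 0x7F = 9 and bit = 291 & 0x1F = 3, so the add
 * and kill paths above toggle bit 3 of VFTA[9].
 */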
2054
2055 static void ixgb_restore_vlan(struct ixgb_adapter *adapter)
2056 {
2057         ixgb_vlan_rx_register(adapter->netdev, adapter->vlgrp);
2058
2059         if (adapter->vlgrp) {
2060                 uint16_t vid;
2061                 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
2062                         if (!adapter->vlgrp->vlan_devices[vid])
2063                                 continue;
2064                         ixgb_vlan_rx_add_vid(adapter->netdev, vid);
2065                 }
2066         }
2067 }
2068
2069 /**
2070  * ixgb_notify_reboot - handles OS notification of reboot event.
 * @nb: notifier block (unused)
 * @event: event being passed to the driver to act upon
 * @p: notifier payload (unused)
2074  **/
2075 static int
2076 ixgb_notify_reboot(struct notifier_block *nb, unsigned long event, void *p)
2077 {
2078         struct pci_dev *pdev = NULL;
2079
2080         switch (event) {
2081         case SYS_DOWN:
2082         case SYS_HALT:
2083         case SYS_POWER_OFF:
2084                 while ((pdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
2085                         if (pci_dev_driver(pdev) == &ixgb_driver)
2086                                 ixgb_suspend(pdev, 3);
2087                 }
2088         }
2089         return NOTIFY_DONE;
2090 }
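/* The notifier above is typically registered from the module init path
 * with a static notifier_block (sketch; the actual definition is
 * assumed to live near ixgb_init_module()):
 *
 *	static struct notifier_block ixgb_notifier_reboot = {
 *		.notifier_call	= ixgb_notify_reboot,
 *		.next		= NULL,
 *		.priority	= 0
 *	};
 *
 *	register_reboot_notifier(&ixgb_notifier_reboot);
 */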
2091
2092 /**
2093  * ixgb_suspend - driver suspend function called from notify.
 * @pdev: PCI device structure of the adapter being suspended
 * @state: power state to enter
2096  **/
2097 static int ixgb_suspend(struct pci_dev *pdev, uint32_t state)
2098 {
2099         struct net_device *netdev = pci_get_drvdata(pdev);
2100         struct ixgb_adapter *adapter = netdev->priv;
2101
2102         netif_device_detach(netdev);
2103
2104         if (netif_running(netdev))
2105                 ixgb_down(adapter, TRUE);
2106
2107         pci_save_state(pdev);
2108
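	/* numeric power states of this kernel era: 0 is D0 (full power),
	 * 3 is D3hot */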
2109         state = (state > 0) ? 3 : 0;
2110         pci_set_power_state(pdev, state);
2111         msec_delay(200);
2112
2113         return 0;
2114 }
2115
2116 #ifdef CONFIG_NET_POLL_CONTROLLER
2117 /*
2118  * Polling 'interrupt' - used by things like netconsole to send skbs
2119  * without having to re-enable interrupts. It's not called while
2120  * the interrupt routine is executing.
2121  */
2122
2123 static void ixgb_netpoll(struct net_device *dev)
2124 {
2125         struct ixgb_adapter *adapter = dev->priv;
2126         disable_irq(adapter->pdev->irq);
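	/* regs may safely be NULL here: ixgb_intr() never uses it */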
2127         ixgb_intr(adapter->pdev->irq, dev, NULL);
2128         enable_irq(adapter->pdev->irq);
2129 }
2130 #endif
2131
2132 /* ixgb_main.c */