1 /*******************************************************************************
4 Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
6 This program is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your option)
any later version.
11 This program is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc., 59
18 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
The full GNU General Public License is included in this distribution in the
file called LICENSE.

Contact Information:
24 Linux NICS <linux.nics@intel.com>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *******************************************************************************/
#include "ixgb.h"

/* Change Log
 * - include fix to the condition that determines when to quit NAPI - Robert Olsson
 * - use netif_poll_{disable/enable} to synchronize between NAPI and i/f up/down
 * - reset buffer_info->dma in Tx resource cleanup logic
 * - sparse cleanup - shemminger@osdl.org
 * - fix tx resource cleanup logic
 */
42 char ixgb_driver_name[] = "ixgb";
43 char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";
#ifndef CONFIG_IXGB_NAPI
#define DRIVERNAPI
#else
#define DRIVERNAPI "-NAPI"
#endif
50 #define DRV_VERSION "1.0.95-k2"DRIVERNAPI
51 char ixgb_driver_version[] = DRV_VERSION;
52 char ixgb_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
54 /* ixgb_pci_tbl - PCI Device ID Table
56 * Wildcard entries (PCI_ANY_ID) should come last
57 * Last entry must be all 0s
59 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 * Class, Class Mask, private data (not used) }
 */
62 static struct pci_device_id ixgb_pci_tbl[] = {
63 {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX,
64 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
65 {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR,
66 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
67 {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_LR,
68 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	/* required last entry */
	{0,}
};
74 MODULE_DEVICE_TABLE(pci, ixgb_pci_tbl);
76 /* Local Function Prototypes */
78 int ixgb_up(struct ixgb_adapter *adapter);
79 void ixgb_down(struct ixgb_adapter *adapter, boolean_t kill_watchdog);
80 void ixgb_reset(struct ixgb_adapter *adapter);
81 int ixgb_setup_tx_resources(struct ixgb_adapter *adapter);
82 int ixgb_setup_rx_resources(struct ixgb_adapter *adapter);
83 void ixgb_free_tx_resources(struct ixgb_adapter *adapter);
84 void ixgb_free_rx_resources(struct ixgb_adapter *adapter);
85 void ixgb_update_stats(struct ixgb_adapter *adapter);
87 static int ixgb_init_module(void);
88 static void ixgb_exit_module(void);
89 static int ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
90 static void __devexit ixgb_remove(struct pci_dev *pdev);
91 static int ixgb_sw_init(struct ixgb_adapter *adapter);
92 static int ixgb_open(struct net_device *netdev);
93 static int ixgb_close(struct net_device *netdev);
94 static void ixgb_configure_tx(struct ixgb_adapter *adapter);
95 static void ixgb_configure_rx(struct ixgb_adapter *adapter);
96 static void ixgb_setup_rctl(struct ixgb_adapter *adapter);
97 static void ixgb_clean_tx_ring(struct ixgb_adapter *adapter);
98 static void ixgb_clean_rx_ring(struct ixgb_adapter *adapter);
99 static void ixgb_set_multi(struct net_device *netdev);
100 static void ixgb_watchdog(unsigned long data);
101 static int ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
102 static struct net_device_stats *ixgb_get_stats(struct net_device *netdev);
103 static int ixgb_change_mtu(struct net_device *netdev, int new_mtu);
104 static int ixgb_set_mac(struct net_device *netdev, void *p);
105 static irqreturn_t ixgb_intr(int irq, void *data, struct pt_regs *regs);
106 static boolean_t ixgb_clean_tx_irq(struct ixgb_adapter *adapter);
108 #ifdef CONFIG_IXGB_NAPI
109 static int ixgb_clean(struct net_device *netdev, int *budget);
110 static boolean_t ixgb_clean_rx_irq(struct ixgb_adapter *adapter,
111 int *work_done, int work_to_do);
#else
static boolean_t ixgb_clean_rx_irq(struct ixgb_adapter *adapter);
#endif
115 static void ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter);
116 void ixgb_set_ethtool_ops(struct net_device *netdev);
117 static void ixgb_tx_timeout(struct net_device *dev);
118 static void ixgb_tx_timeout_task(struct net_device *dev);
119 static void ixgb_vlan_rx_register(struct net_device *netdev,
120 struct vlan_group *grp);
121 static void ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
122 static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
123 static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
125 #ifdef CONFIG_NET_POLL_CONTROLLER
126 /* for netdump / net console */
static void ixgb_netpoll(struct net_device *dev);
#endif
130 /* Exported from other modules */
132 extern void ixgb_check_options(struct ixgb_adapter *adapter);
134 static struct pci_driver ixgb_driver = {
135 .name = ixgb_driver_name,
	.id_table = ixgb_pci_tbl,
	.probe    = ixgb_probe,
	.remove   = __devexit_p(ixgb_remove),
};
141 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
142 MODULE_DESCRIPTION("Intel(R) PRO/10GbE Network Driver");
143 MODULE_LICENSE("GPL");
144 MODULE_VERSION(DRV_VERSION);
146 /* some defines for controlling descriptor fetches in h/w */
#define RXDCTL_PTHRESH_DEFAULT 128	/* chip considers prefetch below this */
148 #define RXDCTL_HTHRESH_DEFAULT 16 /* chip will only prefetch if tail is
149 pushed this many descriptors from head */
150 #define RXDCTL_WTHRESH_DEFAULT 16 /* chip writes back at this many or RXT0 */
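/* Sketch of how the defaults above are used (see ixgb_configure_rx() below):
 * each threshold is shifted into its field of the RXDCTL register, roughly
 *
 *	rxdctl = RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT |
 *		 RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT |
 *		 RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT;
 *
 * The shift macros come from ixgb_hw.h and are not shown in this file.
 */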
153 * ixgb_init_module - Driver Registration Routine
155 * ixgb_init_module is the first routine called when the driver is
156 * loaded. All it does is register with the PCI subsystem.
160 ixgb_init_module(void)
162 printk(KERN_INFO "%s - version %s\n",
163 ixgb_driver_string, ixgb_driver_version);
165 printk(KERN_INFO "%s\n", ixgb_copyright);
167 return pci_module_init(&ixgb_driver);
170 module_init(ixgb_init_module);
173 * ixgb_exit_module - Driver Exit Cleanup Routine
175 * ixgb_exit_module is called just before the driver is removed
180 ixgb_exit_module(void)
182 pci_unregister_driver(&ixgb_driver);
185 module_exit(ixgb_exit_module);
188 * ixgb_irq_disable - Mask off interrupt generation on the NIC
189 * @adapter: board private structure
193 ixgb_irq_disable(struct ixgb_adapter *adapter)
195 atomic_inc(&adapter->irq_sem);
196 IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
197 IXGB_WRITE_FLUSH(&adapter->hw);
198 synchronize_irq(adapter->pdev->irq);
202 * ixgb_irq_enable - Enable default interrupt generation settings
203 * @adapter: board private structure
207 ixgb_irq_enable(struct ixgb_adapter *adapter)
209 if(atomic_dec_and_test(&adapter->irq_sem)) {
210 IXGB_WRITE_REG(&adapter->hw, IMS,
			       IXGB_INT_RXT0 | IXGB_INT_RXDMT0 | IXGB_INT_TXDW |
			       IXGB_INT_LSC);
213 IXGB_WRITE_FLUSH(&adapter->hw);
218 ixgb_up(struct ixgb_adapter *adapter)
220 struct net_device *netdev = adapter->netdev;
222 int max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
223 struct ixgb_hw *hw = &adapter->hw;
225 /* hardware has been reset, we need to reload some things */
227 ixgb_set_multi(netdev);
229 ixgb_restore_vlan(adapter);
231 ixgb_configure_tx(adapter);
232 ixgb_setup_rctl(adapter);
233 ixgb_configure_rx(adapter);
234 ixgb_alloc_rx_buffers(adapter);
#ifdef CONFIG_PCI_MSI
	{
	boolean_t pcix = (IXGB_READ_REG(&adapter->hw, STATUS) &
			  IXGB_STATUS_PCIX_MODE) ? TRUE : FALSE;
	adapter->have_msi = TRUE;

	if (!pcix)
		adapter->have_msi = FALSE;
	else if((err = pci_enable_msi(adapter->pdev))) {
		printk(KERN_ERR
		       "Unable to allocate MSI interrupt Error: %d\n", err);
		adapter->have_msi = FALSE;
		/* proceed to try to request regular interrupt */
	}
	}
#endif
253 if((err = request_irq(adapter->pdev->irq, &ixgb_intr,
254 SA_SHIRQ | SA_SAMPLE_RANDOM,
			      netdev->name, netdev)))
		return err;
258 /* disable interrupts and get the hardware into a known state */
259 IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff);
261 if((hw->max_frame_size != max_frame) ||
262 (hw->max_frame_size !=
263 (IXGB_READ_REG(hw, MFS) >> IXGB_MFS_SHIFT))) {
265 hw->max_frame_size = max_frame;
267 IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);
269 if(hw->max_frame_size >
270 IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
271 uint32_t ctrl0 = IXGB_READ_REG(hw, CTRL0);
273 if(!(ctrl0 & IXGB_CTRL0_JFE)) {
274 ctrl0 |= IXGB_CTRL0_JFE;
275 IXGB_WRITE_REG(hw, CTRL0, ctrl0);
280 mod_timer(&adapter->watchdog_timer, jiffies);
281 ixgb_irq_enable(adapter);
283 #ifdef CONFIG_IXGB_NAPI
	netif_poll_enable(netdev);
#endif

	return 0;
}
290 ixgb_down(struct ixgb_adapter *adapter, boolean_t kill_watchdog)
292 struct net_device *netdev = adapter->netdev;
294 ixgb_irq_disable(adapter);
295 free_irq(adapter->pdev->irq, netdev);
296 #ifdef CONFIG_PCI_MSI
297 if(adapter->have_msi == TRUE)
		pci_disable_msi(adapter->pdev);
#endif
302 del_timer_sync(&adapter->watchdog_timer);
303 #ifdef CONFIG_IXGB_NAPI
	netif_poll_disable(netdev);
#endif
306 adapter->link_speed = 0;
307 adapter->link_duplex = 0;
308 netif_carrier_off(netdev);
309 netif_stop_queue(netdev);
312 ixgb_clean_tx_ring(adapter);
313 ixgb_clean_rx_ring(adapter);
317 ixgb_reset(struct ixgb_adapter *adapter)
320 ixgb_adapter_stop(&adapter->hw);
321 if(!ixgb_init_hw(&adapter->hw))
322 IXGB_DBG("ixgb_init_hw failed.\n");
326 * ixgb_probe - Device Initialization Routine
327 * @pdev: PCI device information struct
328 * @ent: entry in ixgb_pci_tbl
330 * Returns 0 on success, negative on failure
332 * ixgb_probe initializes an adapter identified by a pci_dev structure.
333 * The OS initialization, configuring of the adapter private structure,
334 * and a hardware reset occur.
338 ixgb_probe(struct pci_dev *pdev,
339 const struct pci_device_id *ent)
341 struct net_device *netdev = NULL;
342 struct ixgb_adapter *adapter;
343 static int cards_found = 0;
344 unsigned long mmio_start;
	if((err = pci_enable_device(pdev)))
		return err;
353 if(!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
356 if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
357 IXGB_ERR("No usable DMA configuration, aborting\n");
363 if((err = pci_request_regions(pdev, ixgb_driver_name)))
366 pci_set_master(pdev);
	netdev = alloc_etherdev(sizeof(struct ixgb_adapter));
	if(!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}
374 SET_MODULE_OWNER(netdev);
375 SET_NETDEV_DEV(netdev, &pdev->dev);
377 pci_set_drvdata(pdev, netdev);
378 adapter = netdev->priv;
379 adapter->netdev = netdev;
380 adapter->pdev = pdev;
381 adapter->hw.back = adapter;
383 mmio_start = pci_resource_start(pdev, BAR_0);
384 mmio_len = pci_resource_len(pdev, BAR_0);
386 adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
	if(!adapter->hw.hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}
392 for(i = BAR_1; i <= BAR_5; i++) {
		if(pci_resource_len(pdev, i) == 0)
			continue;
395 if(pci_resource_flags(pdev, i) & IORESOURCE_IO) {
396 adapter->hw.io_base = pci_resource_start(pdev, i);
401 netdev->open = &ixgb_open;
402 netdev->stop = &ixgb_close;
403 netdev->hard_start_xmit = &ixgb_xmit_frame;
404 netdev->get_stats = &ixgb_get_stats;
405 netdev->set_multicast_list = &ixgb_set_multi;
406 netdev->set_mac_address = &ixgb_set_mac;
407 netdev->change_mtu = &ixgb_change_mtu;
408 ixgb_set_ethtool_ops(netdev);
409 netdev->tx_timeout = &ixgb_tx_timeout;
410 netdev->watchdog_timeo = HZ;
411 #ifdef CONFIG_IXGB_NAPI
	netdev->poll = &ixgb_clean;
	netdev->weight = 64;
#endif
415 netdev->vlan_rx_register = ixgb_vlan_rx_register;
416 netdev->vlan_rx_add_vid = ixgb_vlan_rx_add_vid;
417 netdev->vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid;
418 #ifdef CONFIG_NET_POLL_CONTROLLER
	netdev->poll_controller = ixgb_netpoll;
#endif
422 netdev->mem_start = mmio_start;
423 netdev->mem_end = mmio_start + mmio_len;
424 netdev->base_addr = adapter->hw.io_base;
426 adapter->bd_number = cards_found;
427 adapter->link_speed = 0;
428 adapter->link_duplex = 0;
430 /* setup the private structure */
	if((err = ixgb_sw_init(adapter)))
		goto err_sw_init;
435 netdev->features = NETIF_F_SG |
			   NETIF_F_HW_CSUM |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;
#ifdef NETIF_F_TSO
	netdev->features |= NETIF_F_TSO;
#endif
	if(pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;
447 /* make sure the EEPROM is good */
449 if(!ixgb_validate_eeprom_checksum(&adapter->hw)) {
450 printk(KERN_ERR "The EEPROM Checksum Is Not Valid\n");
455 ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
457 if(!is_valid_ether_addr(netdev->dev_addr)) {
462 adapter->part_num = ixgb_get_ee_pba_number(&adapter->hw);
464 init_timer(&adapter->watchdog_timer);
465 adapter->watchdog_timer.function = &ixgb_watchdog;
466 adapter->watchdog_timer.data = (unsigned long)adapter;
468 INIT_WORK(&adapter->tx_timeout_task,
469 (void (*)(void *))ixgb_tx_timeout_task, netdev);
	if((err = register_netdev(netdev)))
		goto err_register;
474 /* we're going to reset, so assume we have no link for now */
476 netif_carrier_off(netdev);
477 netif_stop_queue(netdev);
	printk(KERN_INFO "%s: Intel(R) PRO/10GbE Network Connection\n",
	       netdev->name);
481 ixgb_check_options(adapter);
482 /* reset the hardware with the new settings */
492 iounmap(adapter->hw.hw_addr);
496 pci_release_regions(pdev);
501 * ixgb_remove - Device Removal Routine
502 * @pdev: PCI device information struct
504 * ixgb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
506 * Hot-Plug event, or because the driver is going to be removed from
510 static void __devexit
511 ixgb_remove(struct pci_dev *pdev)
513 struct net_device *netdev = pci_get_drvdata(pdev);
514 struct ixgb_adapter *adapter = netdev->priv;
516 unregister_netdev(netdev);
518 iounmap(adapter->hw.hw_addr);
519 pci_release_regions(pdev);
525 * ixgb_sw_init - Initialize general software structures (struct ixgb_adapter)
526 * @adapter: board private structure to initialize
528 * ixgb_sw_init initializes the Adapter private data structure.
529 * Fields are initialized based on PCI device information and
530 * OS network device settings (MTU size).
534 ixgb_sw_init(struct ixgb_adapter *adapter)
536 struct ixgb_hw *hw = &adapter->hw;
537 struct net_device *netdev = adapter->netdev;
538 struct pci_dev *pdev = adapter->pdev;
540 /* PCI config space info */
542 hw->vendor_id = pdev->vendor;
543 hw->device_id = pdev->device;
544 hw->subsystem_vendor_id = pdev->subsystem_vendor;
545 hw->subsystem_id = pdev->subsystem_device;
547 adapter->rx_buffer_len = IXGB_RXBUFFER_2048;
549 hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
551 if((hw->device_id == IXGB_DEVICE_ID_82597EX)
552 ||(hw->device_id == IXGB_DEVICE_ID_82597EX_LR)
553 ||(hw->device_id == IXGB_DEVICE_ID_82597EX_SR))
554 hw->mac_type = ixgb_82597;
	else {
		/* should never have loaded on this device */
		printk(KERN_ERR "ixgb: unsupported device id\n");
	}
	/* enable flow control to be programmed */
	hw->fc.send_xon = 1;
563 atomic_set(&adapter->irq_sem, 1);
564 spin_lock_init(&adapter->tx_lock);
570 * ixgb_open - Called when a network interface is made active
571 * @netdev: network interface device structure
573 * Returns 0 on success, negative value on failure
575 * The open entry point is called when a network interface is made
576 * active by the system (IFF_UP). At this point all resources needed
577 * for transmit and receive operations are allocated, the interrupt
578 * handler is registered with the OS, the watchdog timer is started,
579 * and the stack is notified that the interface is ready.
583 ixgb_open(struct net_device *netdev)
585 struct ixgb_adapter *adapter = netdev->priv;
588 /* allocate transmit descriptors */
590 if((err = ixgb_setup_tx_resources(adapter)))
593 /* allocate receive descriptors */
595 if((err = ixgb_setup_rx_resources(adapter)))
598 if((err = ixgb_up(adapter)))
604 ixgb_free_rx_resources(adapter);
606 ixgb_free_tx_resources(adapter);
614 * ixgb_close - Disables a network interface
615 * @netdev: network interface device structure
617 * Returns 0, this is not allowed to fail
619 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
621 * needs to be disabled. A global MAC reset is issued to stop the
622 * hardware, and all transmit and receive resources are freed.
626 ixgb_close(struct net_device *netdev)
628 struct ixgb_adapter *adapter = netdev->priv;
630 ixgb_down(adapter, TRUE);
632 ixgb_free_tx_resources(adapter);
633 ixgb_free_rx_resources(adapter);
639 * ixgb_setup_tx_resources - allocate Tx resources (Descriptors)
640 * @adapter: board private structure
642 * Return 0 on success, negative on failure
646 ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
648 struct ixgb_desc_ring *txdr = &adapter->tx_ring;
649 struct pci_dev *pdev = adapter->pdev;
652 size = sizeof(struct ixgb_buffer) * txdr->count;
653 txdr->buffer_info = vmalloc(size);
	if(!txdr->buffer_info) {
		return -ENOMEM;
	}
657 memset(txdr->buffer_info, 0, size);
659 /* round up to nearest 4K */
661 txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
662 IXGB_ROUNDUP(txdr->size, 4096);
664 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
	if(!txdr->desc) {
		vfree(txdr->buffer_info);
		return -ENOMEM;
	}
669 memset(txdr->desc, 0, txdr->size);
671 txdr->next_to_use = 0;
672 txdr->next_to_clean = 0;
678 * ixgb_configure_tx - Configure 82597 Transmit Unit after Reset.
679 * @adapter: board private structure
681 * Configure the Tx unit of the MAC after a reset.
685 ixgb_configure_tx(struct ixgb_adapter *adapter)
687 uint64_t tdba = adapter->tx_ring.dma;
688 uint32_t tdlen = adapter->tx_ring.count * sizeof(struct ixgb_tx_desc);
690 struct ixgb_hw *hw = &adapter->hw;
692 /* Setup the Base and Length of the Tx Descriptor Ring
	 * tx_ring.dma can be either a 32 or 64 bit value
	 */
696 IXGB_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
697 IXGB_WRITE_REG(hw, TDBAH, (tdba >> 32));
699 IXGB_WRITE_REG(hw, TDLEN, tdlen);
701 /* Setup the HW Tx Head and Tail descriptor pointers */
703 IXGB_WRITE_REG(hw, TDH, 0);
704 IXGB_WRITE_REG(hw, TDT, 0);
	/* don't set up txdctl, it induces performance problems if configured
	 * incorrectly */
708 /* Set the Tx Interrupt Delay register */
710 IXGB_WRITE_REG(hw, TIDV, adapter->tx_int_delay);
712 /* Program the Transmit Control Register */
714 tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
715 IXGB_WRITE_REG(hw, TCTL, tctl);
717 /* Setup Transmit Descriptor Settings for this adapter */
	adapter->tx_cmd_type =
		IXGB_TX_DESC_TYPE
720 | (adapter->tx_int_delay_enable ? IXGB_TX_DESC_CMD_IDE : 0);
724 * ixgb_setup_rx_resources - allocate Rx resources (Descriptors)
725 * @adapter: board private structure
727 * Returns 0 on success, negative on failure
731 ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
733 struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
734 struct pci_dev *pdev = adapter->pdev;
737 size = sizeof(struct ixgb_buffer) * rxdr->count;
738 rxdr->buffer_info = vmalloc(size);
	if(!rxdr->buffer_info) {
		return -ENOMEM;
	}
742 memset(rxdr->buffer_info, 0, size);
744 /* Round up to nearest 4K */
746 rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
747 IXGB_ROUNDUP(rxdr->size, 4096);
749 rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
	if(!rxdr->desc) {
		vfree(rxdr->buffer_info);
		return -ENOMEM;
	}
755 memset(rxdr->desc, 0, rxdr->size);
757 rxdr->next_to_clean = 0;
758 rxdr->next_to_use = 0;
764 * ixgb_setup_rctl - configure the receive control register
765 * @adapter: Board private structure
769 ixgb_setup_rctl(struct ixgb_adapter *adapter)
773 rctl = IXGB_READ_REG(&adapter->hw, RCTL);
775 rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);
	rctl |=
		IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 |
779 IXGB_RCTL_RXEN | IXGB_RCTL_CFF |
780 (adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);
782 rctl |= IXGB_RCTL_SECRC;
	switch (adapter->rx_buffer_len) {
	case IXGB_RXBUFFER_2048:
	default:
		rctl |= IXGB_RCTL_BSIZE_2048;
		break;
	case IXGB_RXBUFFER_4096:
		rctl |= IXGB_RCTL_BSIZE_4096;
		break;
	case IXGB_RXBUFFER_8192:
		rctl |= IXGB_RCTL_BSIZE_8192;
		break;
	case IXGB_RXBUFFER_16384:
		rctl |= IXGB_RCTL_BSIZE_16384;
		break;
	}
800 IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
804 * ixgb_configure_rx - Configure 82597 Receive Unit after Reset.
805 * @adapter: board private structure
807 * Configure the Rx unit of the MAC after a reset.
811 ixgb_configure_rx(struct ixgb_adapter *adapter)
813 uint64_t rdba = adapter->rx_ring.dma;
814 uint32_t rdlen = adapter->rx_ring.count * sizeof(struct ixgb_rx_desc);
815 struct ixgb_hw *hw = &adapter->hw;
820 /* make sure receives are disabled while setting up the descriptors */
822 rctl = IXGB_READ_REG(hw, RCTL);
823 IXGB_WRITE_REG(hw, RCTL, rctl & ~IXGB_RCTL_RXEN);
825 /* set the Receive Delay Timer Register */
827 IXGB_WRITE_REG(hw, RDTR, adapter->rx_int_delay);
829 /* Setup the Base and Length of the Rx Descriptor Ring */
831 IXGB_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
832 IXGB_WRITE_REG(hw, RDBAH, (rdba >> 32));
834 IXGB_WRITE_REG(hw, RDLEN, rdlen);
836 /* Setup the HW Rx Head and Tail Descriptor Pointers */
837 IXGB_WRITE_REG(hw, RDH, 0);
838 IXGB_WRITE_REG(hw, RDT, 0);
840 /* set up pre-fetching of receive buffers so we get some before we
841 * run out (default hardware behavior is to run out before fetching
842 * more). This sets up to fetch if HTHRESH rx descriptors are avail
843 * and the descriptors in hw cache are below PTHRESH. This avoids
844 * the hardware behavior of fetching <=512 descriptors in a single
	 * burst that pre-empts all other activity, usually causing fifo
	 * overruns. */
847 /* use WTHRESH to burst write 16 descriptors or burst when RXT0 */
848 rxdctl = RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT |
849 RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT |
850 RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT;
851 IXGB_WRITE_REG(hw, RXDCTL, rxdctl);
853 /* Enable Receive Checksum Offload for TCP and UDP */
854 if(adapter->rx_csum == TRUE) {
855 rxcsum = IXGB_READ_REG(hw, RXCSUM);
856 rxcsum |= IXGB_RXCSUM_TUOFL;
857 IXGB_WRITE_REG(hw, RXCSUM, rxcsum);
860 /* Enable Receives */
862 IXGB_WRITE_REG(hw, RCTL, rctl);
866 * ixgb_free_tx_resources - Free Tx Resources
867 * @adapter: board private structure
869 * Free all transmit software resources
873 ixgb_free_tx_resources(struct ixgb_adapter *adapter)
875 struct pci_dev *pdev = adapter->pdev;
877 ixgb_clean_tx_ring(adapter);
879 vfree(adapter->tx_ring.buffer_info);
880 adapter->tx_ring.buffer_info = NULL;
882 pci_free_consistent(pdev, adapter->tx_ring.size,
883 adapter->tx_ring.desc, adapter->tx_ring.dma);
885 adapter->tx_ring.desc = NULL;
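/*
 * ixgb_unmap_and_free_tx_resource - release one Tx buffer_info entry
 * @adapter: board private structure
 * @buffer_info: buffer to free
 *
 * Unmaps the DMA mapping, if any, and frees the attached sk_buff, then
 * clears both fields so the ring slot can be reused.
 */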
889 ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter,
890 struct ixgb_buffer *buffer_info)
892 struct pci_dev *pdev = adapter->pdev;
893 if(buffer_info->dma) {
		pci_unmap_page(pdev,
			       buffer_info->dma,
			       buffer_info->length,
			       PCI_DMA_TODEVICE);
		buffer_info->dma = 0;
	}
900 if(buffer_info->skb) {
901 dev_kfree_skb_any(buffer_info->skb);
902 buffer_info->skb = NULL;
907 * ixgb_clean_tx_ring - Free Tx Buffers
908 * @adapter: board private structure
912 ixgb_clean_tx_ring(struct ixgb_adapter *adapter)
914 struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
915 struct ixgb_buffer *buffer_info;
919 /* Free all the Tx ring sk_buffs */
921 for(i = 0; i < tx_ring->count; i++) {
922 buffer_info = &tx_ring->buffer_info[i];
923 ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
926 size = sizeof(struct ixgb_buffer) * tx_ring->count;
927 memset(tx_ring->buffer_info, 0, size);
929 /* Zero out the descriptor ring */
931 memset(tx_ring->desc, 0, tx_ring->size);
933 tx_ring->next_to_use = 0;
934 tx_ring->next_to_clean = 0;
936 IXGB_WRITE_REG(&adapter->hw, TDH, 0);
937 IXGB_WRITE_REG(&adapter->hw, TDT, 0);
941 * ixgb_free_rx_resources - Free Rx Resources
942 * @adapter: board private structure
944 * Free all receive software resources
948 ixgb_free_rx_resources(struct ixgb_adapter *adapter)
950 struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
951 struct pci_dev *pdev = adapter->pdev;
953 ixgb_clean_rx_ring(adapter);
955 vfree(rx_ring->buffer_info);
956 rx_ring->buffer_info = NULL;
958 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
960 rx_ring->desc = NULL;
964 * ixgb_clean_rx_ring - Free Rx Buffers
965 * @adapter: board private structure
969 ixgb_clean_rx_ring(struct ixgb_adapter *adapter)
971 struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
972 struct ixgb_buffer *buffer_info;
973 struct pci_dev *pdev = adapter->pdev;
977 /* Free all the Rx ring sk_buffs */
979 for(i = 0; i < rx_ring->count; i++) {
980 buffer_info = &rx_ring->buffer_info[i];
981 if(buffer_info->skb) {
			pci_unmap_single(pdev,
					 buffer_info->dma,
					 buffer_info->length,
					 PCI_DMA_FROMDEVICE);
988 dev_kfree_skb(buffer_info->skb);
990 buffer_info->skb = NULL;
994 size = sizeof(struct ixgb_buffer) * rx_ring->count;
995 memset(rx_ring->buffer_info, 0, size);
997 /* Zero out the descriptor ring */
999 memset(rx_ring->desc, 0, rx_ring->size);
1001 rx_ring->next_to_clean = 0;
1002 rx_ring->next_to_use = 0;
1004 IXGB_WRITE_REG(&adapter->hw, RDH, 0);
1005 IXGB_WRITE_REG(&adapter->hw, RDT, 0);
1009 * ixgb_set_mac - Change the Ethernet Address of the NIC
1010 * @netdev: network interface device structure
1011 * @p: pointer to an address structure
1013 * Returns 0 on success, negative on failure
1017 ixgb_set_mac(struct net_device *netdev, void *p)
1019 struct ixgb_adapter *adapter = netdev->priv;
1020 struct sockaddr *addr = p;
1022 if(!is_valid_ether_addr(addr->sa_data))
1023 return -EADDRNOTAVAIL;
1025 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1027 ixgb_rar_set(&adapter->hw, addr->sa_data, 0);
1033 * ixgb_set_multi - Multicast and Promiscuous mode set
1034 * @netdev: network interface device structure
1036 * The set_multi entry point is called whenever the multicast address
1037 * list or the network interface flags are updated. This routine is
1038 * responsible for configuring the hardware for proper multicast,
1039 * promiscuous mode, and all-multi behavior.
1043 ixgb_set_multi(struct net_device *netdev)
1045 struct ixgb_adapter *adapter = netdev->priv;
1046 struct ixgb_hw *hw = &adapter->hw;
1047 struct dev_mc_list *mc_ptr;
1051 /* Check for Promiscuous and All Multicast modes */
1053 rctl = IXGB_READ_REG(hw, RCTL);
1055 if(netdev->flags & IFF_PROMISC) {
1056 rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
1057 } else if(netdev->flags & IFF_ALLMULTI) {
1058 rctl |= IXGB_RCTL_MPE;
1059 rctl &= ~IXGB_RCTL_UPE;
	} else {
		rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE);
	}
1064 if(netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
1065 rctl |= IXGB_RCTL_MPE;
1066 IXGB_WRITE_REG(hw, RCTL, rctl);
	} else {
		uint8_t mta[netdev->mc_count * IXGB_ETH_LENGTH_OF_ADDRESS];
1070 IXGB_WRITE_REG(hw, RCTL, rctl);
1072 for(i = 0, mc_ptr = netdev->mc_list; mc_ptr;
1073 i++, mc_ptr = mc_ptr->next)
1074 memcpy(&mta[i * IXGB_ETH_LENGTH_OF_ADDRESS],
1075 mc_ptr->dmi_addr, IXGB_ETH_LENGTH_OF_ADDRESS);
1077 ixgb_mc_addr_list_update(hw, mta, netdev->mc_count, 0);
1082 * ixgb_watchdog - Timer Call-back
1083 * @data: pointer to netdev cast into an unsigned long
1087 ixgb_watchdog(unsigned long data)
1089 struct ixgb_adapter *adapter = (struct ixgb_adapter *)data;
1090 struct net_device *netdev = adapter->netdev;
1091 struct ixgb_desc_ring *txdr = &adapter->tx_ring;
1093 ixgb_check_for_link(&adapter->hw);
1095 if (ixgb_check_for_bad_link(&adapter->hw)) {
1096 /* force the reset path */
1097 netif_stop_queue(netdev);
1100 if(adapter->hw.link_up) {
1101 if(!netif_carrier_ok(netdev)) {
1102 printk(KERN_INFO "ixgb: %s NIC Link is Up %d Mbps %s\n",
1103 netdev->name, 10000, "Full Duplex");
1104 adapter->link_speed = 10000;
1105 adapter->link_duplex = FULL_DUPLEX;
1106 netif_carrier_on(netdev);
1107 netif_wake_queue(netdev);
		}
	} else {
		if(netif_carrier_ok(netdev)) {
1111 adapter->link_speed = 0;
1112 adapter->link_duplex = 0;
			printk(KERN_INFO
			       "ixgb: %s NIC Link is Down\n",
			       netdev->name);
1116 netif_carrier_off(netdev);
1117 netif_stop_queue(netdev);
1122 ixgb_update_stats(adapter);
1124 if(!netif_carrier_ok(netdev)) {
1125 if(IXGB_DESC_UNUSED(txdr) + 1 < txdr->count) {
1126 /* We've lost link, so the controller stops DMA,
1127 * but we've got queued Tx work that's never going
1128 * to get done, so reset controller to flush Tx.
1129 * (Do the reset outside of interrupt context). */
1130 schedule_work(&adapter->tx_timeout_task);
1134 /* Force detection of hung controller every watchdog period */
1135 adapter->detect_tx_hung = TRUE;
1137 /* generate an interrupt to force clean up of any stragglers */
1138 IXGB_WRITE_REG(&adapter->hw, ICS, IXGB_INT_TXDW);
1140 /* Reset the timer */
1141 mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
1144 #define IXGB_TX_FLAGS_CSUM 0x00000001
1145 #define IXGB_TX_FLAGS_VLAN 0x00000002
1146 #define IXGB_TX_FLAGS_TSO 0x00000004
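/*
 * ixgb_tso - set up a TSO context descriptor, if the skb requires one
 *
 * When skb_shinfo(skb)->tso_size is non-zero, a context descriptor is
 * written with the IP/TCP header offsets, MSS and payload length so the
 * hardware can segment the frame. As used by ixgb_xmit_frame(), a negative
 * return means the cloned header could not be expanded, a positive return
 * means a TSO context was queued, and 0 means no TSO work was needed.
 */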
1149 ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
1152 struct ixgb_context_desc *context_desc;
1154 uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
1155 uint16_t ipcse, tucse, mss;
1158 if(likely(skb_shinfo(skb)->tso_size)) {
1159 if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}
1165 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
1166 mss = skb_shinfo(skb)->tso_size;
1167 skb->nh.iph->tot_len = 0;
1168 skb->nh.iph->check = 0;
		skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
						      skb->nh.iph->daddr,
						      0, IPPROTO_TCP, 0);
1172 ipcss = skb->nh.raw - skb->data;
1173 ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data;
1174 ipcse = skb->h.raw - skb->data - 1;
1175 tucss = skb->h.raw - skb->data;
		tucso = (void *)&(skb->h.th->check) - (void *)skb->data;
		tucse = 0;
1179 i = adapter->tx_ring.next_to_use;
1180 context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
1182 context_desc->ipcss = ipcss;
1183 context_desc->ipcso = ipcso;
1184 context_desc->ipcse = cpu_to_le16(ipcse);
1185 context_desc->tucss = tucss;
1186 context_desc->tucso = tucso;
1187 context_desc->tucse = cpu_to_le16(tucse);
1188 context_desc->mss = cpu_to_le16(mss);
1189 context_desc->hdr_len = hdr_len;
1190 context_desc->status = 0;
1191 context_desc->cmd_type_len = cpu_to_le32(
1192 IXGB_CONTEXT_DESC_TYPE
1193 | IXGB_CONTEXT_DESC_CMD_TSE
1194 | IXGB_CONTEXT_DESC_CMD_IP
1195 | IXGB_CONTEXT_DESC_CMD_TCP
1196 | IXGB_CONTEXT_DESC_CMD_IDE
1197 | (skb->len - (hdr_len)));
1200 if(++i == adapter->tx_ring.count) i = 0;
1201 adapter->tx_ring.next_to_use = i;
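/*
 * ixgb_tx_csum - set up a checksum-offload context descriptor
 *
 * For CHECKSUM_HW skbs, records the checksum start and insert offsets in a
 * context descriptor so the hardware fills in the transport checksum.
 * ixgb_xmit_frame() sets IXGB_TX_FLAGS_CSUM only when this reports that a
 * descriptor was queued.
 */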
1210 static inline boolean_t
1211 ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
1213 struct ixgb_context_desc *context_desc;
1217 if(likely(skb->ip_summed == CHECKSUM_HW)) {
1218 css = skb->h.raw - skb->data;
1219 cso = (skb->h.raw + skb->csum) - skb->data;
1221 i = adapter->tx_ring.next_to_use;
1222 context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
1224 context_desc->tucss = css;
1225 context_desc->tucso = cso;
1226 context_desc->tucse = 0;
1227 /* zero out any previously existing data in one instruction */
1228 *(uint32_t *)&(context_desc->ipcss) = 0;
1229 context_desc->status = 0;
1230 context_desc->hdr_len = 0;
1231 context_desc->mss = 0;
1232 context_desc->cmd_type_len =
1233 cpu_to_le32(IXGB_CONTEXT_DESC_TYPE
1234 | IXGB_TX_DESC_CMD_IDE);
1236 if(++i == adapter->tx_ring.count) i = 0;
1237 adapter->tx_ring.next_to_use = i;
1245 #define IXGB_MAX_TXD_PWR 14
1246 #define IXGB_MAX_DATA_PER_TXD (1<<IXGB_MAX_TXD_PWR)
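/*
 * ixgb_tx_map - map skb data for DMA and fill the buffer_info array
 *
 * Walks the linear part of the skb and each page fragment, mapping at most
 * IXGB_MAX_JUMBO_FRAME_SIZE bytes per ring slot, and records the mapping,
 * length and timestamp in tx_ring->buffer_info. The caller hands the number
 * of slots consumed to ixgb_tx_queue().
 */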
static inline int
ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
	    unsigned int first)
{
1252 struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
	struct ixgb_buffer *buffer_info;
	int len = skb->len;
	unsigned int offset = 0, size, count = 0, i;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int f;

	len -= skb->data_len;
1262 i = tx_ring->next_to_use;
1265 buffer_info = &tx_ring->buffer_info[i];
1266 size = min(len, IXGB_MAX_JUMBO_FRAME_SIZE);
1267 buffer_info->length = size;
		buffer_info->dma =
			pci_map_single(adapter->pdev,
				       skb->data + offset,
				       size,
				       PCI_DMA_TODEVICE);
1273 buffer_info->time_stamp = jiffies;
1278 if(++i == tx_ring->count) i = 0;
1281 for(f = 0; f < nr_frags; f++) {
1282 struct skb_frag_struct *frag;
1284 frag = &skb_shinfo(skb)->frags[f];
1289 buffer_info = &tx_ring->buffer_info[i];
1290 size = min(len, IXGB_MAX_JUMBO_FRAME_SIZE);
1291 buffer_info->length = size;
			buffer_info->dma =
				pci_map_page(adapter->pdev,
					     frag->page,
					     frag->page_offset + offset,
					     size,
					     PCI_DMA_TODEVICE);
1298 buffer_info->time_stamp = jiffies;
1303 if(++i == tx_ring->count) i = 0;
1306 i = (i == 0) ? tx_ring->count - 1 : i - 1;
1307 tx_ring->buffer_info[i].skb = skb;
1308 tx_ring->buffer_info[first].next_to_watch = i;
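/*
 * ixgb_tx_queue - write the descriptors prepared by ixgb_tx_map to the ring
 *
 * Converts each previously mapped buffer_info entry into a hardware Tx
 * descriptor, applying the TSO/checksum/VLAN options carried in tx_flags,
 * marks the last descriptor EOP|RS, and moves the TDT tail pointer so the
 * hardware starts fetching.
 */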
1314 ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags)
1316 struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
1317 struct ixgb_tx_desc *tx_desc = NULL;
1318 struct ixgb_buffer *buffer_info;
	uint32_t cmd_type_len = adapter->tx_cmd_type;
	uint8_t status = 0;
	uint8_t popts = 0;
	unsigned int i;
1324 if(tx_flags & IXGB_TX_FLAGS_TSO) {
1325 cmd_type_len |= IXGB_TX_DESC_CMD_TSE;
1326 popts |= (IXGB_TX_DESC_POPTS_IXSM | IXGB_TX_DESC_POPTS_TXSM);
1329 if(tx_flags & IXGB_TX_FLAGS_CSUM)
1330 popts |= IXGB_TX_DESC_POPTS_TXSM;
1332 if(tx_flags & IXGB_TX_FLAGS_VLAN) {
1333 cmd_type_len |= IXGB_TX_DESC_CMD_VLE;
1336 i = tx_ring->next_to_use;
	while(count--) {
		buffer_info = &tx_ring->buffer_info[i];
1340 tx_desc = IXGB_TX_DESC(*tx_ring, i);
1341 tx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
1342 tx_desc->cmd_type_len =
1343 cpu_to_le32(cmd_type_len | buffer_info->length);
1344 tx_desc->status = status;
1345 tx_desc->popts = popts;
1346 tx_desc->vlan = cpu_to_le16(vlan_id);
1348 if(++i == tx_ring->count) i = 0;
1351 tx_desc->cmd_type_len |= cpu_to_le32(IXGB_TX_DESC_CMD_EOP
1352 | IXGB_TX_DESC_CMD_RS );
1354 /* Force memory writes to complete before letting h/w
1355 * know there are new descriptors to fetch. (Only
1356 * applicable for weak-ordered memory model archs,
	 * such as IA-64). */
	wmb();
1360 tx_ring->next_to_use = i;
1361 IXGB_WRITE_REG(&adapter->hw, TDT, i);
1364 /* Tx Descriptors needed, worst case */
1365 #define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \
1366 (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
1367 #define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) + \
1368 MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1
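/* Worked example of the worst case above, assuming 4 KB pages and the usual
 * MAX_SKB_FRAGS of 18 for that page size (both values depend on the
 * architecture and kernel configuration):
 *
 *	TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) = 1
 *	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) = 18 * 1 = 18
 *	DESC_NEEDED = 1 + 18 + 1 = 20 descriptors
 *
 * ixgb_xmit_frame() stops the queue when fewer than DESC_NEEDED slots are
 * free, so a maximally fragmented frame can always be queued.
 */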
1371 ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
	struct ixgb_adapter *adapter = netdev->priv;
	unsigned int first;
	unsigned int tx_flags = 0;
	unsigned long flags;
	int vlan_id = 0;
	int tso;
	if(skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return 0;
	}
1385 spin_lock_irqsave(&adapter->tx_lock, flags);
1386 if(unlikely(IXGB_DESC_UNUSED(&adapter->tx_ring) < DESC_NEEDED)) {
1387 netif_stop_queue(netdev);
		spin_unlock_irqrestore(&adapter->tx_lock, flags);
		return 1;
	}
1391 spin_unlock_irqrestore(&adapter->tx_lock, flags);
1393 if(adapter->vlgrp && vlan_tx_tag_present(skb)) {
1394 tx_flags |= IXGB_TX_FLAGS_VLAN;
1395 vlan_id = vlan_tx_tag_get(skb);
1398 first = adapter->tx_ring.next_to_use;
1400 tso = ixgb_tso(adapter, skb);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (likely(tso))
		tx_flags |= IXGB_TX_FLAGS_TSO;
1408 else if(ixgb_tx_csum(adapter, skb))
1409 tx_flags |= IXGB_TX_FLAGS_CSUM;
	ixgb_tx_queue(adapter, ixgb_tx_map(adapter, skb, first), vlan_id,
		      tx_flags);
1414 netdev->trans_start = jiffies;
1420 * ixgb_tx_timeout - Respond to a Tx Hang
1421 * @netdev: network interface device structure
1425 ixgb_tx_timeout(struct net_device *netdev)
1427 struct ixgb_adapter *adapter = netdev->priv;
1429 /* Do the reset outside of interrupt context */
1430 schedule_work(&adapter->tx_timeout_task);
1434 ixgb_tx_timeout_task(struct net_device *netdev)
1436 struct ixgb_adapter *adapter = netdev->priv;
	ixgb_down(adapter, TRUE);
	ixgb_up(adapter);
}
1443 * ixgb_get_stats - Get System Network Statistics
1444 * @netdev: network interface device structure
1446 * Returns the address of the device statistics structure.
1447 * The statistics are actually updated from the timer callback.
1450 static struct net_device_stats *
1451 ixgb_get_stats(struct net_device *netdev)
1453 struct ixgb_adapter *adapter = netdev->priv;
1455 return &adapter->net_stats;
1459 * ixgb_change_mtu - Change the Maximum Transfer Unit
1460 * @netdev: network interface device structure
1461 * @new_mtu: new value for maximum frame size
1463 * Returns 0 on success, negative on failure
1467 ixgb_change_mtu(struct net_device *netdev, int new_mtu)
1469 struct ixgb_adapter *adapter = netdev->priv;
1470 int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
1471 int old_max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
1474 if((max_frame < IXGB_MIN_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH)
1475 || (max_frame > IXGB_MAX_JUMBO_FRAME_SIZE + ENET_FCS_LENGTH)) {
		IXGB_ERR("Invalid MTU setting\n");
		return -EINVAL;
	}
1480 if((max_frame <= IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH)
1481 || (max_frame <= IXGB_RXBUFFER_2048)) {
1482 adapter->rx_buffer_len = IXGB_RXBUFFER_2048;
1484 } else if(max_frame <= IXGB_RXBUFFER_4096) {
1485 adapter->rx_buffer_len = IXGB_RXBUFFER_4096;
1487 } else if(max_frame <= IXGB_RXBUFFER_8192) {
1488 adapter->rx_buffer_len = IXGB_RXBUFFER_8192;
	} else {
		adapter->rx_buffer_len = IXGB_RXBUFFER_16384;
	}
1494 netdev->mtu = new_mtu;
1496 if(old_max_frame != max_frame && netif_running(netdev)) {
		ixgb_down(adapter, TRUE);
		ixgb_up(adapter);
	}

	return 0;
}
1506 * ixgb_update_stats - Update the board statistics counters.
1507 * @adapter: board private structure
1511 ixgb_update_stats(struct ixgb_adapter *adapter)
1513 struct net_device *netdev = adapter->netdev;
1515 if((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) ||
1516 (netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) {
1517 u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL);
1518 u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL);
1519 u32 bcast_h = IXGB_READ_REG(&adapter->hw, BPRCH);
1520 u64 bcast = ((u64)bcast_h << 32) | bcast_l;
1522 multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32);
		/* fix up multicast stats by removing broadcasts */
		multi -= bcast;
1526 adapter->stats.mprcl += (multi & 0xFFFFFFFF);
1527 adapter->stats.mprch += (multi >> 32);
1528 adapter->stats.bprcl += bcast_l;
1529 adapter->stats.bprch += bcast_h;
	} else {
		adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
1532 adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
1533 adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
1534 adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
1536 adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
1537 adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
1538 adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
1539 adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
1540 adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
1541 adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
1542 adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
1543 adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH);
1544 adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL);
1545 adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH);
1546 adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL);
1547 adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH);
1548 adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL);
1549 adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH);
1550 adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC);
1551 adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC);
1552 adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC);
1553 adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC);
1554 adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS);
1555 adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC);
1556 adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC);
1557 adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC);
1558 adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL);
1559 adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH);
1560 adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL);
1561 adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH);
1562 adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL);
1563 adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH);
1564 adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL);
1565 adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH);
1566 adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL);
1567 adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH);
1568 adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL);
1569 adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH);
1570 adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL);
1571 adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH);
1572 adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL);
1573 adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH);
1574 adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL);
1575 adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH);
1576 adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC);
1577 adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C);
1578 adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC);
1579 adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC);
1580 adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC);
1581 adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC);
1582 adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC);
1583 adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC);
1584 adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC);
1585 adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC);
1586 adapter->stats.mcftc += IXGB_READ_REG(&adapter->hw, MCFTC);
1587 adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC);
1588 adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC);
1589 adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC);
1590 adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC);
1591 adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC);
1593 /* Fill out the OS statistics structure */
1595 adapter->net_stats.rx_packets = adapter->stats.gprcl;
1596 adapter->net_stats.tx_packets = adapter->stats.gptcl;
1597 adapter->net_stats.rx_bytes = adapter->stats.gorcl;
1598 adapter->net_stats.tx_bytes = adapter->stats.gotcl;
1599 adapter->net_stats.multicast = adapter->stats.mprcl;
1600 adapter->net_stats.collisions = 0;
1602 /* ignore RLEC as it reports errors for padded (<64bytes) frames
1603 * with a length in the type/len field */
1604 adapter->net_stats.rx_errors =
1605 /* adapter->stats.rnbc + */ adapter->stats.crcerrs +
1606 adapter->stats.ruc +
1607 adapter->stats.roc /*+ adapter->stats.rlec */ +
1608 adapter->stats.icbc +
1609 adapter->stats.ecbc + adapter->stats.mpc;
1611 adapter->net_stats.rx_dropped = adapter->stats.mpc;
	/* see above
	 * adapter->net_stats.rx_length_errors = adapter->stats.rlec;
	 * adapter->net_stats.rx_errors += adapter->net_stats.rx_length_errors;
	 */
1617 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
1618 adapter->net_stats.rx_fifo_errors = adapter->stats.mpc;
1619 adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
1620 adapter->net_stats.rx_over_errors = adapter->stats.mpc;
1622 adapter->net_stats.tx_errors = 0;
1623 adapter->net_stats.rx_frame_errors = 0;
1624 adapter->net_stats.tx_aborted_errors = 0;
1625 adapter->net_stats.tx_carrier_errors = 0;
1626 adapter->net_stats.tx_fifo_errors = 0;
1627 adapter->net_stats.tx_heartbeat_errors = 0;
1628 adapter->net_stats.tx_window_errors = 0;
1631 #define IXGB_MAX_INTR 10
1633 * ixgb_intr - Interrupt Handler
1634 * @irq: interrupt number
1635 * @data: pointer to a network interface device structure
1636 * @pt_regs: CPU registers structure
1640 ixgb_intr(int irq, void *data, struct pt_regs *regs)
1642 struct net_device *netdev = data;
1643 struct ixgb_adapter *adapter = netdev->priv;
1644 struct ixgb_hw *hw = &adapter->hw;
1645 uint32_t icr = IXGB_READ_REG(hw, ICR);
#ifndef CONFIG_IXGB_NAPI
	unsigned int i;
#endif

	if(unlikely(!icr))
		return IRQ_NONE;  /* Not our interrupt */
1653 if(unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC))) {
1654 mod_timer(&adapter->watchdog_timer, jiffies);
1657 #ifdef CONFIG_IXGB_NAPI
1658 if(netif_rx_schedule_prep(netdev)) {
1660 /* Disable interrupts and register for poll. The flush
1661 of the posted write is intentionally left out.
1664 atomic_inc(&adapter->irq_sem);
1665 IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
		__netif_rx_schedule(netdev);
	}
#else
1669 /* yes, that is actually a & and it is meant to make sure that
1670 * every pass through this for loop checks both receive and
1671 * transmit queues for completed descriptors, intended to
1672 * avoid starvation issues and assist tx/rx fairness. */
1673 for(i = 0; i < IXGB_MAX_INTR; i++)
1674 if(!ixgb_clean_rx_irq(adapter) &
		   !ixgb_clean_tx_irq(adapter))
			break;
#endif

	return IRQ_HANDLED;
}
1681 #ifdef CONFIG_IXGB_NAPI
1683 * ixgb_clean - NAPI Rx polling callback
1684 * @adapter: board private structure
1688 ixgb_clean(struct net_device *netdev, int *budget)
1690 struct ixgb_adapter *adapter = netdev->priv;
	int work_to_do = min(*budget, netdev->quota);
	int tx_cleaned;
	int work_done = 0;
1695 tx_cleaned = ixgb_clean_tx_irq(adapter);
1696 ixgb_clean_rx_irq(adapter, &work_done, work_to_do);
1698 *budget -= work_done;
1699 netdev->quota -= work_done;
1701 /* if no Tx and not enough Rx work done, exit the polling mode */
1702 if((!tx_cleaned && (work_done == 0)) || !netif_running(netdev)) {
1703 netif_rx_complete(netdev);
		ixgb_irq_enable(adapter);
		return 0;
	}

	return 1;
}
1713 * ixgb_clean_tx_irq - Reclaim resources after transmit completes
1714 * @adapter: board private structure
1718 ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
1720 struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
1721 struct net_device *netdev = adapter->netdev;
1722 struct ixgb_tx_desc *tx_desc, *eop_desc;
1723 struct ixgb_buffer *buffer_info;
1724 unsigned int i, eop;
1725 boolean_t cleaned = FALSE;
1727 i = tx_ring->next_to_clean;
1728 eop = tx_ring->buffer_info[i].next_to_watch;
1729 eop_desc = IXGB_TX_DESC(*tx_ring, eop);
1731 while(eop_desc->status & IXGB_TX_DESC_STATUS_DD) {
1733 for(cleaned = FALSE; !cleaned; ) {
1734 tx_desc = IXGB_TX_DESC(*tx_ring, i);
1735 buffer_info = &tx_ring->buffer_info[i];
			if (tx_desc->popts
			    & (IXGB_TX_DESC_POPTS_TXSM |
1739 IXGB_TX_DESC_POPTS_IXSM))
1740 adapter->hw_csum_tx_good++;
1742 ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
1744 *(uint32_t *)&(tx_desc->status) = 0;
1746 cleaned = (i == eop);
1747 if(++i == tx_ring->count) i = 0;
1750 eop = tx_ring->buffer_info[i].next_to_watch;
1751 eop_desc = IXGB_TX_DESC(*tx_ring, eop);
1754 tx_ring->next_to_clean = i;
1756 spin_lock(&adapter->tx_lock);
1757 if(cleaned && netif_queue_stopped(netdev) && netif_carrier_ok(netdev) &&
1758 (IXGB_DESC_UNUSED(tx_ring) > IXGB_TX_QUEUE_WAKE)) {
1760 netif_wake_queue(netdev);
1762 spin_unlock(&adapter->tx_lock);
1764 if(adapter->detect_tx_hung) {
1765 /* detect a transmit hang in hardware, this serializes the
1766 * check with the clearing of time_stamp and movement of i */
1767 adapter->detect_tx_hung = FALSE;
1768 if(tx_ring->buffer_info[i].dma &&
1769 time_after(jiffies, tx_ring->buffer_info[i].time_stamp + HZ)
		   && !(IXGB_READ_REG(&adapter->hw, STATUS) &
			IXGB_STATUS_TXOFF))
			netif_stop_queue(netdev);
	}

	return cleaned;
}
1779 * ixgb_rx_checksum - Receive Checksum Offload for 82597.
1780 * @adapter: board private structure
1781 * @rx_desc: receive descriptor
1782 * @sk_buff: socket buffer with received data
1786 ixgb_rx_checksum(struct ixgb_adapter *adapter,
1787 struct ixgb_rx_desc *rx_desc,
1788 struct sk_buff *skb)
1790 /* Ignore Checksum bit is set OR
	 * TCP Checksum has not been calculated
	 */
1793 if((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) ||
1794 (!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}
1799 /* At this point we know the hardware did the TCP checksum */
1800 /* now look at the TCP checksum error bit */
1801 if(rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) {
1802 /* let the stack verify checksum errors */
1803 skb->ip_summed = CHECKSUM_NONE;
1804 adapter->hw_csum_rx_error++;
1806 /* TCP checksum is good */
1807 skb->ip_summed = CHECKSUM_UNNECESSARY;
1808 adapter->hw_csum_rx_good++;
1813 * ixgb_clean_rx_irq - Send received data up the network stack,
1814 * @adapter: board private structure
1818 #ifdef CONFIG_IXGB_NAPI
1819 ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
#else
ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
#endif
{
1824 struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
1825 struct net_device *netdev = adapter->netdev;
1826 struct pci_dev *pdev = adapter->pdev;
1827 struct ixgb_rx_desc *rx_desc, *next_rxd;
1828 struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer;
1831 boolean_t cleaned = FALSE;
1833 i = rx_ring->next_to_clean;
1834 rx_desc = IXGB_RX_DESC(*rx_ring, i);
1835 buffer_info = &rx_ring->buffer_info[i];
1837 while(rx_desc->status & IXGB_RX_DESC_STATUS_DD) {
1838 struct sk_buff *skb, *next_skb;
1841 #ifdef CONFIG_IXGB_NAPI
		if(*work_done >= work_to_do)
			break;

		(*work_done)++;
#endif
1847 status = rx_desc->status;
1848 skb = buffer_info->skb;
1850 prefetch(skb->data);
1852 if(++i == rx_ring->count) i = 0;
1853 next_rxd = IXGB_RX_DESC(*rx_ring, i);
1856 if((j = i + 1) == rx_ring->count) j = 0;
1857 next2_buffer = &rx_ring->buffer_info[j];
1858 prefetch(next2_buffer);
1860 next_buffer = &rx_ring->buffer_info[i];
1861 next_skb = next_buffer->skb;
		pci_unmap_single(pdev,
				 buffer_info->dma,
1868 buffer_info->length,
1869 PCI_DMA_FROMDEVICE);
1871 length = le16_to_cpu(rx_desc->length);
1873 if(unlikely(!(status & IXGB_RX_DESC_STATUS_EOP))) {
1875 /* All receives must fit into a single buffer */
1877 IXGB_DBG("Receive packet consumed multiple buffers "
1878 "length<%x>\n", length);
			dev_kfree_skb_irq(skb);
			goto rxdesc_done;
		}
1884 if (unlikely(rx_desc->errors
1885 & (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE
1886 | IXGB_RX_DESC_ERRORS_P |
1887 IXGB_RX_DESC_ERRORS_RXE))) {
			dev_kfree_skb_irq(skb);
			goto rxdesc_done;
		}
1894 skb_put(skb, length);
1896 /* Receive Checksum Offload */
1897 ixgb_rx_checksum(adapter, rx_desc, skb);
1899 skb->protocol = eth_type_trans(skb, netdev);
1900 #ifdef CONFIG_IXGB_NAPI
1901 if(adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) {
1902 vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
1903 le16_to_cpu(rx_desc->special) &
1904 IXGB_RX_DESC_SPECIAL_VLAN_MASK);
		} else {
			netif_receive_skb(skb);
		}
1908 #else /* CONFIG_IXGB_NAPI */
1909 if(adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) {
1910 vlan_hwaccel_rx(skb, adapter->vlgrp,
1911 le16_to_cpu(rx_desc->special) &
					IXGB_RX_DESC_SPECIAL_VLAN_MASK);
		} else {
			netif_rx(skb);
		}
1916 #endif /* CONFIG_IXGB_NAPI */
1917 netdev->last_rx = jiffies;
rxdesc_done:
		/* clean up descriptor, might be written over by hw */
1921 rx_desc->status = 0;
1922 buffer_info->skb = NULL;
		/* use prefetched values */
		rx_desc = next_rxd;
1926 buffer_info = next_buffer;
1929 rx_ring->next_to_clean = i;
1931 ixgb_alloc_rx_buffers(adapter);
1937 * ixgb_alloc_rx_buffers - Replace used receive buffers
1938 * @adapter: address of board private structure
1942 ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
1944 struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
1945 struct net_device *netdev = adapter->netdev;
1946 struct pci_dev *pdev = adapter->pdev;
1947 struct ixgb_rx_desc *rx_desc;
1948 struct ixgb_buffer *buffer_info;
1949 struct sk_buff *skb;
	int num_group_tail_writes;
	long cleancount;
1954 i = rx_ring->next_to_use;
1955 buffer_info = &rx_ring->buffer_info[i];
1956 cleancount = IXGB_DESC_UNUSED(rx_ring);
1958 num_group_tail_writes = IXGB_RX_BUFFER_WRITE;
1960 /* leave three descriptors unused */
1961 while(--cleancount > 2) {
1962 rx_desc = IXGB_RX_DESC(*rx_ring, i);
1964 skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN);
1966 if(unlikely(!skb)) {
			/* Better luck next round */
			break;
		}
1971 /* Make buffer alignment 2 beyond a 16 byte boundary
1972 * this will result in a 16 byte aligned IP header after
		 * the 14 byte MAC header is removed
		 */
1975 skb_reserve(skb, NET_IP_ALIGN);
1979 buffer_info->skb = skb;
1980 buffer_info->length = adapter->rx_buffer_len;
		buffer_info->dma =
			pci_map_single(pdev,
				       skb->data,
1984 adapter->rx_buffer_len,
1985 PCI_DMA_FROMDEVICE);
1987 rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
1988 /* guarantee DD bit not set now before h/w gets descriptor
		 * this is the rest of the workaround for h/w double
		 * writeback. */
1991 rx_desc->status = 0;
1993 if((i & ~(num_group_tail_writes- 1)) == i) {
1994 /* Force memory writes to complete before letting h/w
1995 * know there are new descriptors to fetch. (Only
1996 * applicable for weak-ordered memory model archs,
			 * such as IA-64). */
			wmb();
2000 IXGB_WRITE_REG(&adapter->hw, RDT, i);
2003 if(++i == rx_ring->count) i = 0;
2004 buffer_info = &rx_ring->buffer_info[i];
2007 rx_ring->next_to_use = i;
2011 * ixgb_vlan_rx_register - enables or disables vlan tagging/stripping.
2013 * @param netdev network interface device structure
2014 * @param grp indicates to enable or disable tagging/stripping
2017 ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
2019 struct ixgb_adapter *adapter = netdev->priv;
2020 uint32_t ctrl, rctl;
2022 ixgb_irq_disable(adapter);
2023 adapter->vlgrp = grp;
	if(grp) {
		/* enable VLAN tag insert/strip */
2027 ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
2028 ctrl |= IXGB_CTRL0_VME;
2029 IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
2031 /* enable VLAN receive filtering */
2033 rctl = IXGB_READ_REG(&adapter->hw, RCTL);
2034 rctl |= IXGB_RCTL_VFE;
2035 rctl &= ~IXGB_RCTL_CFIEN;
2036 IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
	} else {
		/* disable VLAN tag insert/strip */
2040 ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
2041 ctrl &= ~IXGB_CTRL0_VME;
2042 IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
2044 /* disable VLAN filtering */
2046 rctl = IXGB_READ_REG(&adapter->hw, RCTL);
2047 rctl &= ~IXGB_RCTL_VFE;
2048 IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
2051 ixgb_irq_enable(adapter);
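/* The VLAN filter table (VFTA) is a bit array: bits 5..11 of the VLAN id
 * select a 32-bit register and bits 0..4 select the bit within it. For
 * example, vid 100 (0x64) lands in register index (100 >> 5) & 0x7F = 3,
 * bit 100 & 0x1F = 4, which is what the two helpers below compute. */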
2055 ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
2057 struct ixgb_adapter *adapter = netdev->priv;
2058 uint32_t vfta, index;
2060 /* add VID to filter table */
2062 index = (vid >> 5) & 0x7F;
2063 vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
2064 vfta |= (1 << (vid & 0x1F));
2065 ixgb_write_vfta(&adapter->hw, index, vfta);
2069 ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
2071 struct ixgb_adapter *adapter = netdev->priv;
2072 uint32_t vfta, index;
2074 ixgb_irq_disable(adapter);
	if(adapter->vlgrp)
		adapter->vlgrp->vlan_devices[vid] = NULL;
2079 ixgb_irq_enable(adapter);
2081 /* remove VID from filter table*/
2083 index = (vid >> 5) & 0x7F;
2084 vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
2085 vfta &= ~(1 << (vid & 0x1F));
2086 ixgb_write_vfta(&adapter->hw, index, vfta);
2090 ixgb_restore_vlan(struct ixgb_adapter *adapter)
2092 ixgb_vlan_rx_register(adapter->netdev, adapter->vlgrp);
	if(adapter->vlgrp) {
		uint16_t vid;
		for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if(!adapter->vlgrp->vlan_devices[vid])
				continue;
			ixgb_vlan_rx_add_vid(adapter->netdev, vid);
		}
	}
}
2104 #ifdef CONFIG_NET_POLL_CONTROLLER
2106 * Polling 'interrupt' - used by things like netconsole to send skbs
2107 * without having to re-enable interrupts. It's not called while
2108 * the interrupt routine is executing.
2111 static void ixgb_netpoll(struct net_device *dev)
2113 struct ixgb_adapter *adapter = dev->priv;
2115 disable_irq(adapter->pdev->irq);
2116 ixgb_intr(adapter->pdev->irq, dev, NULL);
	enable_irq(adapter->pdev->irq);
}
#endif

/* ixgb_main.c */