/*******************************************************************************

  Copyright(c) 1999 - 2003 Intel Corporation. All rights reserved.

  This program is free software; you can redistribute it and/or modify it
  under the terms of the GNU General Public License as published by the Free
  Software Foundation; either version 2 of the License, or (at your option)
  any later version.

  This program is distributed in the hope that it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc., 59
  Temple Place - Suite 330, Boston, MA 02111-1307, USA.

  The full GNU General Public License is included in this distribution in the
  file called LICENSE.

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#include "ixgb.h"

char ixgb_driver_name[] = "ixgb";
char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";
char ixgb_driver_version[] = "1.0.47-k1jg";
char ixgb_copyright[] = "Copyright (c) 2001-2003 Intel Corporation.";
/* ixgb_pci_tbl - PCI Device ID Table
 *
 * For selecting devices to load on; the private driver_data field (last one)
 * stores an index into ixgb_strings.
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, String Index }
 */
static struct pci_device_id ixgb_pci_tbl[] = {
	/* Intel(R) PRO/10GbE Network Connection */
	{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX,
	 INTEL_SUBVENDOR_ID, IXGB_SUBDEVICE_ID_A11F, 0, 0, 0},
	{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX,
	 INTEL_SUBVENDOR_ID, IXGB_SUBDEVICE_ID_A01F, 0, 0, 0},
	{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, ixgb_pci_tbl);
static char *ixgb_strings[] = {
	"Intel(R) PRO/10GbE Network Connection"
};
/* Local Function Prototypes */

int ixgb_up(struct ixgb_adapter *adapter);
void ixgb_down(struct ixgb_adapter *adapter, boolean_t kill_watchdog);
void ixgb_reset(struct ixgb_adapter *adapter);

static int ixgb_init_module(void);
static void ixgb_exit_module(void);
static int ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void ixgb_remove(struct pci_dev *pdev);
static void ixgb_sw_init(struct ixgb_adapter *adapter);
static int ixgb_open(struct net_device *netdev);
static int ixgb_close(struct net_device *netdev);
static int ixgb_setup_tx_resources(struct ixgb_adapter *adapter);
static int ixgb_setup_rx_resources(struct ixgb_adapter *adapter);
static void ixgb_configure_tx(struct ixgb_adapter *adapter);
static void ixgb_configure_rx(struct ixgb_adapter *adapter);
static void ixgb_setup_rctl(struct ixgb_adapter *adapter);
static void ixgb_clean_tx_ring(struct ixgb_adapter *adapter);
static void ixgb_clean_rx_ring(struct ixgb_adapter *adapter);
static void ixgb_free_tx_resources(struct ixgb_adapter *adapter);
static void ixgb_free_rx_resources(struct ixgb_adapter *adapter);
static void ixgb_set_multi(struct net_device *netdev);
static void ixgb_watchdog(unsigned long data);
static inline boolean_t ixgb_tso(struct ixgb_adapter *adapter,
				 struct sk_buff *skb);
static int ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
static void ixgb_tx_timeout(struct net_device *netdev);
static void ixgb_tx_timeout_task(struct net_device *netdev);
static void ixgb_vlan_rx_register(struct net_device *netdev,
				  struct vlan_group *grp);
static void ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
static struct net_device_stats *ixgb_get_stats(struct net_device *netdev);
static int ixgb_change_mtu(struct net_device *netdev, int new_mtu);
static int ixgb_set_mac(struct net_device *netdev, void *p);
static void ixgb_update_stats(struct ixgb_adapter *adapter);
static inline void ixgb_irq_disable(struct ixgb_adapter *adapter);
static inline void ixgb_irq_enable(struct ixgb_adapter *adapter);
static irqreturn_t ixgb_intr(int irq, void *data, struct pt_regs *regs);
static void ixgb_clean_tx_irq(struct ixgb_adapter *adapter);
#ifdef CONFIG_IXGB_NAPI
static int ixgb_poll(struct net_device *netdev, int *budget);
#endif
static void ixgb_clean_rx_irq(struct ixgb_adapter *adapter);
static void ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter);
static int ixgb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
static inline void ixgb_rx_checksum(struct ixgb_adapter *adapter,
				    struct ixgb_rx_desc *rx_desc,
				    struct sk_buff *skb);
static int ixgb_notify_reboot(struct notifier_block *, unsigned long event,
			      void *p);
static int ixgb_suspend(struct pci_dev *pdev, u32 state);

struct notifier_block ixgb_notifier_reboot = {
	.notifier_call = ixgb_notify_reboot,
	.next = NULL,
	.priority = 0
};

/* Exported from other modules */

extern void ixgb_check_options(struct ixgb_adapter *adapter);
extern int ixgb_ethtool_ioctl(struct net_device *netdev, struct ifreq *ifr);

static struct pci_driver ixgb_driver = {
	.name = ixgb_driver_name,
	.id_table = ixgb_pci_tbl,
	.probe = ixgb_probe,
	.remove = __devexit_p(ixgb_remove),
	/* Power Management Hooks */
};

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/10GbE Network Driver");
MODULE_LICENSE("GPL");
/* some defines for controlling descriptor fetches in h/w */
#define RXDCTL_PTHRESH_DEFAULT 128	/* chip considers prefetch below this */
#define RXDCTL_HTHRESH_DEFAULT 16	/* chip will only prefetch if tail is
					   pushed this many descriptors from head */
#define RXDCTL_WTHRESH_DEFAULT 16	/* chip writes back at this many or RXT0 */
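
/* the three thresholds above are shifted into their RXDCTL fields in
 * ixgb_configure_rx() below:
 * PTHRESH | HTHRESH << HTHRESH_SHIFT | WTHRESH << WTHRESH_SHIFT */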
/**
 * ixgb_init_module - Driver Registration Routine.
 *
 * ixgb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/

static int
ixgb_init_module(void)
{
	int ret;

	IXGB_DBG("ixgb_init_module\n");

	printk(KERN_INFO "%s - version %s\n", ixgb_driver_string,
	       ixgb_driver_version);
	printk(KERN_INFO "%s\n", ixgb_copyright);
#ifdef CONFIG_IXGB_NAPI
	printk(KERN_INFO "NAPI Enabled\n");
#endif

	ret = pci_module_init(&ixgb_driver);

	register_reboot_notifier(&ixgb_notifier_reboot);

	return ret;
}

module_init(ixgb_init_module);
/**
 * ixgb_exit_module - Driver Exit Cleanup Routine.
 *
 * ixgb_exit_module is called just before the driver is removed
 * from memory.
 **/

static void
ixgb_exit_module(void)
{
	IXGB_DBG("ixgb_exit_module\n");

	unregister_reboot_notifier(&ixgb_notifier_reboot);
	pci_unregister_driver(&ixgb_driver);
}

module_exit(ixgb_exit_module);
/**
 * ixgb_up - Driver ifconfig UP routine.
 *
 * ixgb_up is called to initialize and bring online an interface.
 * @param adapter board private structure
 **/

int
ixgb_up(struct ixgb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	IXGB_DBG("ixgb_up\n");

	if (request_irq(netdev->irq, &ixgb_intr, SA_SHIRQ | SA_SAMPLE_RANDOM,
			netdev->name, netdev)) {
		IXGB_DBG("%s: request_irq failed\n", netdev->name);
		return -1;
	}

	/* disable interrupts and get the hardware into a known state */
	IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff);

	/* hardware was reset in probe/down, we need to reload some things */
	ixgb_set_multi(netdev);
	ixgb_restore_vlan(adapter);

	ixgb_configure_tx(adapter);
	ixgb_setup_rctl(adapter);
	ixgb_configure_rx(adapter);
	ixgb_alloc_rx_buffers(adapter);

	mod_timer(&adapter->watchdog_timer, jiffies);
	ixgb_irq_enable(adapter);

	IXGB_DBG("ixgb_up: RAH_0 is <%x>\n", IXGB_READ_REG(&adapter->hw, RAH));
	IXGB_DBG("ixgb_up: RDBAL is <%x>\n",
		 IXGB_READ_REG(&adapter->hw, RDBAL));

	return 0;
}
/**
 * ixgb_down - Driver ifconfig DOWN routine.
 *
 * ixgb_down is called to uninitialize and take offline an interface.
 * @param adapter board private structure
 * @param kill_watchdog TRUE to also stop the watchdog timer
 **/

void
ixgb_down(struct ixgb_adapter *adapter, boolean_t kill_watchdog)
{
	struct net_device *netdev = adapter->netdev;

	IXGB_DBG("ixgb_down\n");

	ixgb_irq_disable(adapter);
	free_irq(netdev->irq, netdev);

	if (kill_watchdog)
		del_timer_sync(&adapter->watchdog_timer);
	adapter->link_speed = 0;
	adapter->link_duplex = 0;
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);
	ixgb_reset(adapter);
	ixgb_clean_tx_ring(adapter);
	ixgb_clean_rx_ring(adapter);
}
/**
 * ixgb_reset - hardware reset.
 *
 * ixgb_reset is called to initialize hardware to a known state.
 * @param adapter board private structure
 **/

void
ixgb_reset(struct ixgb_adapter *adapter)
{
	IXGB_DBG("ixgb_reset\n");

	ixgb_adapter_stop(&adapter->hw);
	if (!ixgb_init_hw(&adapter->hw))
		IXGB_DBG("ixgb_init_hw failed.\n");
}
/**
 * ixgb_probe - Device Initialization Routine.
 * @param pdev PCI device information struct
 * @param ent entry in ixgb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 **/

static int __devinit
ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev = NULL;
	struct ixgb_adapter *adapter;
	static int cards_found = 0;
	unsigned long mmio_start;
	unsigned long mmio_len;
	int pci_using_dac;
	int i;

	IXGB_DBG("ixgb_probe\n");

	if ((i = pci_enable_device(pdev))) {
		IXGB_ERR("pci_enable_device failed\n");
		return i;
	}

	if (!(i = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
		pci_using_dac = 1;
	} else {
		if ((i = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
			IXGB_ERR("No usable DMA configuration, aborting\n");
			return i;
		}
		pci_using_dac = 0;
	}

	if ((i = pci_request_regions(pdev, ixgb_driver_name))) {
		IXGB_ERR("Failed to reserve PCI I/O and Memory resources.\n");
		return i;
	}

	pci_set_master(pdev);

	/* alloc_etherdev clears the memory for us */
	netdev = alloc_etherdev(sizeof (struct ixgb_adapter));
	if (!netdev) {
		IXGB_ERR("Unable to allocate net_device struct\n");
		goto err_alloc_etherdev;
	}

	SET_MODULE_OWNER(netdev);
	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev->priv;
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->hw.back = adapter;

	mmio_start = pci_resource_start(pdev, BAR_0);
	mmio_len = pci_resource_len(pdev, BAR_0);

	adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
	if (!adapter->hw.hw_addr)
		goto err_ioremap;

	for (i = BAR_1; i <= BAR_5; i++) {
		if (pci_resource_len(pdev, i) == 0)
			continue;
		if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
			adapter->hw.io_base = pci_resource_start(pdev, i);
			break;
		}
	}

	IXGB_DBG("mmio_start<%lx> hw_addr<%p>\n", mmio_start,
		 adapter->hw.hw_addr);

	netdev->open = &ixgb_open;
	netdev->stop = &ixgb_close;
	netdev->hard_start_xmit = &ixgb_xmit_frame;
	netdev->get_stats = &ixgb_get_stats;
	netdev->set_multicast_list = &ixgb_set_multi;
	netdev->set_mac_address = &ixgb_set_mac;
	netdev->change_mtu = &ixgb_change_mtu;
	netdev->do_ioctl = &ixgb_ioctl;
	netdev->tx_timeout = &ixgb_tx_timeout;
	netdev->watchdog_timeo = HZ;
#ifdef CONFIG_IXGB_NAPI
	netdev->poll = &ixgb_poll;
	netdev->weight = 64;
#endif
	netdev->vlan_rx_register = ixgb_vlan_rx_register;
	netdev->vlan_rx_add_vid = ixgb_vlan_rx_add_vid;
	netdev->vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid;

	netdev->irq = pdev->irq;
	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;
	netdev->base_addr = adapter->hw.io_base;

	adapter->bd_number = cards_found;
	adapter->id_string = ixgb_strings[ent->driver_data];
	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	/* setup the private structure */
	ixgb_sw_init(adapter);

	netdev->features = NETIF_F_SG |
	    NETIF_F_HW_CSUM |
	    NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
#ifdef NETIF_F_TSO
	netdev->features |= NETIF_F_TSO;
#endif

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	/* make sure the EEPROM is good */

	if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
		IXGB_DBG("Invalid EEPROM checksum.\n");
		goto err_eeprom;
	}

	ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		IXGB_DBG("Invalid MAC address in EEPROM.\n");
		goto err_eeprom;
	}

	adapter->max_data_per_txd = IXGB_MAX_JUMBO_FRAME_SIZE;
	adapter->part_num = ixgb_get_ee_pba_number(&adapter->hw);

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = &ixgb_watchdog;
	adapter->watchdog_timer.data = (unsigned long) adapter;

	INIT_WORK(&adapter->tx_timeout_task,
		  (void (*)(void *)) ixgb_tx_timeout_task, netdev);

	register_netdev(netdev);
	memcpy(adapter->ifname, netdev->name, IFNAMSIZ);
	adapter->ifname[IFNAMSIZ - 1] = 0;

	/* we're going to reset, so assume we have no link for now */

	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	printk(KERN_INFO "%s: %s\n", netdev->name, adapter->id_string);
	ixgb_check_options(adapter);
	/* reset the hardware with the new settings */
	ixgb_reset(adapter);

	cards_found++;
	return 0;

      err_eeprom:
	iounmap(adapter->hw.hw_addr);
      err_ioremap:
	pci_release_regions(pdev);
	kfree(netdev);
      err_alloc_etherdev:
	return -ENODEV;
}
/**
 * ixgb_remove - Device Removal Routine.
 * @param pdev PCI device information struct
 *
 * ixgb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/

static void __devexit
ixgb_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgb_adapter *adapter = netdev->priv;

	IXGB_DBG("ixgb_remove\n");

	unregister_netdev(netdev);

#ifdef ETHTOOL_IDENTIFY
	ixgb_identify_stop(adapter);
#endif

	iounmap((void *) adapter->hw.hw_addr);
	pci_release_regions(pdev);
}
/**
 * ixgb_sw_init - Initialize general software structures (struct ixgb_adapter).
 * @param adapter board private structure to initialize
 *
 * ixgb_sw_init initializes the adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/

static void __devinit
ixgb_sw_init(struct ixgb_adapter *adapter)
{
	struct ixgb_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	IXGB_DBG("ixgb_sw_init\n");

	/* PCI config space info */

	/* FIXME: do not store, instead directly use struct pci_dev
	 * where needed */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_id = pdev->subsystem_device;

	adapter->rx_buffer_len = IXGB_RXBUFFER_2048;

	hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
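	/* i.e. the MTU plus ENET_HEADER_SIZE and ENET_FCS_LENGTH, the
	 * standard 14-byte Ethernet header and 4-byte FCS (values assumed
	 * to come from ixgb.h) */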
	if (hw->device_id == IXGB_DEVICE_ID_82597EX)
		hw->mac_type = ixgb_82597;
	else {
		/* should never have loaded on this device */
		printk(KERN_ERR "ixgb: unsupported device id\n");
	}

	/* enable flow control to be programmed */
	hw->fc.send_xon = 1;
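
	/* irq_sem starts at 1 so interrupts stay masked until ixgb_irq_enable()
	 * brings the count back to zero via atomic_dec_and_test() */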
	atomic_set(&adapter->irq_sem, 1);
}
/**
 * ixgb_open - Called when a network interface is made active.
 * @param netdev network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/

static int
ixgb_open(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev->priv;

	IXGB_DBG("ixgb_open\n");

	/* allocate transmit descriptors */

	if (ixgb_setup_tx_resources(adapter)) {
		IXGB_DBG("ixgb_open: failed ixgb_setup_tx_resources.\n");
		goto err_setup_tx;
	}

	/* allocate receive descriptors and buffers */

	if (ixgb_setup_rx_resources(adapter)) {
		IXGB_DBG("ixgb_open: failed ixgb_setup_rx_resources.\n");
		goto err_setup_rx;
	}
	if (ixgb_up(adapter))
		goto err_up;

	return 0;

      err_up:
	ixgb_free_rx_resources(adapter);
      err_setup_rx:
	ixgb_free_tx_resources(adapter);
      err_setup_tx:
	return -EBUSY;
}
/**
 * ixgb_close - Disables a network interface.
 * @param netdev network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/

static int
ixgb_close(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev->priv;

	IXGB_DBG("ixgb_close\n");

	ixgb_down(adapter, TRUE);

	ixgb_free_tx_resources(adapter);
	ixgb_free_rx_resources(adapter);

	return 0;
}
/**
 * ixgb_setup_tx_resources - allocate Tx resources (Descriptors).
 * @param adapter board private structure
 *
 * Return 0 on success, negative on failure
 **/

static int
ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct ixgb_desc_ring *txdr = &adapter->tx_ring;
	int size;

	IXGB_DBG("ixgb_setup_tx_resources\n");

	size = sizeof (struct ixgb_buffer) * txdr->count;
	txdr->buffer_info = kmalloc(size, GFP_KERNEL);
	if (!txdr->buffer_info) {
		return -ENOMEM;
	}
	memset(txdr->buffer_info, 0, size);

	/* round up to nearest 4K */
	txdr->size = txdr->count * sizeof (struct ixgb_tx_desc);
	IXGB_ROUNDUP(txdr->size, 4096);

	txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
	if (!txdr->desc) {
		kfree(txdr->buffer_info);
		return -ENOMEM;
	}
	memset(txdr->desc, 0, txdr->size);

	IXGB_DBG("txdr->desc <%p>\n", txdr->desc);
	IXGB_DBG("txdr->next_to_use = <%p>\n", &txdr->next_to_use);
	IXGB_DBG("txdr->next_to_clean = <%p>\n", &txdr->next_to_clean);

	txdr->next_to_use = 0;
	txdr->next_to_clean = 0;

	return 0;
}
/**
 * ixgb_configure_tx - Configure 82597 Transmit Unit after Reset.
 * @adapter board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/

static void
ixgb_configure_tx(struct ixgb_adapter *adapter)
{
	u32 tctl;
	u32 tdlen = adapter->tx_ring.count * sizeof (struct ixgb_tx_desc);
	uint64_t tdba = adapter->tx_ring.dma;
	struct ixgb_hw *hw = &adapter->hw;

	IXGB_DBG("ixgb_configure_tx\n");

	/* Setup the Base and Length of the Tx Descriptor Ring
	 * tx_ring.dma can be either a 32 or 64 bit value
	 */

	IXGB_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
	IXGB_WRITE_REG(hw, TDBAH, (tdba >> 32));

	IXGB_WRITE_REG(hw, TDLEN, tdlen);

	/* Setup the HW Tx Head and Tail descriptor pointers */

	IXGB_WRITE_REG(hw, TDH, 0);
	IXGB_WRITE_REG(hw, TDT, 0);

	/* don't set up txdctl, it induces performance problems if
	 * configured incorrectly
	 txdctl = TXDCTL_PTHRESH_DEFAULT; // prefetch txds below this threshold
	 txdctl |= (TXDCTL_HTHRESH_DEFAULT // only prefetch if there are this many ready
		    << IXGB_TXDCTL_HTHRESH_SHIFT);
	 IXGB_WRITE_REG (hw, TXDCTL, txdctl);
	 */

	/* Set the Tx Interrupt Delay register */

	IXGB_WRITE_REG(hw, TIDV, adapter->tx_int_delay);

	/* Program the Transmit Control Register */

	tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
	IXGB_WRITE_REG(hw, TCTL, tctl);

	/* Setup Transmit Descriptor Settings for this adapter */
	adapter->tx_cmd_type =
	    IXGB_TX_DESC_TYPE | IXGB_TX_DESC_CMD_RS
	    | (adapter->tx_int_delay_enable ? IXGB_TX_DESC_CMD_IDE : 0);
}
/**
 * ixgb_setup_rx_resources - allocate Rx resources (Descriptors).
 * @param adapter board private structure
 *
 * Returns 0 on success, negative on failure
 **/

static int
ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
	int size;

	IXGB_DBG("ixgb_setup_rx_resources.\n");

	size = sizeof (struct ixgb_buffer) * rxdr->count;
	rxdr->buffer_info = kmalloc(size, GFP_KERNEL);
	if (!rxdr->buffer_info) {
		return -ENOMEM;
	}
	memset(rxdr->buffer_info, 0, size);

	/* Round up to nearest 4K */
	rxdr->size = rxdr->count * sizeof (struct ixgb_rx_desc);
	IXGB_ROUNDUP(rxdr->size, 4096);

	rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
	if (!rxdr->desc) {
		IXGB_DBG("pci_alloc_consistent failed.\n");
		kfree(rxdr->buffer_info);
		return -ENOMEM;
	}
	memset(rxdr->desc, 0, rxdr->size);

	IXGB_DBG("rxdr->desc <%p>\n", rxdr->desc);
	IXGB_DBG("rxdr->next_to_use = <%p>\n", &rxdr->next_to_use);
	IXGB_DBG("rxdr->next_to_clean = <%p>\n", &rxdr->next_to_clean);

	rxdr->next_to_use = 0;
	rxdr->next_to_clean = 0;

	return 0;
}
/**
 * ixgb_setup_rctl - configure the receive control register.
 * @param adapter Board private structure
 **/

static void
ixgb_setup_rctl(struct ixgb_adapter *adapter)
{
	u32 rctl;

	rctl = IXGB_READ_REG(&adapter->hw, RCTL);

	IXGB_DBG("ixgb_setup_rctl\n");

	rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);

	rctl |=
	    IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 |
	    IXGB_RCTL_RXEN | IXGB_RCTL_CFF |
	    (adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);
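
	/* SECRC: have the hardware strip the 4-byte Ethernet CRC (FCS) from
	 * received frames before they are DMAed to host memory */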
	rctl |= IXGB_RCTL_SECRC;

	switch (adapter->rx_buffer_len) {
	case IXGB_RXBUFFER_2048:
	default:
		rctl |= IXGB_RCTL_BSIZE_2048;
		break;
	case IXGB_RXBUFFER_4096:
		rctl |= IXGB_RCTL_BSIZE_4096;
		break;
	case IXGB_RXBUFFER_8192:
		rctl |= IXGB_RCTL_BSIZE_8192;
		break;
	case IXGB_RXBUFFER_16384:
		rctl |= IXGB_RCTL_BSIZE_16384;
		break;
	}

	IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
}
/**
 * ixgb_configure_rx - Configure 82597 Receive Unit after Reset.
 * @param adapter board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/

static void
ixgb_configure_rx(struct ixgb_adapter *adapter)
{
	uint64_t rdba = adapter->rx_ring.dma;
	u32 rdlen = adapter->rx_ring.count * sizeof (struct ixgb_rx_desc);
	struct ixgb_hw *hw = &adapter->hw;
	u32 rctl;
	u32 rxcsum;
	u32 rxdctl;

	IXGB_DBG("ixgb_configure_rx\n");

	/* make sure receives are disabled while setting up the descriptors */
	rctl = IXGB_READ_REG(hw, RCTL);
	IXGB_WRITE_REG(hw, RCTL, rctl & ~IXGB_RCTL_RXEN);

	/* set the Receive Delay Timer Register */
	IXGB_WRITE_REG(hw, RDTR, adapter->rx_int_delay);

	/* Setup the Base and Length of the Rx Descriptor Ring */
	IXGB_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
	IXGB_WRITE_REG(hw, RDBAH, (rdba >> 32));

	IXGB_WRITE_REG(hw, RDLEN, rdlen);

	/* Setup the HW Rx Head and Tail Descriptor Pointers */
	IXGB_WRITE_REG(hw, RDH, 0);
	IXGB_WRITE_REG(hw, RDT, 0);

	/* burst 16 or burst when RXT0 */
	rxdctl = RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT
	    | RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT
	    | RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT;
	IXGB_WRITE_REG(hw, RXDCTL, rxdctl);

	if (adapter->raidc) {
		u32 raidc;
		u32 poll_threshold;

		/* Poll every rx_int_delay period, if RBD exists
		 * Receive Backlog Detection is set to <threshold>
		 * max is 0x3F == set to poll when 504 RxDesc left
		 */

		/* polling times are 1 == 0.8192us */

#define IXGB_RAIDC_POLL_DEFAULT 122	/* set to poll every ~100 us under load
					   also known as 10000 interrupts / sec */

		/* divide this by 2^3 (8) to get a register size count */
		poll_threshold = ((adapter->rx_ring.count - 1) >> 3);
		/* poll at half of that size */
		poll_threshold >>= 1;
		/* make sure it's not bigger than our max */
		poll_threshold &= 0x3F;
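		/* e.g. for a 512-entry ring: (512 - 1) >> 3 == 63, halved
		 * to 31, which is already within the 0x3F register maximum */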
		raidc = IXGB_RAIDC_EN |	/* turn on raidc style moderation */
		    IXGB_RAIDC_RXT_GATE |	/* don't interrupt with rxt0 while
						   in RBD mode (polling) */
		    (IXGB_RAIDC_POLL_DEFAULT << IXGB_RAIDC_POLL_SHIFT) |
		    /* this sets the regular "min interrupt delay" */
		    (adapter->rx_int_delay << IXGB_RAIDC_DELAY_SHIFT) |
		    poll_threshold;

		IXGB_WRITE_REG(hw, RAIDC, raidc);
	}

	/* Enable Receive Checksum Offload for TCP and UDP */
	if (adapter->rx_csum == TRUE) {
		rxcsum = IXGB_READ_REG(hw, RXCSUM);
		rxcsum |= IXGB_RXCSUM_TUOFL;
		IXGB_WRITE_REG(hw, RXCSUM, rxcsum);
	}

	/* Enable Receives */

	IXGB_WRITE_REG(hw, RCTL, rctl);
}
/**
 * ixgb_free_tx_resources - Free Tx Resources.
 * @param adapter board private structure
 *
 * Free all transmit software resources
 **/

static void
ixgb_free_tx_resources(struct ixgb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;

	IXGB_DBG("ixgb_free_tx_resources\n");

	ixgb_clean_tx_ring(adapter);

	kfree(adapter->tx_ring.buffer_info);
	adapter->tx_ring.buffer_info = NULL;

	pci_free_consistent(pdev, adapter->tx_ring.size, adapter->tx_ring.desc,
			    adapter->tx_ring.dma);

	adapter->tx_ring.desc = NULL;
}
/**
 * ixgb_clean_tx_ring - Free Tx Buffers.
 * @param adapter board private structure
 **/

static void
ixgb_clean_tx_ring(struct ixgb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;
	int i;

	IXGB_DBG("ixgb_clean_tx_ring\n");

	/* Free all the Tx ring sk_buffs */

	for (i = 0; i < adapter->tx_ring.count; i++) {
		if (adapter->tx_ring.buffer_info[i].skb) {
			pci_unmap_page(pdev,
				       adapter->tx_ring.buffer_info[i].dma,
				       adapter->tx_ring.buffer_info[i].length,
				       PCI_DMA_TODEVICE);
			dev_kfree_skb(adapter->tx_ring.buffer_info[i].skb);
			adapter->tx_ring.buffer_info[i].skb = NULL;
		}
	}

	size = sizeof (struct ixgb_buffer) * adapter->tx_ring.count;
	memset(adapter->tx_ring.buffer_info, 0, size);

	/* Zero out the descriptor ring */

	memset(adapter->tx_ring.desc, 0, adapter->tx_ring.size);

	adapter->tx_ring.next_to_use = 0;
	adapter->tx_ring.next_to_clean = 0;

	IXGB_WRITE_REG(&adapter->hw, TDH, 0);
	IXGB_WRITE_REG(&adapter->hw, TDT, 0);
}
/**
 * ixgb_free_rx_resources - Free Rx Resources.
 * @param adapter board private structure
 *
 * Free all receive software resources
 **/

static void
ixgb_free_rx_resources(struct ixgb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;

	IXGB_DBG("ixgb_free_rx_resources\n");

	ixgb_clean_rx_ring(adapter);

	kfree(adapter->rx_ring.buffer_info);
	adapter->rx_ring.buffer_info = NULL;

	pci_free_consistent(pdev, adapter->rx_ring.size,
			    adapter->rx_ring.desc, adapter->rx_ring.dma);

	adapter->rx_ring.desc = NULL;
}
/**
 * ixgb_clean_rx_ring - Free Rx Buffers.
 * @param adapter board private structure
 **/

static void
ixgb_clean_rx_ring(struct ixgb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;
	int i;

	IXGB_DBG("ixgb_clean_rx_ring\n");

	/* Free all the Rx ring sk_buffs */

	for (i = 0; i < adapter->rx_ring.count; i++) {
		if (adapter->rx_ring.buffer_info[i].skb) {

			pci_unmap_single(pdev,
					 adapter->rx_ring.buffer_info[i].dma,
					 adapter->rx_ring.buffer_info[i].length,
					 PCI_DMA_FROMDEVICE);

			dev_kfree_skb(adapter->rx_ring.buffer_info[i].skb);

			adapter->rx_ring.buffer_info[i].skb = NULL;
		}
	}

	size = sizeof (struct ixgb_buffer) * adapter->rx_ring.count;
	memset(adapter->rx_ring.buffer_info, 0, size);

	/* Zero out the descriptor ring */

	memset(adapter->rx_ring.desc, 0, adapter->rx_ring.size);

	adapter->rx_ring.next_to_clean = 0;
	adapter->rx_ring.next_to_use = 0;

	IXGB_WRITE_REG(&adapter->hw, RDH, 0);
	IXGB_WRITE_REG(&adapter->hw, RDT, 0);
}
/**
 * ixgb_set_multi - Multicast and Promiscuous mode set.
 * @param netdev network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated. This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
 **/

static void
ixgb_set_multi(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev->priv;
	struct ixgb_hw *hw = &adapter->hw;
	u32 rctl;
	int i;
	struct dev_mc_list *mc_ptr;

	IXGB_DBG("ixgb_set_multi <%x>\n", netdev->flags);

	/* Check for Promiscuous and All Multicast modes */

	rctl = IXGB_READ_REG(&adapter->hw, RCTL);

	if (netdev->flags & IFF_PROMISC) {
		rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
	} else if (netdev->flags & IFF_ALLMULTI) {
		rctl |= IXGB_RCTL_MPE;
		rctl &= ~IXGB_RCTL_UPE;
	} else {
		rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE);
	}

	if (netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
		rctl |= IXGB_RCTL_MPE;
		IXGB_WRITE_REG(hw, RCTL, rctl);
	} else {
		u8 mta[netdev->mc_count * IXGB_ETH_LENGTH_OF_ADDRESS];

		IXGB_WRITE_REG(hw, RCTL, rctl);

		for (i = 0, mc_ptr = netdev->mc_list; mc_ptr;
		     i++, mc_ptr = mc_ptr->next)
			memcpy(&mta[i * IXGB_ETH_LENGTH_OF_ADDRESS],
			       mc_ptr->dmi_addr, IXGB_ETH_LENGTH_OF_ADDRESS);

		ixgb_mc_addr_list_update(hw, mta, netdev->mc_count, 0);
	}
}
/**
 * ixgb_watchdog - Timer Call-back.
 * @param data pointer to adapter cast into an unsigned long
 **/

static void
ixgb_watchdog(unsigned long data)
{
	struct ixgb_adapter *adapter = (struct ixgb_adapter *) data;
	struct net_device *netdev = adapter->netdev;

	ixgb_check_for_link(&adapter->hw);

	if (ixgb_check_for_bad_link(&adapter->hw)) {
		/* force the reset path */
		netif_stop_queue(netdev);
	}

	if (adapter->hw.link_up) {
		if (!netif_carrier_ok(netdev)) {
			printk(KERN_INFO "ixgb: %s NIC Link is Up %d Mbps %s\n",
			       netdev->name, 10000, "Full Duplex");
			adapter->link_speed = 10000;
			adapter->link_duplex = FULL_DUPLEX;
			netif_carrier_on(netdev);
			netif_wake_queue(netdev);
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			printk(KERN_INFO "ixgb: %s NIC Link is Down\n",
			       netdev->name);
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);

			ixgb_down(adapter, FALSE);
			ixgb_up(adapter);
		}
	}

	ixgb_update_stats(adapter);

	/* Early detection of hung controller */
	{
		struct ixgb_desc_ring *txdr = &adapter->tx_ring;
		int i = txdr->next_to_clean;

		if (txdr->buffer_info[i].dma &&
		    time_after(jiffies, txdr->buffer_info[i].time_stamp + HZ) &&
		    !(IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_TXOFF)) {
			printk(KERN_ERR
			       "ixgb: %s Hung controller? Watchdog stopping queue\n",
			       netdev->name);
			netif_stop_queue(netdev);
		}
	}

	/* generate an interrupt to force clean up of any stragglers */
	IXGB_WRITE_REG(&adapter->hw, ICS, IXGB_INT_TXDW);

	/* Reset the timer */
	mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
}
#define IXGB_TX_FLAGS_CSUM 0x00000001
#define IXGB_TX_FLAGS_VLAN 0x00000002
#define IXGB_TX_FLAGS_TSO  0x00000004
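
/* These flags are collected in ixgb_xmit_frame() and consumed by
 * ixgb_tx_queue(), where they select the TSE command bit and the
 * TXSM/IXSM checksum and VLE vlan options in the descriptors */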
/**
 * ixgb_tso - Transmit Segmentation Offload (Large Send) setup.
 * The initial context descriptor is prepared here.
 * @param adapter adapter specific information
 * @param skb the skb we are trying to set up for segmentation
 **/

static inline boolean_t
ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
{
#ifdef NETIF_F_TSO
	struct ixgb_context_desc *context_desc;
	int i;
	u8 ipcss, ipcso, tucss, tucso, hdr_len;
	u16 ipcse, tucse, mss;

	if (likely(skb_shinfo(skb)->tso_size)) {
		hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
		mss = skb_shinfo(skb)->tso_size;
		skb->nh.iph->tot_len = 0;
		skb->nh.iph->check = 0;
		skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
						      skb->nh.iph->daddr,
						      0, IPPROTO_TCP, 0);
		ipcss = skb->nh.raw - skb->data;
		ipcso = (void *) &(skb->nh.iph->check) - (void *) skb->data;
		ipcse = skb->h.raw - skb->data - 1;
		tucss = skb->h.raw - skb->data;
		tucso = (void *) &(skb->h.th->check) - (void *) skb->data;
		tucse = 0;

		i = adapter->tx_ring.next_to_use;
		context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);

		context_desc->ipcss = ipcss;
		context_desc->ipcso = ipcso;
		context_desc->ipcse = cpu_to_le16(ipcse);
		context_desc->tucss = tucss;
		context_desc->tucso = tucso;
		context_desc->tucse = cpu_to_le16(tucse);
		context_desc->mss = cpu_to_le16(mss);
		context_desc->hdr_len = hdr_len;
		context_desc->status = 0;
		context_desc->cmd_type_len = cpu_to_le32(IXGB_CONTEXT_DESC_TYPE
						| IXGB_CONTEXT_DESC_CMD_TSE
						| IXGB_CONTEXT_DESC_CMD_IP
						| IXGB_CONTEXT_DESC_CMD_TCP
						| IXGB_CONTEXT_DESC_CMD_RS
						| IXGB_CONTEXT_DESC_CMD_IDE
						| (skb->len - (hdr_len)));

		i = (i + 1) % adapter->tx_ring.count;
		adapter->tx_ring.next_to_use = i;

		return TRUE;
	}
#endif

	return FALSE;
}
/**
 * ixgb_tx_csum - prepare context descriptor for checksum offload.
 *
 * ixgb_tx_csum is called to prepare for checksumming a packet in hw.
 * @param adapter board private structure
 * @param skb structure containing data to send
 **/
static inline boolean_t
ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
{
	struct ixgb_context_desc *context_desc;
	int i;
	u8 css, cso;

	if (likely(skb->ip_summed == CHECKSUM_HW)) {
		css = skb->h.raw - skb->data;
		cso = (skb->h.raw + skb->csum) - skb->data;

		i = adapter->tx_ring.next_to_use;
		context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);

		context_desc->tucss = css;
		context_desc->tucso = cso;
		context_desc->tucse = 0;
		/* zero out any previously existing data in one instruction */
		*(u32 *) & (context_desc->ipcss) = 0;
		context_desc->status = 0;
		context_desc->hdr_len = 0;
		context_desc->mss = 0;
		context_desc->cmd_type_len =
		    cpu_to_le32(IXGB_CONTEXT_DESC_TYPE
				| IXGB_TX_DESC_CMD_RS | IXGB_TX_DESC_CMD_IDE);

		i = (i + 1) % adapter->tx_ring.count;
		adapter->tx_ring.next_to_use = i;

		return TRUE;
	}

	return FALSE;
}
/**
 * ixgb_tx_map - private function for mapping send data to hardware addresses.
 *
 * @param adapter board private structure
 * @param skb structure containing data to send
 **/

static int
ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb)
{
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
	int len, offset, count, size, i;
	int f;

	len = skb->len - skb->data_len;
	offset = 0;
	count = 0;

	i = (tx_ring->next_to_use + tx_ring->count - 1) % tx_ring->count;
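	/* i starts one slot behind next_to_use: each pass below advances i
	 * before filling, so the first descriptor written is next_to_use */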
	while (len) {
		i = (i + 1) % tx_ring->count;
		size = min(len, adapter->max_data_per_txd);
		tx_ring->buffer_info[i].length = size;
		tx_ring->buffer_info[i].dma =
		    pci_map_single(adapter->pdev, skb->data + offset, size,
				   PCI_DMA_TODEVICE);

		tx_ring->buffer_info[i].time_stamp = jiffies;

		len -= size;
		offset += size;
		count++;
	}

	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = frag->size;
		offset = 0;

		while (len) {
			i = (i + 1) % tx_ring->count;
			size = min(len, adapter->max_data_per_txd);
			tx_ring->buffer_info[i].length = size;
			tx_ring->buffer_info[i].dma =
			    pci_map_page(adapter->pdev, frag->page,
					 frag->page_offset + offset, size,
					 PCI_DMA_TODEVICE);

			tx_ring->buffer_info[i].time_stamp = jiffies;

			len -= size;
			offset += size;
			count++;
		}
	}

	tx_ring->buffer_info[i].skb = skb;

	return count;
}
/**
 * ixgb_tx_queue - private function to start transmit on hardware.
 *
 * @param adapter board private structure
 * @param count number of tx_descriptors to initialize (consume)
 * @param vlan_id the vlan tag to insert (if necessary)
 * @param tx_flags special handling for this transmit, if any
 **/

static void
ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,
	      int tx_flags)
{
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
	struct ixgb_tx_desc *tx_desc = NULL;
	u32 cmd_type_len = adapter->tx_cmd_type;
	u8 status = 0;
	u8 popts = 0;
	int i;

	if (tx_flags & IXGB_TX_FLAGS_TSO) {
		cmd_type_len |= IXGB_TX_DESC_CMD_TSE;
		popts |= (IXGB_TX_DESC_POPTS_IXSM | IXGB_TX_DESC_POPTS_TXSM);
	}

	if (tx_flags & IXGB_TX_FLAGS_CSUM)
		popts |= IXGB_TX_DESC_POPTS_TXSM;

	if (tx_flags & IXGB_TX_FLAGS_VLAN) {
		cmd_type_len |= IXGB_TX_DESC_CMD_VLE;
	}

	i = tx_ring->next_to_use;

	while (count--) {
		tx_desc = IXGB_TX_DESC(*tx_ring, i);
		tx_desc->buff_addr = cpu_to_le64(tx_ring->buffer_info[i].dma);
		tx_desc->cmd_type_len =
		    cpu_to_le32(cmd_type_len | tx_ring->buffer_info[i].length);
		tx_desc->status = status;
		tx_desc->popts = popts;
		tx_desc->vlan = cpu_to_le16(vlan_id);

		i = (i + 1) % tx_ring->count;
	}

	tx_desc->cmd_type_len |= cpu_to_le32(IXGB_TX_DESC_CMD_EOP);

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch. (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64). */
	wmb();

	tx_ring->next_to_use = i;
	IXGB_WRITE_REG(&adapter->hw, TDT, i);
}
#define TXD_USE_COUNT(S, X) (((S) / (X)) + (((S) % (X)) ? 1 : 0))
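/* e.g. TXD_USE_COUNT(4000, 2048) == 2: one full 2048-byte descriptor plus
 * one more for the remaining 1952 bytes */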
/**
 * ixgb_xmit_frame - hard_start_xmit linked function, transmit entry point.
 *
 * ixgb_xmit_frame is called to send an skb on the wire.
 * @param skb contains data to send
 * @param netdev network interface device structure
 **/

static int
ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev->priv;
	int vlan_id = 0;
	int tx_flags = 0, count;
	int f;

	count =
	    TXD_USE_COUNT(skb->len - skb->data_len, adapter->max_data_per_txd);

	if (!count) {
		dev_kfree_skb_any(skb);
		return 0;
	}

	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
				       adapter->max_data_per_txd);

#ifdef NETIF_F_TSO
	if ((skb_shinfo(skb)->tso_size) || (skb->ip_summed == CHECKSUM_HW))
		count++;
#else
	if (skb->ip_summed == CHECKSUM_HW)
		count++;
#endif

	if (unlikely(IXGB_DESC_UNUSED(&adapter->tx_ring) < count)) {
		netif_stop_queue(netdev);
		return 1;
	}

	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
		tx_flags |= IXGB_TX_FLAGS_VLAN;
		vlan_id = vlan_tx_tag_get(skb);
	}

	if (ixgb_tso(adapter, skb))
		tx_flags |= IXGB_TX_FLAGS_TSO;
	else if (ixgb_tx_csum(adapter, skb))
		tx_flags |= IXGB_TX_FLAGS_CSUM;

	count = ixgb_tx_map(adapter, skb);
	ixgb_tx_queue(adapter, count, vlan_id, tx_flags);
	netdev->trans_start = jiffies;

	return 0;
}
/**
 * ixgb_tx_timeout - Respond to a Tx Hang by resetting the adapter.
 * @param netdev network interface device structure
 **/

static void
ixgb_tx_timeout(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev->priv;

	IXGB_DBG("ixgb_tx_timeout\n");

	/* Do the reset outside of interrupt context */
	schedule_work(&adapter->tx_timeout_task);
}
/**
 * ixgb_tx_timeout_task - worker function to reset hardware and dump queues.
 * This function is pointed to by adapter->tx_timeout_task
 *
 * @param netdev network interface device structure
 **/

static void
ixgb_tx_timeout_task(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev->priv;

	IXGB_DBG("ixgb_tx_timeout_task\n");

	netif_device_detach(netdev);
	ixgb_down(adapter, TRUE);
	ixgb_up(adapter);
	netif_device_attach(netdev);
}
/**
 * ixgb_get_stats - Get System Network Statistics.
 * @param netdev network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/

static struct net_device_stats *
ixgb_get_stats(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev->priv;

	return &adapter->net_stats;
}
/**
 * ixgb_change_mtu - Change the Maximum Transfer Unit.
 * @param netdev network interface device structure
 * @param new_mtu new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/

static int
ixgb_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgb_adapter *adapter = netdev->priv;
	u32 old_mtu = adapter->rx_buffer_len;
	int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;

	IXGB_DBG("ixgb_change_mtu\n");

	if ((max_frame < IXGB_MIN_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH)
	    || (max_frame > IXGB_MAX_JUMBO_FRAME_SIZE + ENET_FCS_LENGTH)) {
		IXGB_ERR("Invalid MTU setting\n");
		return -EINVAL;
	}

	if ((max_frame <=
	     IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH)
	    || (max_frame <= IXGB_RXBUFFER_2048)) {
		adapter->rx_buffer_len = IXGB_RXBUFFER_2048;

	} else if (max_frame <= IXGB_RXBUFFER_4096) {
		adapter->rx_buffer_len = IXGB_RXBUFFER_4096;

	} else if (max_frame <= IXGB_RXBUFFER_8192) {
		adapter->rx_buffer_len = IXGB_RXBUFFER_8192;

	} else {
		adapter->rx_buffer_len = IXGB_RXBUFFER_16384;
	}

	if (old_mtu != adapter->rx_buffer_len && netif_running(netdev)) {
		ixgb_down(adapter, TRUE);
		ixgb_up(adapter);
	}

	if (adapter->hw.max_frame_size != max_frame) {
		struct ixgb_hw *hw = &adapter->hw;

		adapter->hw.max_frame_size = max_frame;

		IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);

		if (hw->max_frame_size >
		    IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
			u32 ctrl0 = IXGB_READ_REG(hw, CTRL0);

			if (!(ctrl0 & IXGB_CTRL0_JFE)) {
				ctrl0 |= IXGB_CTRL0_JFE;
				IXGB_WRITE_REG(hw, CTRL0, ctrl0);
			}
		}

		printk(KERN_ERR "%s: ixgb_change_mtu MFS is set to <%x>\n",
		       adapter->netdev->name,
		       (IXGB_READ_REG(hw, MFS) >> IXGB_MFS_SHIFT));
	}

	netdev->mtu = new_mtu;

	return 0;
}
/**
 * ixgb_set_mac - Change the Ethernet Address of the NIC.
 * @param netdev network interface device structure
 * @param p pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/

static int
ixgb_set_mac(struct net_device *netdev, void *p)
{
	struct ixgb_adapter *adapter = netdev->priv;
	struct sockaddr *addr = (struct sockaddr *) p;

	IXGB_DBG("ixgb_set_mac\n");

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	ixgb_rar_set(&adapter->hw, addr->sa_data, 0);

	return 0;
}
/**
 * ixgb_update_stats - Update the board statistics counters.
 * @param adapter board private structure
 **/

static void
ixgb_update_stats(struct ixgb_adapter *adapter)
{
	adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
	adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
	adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
	adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
	adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
	adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
	adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
	adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
	adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
	adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
	adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
	adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH);
	adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL);
	adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH);
	adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL);
	adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH);
	adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL);
	adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH);
	adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC);
	adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC);
	adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC);
	adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC);
	adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS);
	adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC);
	adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC);
	adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC);
	adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL);
	adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH);
	adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL);
	adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH);
	adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL);
	adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH);
	adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL);
	adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH);
	adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL);
	adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH);
	adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL);
	adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH);
	adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL);
	adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH);
	adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL);
	adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH);
	adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL);
	adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH);
	adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC);
	adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C);
	adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC);
	adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC);
	adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC);
	adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC);
	adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC);
	adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC);
	adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC);
	adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC);
	adapter->stats.mcftc += IXGB_READ_REG(&adapter->hw, MCFTC);
	adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC);
	adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC);
	adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC);
	adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC);
	adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC);

	/* Fill out the OS statistics structure */

	adapter->net_stats.rx_packets = adapter->stats.gprcl;
	adapter->net_stats.tx_packets = adapter->stats.gptcl;
	adapter->net_stats.rx_bytes = adapter->stats.gorcl;
	adapter->net_stats.tx_bytes = adapter->stats.gotcl;
	adapter->net_stats.multicast = adapter->stats.mprcl;
	adapter->net_stats.collisions = 0;

	/* ignore RLEC as it reports errors for padded (<64bytes) frames
	 * with a length in the type/len field */
	adapter->net_stats.rx_errors =
	    /* adapter->stats.rnbc + */ adapter->stats.crcerrs +
	    adapter->stats.ruc +
	    adapter->stats.roc /*+ adapter->stats.rlec */ +
	    adapter->stats.icbc +
	    adapter->stats.ecbc + adapter->stats.mpc;

	adapter->net_stats.rx_dropped = adapter->stats.mpc;

	/* see above
	 * adapter->net_stats.rx_length_errors = adapter->stats.rlec;
	 */

	adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
	adapter->net_stats.rx_fifo_errors = adapter->stats.mpc;
	adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
	adapter->net_stats.rx_over_errors = adapter->stats.mpc;

	adapter->net_stats.tx_errors = 0;
	adapter->net_stats.rx_frame_errors = 0;
	adapter->net_stats.tx_aborted_errors = 0;
	adapter->net_stats.tx_carrier_errors = 0;
	adapter->net_stats.tx_fifo_errors = 0;
	adapter->net_stats.tx_heartbeat_errors = 0;
	adapter->net_stats.tx_window_errors = 0;
}
/**
 * ixgb_irq_disable - Mask off interrupt generation on the NIC
 * @param adapter board private structure
 **/

static inline void
ixgb_irq_disable(struct ixgb_adapter *adapter)
{
	IXGB_DBG("ixgb_irq_disable\n");

	atomic_inc(&adapter->irq_sem);
	IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
	synchronize_irq(adapter->netdev->irq);
}
/**
 * ixgb_irq_enable - Enable default interrupt generation settings.
 * @param adapter board private structure
 **/

static inline void
ixgb_irq_enable(struct ixgb_adapter *adapter)
{
	IXGB_DBG("ixgb_irq_enable\n");

	if (atomic_dec_and_test(&adapter->irq_sem)) {
		IXGB_WRITE_REG(&adapter->hw, IMS,
			       IXGB_INT_RXT0 | IXGB_INT_RXDMT0 | IXGB_INT_TXDW |
			       IXGB_INT_RXO | IXGB_INT_LSC);
	}
}
#define IXGB_MAX_INTR 10
/**
 * ixgb_intr - Interrupt Handler.
 * @param irq interrupt number
 * @param data pointer to a network interface device structure
 * @param regs CPU registers structure
 **/

static irqreturn_t
ixgb_intr(int irq, void *data, struct pt_regs *regs)
{
	struct net_device *netdev = (struct net_device *) data;
	struct ixgb_adapter *adapter = netdev->priv;
#ifdef CONFIG_IXGB_NAPI
	if (netif_rx_schedule_prep(netdev)) {
		ixgb_irq_disable(adapter);
		__netif_rx_schedule(netdev);
	}

	return IRQ_HANDLED;	/* FIXME: check for shared interrupts */
#else
	struct ixgb_hw *hw = &adapter->hw;
	u32 icr;
	uint i = IXGB_MAX_INTR;
	boolean_t rxdmt0 = FALSE;
	int handled = 0;

	while (i && (icr = IXGB_READ_REG(hw, ICR))) {
		handled = 1;

		if (icr & IXGB_INT_RXDMT0)
			rxdmt0 = TRUE;

		if (unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC))) {
			mod_timer(&adapter->watchdog_timer, jiffies);
		}

		/* adapter->generate_int = 0; */
		ixgb_clean_rx_irq(adapter);
		ixgb_clean_tx_irq(adapter);

		i--;
	}

	/* if RAIDC:EN == 1 and ICR:RXDMT0 == 1, we need to
	 * set IMS:RXDMT0 to 1 to restart the RBD timer (POLL)
	 */
	if (rxdmt0 && adapter->raidc) {
		/* ready the timer by writing the clear reg */
		IXGB_WRITE_REG(hw, IMC, IXGB_INT_RXDMT0);
		/* now restart it, h/w will decide if it's necessary */
		IXGB_WRITE_REG(hw, IMS, IXGB_INT_RXDMT0);
	}

	return IRQ_RETVAL(handled);
#endif
}
#ifdef CONFIG_IXGB_NAPI
static void
ixgb_process_intr(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev->priv;
	u32 icr;
	int i = IXGB_MAX_INTR;
	int hasReceived = 0;

	while (i && (icr = IXGB_READ_REG(&adapter->hw, ICR))) {
		if (icr & IXGB_INT_RXT0)
			hasReceived = 1;

		if (!(icr & ~(IXGB_INT_RXT0)))
			break;

		if (icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)) {
			mod_timer(&adapter->watchdog_timer, jiffies);
		}

		ixgb_clean_tx_irq(adapter);
		i--;
	}
}
#endif
/**
 * ixgb_clean_tx_irq - Reclaim resources after transmit completes.
 * @param adapter board private structure
 **/

static void
ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	int i = adapter->tx_ring.next_to_clean;
	struct ixgb_tx_desc *tx_desc = IXGB_TX_DESC(*tx_ring, i);

	while ((tx_desc->status & IXGB_TX_DESC_STATUS_DD)) {
		if (tx_desc->popts
		    & (IXGB_TX_DESC_POPTS_TXSM | IXGB_TX_DESC_POPTS_IXSM))
			adapter->hw_csum_tx_good++;

		if (tx_ring->buffer_info[i].dma) {
			pci_unmap_page(pdev, tx_ring->buffer_info[i].dma,
				       tx_ring->buffer_info[i].length,
				       PCI_DMA_TODEVICE);
			tx_ring->buffer_info[i].dma = 0;
		}

		if (tx_ring->buffer_info[i].skb) {
			dev_kfree_skb_any(tx_ring->buffer_info[i].skb);
			tx_ring->buffer_info[i].skb = NULL;
		}

		*(u32 *) & (tx_desc->status) = 0;

		i = (i + 1) % tx_ring->count;
		tx_desc = IXGB_TX_DESC(*tx_ring, i);
	}

	tx_ring->next_to_clean = i;

	if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev) &&
	    (IXGB_DESC_UNUSED(tx_ring) > IXGB_TX_QUEUE_WAKE)) {
		netif_wake_queue(netdev);
	}
}
#ifdef CONFIG_IXGB_NAPI
static int
ixgb_poll(struct net_device *netdev, int *budget)
{
	struct ixgb_adapter *adapter = netdev->priv;
	struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
	struct pci_dev *pdev = adapter->pdev;
	struct ixgb_rx_desc *rx_desc;
	struct sk_buff *skb;
	u32 length;
	int received = 0;
	int i;
	int rx_work_limit = *budget;

	if (rx_work_limit > netdev->quota)
		rx_work_limit = netdev->quota;

	ixgb_process_intr(netdev);

	i = rx_ring->next_to_clean;
	rx_desc = IXGB_RX_DESC(*rx_ring, i);

	while ((rx_desc->status & IXGB_RX_DESC_STATUS_DD)) {
		if (--rx_work_limit < 0)
			goto not_done;

		pci_unmap_single(pdev,
				 rx_ring->buffer_info[i].dma,
				 rx_ring->buffer_info[i].length,
				 PCI_DMA_FROMDEVICE);

		skb = rx_ring->buffer_info[i].skb;
		length = le16_to_cpu(rx_desc->length);

		if (!(rx_desc->status & IXGB_RX_DESC_STATUS_EOP)) {

			/* All receives must fit into a single buffer */

			IXGB_DBG("Receive packet consumed multiple buffers\n");

			dev_kfree_skb_irq(skb);
			rx_desc->status = 0;
			rx_ring->buffer_info[i].skb = NULL;

			i = (i + 1) % rx_ring->count;

			rx_desc = IXGB_RX_DESC(*rx_ring, i);
			continue;
		}

		if (rx_desc->errors
		    & (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE |
		       IXGB_RX_DESC_ERRORS_P | IXGB_RX_DESC_ERRORS_RXE)) {

			IXGB_DBG("Receive Errors Reported by Hardware-%x.\n",
				 rx_desc->errors);

			dev_kfree_skb_irq(skb);
			rx_desc->status = 0;
			rx_ring->buffer_info[i].skb = NULL;
			i = (i + 1) % rx_ring->count;
			rx_desc = IXGB_RX_DESC(*rx_ring, i);
			continue;
		}

		/* Good Receive */
		skb_put(skb, length);

		/* Receive Checksum Offload */
		ixgb_rx_checksum(adapter, rx_desc, skb);

		skb->protocol = eth_type_trans(skb, netdev);
		if (adapter->vlgrp
		    && (rx_desc->status & IXGB_RX_DESC_STATUS_VP)) {
			vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
						 (rx_desc->special &
						  IXGB_RX_DESC_SPECIAL_VLAN_MASK));
		} else {
			netif_receive_skb(skb);
		}
		netdev->last_rx = jiffies;
		received++;

		rx_desc->status = 0;
		rx_ring->buffer_info[i].skb = NULL;

		i = (i + 1) % rx_ring->count;

		rx_desc = IXGB_RX_DESC(*rx_ring, i);
	}

	ixgb_alloc_rx_buffers(adapter);

	rx_ring->next_to_clean = i;
	netdev->quota -= received;
	*budget -= received;

	netif_rx_complete(netdev);

	/* NOTE: RAIDC will be automatically restarted by this enable */
	ixgb_irq_enable(adapter);

	return 0;

      not_done:
	ixgb_alloc_rx_buffers(adapter);

	rx_ring->next_to_clean = i;
	netdev->quota -= received;
	*budget -= received;

	return 1;
}
#endif
/**
 * ixgb_clean_rx_irq - Send received data up the network stack.
 * @param adapter board private structure
 **/

static void
ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct ixgb_rx_desc *rx_desc;
	struct sk_buff *skb;
	u32 length;
	int i;

	i = rx_ring->next_to_clean;
	rx_desc = IXGB_RX_DESC(*rx_ring, i);

	while ((rx_desc->status & IXGB_RX_DESC_STATUS_DD)) {
		pci_unmap_single(pdev, rx_ring->buffer_info[i].dma,
				 rx_ring->buffer_info[i].length,
				 PCI_DMA_FROMDEVICE);

		skb = rx_ring->buffer_info[i].skb;
		length = le16_to_cpu(rx_desc->length);

		if (unlikely(!(rx_desc->status & IXGB_RX_DESC_STATUS_EOP))) {

			/* All receives must fit into a single buffer */

			IXGB_DBG("Receive packet consumed multiple buffers "
				 "length<%x>\n", length);

			dev_kfree_skb_irq(skb);
			rx_desc->status = 0;
			rx_ring->buffer_info[i].skb = NULL;
			i = (i + 1) % rx_ring->count;
			rx_desc = IXGB_RX_DESC(*rx_ring, i);
			continue;
		}

		if (unlikely(rx_desc->errors
			     & (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE
				| IXGB_RX_DESC_ERRORS_P |
				IXGB_RX_DESC_ERRORS_RXE))) {

			IXGB_DBG("Receive Errors Reported by Hardware-%x.\n",
				 rx_desc->errors);

			dev_kfree_skb_irq(skb);
			rx_desc->status = 0;
			rx_ring->buffer_info[i].skb = NULL;
			i = (i + 1) % rx_ring->count;
			rx_desc = IXGB_RX_DESC(*rx_ring, i);
			continue;
		}

		/* Good Receive */
		skb_put(skb, length);

		/* Receive Checksum Offload */
		ixgb_rx_checksum(adapter, rx_desc, skb);

		skb->protocol = eth_type_trans(skb, netdev);
		if (adapter->vlgrp
		    && (rx_desc->status & IXGB_RX_DESC_STATUS_VP)) {
			vlan_hwaccel_rx(skb, adapter->vlgrp,
					(rx_desc->special &
					 IXGB_RX_DESC_SPECIAL_VLAN_MASK));
		} else {
			netif_rx(skb);
		}
		netdev->last_rx = jiffies;

		rx_desc->status = 0;
		rx_ring->buffer_info[i].skb = NULL;

		i = (i + 1) % rx_ring->count;

		rx_desc = IXGB_RX_DESC(*rx_ring, i);
	}

	rx_ring->next_to_clean = i;

	ixgb_alloc_rx_buffers(adapter);
}
/**
 * ixgb_alloc_rx_buffers - Replace used receive buffers.
 * @param adapter address of board private structure
 **/

static void
ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct ixgb_rx_desc *rx_desc;
	struct sk_buff *skb;
	int reserve_len = 2;
	int num_group_tail_writes;
	int cleancount;
	int i;

	i = rx_ring->next_to_use;
	cleancount = IXGB_DESC_UNUSED(rx_ring);

	/* lessen this to 4 if we're
	 * in the midst of RAIDC and RBD is occurring
	 * because we don't want to delay returning buffers when low
	 */
	num_group_tail_writes = adapter->raidc ? 4 : IXGB_RX_BUFFER_WRITE;

	/* leave one descriptor unused */
	while (--cleancount > 0) {
		rx_desc = IXGB_RX_DESC(*rx_ring, i);

		/* allocate a new one */
		skb = dev_alloc_skb(adapter->rx_buffer_len + reserve_len);

		if (unlikely(!skb)) {
			/* better luck next time around */
			IXGB_DBG("Could not allocate SKB\n");
			break;
		}

		/* Make buffer alignment 2 beyond a 16 byte boundary
		 * this will result in a 16 byte aligned IP header after
		 * the 14 byte MAC header is removed
		 */
		skb_reserve(skb, reserve_len);
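		/* e.g. with reserve_len == 2, the 14-byte MAC header ends at
		 * offset 16, so the IP header that follows is 16-byte aligned */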
		skb->dev = netdev;

		rx_ring->buffer_info[i].skb = skb;
		rx_ring->buffer_info[i].length = adapter->rx_buffer_len;
		rx_ring->buffer_info[i].dma =
		    pci_map_single(pdev, skb->data, adapter->rx_buffer_len,
				   PCI_DMA_FROMDEVICE);

		rx_desc->buff_addr = cpu_to_le64(rx_ring->buffer_info[i].dma);

		if (!(i % num_group_tail_writes)) {
			/* Force memory writes to complete before letting h/w
			 * know there are new descriptors to fetch. (Only
			 * applicable for weak-ordered memory model archs,
			 * such as IA-64). */
			wmb();

			IXGB_WRITE_REG(&adapter->hw, RDT, i);
		}

		i = (i + 1) % rx_ring->count;
	}

	rx_ring->next_to_use = i;
}
/**
 * ixgb_ioctl - perform a command - e.g: ethtool:get_driver_info.
 * @param netdev network interface device structure
 * @param ifr data to be used/filled in by the ioctl command
 * @param cmd ioctl command to execute
 **/

static int
ixgb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCETHTOOL:
		return ixgb_ethtool_ioctl(netdev, ifr);
	default:
		return -EOPNOTSUPP;
	}
}
/**
 * ixgb_vlan_rx_register - enables or disables vlan tagging/stripping.
 *
 * @param netdev network interface device structure
 * @param grp indicates to enable or disable tagging/stripping
 **/

static void
ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct ixgb_adapter *adapter = netdev->priv;
	u32 ctrl, rctl;

	ixgb_irq_disable(adapter);
	adapter->vlgrp = grp;

	if (grp) {
		/* enable VLAN tag insert/strip */
		ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
		ctrl |= IXGB_CTRL0_VME;
		IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);

		/* enable VLAN receive filtering */
		rctl = IXGB_READ_REG(&adapter->hw, RCTL);
		rctl |= IXGB_RCTL_VFE;
		rctl &= ~IXGB_RCTL_CFIEN;
		IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
		ctrl &= ~IXGB_CTRL0_VME;
		IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);

		/* disable VLAN filtering */
		rctl = IXGB_READ_REG(&adapter->hw, RCTL);
		rctl &= ~IXGB_RCTL_VFE;
		IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
	}

	ixgb_irq_enable(adapter);
}
/**
 * ixgb_vlan_rx_add_vid - adds a vlan id to be tagged/stripped in packet data.
 * @param netdev network interface device structure
 * @param vid the vlan to be added
 **/

static void
ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct ixgb_adapter *adapter = netdev->priv;
	u32 vfta, index;

	/* add VID to filter table */
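	/* each VFTA dword holds 32 vlan bits; e.g. vid 100 lands in
	 * index (100 >> 5) & 0x7F == 3, bit 100 & 0x1F == 4 */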
	index = (vid >> 5) & 0x7F;
	vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
	vfta |= (1 << (vid & 0x1F));
	ixgb_write_vfta(&adapter->hw, index, vfta);
}
/**
 * ixgb_vlan_rx_kill_vid - removes a vlan id from tag/strip tables.
 * @param netdev network interface device structure
 * @param vid the vlan to be deleted
 **/

static void
ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct ixgb_adapter *adapter = netdev->priv;
	u32 vfta, index;

	ixgb_irq_disable(adapter);

	if (adapter->vlgrp)
		adapter->vlgrp->vlan_devices[vid] = NULL;

	ixgb_irq_enable(adapter);

	/* remove VID from filter table */

	index = (vid >> 5) & 0x7F;
	vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
	vfta &= ~(1 << (vid & 0x1F));
	ixgb_write_vfta(&adapter->hw, index, vfta);
}
/**
 * ixgb_restore_vlan - restores vlan settings after adapter reset.
 * @param adapter the address of the board private structure
 **/

static void
ixgb_restore_vlan(struct ixgb_adapter *adapter)
{
	ixgb_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if (adapter->vlgrp) {
		u16 vid;
		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if (!adapter->vlgrp->vlan_devices[vid])
				continue;
			ixgb_vlan_rx_add_vid(adapter->netdev, vid);
		}
	}
}
/**
 * ixgb_rx_checksum - Receive Checksum Offload for 82597.
 * @param adapter board private structure
 * @param rx_desc receive descriptor
 * @param skb socket buffer with received data
 **/

static inline void
ixgb_rx_checksum(struct ixgb_adapter *adapter,
		 struct ixgb_rx_desc *rx_desc, struct sk_buff *skb)
{
	/* Ignore Checksum bit is set OR
	 * TCP Checksum has not been calculated
	 */
	if ((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) ||
	    (!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	/* At this point we know the hardware did the TCP checksum
	 * now look at the TCP checksum error bit
	 */
	if (rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) {
		/* let the stack verify checksum errors */
		skb->ip_summed = CHECKSUM_NONE;
		adapter->hw_csum_rx_error++;
	} else {
		/* TCP checksum is good */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		adapter->hw_csum_rx_good++;
	}
}
/**
 * ixgb_write_pci_cfg - write PCI configuration space.
 * @param hw board specific data structure
 * @param reg PCI configuration space register to write to
 * @param value Value to be written to reg
 **/

void
ixgb_write_pci_cfg(struct ixgb_hw *hw, u32 reg, u16 *value)
{
	struct ixgb_adapter *adapter = (struct ixgb_adapter *) hw->back;

	pci_write_config_word(adapter->pdev, reg, *value);
}
/**
 * ixgb_notify_reboot - handles OS notification of reboot event.
 * @param nb notifier block, unused
 * @param event Event being passed to driver to act upon
 * @param p A pointer to our net device
 **/

static int
ixgb_notify_reboot(struct notifier_block *nb, unsigned long event, void *p)
{
	struct pci_dev *pdev = NULL;

	switch (event) {
	case SYS_DOWN:
	case SYS_HALT:
	case SYS_POWER_OFF:
		while ((pdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pdev)) != NULL) {
			if (pci_dev_driver(pdev) == &ixgb_driver)
				ixgb_suspend(pdev, 3);
		}
	}
	return NOTIFY_DONE;
}
/**
 * ixgb_suspend - driver suspend function called from notify.
 * @param pdev pci device structure
 * @param state power state to enter
 **/

static int
ixgb_suspend(struct pci_dev *pdev, u32 state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgb_adapter *adapter = netdev->priv;

	netif_device_detach(netdev);

	if (netif_running(netdev))
		ixgb_down(adapter, TRUE);

	pci_save_state(pdev, adapter->pci_state);
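
	/* any non-zero request maps to D3hot (3); zero leaves the device in D0 */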
	state = (state > 0) ? 3 : 0;
	pci_set_power_state(pdev, state);

	return 0;
}