1 diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_82598.c linux-2.6.22-50/drivers/net/ixgbe/ixgbe_82598.c
2 --- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_82598.c 1969-12-31 19:00:00.000000000 -0500
3 +++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_82598.c 2010-08-25 17:56:26.000000000 -0400
5 +/*******************************************************************************
7 + Intel 10 Gigabit PCI Express Linux driver
8 + Copyright(c) 1999 - 2010 Intel Corporation.
10 + This program is free software; you can redistribute it and/or modify it
11 + under the terms and conditions of the GNU General Public License,
12 + version 2, as published by the Free Software Foundation.
14 + This program is distributed in the hope it will be useful, but WITHOUT
15 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
19 + You should have received a copy of the GNU General Public License along with
20 + this program; if not, write to the Free Software Foundation, Inc.,
21 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
23 + The full GNU General Public License is included in this distribution in
24 + the file called "COPYING".
26 + Contact Information:
27 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
28 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 +*******************************************************************************/
32 +#include "ixgbe_type.h"
33 +#include "ixgbe_api.h"
34 +#include "ixgbe_common.h"
35 +#include "ixgbe_phy.h"
37 +u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw);
38 +s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw);
39 +static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
40 + ixgbe_link_speed *speed,
42 +static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
43 +s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num);
44 +static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
45 + bool autoneg_wait_to_complete);
46 +static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
47 + ixgbe_link_speed *speed, bool *link_up,
48 + bool link_up_wait_to_complete);
49 +static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
50 + ixgbe_link_speed speed,
52 + bool autoneg_wait_to_complete);
53 +static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
54 + ixgbe_link_speed speed,
56 + bool autoneg_wait_to_complete);
57 +static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
58 +s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw);
59 +void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw);
60 +s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
61 +static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
62 +s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan,
63 + u32 vind, bool vlan_on);
64 +static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
65 +s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val);
66 +s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val);
67 +s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
69 +u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw);
70 +s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw);
71 +void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw);
72 +void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw);
73 +static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw);
76 + * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
77 + * @hw: pointer to the HW structure
79 + * The defaults for 82598 should be in the range of 50us to 50ms,
80 + * however the hardware default for these parts is 500us to 1ms which is less
81 + * than the 10ms recommended by the pci-e spec. To address this we need to
82 + * increase the value to either 10ms to 250ms for capability version 1 config,
83 + * or 16ms to 55ms for version 2.
85 +void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
87 + u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
90 + /* only take action if timeout value is defaulted to 0 */
91 + if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
95 + * if capababilities version is type 1 we can write the
96 + * timeout of 10ms to 250ms through the GCR register
98 + if (!(gcr & IXGBE_GCR_CAP_VER2)) {
99 + gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
104 + * for version 2 capabilities we need to write the config space
105 + * directly in order to set the completion timeout value for
108 + pcie_devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
109 + pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
110 + IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
112 + /* disable completion timeout resend */
113 + gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
114 + IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
118 + * ixgbe_get_pcie_msix_count_82598 - Gets MSI-X vector count
119 + * @hw: pointer to hardware structure
121 + * Read PCIe configuration space, and get the MSI-X vector count from
122 + * the capabilities table.
124 +u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw)
126 + u32 msix_count = 18;
128 + if (hw->mac.msix_vectors_from_pcie) {
129 + msix_count = IXGBE_READ_PCIE_WORD(hw,
130 + IXGBE_PCIE_MSIX_82598_CAPS);
131 + msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
133 + /* MSI-X count is zero-based in HW, so increment to give
141 + * ixgbe_init_ops_82598 - Inits func ptrs and MAC type
142 + * @hw: pointer to hardware structure
144 + * Initialize the function pointers and assign the MAC type for 82598.
145 + * Does not touch the hardware.
147 +s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
149 + struct ixgbe_mac_info *mac = &hw->mac;
150 + struct ixgbe_phy_info *phy = &hw->phy;
153 + ret_val = ixgbe_init_phy_ops_generic(hw);
154 + ret_val = ixgbe_init_ops_generic(hw);
157 + phy->ops.init = &ixgbe_init_phy_ops_82598;
160 + mac->ops.start_hw = &ixgbe_start_hw_82598;
161 + mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_82598;
162 + mac->ops.reset_hw = &ixgbe_reset_hw_82598;
163 + mac->ops.get_media_type = &ixgbe_get_media_type_82598;
164 + mac->ops.get_supported_physical_layer =
165 + &ixgbe_get_supported_physical_layer_82598;
166 + mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82598;
167 + mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82598;
168 + mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598;
170 + /* RAR, Multicast, VLAN */
171 + mac->ops.set_vmdq = &ixgbe_set_vmdq_82598;
172 + mac->ops.clear_vmdq = &ixgbe_clear_vmdq_82598;
173 + mac->ops.set_vfta = &ixgbe_set_vfta_82598;
174 + mac->ops.clear_vfta = &ixgbe_clear_vfta_82598;
177 + mac->ops.fc_enable = &ixgbe_fc_enable_82598;
179 + mac->mcft_size = 128;
180 + mac->vft_size = 128;
181 + mac->num_rar_entries = 16;
182 + mac->rx_pb_size = 512;
183 + mac->max_tx_queues = 32;
184 + mac->max_rx_queues = 64;
185 + mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82598(hw);
188 + phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598;
191 + mac->ops.check_link = &ixgbe_check_mac_link_82598;
192 + mac->ops.setup_link = &ixgbe_setup_mac_link_82598;
193 + mac->ops.flap_tx_laser = NULL;
194 + mac->ops.get_link_capabilities =
195 + &ixgbe_get_link_capabilities_82598;
201 + * ixgbe_init_phy_ops_82598 - PHY/SFP specific init
202 + * @hw: pointer to hardware structure
204 + * Initialize any function pointers that were not able to be
205 + * set during init_shared_code because the PHY/SFP type was
206 + * not known. Perform the SFP init if necessary.
209 +s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
211 + struct ixgbe_mac_info *mac = &hw->mac;
212 + struct ixgbe_phy_info *phy = &hw->phy;
214 + u16 list_offset, data_offset;
216 + /* Identify the PHY */
217 + phy->ops.identify(hw);
219 + /* Overwrite the link function pointers if copper PHY */
220 + if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
221 + mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
222 + mac->ops.get_link_capabilities =
223 + &ixgbe_get_copper_link_capabilities_generic;
226 + switch (hw->phy.type) {
228 + phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
229 + phy->ops.check_link = &ixgbe_check_phy_link_tnx;
230 + phy->ops.get_firmware_version =
231 + &ixgbe_get_phy_firmware_version_tnx;
234 + phy->ops.get_firmware_version =
235 + &ixgbe_get_phy_firmware_version_generic;
238 + phy->ops.reset = &ixgbe_reset_phy_nl;
240 + /* Call SFP+ identify routine to get the SFP+ module type */
241 + ret_val = phy->ops.identify_sfp(hw);
244 + else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
245 + ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
249 + /* Check to see if SFP+ module is supported */
250 + ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
253 + if (ret_val != 0) {
254 + ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
267 + * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
268 + * @hw: pointer to hardware structure
270 + * Starts the hardware using the generic start_hw function.
271 + * Disables relaxed ordering Then set pcie completion timeout
274 +s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
280 + ret_val = ixgbe_start_hw_generic(hw);
282 + /* Disable relaxed ordering */
283 + for (i = 0; ((i < hw->mac.max_tx_queues) &&
284 + (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
285 + regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
286 + regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
287 + IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
290 + for (i = 0; ((i < hw->mac.max_rx_queues) &&
291 + (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
292 + regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
293 + regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
294 + IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
295 + IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
298 + /* set the completion timeout for interface */
300 + ixgbe_set_pcie_completion_timeout(hw);
306 + * ixgbe_get_link_capabilities_82598 - Determines link capabilities
307 + * @hw: pointer to hardware structure
308 + * @speed: pointer to link speed
309 + * @autoneg: boolean auto-negotiation value
311 + * Determines the link capabilities by reading the AUTOC register.
313 +static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
314 + ixgbe_link_speed *speed,
321 + * Determine link capabilities based on the stored value of AUTOC,
322 + * which represents EEPROM defaults. If AUTOC value has not been
323 + * stored, use the current register value.
325 + if (hw->mac.orig_link_settings_stored)
326 + autoc = hw->mac.orig_autoc;
328 + autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
330 + switch (autoc & IXGBE_AUTOC_LMS_MASK) {
331 + case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
332 + *speed = IXGBE_LINK_SPEED_1GB_FULL;
336 + case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
337 + *speed = IXGBE_LINK_SPEED_10GB_FULL;
341 + case IXGBE_AUTOC_LMS_1G_AN:
342 + *speed = IXGBE_LINK_SPEED_1GB_FULL;
346 + case IXGBE_AUTOC_LMS_KX4_AN:
347 + case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
348 + *speed = IXGBE_LINK_SPEED_UNKNOWN;
349 + if (autoc & IXGBE_AUTOC_KX4_SUPP)
350 + *speed |= IXGBE_LINK_SPEED_10GB_FULL;
351 + if (autoc & IXGBE_AUTOC_KX_SUPP)
352 + *speed |= IXGBE_LINK_SPEED_1GB_FULL;
357 + status = IXGBE_ERR_LINK_SETUP;
365 + * ixgbe_get_media_type_82598 - Determines media type
366 + * @hw: pointer to hardware structure
368 + * Returns the media type (fiber, copper, backplane)
370 +static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
372 + enum ixgbe_media_type media_type;
374 + /* Detect if there is a copper PHY attached. */
375 + if (hw->phy.type == ixgbe_phy_cu_unknown ||
376 + hw->phy.type == ixgbe_phy_tn ||
377 + hw->phy.type == ixgbe_phy_aq) {
378 + media_type = ixgbe_media_type_copper;
382 + /* Media type for I82598 is based on device ID */
383 + switch (hw->device_id) {
384 + case IXGBE_DEV_ID_82598:
385 + case IXGBE_DEV_ID_82598_BX:
386 + /* Default device ID is mezzanine card KX/KX4 */
387 + media_type = ixgbe_media_type_backplane;
389 + case IXGBE_DEV_ID_82598AF_DUAL_PORT:
390 + case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
391 + case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
392 + case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
393 + case IXGBE_DEV_ID_82598EB_XF_LR:
394 + case IXGBE_DEV_ID_82598EB_SFP_LOM:
395 + media_type = ixgbe_media_type_fiber;
397 + case IXGBE_DEV_ID_82598EB_CX4:
398 + case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
399 + media_type = ixgbe_media_type_cx4;
401 + case IXGBE_DEV_ID_82598AT:
402 + case IXGBE_DEV_ID_82598AT2:
403 + media_type = ixgbe_media_type_copper;
406 + media_type = ixgbe_media_type_unknown;
414 + * ixgbe_fc_enable_82598 - Enable flow control
415 + * @hw: pointer to hardware structure
416 + * @packetbuf_num: packet buffer number (0-7)
418 + * Enable flow control according to the current settings.
420 +s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
426 + u32 link_speed = 0;
430 + if (hw->fc.requested_mode == ixgbe_fc_pfc)
433 +#endif /* CONFIG_DCB */
435 + * On 82598 having Rx FC on causes resets while doing 1G
436 + * so if it's on turn it off once we know link_speed. For
437 + * more details see 82598 Specification update.
439 + hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
440 + if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
441 + switch (hw->fc.requested_mode) {
442 + case ixgbe_fc_full:
443 + hw->fc.requested_mode = ixgbe_fc_tx_pause;
445 + case ixgbe_fc_rx_pause:
446 + hw->fc.requested_mode = ixgbe_fc_none;
454 + /* Negotiate the fc mode to use */
455 + ret_val = ixgbe_fc_autoneg(hw);
456 + if (ret_val == IXGBE_ERR_FLOW_CONTROL)
459 + /* Disable any previous flow control settings */
460 + fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
461 + fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
463 + rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
464 + rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);
467 + * The possible values of fc.current_mode are:
468 + * 0: Flow control is completely disabled
469 + * 1: Rx flow control is enabled (we can receive pause frames,
470 + * but not send pause frames).
471 + * 2: Tx flow control is enabled (we can send pause frames but
472 + * we do not support receiving pause frames).
473 + * 3: Both Rx and Tx flow control (symmetric) are enabled.
475 + * 4: Priority Flow Control is enabled.
479 + switch (hw->fc.current_mode) {
480 + case ixgbe_fc_none:
481 + /* Flow control is disabled by software override or autoneg.
482 + * The code below will actually disable it in the HW.
485 + case ixgbe_fc_rx_pause:
487 + * Rx Flow control is enabled and Tx Flow control is
488 + * disabled by software override. Since there really
489 + * isn't a way to advertise that we are capable of RX
490 + * Pause ONLY, we will advertise that we support both
491 + * symmetric and asymmetric Rx PAUSE. Later, we will
492 + * disable the adapter's ability to send PAUSE frames.
494 + fctrl_reg |= IXGBE_FCTRL_RFCE;
496 + case ixgbe_fc_tx_pause:
498 + * Tx Flow control is enabled, and Rx Flow control is
499 + * disabled by software override.
501 + rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
503 + case ixgbe_fc_full:
504 + /* Flow control (both Rx and Tx) is enabled by SW override. */
505 + fctrl_reg |= IXGBE_FCTRL_RFCE;
506 + rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
512 +#endif /* CONFIG_DCB */
514 + hw_dbg(hw, "Flow control param set incorrectly\n");
515 + ret_val = IXGBE_ERR_CONFIG;
520 + /* Set 802.3x based flow control settings. */
521 + fctrl_reg |= IXGBE_FCTRL_DPF;
522 + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
523 + IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
525 + /* Set up and enable Rx high/low water mark thresholds, enable XON. */
526 + if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
527 + if (hw->fc.send_xon) {
528 + IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
529 + (hw->fc.low_water | IXGBE_FCRTL_XONE));
531 + IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
535 + IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num),
536 + (hw->fc.high_water | IXGBE_FCRTH_FCEN));
539 + /* Configure pause time (2 TCs per register) */
540 + reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
541 + if ((packetbuf_num & 1) == 0)
542 + reg = (reg & 0xFFFF0000) | hw->fc.pause_time;
544 + reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16);
545 + IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg);
547 + IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));
554 + * ixgbe_start_mac_link_82598 - Configures MAC link settings
555 + * @hw: pointer to hardware structure
557 + * Configures link settings based on values in the ixgbe_hw struct.
558 + * Restarts the link. Performs autonegotiation if needed.
560 +static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
561 + bool autoneg_wait_to_complete)
569 + autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
570 + autoc_reg |= IXGBE_AUTOC_AN_RESTART;
571 + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
573 + /* Only poll for autoneg to complete if specified to do so */
574 + if (autoneg_wait_to_complete) {
575 + if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
576 + IXGBE_AUTOC_LMS_KX4_AN ||
577 + (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
578 + IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
579 + links_reg = 0; /* Just in case Autoneg time = 0 */
580 + for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
581 + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
582 + if (links_reg & IXGBE_LINKS_KX_AN_COMP)
586 + if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
587 + status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
588 + hw_dbg(hw, "Autonegotiation did not complete.\n");
593 + /* Add delay to filter out noises during initial link setup */
600 + * ixgbe_check_mac_link_82598 - Get link/speed status
601 + * @hw: pointer to hardware structure
602 + * @speed: pointer to link speed
603 + * @link_up: true is link is up, false otherwise
604 + * @link_up_wait_to_complete: bool used to wait for link up or not
606 + * Reads the links register to determine if link is up and the current speed
608 +static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
609 + ixgbe_link_speed *speed, bool *link_up,
610 + bool link_up_wait_to_complete)
614 + u16 link_reg, adapt_comp_reg;
617 + * SERDES PHY requires us to read link status from undocumented
618 + * register 0xC79F. Bit 0 set indicates link is up/ready; clear
619 + * indicates link down. OxC00C is read to check that the XAUI lanes
620 + * are active. Bit 0 clear indicates active; set indicates inactive.
622 + if (hw->phy.type == ixgbe_phy_nl) {
623 + hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
624 + hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
625 + hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
627 + if (link_up_wait_to_complete) {
628 + for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
629 + if ((link_reg & 1) &&
630 + ((adapt_comp_reg & 1) == 0)) {
637 + hw->phy.ops.read_reg(hw, 0xC79F,
640 + hw->phy.ops.read_reg(hw, 0xC00C,
645 + if ((link_reg & 1) &&
646 + ((adapt_comp_reg & 1) == 0))
652 + if (*link_up == false)
656 + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
657 + if (link_up_wait_to_complete) {
658 + for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
659 + if (links_reg & IXGBE_LINKS_UP) {
666 + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
669 + if (links_reg & IXGBE_LINKS_UP)
675 + if (links_reg & IXGBE_LINKS_SPEED)
676 + *speed = IXGBE_LINK_SPEED_10GB_FULL;
678 + *speed = IXGBE_LINK_SPEED_1GB_FULL;
680 + if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == true) &&
681 + (ixgbe_validate_link_ready(hw) != 0))
684 + /* if link is down, zero out the current_mode */
685 + if (*link_up == false) {
686 + hw->fc.current_mode = ixgbe_fc_none;
687 + hw->fc.fc_was_autonegged = false;
695 + * ixgbe_setup_mac_link_82598 - Set MAC link speed
696 + * @hw: pointer to hardware structure
697 + * @speed: new link speed
698 + * @autoneg: true if autonegotiation enabled
699 + * @autoneg_wait_to_complete: true when waiting for completion is needed
701 + * Set the link speed in the AUTOC register and restarts link.
703 +static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
704 + ixgbe_link_speed speed, bool autoneg,
705 + bool autoneg_wait_to_complete)
708 + ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
709 + u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
710 + u32 autoc = curr_autoc;
711 + u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
713 + /* Check to see if speed passed in is supported. */
714 + ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
715 + speed &= link_capabilities;
717 + if (speed == IXGBE_LINK_SPEED_UNKNOWN)
718 + status = IXGBE_ERR_LINK_SETUP;
720 + /* Set KX4/KX support according to speed requested */
721 + else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
722 + link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
723 + autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
724 + if (speed & IXGBE_LINK_SPEED_10GB_FULL)
725 + autoc |= IXGBE_AUTOC_KX4_SUPP;
726 + if (speed & IXGBE_LINK_SPEED_1GB_FULL)
727 + autoc |= IXGBE_AUTOC_KX_SUPP;
728 + if (autoc != curr_autoc)
729 + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
734 + * Setup and restart the link based on the new values in
735 + * ixgbe_hw This will write the AUTOC register based on the new
738 + status = ixgbe_start_mac_link_82598(hw,
739 + autoneg_wait_to_complete);
747 + * ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
748 + * @hw: pointer to hardware structure
749 + * @speed: new link speed
750 + * @autoneg: true if autonegotiation enabled
751 + * @autoneg_wait_to_complete: true if waiting is needed to complete
753 + * Sets the link speed in the AUTOC register in the MAC and restarts link.
755 +static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
756 + ixgbe_link_speed speed,
758 + bool autoneg_wait_to_complete)
762 + /* Setup the PHY according to input speed */
763 + status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
764 + autoneg_wait_to_complete);
766 + ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
772 + * ixgbe_reset_hw_82598 - Performs hardware reset
773 + * @hw: pointer to hardware structure
775 + * Resets the hardware by resetting the transmit and receive units, masks and
776 + * clears all interrupts, performing a PHY reset, and performing a link (MAC)
779 +static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
782 + s32 phy_status = 0;
789 + /* Call adapter stop to disable tx/rx and clear interrupts */
790 + hw->mac.ops.stop_adapter(hw);
793 + * Power up the Atlas Tx lanes if they are currently powered down.
794 + * Atlas Tx lanes are powered down for MAC loopback tests, but
795 + * they are not automatically restored on reset.
797 + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
798 + if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
799 + /* Enable Tx Atlas so packets can be transmitted again */
800 + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
802 + analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
803 + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
806 + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
808 + analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
809 + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
812 + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
814 + analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
815 + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
818 + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
820 + analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
821 + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
826 + if (hw->phy.reset_disable == false) {
827 + /* PHY ops must be identified and initialized prior to reset */
829 + /* Init PHY and function pointers, perform SFP setup */
830 + phy_status = hw->phy.ops.init(hw);
831 + if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
833 + else if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
836 + hw->phy.ops.reset(hw);
841 + * Prevent the PCI-E bus from from hanging by disabling PCI-E master
842 + * access and verify no pending requests before reset
844 + ixgbe_disable_pcie_master(hw);
848 + * Issue global reset to the MAC. This needs to be a SW reset.
849 + * If link reset is used, it might reset the MAC when mng is using it
851 + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
852 + IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST));
853 + IXGBE_WRITE_FLUSH(hw);
855 + /* Poll for reset bit to self-clear indicating reset is complete */
856 + for (i = 0; i < 10; i++) {
858 + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
859 + if (!(ctrl & IXGBE_CTRL_RST))
862 + if (ctrl & IXGBE_CTRL_RST) {
863 + status = IXGBE_ERR_RESET_FAILED;
864 + hw_dbg(hw, "Reset polling failed to complete.\n");
868 + * Double resets are required for recovery from certain error
869 + * conditions. Between resets, it is necessary to stall to allow time
870 + * for any pending HW events to complete. We use 1usec since that is
871 + * what is needed for ixgbe_disable_pcie_master(). The second reset
872 + * then clears out any effects of those events.
874 + if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
875 + hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
877 + goto mac_reset_top;
882 + gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
883 + gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
884 + IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);
887 + * Store the original AUTOC value if it has not been
888 + * stored off yet. Otherwise restore the stored original
889 + * AUTOC value since the reset operation sets back to deaults.
891 + autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
892 + if (hw->mac.orig_link_settings_stored == false) {
893 + hw->mac.orig_autoc = autoc;
894 + hw->mac.orig_link_settings_stored = true;
895 + } else if (autoc != hw->mac.orig_autoc)
896 + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
898 + /* Store the permanent mac address */
899 + hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
902 + * Store MAC address from RAR0, clear receive address registers, and
903 + * clear the multicast table
905 + hw->mac.ops.init_rx_addrs(hw);
910 + if (phy_status != 0)
911 + status = phy_status;
916 + * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
917 + * @hw: pointer to hardware struct
918 + * @rar: receive address register index to associate with a VMDq index
919 + * @vmdq: VMDq set index
921 +s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
925 + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
926 + rar_high &= ~IXGBE_RAH_VIND_MASK;
927 + rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
928 + IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
933 + * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
934 + * @hw: pointer to hardware struct
935 + * @rar: receive address register index to associate with a VMDq index
936 + * @vmdq: VMDq clear index (not used in 82598, but elsewhere)
938 +static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
941 + u32 rar_entries = hw->mac.num_rar_entries;
944 + if (rar < rar_entries) {
945 + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
946 + if (rar_high & IXGBE_RAH_VIND_MASK) {
947 + rar_high &= ~IXGBE_RAH_VIND_MASK;
948 + IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
951 + hw_dbg(hw, "RAR index %d is out of range.\n", rar);
958 + * ixgbe_set_vfta_82598 - Set VLAN filter table
959 + * @hw: pointer to hardware structure
960 + * @vlan: VLAN id to write to VLAN filter
961 + * @vind: VMDq output index that maps queue to VLAN id in VFTA
962 + * @vlan_on: boolean flag to turn on/off VLAN in VFTA
964 + * Turn on/off specified VLAN in the VLAN filter table.
966 +s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
975 + return IXGBE_ERR_PARAM;
977 + /* Determine 32-bit word position in array */
978 + regindex = (vlan >> 5) & 0x7F; /* upper seven bits */
980 + /* Determine the location of the (VMD) queue index */
981 + vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
982 + bitindex = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */
984 + /* Set the nibble for VMD queue index */
985 + bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
986 + bits &= (~(0x0F << bitindex));
987 + bits |= (vind << bitindex);
988 + IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);
990 + /* Determine the location of the bit for this VLAN id */
991 + bitindex = vlan & 0x1F; /* lower five bits */
993 + bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
995 + /* Turn on this VLAN id */
996 + bits |= (1 << bitindex);
998 + /* Turn off this VLAN id */
999 + bits &= ~(1 << bitindex);
1000 + IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
1006 + * ixgbe_clear_vfta_82598 - Clear VLAN filter table
1007 + * @hw: pointer to hardware structure
1009 + * Clears the VLAN filer table, and the VMDq index associated with the filter
1011 +static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
1016 + for (offset = 0; offset < hw->mac.vft_size; offset++)
1017 + IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
1019 + for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
1020 + for (offset = 0; offset < hw->mac.vft_size; offset++)
1021 + IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
1028 + * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
1029 + * @hw: pointer to hardware structure
1030 + * @reg: analog register to read
1031 + * @val: read value
1033 + * Performs read operation to Atlas analog register specified.
1035 +s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
1039 + IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
1040 + IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
1041 + IXGBE_WRITE_FLUSH(hw);
1043 + atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
1044 + *val = (u8)atlas_ctl;
1050 + * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
1051 + * @hw: pointer to hardware structure
1052 + * @reg: atlas register to write
1053 + * @val: value to write
1055 + * Performs write operation to Atlas analog register specified.
1057 +s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
1061 + atlas_ctl = (reg << 8) | val;
1062 + IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
1063 + IXGBE_WRITE_FLUSH(hw);
1070 + * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
1071 + * @hw: pointer to hardware structure
1072 + * @byte_offset: EEPROM byte offset to read
1073 + * @eeprom_data: value read
1075 + * Performs 8 byte read operation to SFP module's EEPROM over I2C interface.
1077 +s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
1086 + if (hw->phy.type == ixgbe_phy_nl) {
1088 + * NetLogic phy SDA/SCL registers are at addresses 0xC30A to
1089 + * 0xC30D. These registers are used to talk to the SFP+
1090 + * module's EEPROM through the SDA/SCL (I2C) interface.
1092 + sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset;
1093 + sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
1094 + hw->phy.ops.write_reg(hw,
1095 + IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
1096 + IXGBE_MDIO_PMA_PMD_DEV_TYPE,
1100 + for (i = 0; i < 100; i++) {
1101 + hw->phy.ops.read_reg(hw,
1102 + IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
1103 + IXGBE_MDIO_PMA_PMD_DEV_TYPE,
1105 + sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
1106 + if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
1111 + if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
1112 + hw_dbg(hw, "EEPROM read did not pass.\n");
1113 + status = IXGBE_ERR_SFP_NOT_PRESENT;
1118 + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
1119 + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);
1121 + *eeprom_data = (u8)(sfp_data >> 8);
1123 + status = IXGBE_ERR_PHY;
1132 + * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
1133 + * @hw: pointer to hardware structure
1135 + * Determines physical layer capabilities of the current configuration.
1137 +u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
1139 + u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1140 + u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1141 + u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
1142 + u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
1143 + u16 ext_ability = 0;
1145 + hw->phy.ops.identify(hw);
1147 + /* Copper PHY must be checked before AUTOC LMS to determine correct
1148 + * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
1149 + if (hw->phy.type == ixgbe_phy_tn ||
1150 + hw->phy.type == ixgbe_phy_cu_unknown) {
1151 + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
1152 + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
1153 + if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
1154 + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
1155 + if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
1156 + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
1157 + if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
1158 + physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
1162 + switch (autoc & IXGBE_AUTOC_LMS_MASK) {
1163 + case IXGBE_AUTOC_LMS_1G_AN:
1164 + case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
1165 + if (pma_pmd_1g == IXGBE_AUTOC_1G_KX)
1166 + physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
1168 + physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
1170 + case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
1171 + if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4)
1172 + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
1173 + else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
1174 + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
1176 + physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1178 + case IXGBE_AUTOC_LMS_KX4_AN:
1179 + case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
1180 + if (autoc & IXGBE_AUTOC_KX_SUPP)
1181 + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
1182 + if (autoc & IXGBE_AUTOC_KX4_SUPP)
1183 + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
1189 + if (hw->phy.type == ixgbe_phy_nl) {
1190 + hw->phy.ops.identify_sfp(hw);
1192 + switch (hw->phy.sfp_type) {
1193 + case ixgbe_sfp_type_da_cu:
1194 + physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
1196 + case ixgbe_sfp_type_sr:
1197 + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
1199 + case ixgbe_sfp_type_lr:
1200 + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
1203 + physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1208 + switch (hw->device_id) {
1209 + case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
1210 + physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
1212 + case IXGBE_DEV_ID_82598AF_DUAL_PORT:
1213 + case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
1214 + case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
1215 + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
1217 + case IXGBE_DEV_ID_82598EB_XF_LR:
1218 + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
1225 + return physical_layer;
1229 + * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
1231 + * @hw: pointer to the HW structure
1233 + * Calls common function and corrects issue with some single port devices
1234 + * that enable LAN1 but not LAN0.
1236 +void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
1238 + struct ixgbe_bus_info *bus = &hw->bus;
1239 + u16 pci_gen, pci_ctrl2;
1241 + ixgbe_set_lan_id_multi_port_pcie(hw);
1243 + /* check if LAN0 is disabled */
1244 + hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
1245 + if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {
1247 + hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);
1249 + /* if LAN0 is completely disabled force function to 0 */
1250 + if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
1251 + !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
1252 + !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {
1260 + * ixgbe_validate_link_ready - Function looks for phy link
1261 + * @hw: pointer to hardware structure
1263 + * Function indicates success when phy link is available. If phy is not ready
1264 + * within 5 seconds of MAC indicating link, the function returns error.
1266 +static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
1271 + if (hw->device_id != IXGBE_DEV_ID_82598AT2)
1275 + timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
1276 + hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
1277 + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg);
1279 + if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) &&
1280 + (an_reg & IXGBE_MII_AUTONEG_LINK_UP))
1286 + if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
1287 + hw_dbg(hw, "Link was indicated but link is down\n");
1288 + return IXGBE_ERR_LINK_SETUP;
1295 + * ixgbe_enable_relaxed_ordering_82598 - enable relaxed ordering
1296 + * @hw: pointer to hardware structure
1299 +void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw)
1304 + /* Enable relaxed ordering */
1305 + for (i = 0; ((i < hw->mac.max_tx_queues) &&
1306 + (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
1307 + regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
1308 + regval |= IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
1309 + IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
1312 + for (i = 0; ((i < hw->mac.max_rx_queues) &&
1313 + (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
1314 + regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
1315 + regval |= (IXGBE_DCA_RXCTRL_DESC_WRO_EN |
1316 + IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
1317 + IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
1321 diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_82599.c linux-2.6.22-50/drivers/net/ixgbe/ixgbe_82599.c
1322 --- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_82599.c 1969-12-31 19:00:00.000000000 -0500
1323 +++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_82599.c 2010-08-25 17:56:26.000000000 -0400
1325 +/*******************************************************************************
1327 + Intel 10 Gigabit PCI Express Linux driver
1328 + Copyright(c) 1999 - 2010 Intel Corporation.
1330 + This program is free software; you can redistribute it and/or modify it
1331 + under the terms and conditions of the GNU General Public License,
1332 + version 2, as published by the Free Software Foundation.
1334 + This program is distributed in the hope it will be useful, but WITHOUT
1335 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
1336 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
1339 + You should have received a copy of the GNU General Public License along with
1340 + this program; if not, write to the Free Software Foundation, Inc.,
1341 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
1343 + The full GNU General Public License is included in this distribution in
1344 + the file called "COPYING".
1346 + Contact Information:
1347 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
1348 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
1350 +*******************************************************************************/
1352 +#include "ixgbe_type.h"
1353 +#include "ixgbe_api.h"
1354 +#include "ixgbe_common.h"
1355 +#include "ixgbe_phy.h"
1357 +s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw);
1358 +s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
1359 + ixgbe_link_speed *speed,
1361 +enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw);
1362 +void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
1363 +void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
1364 +void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
1365 +s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
1366 + ixgbe_link_speed speed, bool autoneg,
1367 + bool autoneg_wait_to_complete);
1368 +s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
1369 + ixgbe_link_speed speed, bool autoneg,
1370 + bool autoneg_wait_to_complete);
1371 +s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
1372 + bool autoneg_wait_to_complete);
1373 +s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
1374 + ixgbe_link_speed speed,
1376 + bool autoneg_wait_to_complete);
1377 +static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
1378 + ixgbe_link_speed speed,
1380 + bool autoneg_wait_to_complete);
1381 +s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw);
1382 +void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw);
1383 +s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw);
1384 +s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val);
1385 +s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val);
1386 +s32 ixgbe_start_hw_rev_1_82599(struct ixgbe_hw *hw);
1387 +void ixgbe_enable_relaxed_ordering_82599(struct ixgbe_hw *hw);
1388 +s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw);
1389 +s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw);
1390 +u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw);
1391 +s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval);
1392 +s32 ixgbe_get_device_caps_82599(struct ixgbe_hw *hw, u16 *device_caps);
1393 +static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
1395 +void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
1397 + struct ixgbe_mac_info *mac = &hw->mac;
1399 + if (hw->phy.multispeed_fiber) {
1400 + /* Set up dual speed SFP+ support */
1401 + mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
1402 + mac->ops.disable_tx_laser =
1403 + &ixgbe_disable_tx_laser_multispeed_fiber;
1404 + mac->ops.enable_tx_laser =
1405 + &ixgbe_enable_tx_laser_multispeed_fiber;
1406 + mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
1408 + mac->ops.disable_tx_laser = NULL;
1409 + mac->ops.enable_tx_laser = NULL;
1410 + mac->ops.flap_tx_laser = NULL;
1411 + if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
1412 + (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
1413 + hw->phy.smart_speed == ixgbe_smart_speed_on))
1414 + mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed;
1416 + mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
1421 + * ixgbe_init_phy_ops_82599 - PHY/SFP specific init
1422 + * @hw: pointer to hardware structure
1424 + * Initialize any function pointers that were not able to be
1425 + * set during init_shared_code because the PHY/SFP type was
1426 + * not known. Perform the SFP init if necessary.
1429 +s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
1431 + struct ixgbe_mac_info *mac = &hw->mac;
1432 + struct ixgbe_phy_info *phy = &hw->phy;
1435 + /* Identify the PHY or SFP module */
1436 + ret_val = phy->ops.identify(hw);
1437 + if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
1438 + goto init_phy_ops_out;
1440 + /* Setup function pointers based on detected SFP module and speeds */
1441 + ixgbe_init_mac_link_ops_82599(hw);
1442 + if (hw->phy.sfp_type != ixgbe_sfp_type_unknown)
1443 + hw->phy.ops.reset = NULL;
1445 + /* If copper media, overwrite with copper function pointers */
1446 + if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
1447 + mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
1448 + mac->ops.get_link_capabilities =
1449 + &ixgbe_get_copper_link_capabilities_generic;
1452 + /* Set necessary function pointers based on phy type */
1453 + switch (hw->phy.type) {
1454 + case ixgbe_phy_tn:
1455 + phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
1456 + phy->ops.check_link = &ixgbe_check_phy_link_tnx;
1457 + phy->ops.get_firmware_version =
1458 + &ixgbe_get_phy_firmware_version_tnx;
1460 + case ixgbe_phy_aq:
1461 + phy->ops.get_firmware_version =
1462 + &ixgbe_get_phy_firmware_version_generic;
1471 +s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
1474 + u32 reg_anlp1 = 0;
1476 + u16 list_offset, data_offset, data_value;
1478 + if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
1479 + ixgbe_init_mac_link_ops_82599(hw);
1481 + hw->phy.ops.reset = NULL;
1483 + ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
1486 + goto setup_sfp_out;
1488 + /* PHY config will finish before releasing the semaphore */
1489 + ret_val = ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
1490 + if (ret_val != 0) {
1491 + ret_val = IXGBE_ERR_SWFW_SYNC;
1492 + goto setup_sfp_out;
1495 + hw->eeprom.ops.read(hw, ++data_offset, &data_value);
1496 + while (data_value != 0xffff) {
1497 + IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
1498 + IXGBE_WRITE_FLUSH(hw);
1499 + hw->eeprom.ops.read(hw, ++data_offset, &data_value);
1502 + /* Release the semaphore */
1503 + ixgbe_release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
1504 + /* Delay obtaining semaphore again to allow FW access */
1505 + msleep(hw->eeprom.semaphore_delay);
1507 + /* Now restart DSP by setting Restart_AN and clearing LMS */
1508 + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw,
1509 + IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) |
1510 + IXGBE_AUTOC_AN_RESTART));
1512 + /* Wait for AN to leave state 0 */
1513 + for (i = 0; i < 10; i++) {
1515 + reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1);
1516 + if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)
1519 + if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) {
1520 + hw_dbg(hw, "sfp module setup not complete\n");
1521 + ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
1522 + goto setup_sfp_out;
1525 + /* Restart DSP by setting Restart_AN and return to SFI mode */
1526 + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
1527 + IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL |
1528 + IXGBE_AUTOC_AN_RESTART));
1536 + * ixgbe_init_ops_82599 - Inits func ptrs and MAC type
1537 + * @hw: pointer to hardware structure
1539 + * Initialize the function pointers and assign the MAC type for 82599.
1540 + * Does not touch the hardware.
1543 +s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
1545 + struct ixgbe_mac_info *mac = &hw->mac;
1546 + struct ixgbe_phy_info *phy = &hw->phy;
1549 + ret_val = ixgbe_init_phy_ops_generic(hw);
1550 + ret_val = ixgbe_init_ops_generic(hw);
1553 + phy->ops.identify = &ixgbe_identify_phy_82599;
1554 + phy->ops.init = &ixgbe_init_phy_ops_82599;
1557 + mac->ops.reset_hw = &ixgbe_reset_hw_82599;
1558 + mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_82599;
1559 + mac->ops.get_media_type = &ixgbe_get_media_type_82599;
1560 + mac->ops.get_supported_physical_layer =
1561 + &ixgbe_get_supported_physical_layer_82599;
1562 + mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_82599;
1563 + mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82599;
1564 + mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82599;
1565 + mac->ops.start_hw = &ixgbe_start_hw_rev_1_82599;
1566 + mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic;
1567 + mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic;
1568 + mac->ops.get_device_caps = &ixgbe_get_device_caps_82599;
1569 + mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic;
1570 + mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic;
1572 + /* RAR, Multicast, VLAN */
1573 + mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
1574 + mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic;
1575 + mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic;
1576 + mac->rar_highwater = 1;
1577 + mac->ops.set_vfta = &ixgbe_set_vfta_generic;
1578 + mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
1579 + mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
1580 + mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_82599;
1583 + mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82599;
1584 + mac->ops.check_link = &ixgbe_check_mac_link_generic;
1585 + ixgbe_init_mac_link_ops_82599(hw);
1587 + mac->mcft_size = 128;
1588 + mac->vft_size = 128;
1589 + mac->num_rar_entries = 128;
1590 + mac->rx_pb_size = 512;
1591 + mac->max_tx_queues = 128;
1592 + mac->max_rx_queues = 128;
1593 + mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
1595 + hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;
1601 + * ixgbe_get_link_capabilities_82599 - Determines link capabilities
1602 + * @hw: pointer to hardware structure
1603 + * @speed: pointer to link speed
1604 + * @negotiation: true when autoneg or autotry is enabled
1606 + * Determines the link capabilities by reading the AUTOC register.
1608 +s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
1609 + ixgbe_link_speed *speed,
1610 + bool *negotiation)
1615 + /* Check if 1G SFP module. */
1616 + if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
1617 + hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) {
1618 + *speed = IXGBE_LINK_SPEED_1GB_FULL;
1619 + *negotiation = true;
1624 + * Determine link capabilities based on the stored value of AUTOC,
1625 + * which represents EEPROM defaults. If AUTOC value has not
1626 + * been stored, use the current register values.
1628 + if (hw->mac.orig_link_settings_stored)
1629 + autoc = hw->mac.orig_autoc;
1631 + autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1633 + switch (autoc & IXGBE_AUTOC_LMS_MASK) {
1634 + case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
1635 + *speed = IXGBE_LINK_SPEED_1GB_FULL;
1636 + *negotiation = false;
1639 + case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
1640 + *speed = IXGBE_LINK_SPEED_10GB_FULL;
1641 + *negotiation = false;
1644 + case IXGBE_AUTOC_LMS_1G_AN:
1645 + *speed = IXGBE_LINK_SPEED_1GB_FULL;
1646 + *negotiation = true;
1649 + case IXGBE_AUTOC_LMS_10G_SERIAL:
1650 + *speed = IXGBE_LINK_SPEED_10GB_FULL;
1651 + *negotiation = false;
1654 + case IXGBE_AUTOC_LMS_KX4_KX_KR:
1655 + case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
1656 + *speed = IXGBE_LINK_SPEED_UNKNOWN;
1657 + if (autoc & IXGBE_AUTOC_KR_SUPP)
1658 + *speed |= IXGBE_LINK_SPEED_10GB_FULL;
1659 + if (autoc & IXGBE_AUTOC_KX4_SUPP)
1660 + *speed |= IXGBE_LINK_SPEED_10GB_FULL;
1661 + if (autoc & IXGBE_AUTOC_KX_SUPP)
1662 + *speed |= IXGBE_LINK_SPEED_1GB_FULL;
1663 + *negotiation = true;
1666 + case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
1667 + *speed = IXGBE_LINK_SPEED_100_FULL;
1668 + if (autoc & IXGBE_AUTOC_KR_SUPP)
1669 + *speed |= IXGBE_LINK_SPEED_10GB_FULL;
1670 + if (autoc & IXGBE_AUTOC_KX4_SUPP)
1671 + *speed |= IXGBE_LINK_SPEED_10GB_FULL;
1672 + if (autoc & IXGBE_AUTOC_KX_SUPP)
1673 + *speed |= IXGBE_LINK_SPEED_1GB_FULL;
1674 + *negotiation = true;
1677 + case IXGBE_AUTOC_LMS_SGMII_1G_100M:
1678 + *speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
1679 + *negotiation = false;
1683 + status = IXGBE_ERR_LINK_SETUP;
1688 + if (hw->phy.multispeed_fiber) {
1689 + *speed |= IXGBE_LINK_SPEED_10GB_FULL |
1690 + IXGBE_LINK_SPEED_1GB_FULL;
1691 + *negotiation = true;
1699 + * ixgbe_get_media_type_82599 - Get media type
1700 + * @hw: pointer to hardware structure
1702 + * Returns the media type (fiber, copper, backplane)
1704 +enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
1706 + enum ixgbe_media_type media_type;
1708 + /* Detect if there is a copper PHY attached. */
1709 + if (hw->phy.type == ixgbe_phy_cu_unknown ||
1710 + hw->phy.type == ixgbe_phy_tn ||
1711 + hw->phy.type == ixgbe_phy_aq) {
1712 + media_type = ixgbe_media_type_copper;
1716 + switch (hw->device_id) {
1717 + case IXGBE_DEV_ID_82599_KX4:
1718 + case IXGBE_DEV_ID_82599_KX4_MEZZ:
1719 + case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
1720 + case IXGBE_DEV_ID_82599_KR:
1721 + case IXGBE_DEV_ID_82599_XAUI_LOM:
1722 + /* Default device ID is mezzanine card KX/KX4 */
1723 + media_type = ixgbe_media_type_backplane;
1725 + case IXGBE_DEV_ID_82599_SFP:
1726 + case IXGBE_DEV_ID_82599_SFP_EM:
1727 + media_type = ixgbe_media_type_fiber;
1729 + case IXGBE_DEV_ID_82599_CX4:
1730 + media_type = ixgbe_media_type_cx4;
1732 + case IXGBE_DEV_ID_82599_T3_LOM:
1733 + media_type = ixgbe_media_type_copper;
1736 + media_type = ixgbe_media_type_unknown;
1740 + return media_type;
1744 + * ixgbe_start_mac_link_82599 - Setup MAC link settings
1745 + * @hw: pointer to hardware structure
1747 + * Configures link settings based on values in the ixgbe_hw struct.
1748 + * Restarts the link. Performs autonegotiation if needed.
1750 +s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
1751 + bool autoneg_wait_to_complete)
1758 + /* Restart link */
1759 + autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1760 + autoc_reg |= IXGBE_AUTOC_AN_RESTART;
1761 + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
1763 + /* Only poll for autoneg to complete if specified to do so */
1764 + if (autoneg_wait_to_complete) {
1765 + if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
1766 + IXGBE_AUTOC_LMS_KX4_KX_KR ||
1767 + (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
1768 + IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN
1769 + || (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
1770 + IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
1771 + links_reg = 0; /* Just in case Autoneg time = 0 */
1772 + for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
1773 + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
1774 + if (links_reg & IXGBE_LINKS_KX_AN_COMP)
1778 + if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
1779 + status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
1780 + hw_dbg(hw, "Autoneg did not complete.\n");
1785 + /* Add delay to filter out noises during initial link setup */
1792 + * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
1793 + * @hw: pointer to hardware structure
1795 + * The base drivers may require better control over SFP+ module
1796 + * PHY states. This includes selectively shutting down the Tx
1797 + * laser on the PHY, effectively halting physical link.
1799 +void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
1801 + u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
1803 + /* Disable tx laser; allow 100us to go dark per spec */
1804 + esdp_reg |= IXGBE_ESDP_SDP3;
1805 + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
1806 + IXGBE_WRITE_FLUSH(hw);
1811 + * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
1812 + * @hw: pointer to hardware structure
1814 + * The base drivers may require better control over SFP+ module
1815 + * PHY states. This includes selectively turning on the Tx
1816 + * laser on the PHY, effectively starting physical link.
1818 +void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
1820 + u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
1822 + /* Enable tx laser; allow 100ms to light up */
1823 + esdp_reg &= ~IXGBE_ESDP_SDP3;
1824 + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
1825 + IXGBE_WRITE_FLUSH(hw);
1830 + * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
1831 + * @hw: pointer to hardware structure
1833 + * When the driver changes the link speeds that it can support,
1834 + * it sets autotry_restart to true to indicate that we need to
1835 + * initiate a new autotry session with the link partner. To do
1836 + * so, we set the speed then disable and re-enable the tx laser, to
1837 + * alert the link partner that it also needs to restart autotry on its
1838 + * end. This is consistent with true clause 37 autoneg, which also
1839 + * involves a loss of signal.
1841 +void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
1843 + if (hw->mac.autotry_restart) {
1844 + ixgbe_disable_tx_laser_multispeed_fiber(hw);
1845 + ixgbe_enable_tx_laser_multispeed_fiber(hw);
1846 + hw->mac.autotry_restart = false;
1851 + * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
1852 + * @hw: pointer to hardware structure
1853 + * @speed: new link speed
1854 + * @autoneg: true if autonegotiation enabled
1855 + * @autoneg_wait_to_complete: true when waiting for completion is needed
1857 + * Set the link speed in the AUTOC register and restarts link.
1859 +s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
1860 + ixgbe_link_speed speed, bool autoneg,
1861 + bool autoneg_wait_to_complete)
1864 + ixgbe_link_speed link_speed;
1865 + ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
1867 + u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
1869 + bool link_up = false;
1872 + /* Mask off requested but non-supported speeds */
1873 + status = ixgbe_get_link_capabilities(hw, &link_speed, &negotiation);
1877 + speed &= link_speed;
1880 + * Try each speed one by one, highest priority first. We do this in
1881 + * software because 10gb fiber doesn't support speed autonegotiation.
1883 + if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
1885 + highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
1887 + /* If we already have link at this speed, just jump out */
1888 + status = ixgbe_check_link(hw, &link_speed, &link_up, false);
1892 + if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
1895 + /* Set the module link speed */
1896 + esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
1897 + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
1898 + IXGBE_WRITE_FLUSH(hw);
1900 + /* Allow module to change analog characteristics (1G->10G) */
1903 + status = ixgbe_setup_mac_link_82599(
1904 + hw, IXGBE_LINK_SPEED_10GB_FULL, autoneg,
1905 + autoneg_wait_to_complete);
1909 + /* Flap the tx laser if it has not already been done */
1910 + ixgbe_flap_tx_laser(hw);
1913 + * Wait for the controller to acquire link. Per IEEE 802.3ap,
1914 + * Section 73.10.2, we may have to wait up to 500ms if KR is
1915 + * attempted. 82599 uses the same timing for 10g SFI.
1917 + for (i = 0; i < 5; i++) {
1918 + /* Wait for the link partner to also set speed */
1921 + /* If we have link, just jump out */
1922 + status = ixgbe_check_link(hw, &link_speed,
1932 + if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
1934 + if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
1935 + highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
1937 + /* If we already have link at this speed, just jump out */
1938 + status = ixgbe_check_link(hw, &link_speed, &link_up, false);
1942 + if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
1945 + /* Set the module link speed */
1946 + esdp_reg &= ~IXGBE_ESDP_SDP5;
1947 + esdp_reg |= IXGBE_ESDP_SDP5_DIR;
1948 + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
1949 + IXGBE_WRITE_FLUSH(hw);
1951 + /* Allow module to change analog characteristics (10G->1G) */
1954 + status = ixgbe_setup_mac_link_82599(
1955 + hw, IXGBE_LINK_SPEED_1GB_FULL, autoneg,
1956 + autoneg_wait_to_complete);
1960 + /* Flap the tx laser if it has not already been done */
1961 + ixgbe_flap_tx_laser(hw);
1963 + /* Wait for the link partner to also set speed */
1966 + /* If we have link, just jump out */
1967 + status = ixgbe_check_link(hw, &link_speed, &link_up, false);
1976 + * We didn't get link. Configure back to the highest speed we tried
1977 + * (if there was more than one), and call ourselves back with just the
1978 + * single highest speed that the user requested.
1981 + status = ixgbe_setup_mac_link_multispeed_fiber(hw,
1982 + highest_link_speed, autoneg, autoneg_wait_to_complete);
1985 + /* Set autoneg_advertised value based on input link speed */
1986 + hw->phy.autoneg_advertised = 0;
1988 + if (speed & IXGBE_LINK_SPEED_10GB_FULL)
1989 + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
1991 + if (speed & IXGBE_LINK_SPEED_1GB_FULL)
1992 + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
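As a caller-side illustration (a sketch, not part of this patch): because the routine masks the request against ixgbe_get_link_capabilities() and tries the speeds highest-first, a caller simply ORs together every speed it is willing to accept:

        ixgbe_link_speed speed = IXGBE_LINK_SPEED_10GB_FULL |
                                 IXGBE_LINK_SPEED_1GB_FULL;
        s32 err;

        /* Try 10G first; the routine falls back to 1G on its own. */
        err = ixgbe_setup_mac_link_multispeed_fiber(hw, speed,
                                                    true,  /* autoneg */
                                                    true); /* wait for AN */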
1998 + * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
1999 + * @hw: pointer to hardware structure
2000 + * @speed: new link speed
2001 + * @autoneg: true if autonegotiation enabled
2002 + * @autoneg_wait_to_complete: true when waiting for completion is needed
2004 + * Implements the Intel SmartSpeed algorithm.
2006 +s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
2007 + ixgbe_link_speed speed, bool autoneg,
2008 + bool autoneg_wait_to_complete)
2011 + ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
2013 + bool link_up = false;
2014 + u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2016 + /* Set autoneg_advertised value based on input link speed */
2017 + hw->phy.autoneg_advertised = 0;
2019 + if (speed & IXGBE_LINK_SPEED_10GB_FULL)
2020 + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
2022 + if (speed & IXGBE_LINK_SPEED_1GB_FULL)
2023 + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
2025 + if (speed & IXGBE_LINK_SPEED_100_FULL)
2026 + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
2029 + * Implement Intel SmartSpeed algorithm. SmartSpeed will reduce the
2030 + * autoneg advertisement if link is unable to be established at the
2031 + * highest negotiated rate. This can sometimes happen due to integrity
2032 + * issues with the physical media connection.
2035 + /* First, try to get link with full advertisement */
2036 + hw->phy.smart_speed_active = false;
2037 + for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
2038 + status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
2039 + autoneg_wait_to_complete);
2044 + * Wait for the controller to acquire link. Per IEEE 802.3ap,
2045 + * Section 73.10.2, we may have to wait up to 500ms if KR is
2046 + * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
2047 + * Table 9 in the AN MAS.
2049 + for (i = 0; i < 5; i++) {
2052 + /* If we have link, just jump out */
2053 + status = ixgbe_check_link(hw, &link_speed, &link_up,
2064 + * We didn't get link. If we advertised KR plus one of KX4/KX
2065 + * (or BX4/BX), then disable KR and try again.
2067 + if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
2068 + ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
2071 + /* Turn SmartSpeed on to disable KR support */
2072 + hw->phy.smart_speed_active = true;
2073 + status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
2074 + autoneg_wait_to_complete);
2079 + * Wait for the controller to acquire link. 600ms will allow for
2080 + * the AN link_fail_inhibit_timer as well as for multiple cycles of
2081 + * parallel detect, both 10g and 1g. This allows for the maximum
2082 + * connect attempts as defined in the AN MAS table 73-7.
2084 + for (i = 0; i < 6; i++) {
2087 + /* If we have link, just jump out */
2088 + status = ixgbe_check_link(hw, &link_speed, &link_up, false);
2096 + /* We didn't get link. Turn SmartSpeed back off. */
2097 + hw->phy.smart_speed_active = false;
2098 + status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
2099 + autoneg_wait_to_complete);
2102 + if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
2103 + hw_dbg(hw, "Smartspeed has downgraded the link speed "
2104 + "from the maximum advertised\n");
2109 + * ixgbe_setup_mac_link_82599 - Set MAC link speed
2110 + * @hw: pointer to hardware structure
2111 + * @speed: new link speed
2112 + * @autoneg: true if autonegotiation enabled
2113 + * @autoneg_wait_to_complete: true when waiting for completion is needed
2115 + * Sets the link speed in the AUTOC register and restarts the link.
2117 +s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
2118 + ixgbe_link_speed speed, bool autoneg,
2119 + bool autoneg_wait_to_complete)
2122 + u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2123 + u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
2124 + u32 start_autoc = autoc;
2125 + u32 orig_autoc = 0;
2126 + u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
2127 + u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
2128 + u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
2131 + ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
2133 + /* Check to see if speed passed in is supported. */
2134 + status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
2138 + speed &= link_capabilities;
2140 + if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
2141 + status = IXGBE_ERR_LINK_SETUP;
2145 + /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support */
2146 + if (hw->mac.orig_link_settings_stored)
2147 + orig_autoc = hw->mac.orig_autoc;
2149 + orig_autoc = autoc;
2151 + if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
2152 + link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
2153 + link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
2154 + /* Set KX4/KX/KR support according to speed requested */
2155 + autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
2156 + if (speed & IXGBE_LINK_SPEED_10GB_FULL)
2157 + if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
2158 + autoc |= IXGBE_AUTOC_KX4_SUPP;
2159 + if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
2160 + (hw->phy.smart_speed_active == false))
2161 + autoc |= IXGBE_AUTOC_KR_SUPP;
2162 + if (speed & IXGBE_LINK_SPEED_1GB_FULL)
2163 + autoc |= IXGBE_AUTOC_KX_SUPP;
2164 + } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
2165 + (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
2166 + link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
2167 + /* Switch from 1G SFI to 10G SFI if requested */
2168 + if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
2169 + (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
2170 + autoc &= ~IXGBE_AUTOC_LMS_MASK;
2171 + autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
2173 + } else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
2174 + (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
2175 + /* Switch from 10G SFI to 1G SFI if requested */
2176 + if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
2177 + (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
2178 + autoc &= ~IXGBE_AUTOC_LMS_MASK;
2180 + autoc |= IXGBE_AUTOC_LMS_1G_AN;
2182 + autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
2186 + if (autoc != start_autoc) {
2188 + /* Restart link */
2189 + autoc |= IXGBE_AUTOC_AN_RESTART;
2190 + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
2192 + /* Only poll for autoneg to complete if specified to do so */
2193 + if (autoneg_wait_to_complete) {
2194 + if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
2195 + link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
2196 + link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
2197 + links_reg = 0; /* Just in case Autoneg time = 0 */
2198 + for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
2200 + IXGBE_READ_REG(hw, IXGBE_LINKS);
2201 + if (links_reg & IXGBE_LINKS_KX_AN_COMP)
2205 + if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
2207 + IXGBE_ERR_AUTONEG_NOT_COMPLETE;
2208 + hw_dbg(hw, "Autoneg did not complete.\n");
2213 + /* Add delay to filter out noise during initial link setup */
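Stripped of the mode-selection logic above, the restart itself is a read-modify-write of AUTOC with the AN_RESTART bit set (a sketch using the same macros; the 10G-serial mode is only an example):

        u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);

        autoc &= ~IXGBE_AUTOC_LMS_MASK;
        autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;    /* example link mode */
        /* AN_RESTART kicks off a fresh negotiation with the partner. */
        IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc | IXGBE_AUTOC_AN_RESTART);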
2222 + * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
2223 + * @hw: pointer to hardware structure
2224 + * @speed: new link speed
2225 + * @autoneg: true if autonegotiation enabled
2226 + * @autoneg_wait_to_complete: true if waiting is needed to complete
2228 + * Restarts link on PHY and MAC based on settings passed in.
2230 +static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
2231 + ixgbe_link_speed speed,
2233 + bool autoneg_wait_to_complete)
2237 + /* Setup the PHY according to input speed */
2238 + status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
2239 + autoneg_wait_to_complete);
2241 + ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
2246 + * ixgbe_reset_hw_82599 - Perform hardware reset
2247 + * @hw: pointer to hardware structure
2249 + * Resets the hardware by resetting the transmit and receive units, masks
2250 + * and clears all interrupts, performs a PHY reset, and performs a link
2251 + * (MAC) reset.
2253 +s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
2261 + /* Call adapter stop to disable tx/rx and clear interrupts */
2262 + hw->mac.ops.stop_adapter(hw);
2264 + /* PHY ops must be identified and initialized prior to reset */
2266 + /* Identify PHY and related function pointers */
2267 + status = hw->phy.ops.init(hw);
2269 + if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
2270 + goto reset_hw_out;
2272 + /* Setup SFP module if there is one present. */
2273 + if (hw->phy.sfp_setup_needed) {
2274 + status = hw->mac.ops.setup_sfp(hw);
2275 + hw->phy.sfp_setup_needed = false;
2278 + if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
2279 + goto reset_hw_out;
2282 + if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL)
2283 + hw->phy.ops.reset(hw);
2286 + * Prevent the PCI-E bus from hanging by disabling PCI-E master
2287 + * access and verifying there are no pending requests before reset
2289 + ixgbe_disable_pcie_master(hw);
2293 + * Issue global reset to the MAC. This needs to be a SW reset.
2294 + * If link reset is used, it might reset the MAC when mng is using it
2296 + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
2297 + IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST));
2298 + IXGBE_WRITE_FLUSH(hw);
2300 + /* Poll for reset bit to self-clear indicating reset is complete */
2301 + for (i = 0; i < 10; i++) {
2303 + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
2304 + if (!(ctrl & IXGBE_CTRL_RST))
2307 + if (ctrl & IXGBE_CTRL_RST) {
2308 + status = IXGBE_ERR_RESET_FAILED;
2309 + hw_dbg(hw, "Reset polling failed to complete.\n");
2313 + * Double resets are required for recovery from certain error
2314 + * conditions. Between resets, it is necessary to stall to allow time
2315 + * for any pending HW events to complete. We use 1usec since that is
2316 + * what is needed for ixgbe_disable_pcie_master(). The second reset
2317 + * then clears out any effects of those events.
2319 + if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
2320 + hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
2322 + goto mac_reset_top;
2328 + * Store the original AUTOC/AUTOC2 values if they have not been
2329 + * stored off yet. Otherwise restore the stored original
2330 + * values since the reset operation sets back to defaults.
2332 + autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2333 + autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
2334 + if (hw->mac.orig_link_settings_stored == false) {
2335 + hw->mac.orig_autoc = autoc;
2336 + hw->mac.orig_autoc2 = autoc2;
2337 + hw->mac.orig_link_settings_stored = true;
2339 + if (autoc != hw->mac.orig_autoc)
2340 + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
2341 + IXGBE_AUTOC_AN_RESTART));
2343 + if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
2344 + (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
2345 + autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
2346 + autoc2 |= (hw->mac.orig_autoc2 &
2347 + IXGBE_AUTOC2_UPPER_MASK);
2348 + IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
2352 + /* Store the permanent mac address */
2353 + hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
2356 + * Store MAC address from RAR0, clear receive address registers, and
2357 + * clear the multicast table. Also reset num_rar_entries to 128,
2358 + * since we modify this value when programming the SAN MAC address.
2360 + hw->mac.num_rar_entries = 128;
2361 + hw->mac.ops.init_rx_addrs(hw);
2363 + /* Store the permanent SAN mac address */
2364 + hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
2366 + /* Add the SAN MAC address to the RAR only if it's a valid address */
2367 + if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
2368 + hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
2369 + hw->mac.san_addr, 0, IXGBE_RAH_AV);
2371 + /* Reserve the last RAR for the SAN MAC address */
2372 + hw->mac.num_rar_entries--;
2375 + /* Store the alternative WWNN/WWPN prefix */
2376 + hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
2377 + &hw->mac.wwpn_prefix);
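A probe-time caller might drive this reset as follows (hypothetical usage; an unsupported SFP module is reported but need not abort initialization):

        s32 err = hw->mac.ops.reset_hw(hw);

        if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
                hw_dbg(hw, "unsupported SFP+ module, continuing without link\n");
        else if (err)
                hw_dbg(hw, "hardware reset failed: %d\n", err);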
2384 + * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
2385 + * @hw: pointer to hardware structure
2387 +s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
2390 + u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
2391 + fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
2394 + * Before starting the reinitialization process,
2395 + * FDIRCMD.CMD must be zero.
2397 + for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
2398 + if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
2399 + IXGBE_FDIRCMD_CMD_MASK))
2403 + if (i >= IXGBE_FDIRCMD_CMD_POLL) {
2404 + hw_dbg(hw, "Flow Director previous command isn't complete, "
2405 + "aborting table re-initialization. \n");
2406 + return IXGBE_ERR_FDIR_REINIT_FAILED;
2409 + IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
2410 + IXGBE_WRITE_FLUSH(hw);
2412 + * The 82599 adapter's Flow Director init flow cannot be restarted.
2413 + * Work around the 82599 silicon errata by performing the following steps
2414 + * before re-writing the FDIRCTRL control register with the same value:
2415 + * - write 1 to bit 8 of the FDIRCMD register, then
2416 + * - write 0 to bit 8 of the FDIRCMD register
2418 + IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
2419 + (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
2420 + IXGBE_FDIRCMD_CLEARHT));
2421 + IXGBE_WRITE_FLUSH(hw);
2422 + IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
2423 + (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
2424 + ~IXGBE_FDIRCMD_CLEARHT));
2425 + IXGBE_WRITE_FLUSH(hw);
2427 + * Clear FDIR Hash register to clear any leftover hashes
2428 + * waiting to be programmed.
2430 + IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
2431 + IXGBE_WRITE_FLUSH(hw);
2433 + IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
2434 + IXGBE_WRITE_FLUSH(hw);
2436 + /* Poll init-done after we write FDIRCTRL register */
2437 + for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
2438 + if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
2439 + IXGBE_FDIRCTRL_INIT_DONE)
2443 + if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
2444 + hw_dbg(hw, "Flow Director Signature poll time exceeded!\n");
2445 + return IXGBE_ERR_FDIR_REINIT_FAILED;
2448 + /* Clear FDIR statistics registers (read to clear) */
2449 + IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
2450 + IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
2451 + IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
2452 + IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
2453 + IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
2459 + * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
2460 + * @hw: pointer to hardware structure
2461 + * @pballoc: which mode to allocate filters with
2463 +s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc)
2470 + * Before enabling Flow Director, the Rx Packet Buffer size
2471 + * must be reduced. The new value is the current size minus
2472 + * flow director memory usage size.
2474 + pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc));
2475 + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
2476 + (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize));
2479 + * The defaults in the HW for RX PB 1-7 are not zero and so should be
2480 + * initialized to zero for non-DCB mode; otherwise the actual total RX PB
2481 + * would be bigger than programmed and filter space would run into
2482 + * the PB 0 region.
2484 + for (i = 1; i < 8; i++)
2485 + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
2487 + /* Send interrupt when 64 filters are left */
2488 + fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
2490 + /* Set the maximum length per hash bucket to 0xA filters */
2491 + fdirctrl |= 0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT;
2493 + switch (pballoc) {
2494 + case IXGBE_FDIR_PBALLOC_64K:
2495 + /* 8k - 1 signature filters */
2496 + fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
2498 + case IXGBE_FDIR_PBALLOC_128K:
2499 + /* 16k - 1 signature filters */
2500 + fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
2502 + case IXGBE_FDIR_PBALLOC_256K:
2503 + /* 32k - 1 signature filters */
2504 + fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
2508 + return IXGBE_ERR_CONFIG;
2511 + /* Move the flexible bytes to use the ethertype - shift 6 words */
2512 + fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
2515 + /* Prime the keys for hashing */
2516 + IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY,
2517 + IXGBE_HTONL(IXGBE_ATR_BUCKET_HASH_KEY));
2518 + IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,
2519 + IXGBE_HTONL(IXGBE_ATR_SIGNATURE_HASH_KEY));
2522 + * Poll init-done after we write the register. Estimated times:
2523 + * 10G: PBALLOC = 11b, timing is 60us
2524 + * 1G: PBALLOC = 11b, timing is 600us
2525 + * 100M: PBALLOC = 11b, timing is 6ms
2527 + * Multiply these timings by 4 if under full Rx load
2529 + * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
2530 + * 1 msec per poll time. If we're at line rate and drop to 100M, then
2531 + * this might not finish in our poll time, but we can live with that
2534 + IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
2535 + IXGBE_WRITE_FLUSH(hw);
2536 + for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
2537 + if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
2538 + IXGBE_FDIRCTRL_INIT_DONE)
2542 + if (i >= IXGBE_FDIR_INIT_DONE_POLL)
2543 + hw_dbg(hw, "Flow Director Signature poll time exceeded!\n");
2549 + * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
2550 + * @hw: pointer to hardware structure
2551 + * @pballoc: which mode to allocate filters with
2553 +s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
2560 + * Before enabling Flow Director, the Rx Packet Buffer size
2561 + * must be reduced. The new value is the current size minus
2562 + * flow director memory usage size.
2565 + pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc));
2566 + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
2567 + (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize));
2570 + * The defaults in the HW for RX PB 1-7 are not zero and so should be
2571 + * initialized to zero for non-DCB mode; otherwise the actual total RX PB
2572 + * would be bigger than programmed and filter space would run into
2573 + * the PB 0 region.
2575 + for (i = 1; i < 8; i++)
2576 + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
2578 + /* Send interrupt when 64 filters are left */
2579 + fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
2581 + /* Initialize the drop queue to Rx queue 127 */
2582 + fdirctrl |= (127 << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
2584 + switch (pballoc) {
2585 + case IXGBE_FDIR_PBALLOC_64K:
2586 + /* 2k - 1 perfect filters */
2587 + fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
2589 + case IXGBE_FDIR_PBALLOC_128K:
2590 + /* 4k - 1 perfect filters */
2591 + fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
2593 + case IXGBE_FDIR_PBALLOC_256K:
2594 + /* 8k - 1 perfect filters */
2595 + fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
2599 + return IXGBE_ERR_CONFIG;
2602 + /* Turn perfect match filtering on */
2603 + fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH;
2604 + fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
2606 + /* Move the flexible bytes to use the ethertype - shift 6 words */
2607 + fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
2609 + /* Prime the keys for hashing */
2610 + IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY,
2611 + IXGBE_HTONL(IXGBE_ATR_BUCKET_HASH_KEY));
2612 + IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,
2613 + IXGBE_HTONL(IXGBE_ATR_SIGNATURE_HASH_KEY));
2616 + * Poll init-done after we write the register. Estimated times:
2617 + * 10G: PBALLOC = 11b, timing is 60us
2618 + * 1G: PBALLOC = 11b, timing is 600us
2619 + * 100M: PBALLOC = 11b, timing is 6ms
2621 + * Multiply these timings by 4 if under full Rx load
2623 + * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
2624 + * 1 msec per poll time. If we're at line rate and drop to 100M, then
2625 + * this might not finish in our poll time, but we can live with that
2629 + /* Set the maximum length per hash bucket to 0xA filters */
2630 + fdirctrl |= (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT);
2632 + IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
2633 + IXGBE_WRITE_FLUSH(hw);
2634 + for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
2635 + if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
2636 + IXGBE_FDIRCTRL_INIT_DONE)
2640 + if (i >= IXGBE_FDIR_INIT_DONE_POLL)
2641 + hw_dbg(hw, "Flow Director Perfect poll time exceeded!\n");
2648 + * ixgbe_atr_compute_hash_82599 - Compute the hashes for SW ATR
2649 + * @atr_input: input bitstream to compute the hash on
2650 + * @key: 32-bit hash key
2652 +u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input, u32 key)
2655 + * The algorithm is as follows:
2656 + * Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350
2657 + * where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n]
2658 + * and A[n] x B[n] is bitwise AND between same length strings
2660 + * K[n] is 16 bits, defined as:
2661 + * for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15]
2662 + * for n modulo 32 < 15, K[n] =
2663 + * K[(n % 32:0) | (31:31 - (14 - (n % 32)))]
2665 + * S[n] is 16 bits, defined as:
2666 + * for n >= 15, S[n] = S[n:n - 15]
2667 + * for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))]
2669 + * To simplify for programming, the algorithm is implemented
2670 + * in software this way:
2672 + * Key[31:0], Stream[335:0]
2674 + * tmp_key[11 * 32 - 1:0] = 11{Key[31:0]}, i.e. the key concatenated 11 times
2675 + * int_key[350:0] = tmp_key[351:1]
2676 + * int_stream[365:0] = Stream[14:0] | Stream[335:0] | Stream[335:321]
2679 + * for (i = 0; i < 351; i++) {
2681 + * hash ^= int_stream[(i + 15):i];
2688 + u8 key_stream[44];
2691 + u8 *stream = (u8 *)atr_input;
2692 + u8 int_key[44]; /* upper-most bit unused */
2693 + u8 hash_str[46]; /* upper-most 2 bits unused */
2694 + u16 hash_result = 0;
2698 + * Initialize the fill member to prevent warnings
2699 + * on some compilers
2701 + tmp_key.fill[0] = 0;
2703 + /* First load the temporary key stream */
2704 + for (i = 0; i < 6; i++) {
2705 + u64 fillkey = ((u64)key << 32) | key;
2706 + tmp_key.fill[i] = fillkey;
2710 + * Set the interim key for the hashing. Bit 352 is unused, so we must
2711 + * shift and compensate when building the key.
2714 + int_key[0] = tmp_key.key_stream[0] >> 1;
2715 + for (i = 1, j = 0; i < 44; i++) {
2716 + unsigned int this_key = tmp_key.key_stream[j] << 7;
2717 + j++;
2718 + int_key[i] = (u8)(this_key | (tmp_key.key_stream[j] >> 1));
2722 + * Set the interim bit string for the hashing. Bits 368 and 367 are
2723 + * unused, so shift and compensate when building the string.
2725 + hash_str[0] = (stream[40] & 0x7f) >> 1;
2726 + for (i = 1, j = 40; i < 46; i++) {
2727 + unsigned int this_str = stream[j] << 7;
2728 + j++;
2729 + if (j > 41)
2730 + j = 0;
2731 + hash_str[i] = (u8)(this_str | (stream[j] >> 1));
2735 + * Now compute the hash. i is the index into hash_str, j is into our
2736 + * key stream, k is counting the number of bits, and h iterates within
2737 + * each byte.
2739 + for (i = 45, j = 43, k = 0; k < 351 && i >= 2 && j >= 0; i--, j--) {
2740 + for (h = 0; h < 8 && k < 351; h++, k++) {
2741 + if (int_key[j] & (1 << h)) {
2743 + * Key bit is set, XOR in the current 16-bit
2744 + * string. Example of processing:
2745 + * h = 0,
2746 + * tmp = (hash_str[i - 2] & 0 << 16) |
2747 + * (hash_str[i - 1] & 0xff << 8) |
2748 + * (hash_str[i] & 0xff >> 0)
2749 + * So tmp = hash_str[15 + k:k], since the
2750 + * i + 2 clause rolls off the 16-bit value
2751 + * h = 7,
2752 + * tmp = (hash_str[i - 2] & 0x7f << 9) |
2753 + * (hash_str[i - 1] & 0xff << 1) |
2754 + * (hash_str[i] & 0x80 >> 7)
2756 + int tmp = (hash_str[i] >> h);
2757 + tmp |= (hash_str[i - 1] << (8 - h));
2758 + tmp |= (int)(hash_str[i - 2] & ((1 << h) - 1))
2759 + << (16 - h);
2760 + hash_result ^= (u16)tmp;
2765 + return hash_result;
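Usage sketch, mirroring the filter-add routines later in this file: the same input is hashed twice with different keys, and the bucket hash is truncated to 15 bits:

        u16 bucket_hash, sig_hash;

        bucket_hash = ixgbe_atr_compute_hash_82599(input,
                                                   IXGBE_ATR_BUCKET_HASH_KEY);
        bucket_hash &= IXGBE_ATR_HASH_MASK;    /* bucket hash is 15 bits */
        sig_hash = ixgbe_atr_compute_hash_82599(input,
                                                IXGBE_ATR_SIGNATURE_HASH_KEY);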
2769 + * ixgbe_atr_set_vlan_id_82599 - Sets the VLAN id in the ATR input stream
2770 + * @input: input stream to modify
2771 + * @vlan: the VLAN id to load
2773 +s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input, u16 vlan)
2775 + input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] = vlan >> 8;
2776 + input->byte_stream[IXGBE_ATR_VLAN_OFFSET] = vlan & 0xff;
2782 + * ixgbe_atr_set_src_ipv4_82599 - Sets the source IPv4 address
2783 + * @input: input stream to modify
2784 + * @src_addr: the IP address to load
2786 +s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input, u32 src_addr)
2788 + input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] = src_addr >> 24;
2789 + input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] =
2790 + (src_addr >> 16) & 0xff;
2791 + input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] =
2792 + (src_addr >> 8) & 0xff;
2793 + input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET] = src_addr & 0xff;
2799 + * ixgbe_atr_set_dst_ipv4_82599 - Sets the destination IPv4 address
2800 + * @input: input stream to modify
2801 + * @dst_addr: the IP address to load
2803 +s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 dst_addr)
2805 + input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] = dst_addr >> 24;
2806 + input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] =
2807 + (dst_addr >> 16) & 0xff;
2808 + input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] =
2809 + (dst_addr >> 8) & 0xff;
2810 + input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET] = dst_addr & 0xff;
2816 + * ixgbe_atr_set_src_ipv6_82599 - Sets the source IPv6 address
2817 + * @input: input stream to modify
2818 + * @src_addr_1: the first 4 bytes of the IP address to load
2819 + * @src_addr_2: the second 4 bytes of the IP address to load
2820 + * @src_addr_3: the third 4 bytes of the IP address to load
2821 + * @src_addr_4: the fourth 4 bytes of the IP address to load
2823 +s32 ixgbe_atr_set_src_ipv6_82599(struct ixgbe_atr_input *input,
2824 + u32 src_addr_1, u32 src_addr_2,
2825 + u32 src_addr_3, u32 src_addr_4)
2827 + input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET] = src_addr_4 & 0xff;
2828 + input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] =
2829 + (src_addr_4 >> 8) & 0xff;
2830 + input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] =
2831 + (src_addr_4 >> 16) & 0xff;
2832 + input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] = src_addr_4 >> 24;
2834 + input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4] = src_addr_3 & 0xff;
2835 + input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] =
2836 + (src_addr_3 >> 8) & 0xff;
2837 + input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] =
2838 + (src_addr_3 >> 16) & 0xff;
2839 + input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] = src_addr_3 >> 24;
2841 + input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8] = src_addr_2 & 0xff;
2842 + input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] =
2843 + (src_addr_2 >> 8) & 0xff;
2844 + input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] =
2845 + (src_addr_2 >> 16) & 0xff;
2846 + input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] = src_addr_2 >> 24;
2848 + input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12] = src_addr_1 & 0xff;
2849 + input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] =
2850 + (src_addr_1 >> 8) & 0xff;
2851 + input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] =
2852 + (src_addr_1 >> 16) & 0xff;
2853 + input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] = src_addr_1 >> 24;
2859 + * ixgbe_atr_set_dst_ipv6_82599 - Sets the destination IPv6 address
2860 + * @input: input stream to modify
2861 + * @dst_addr_1: the first 4 bytes of the IP address to load
2862 + * @dst_addr_2: the second 4 bytes of the IP address to load
2863 + * @dst_addr_3: the third 4 bytes of the IP address to load
2864 + * @dst_addr_4: the fourth 4 bytes of the IP address to load
2866 +s32 ixgbe_atr_set_dst_ipv6_82599(struct ixgbe_atr_input *input,
2867 + u32 dst_addr_1, u32 dst_addr_2,
2868 + u32 dst_addr_3, u32 dst_addr_4)
2870 + input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET] = dst_addr_4 & 0xff;
2871 + input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] =
2872 + (dst_addr_4 >> 8) & 0xff;
2873 + input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 2] =
2874 + (dst_addr_4 >> 16) & 0xff;
2875 + input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 3] = dst_addr_4 >> 24;
2877 + input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 4] = dst_addr_3 & 0xff;
2878 + input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 5] =
2879 + (dst_addr_3 >> 8) & 0xff;
2880 + input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 6] =
2881 + (dst_addr_3 >> 16) & 0xff;
2882 + input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 7] = dst_addr_3 >> 24;
2884 + input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 8] = dst_addr_2 & 0xff;
2885 + input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 9] =
2886 + (dst_addr_2 >> 8) & 0xff;
2887 + input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 10] =
2888 + (dst_addr_2 >> 16) & 0xff;
2889 + input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 11] = dst_addr_2 >> 24;
2891 + input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 12] = dst_addr_1 & 0xff;
2892 + input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 13] =
2893 + (dst_addr_1 >> 8) & 0xff;
2894 + input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 14] =
2895 + (dst_addr_1 >> 16) & 0xff;
2896 + input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 15] = dst_addr_1 >> 24;
2902 + * ixgbe_atr_set_src_port_82599 - Sets the source port
2903 + * @input: input stream to modify
2904 + * @src_port: the source port to load
2906 +s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input, u16 src_port)
2908 + input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1] = src_port >> 8;
2909 + input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] = src_port & 0xff;
2915 + * ixgbe_atr_set_dst_port_82599 - Sets the destination port
2916 + * @input: input stream to modify
2917 + * @dst_port: the destination port to load
2919 +s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input, u16 dst_port)
2921 + input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1] = dst_port >> 8;
2922 + input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] = dst_port & 0xff;
2928 + * ixgbe_atr_set_flex_byte_82599 - Sets the flexible bytes
2929 + * @input: input stream to modify
2930 + * @flex_bytes: the flexible bytes to load
2932 +s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, u16 flex_byte)
2934 + input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] = flex_byte >> 8;
2935 + input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET] = flex_byte & 0xff;
2941 + * ixgbe_atr_set_vm_pool_82599 - Sets the Virtual Machine pool
2942 + * @input: input stream to modify
2943 + * @vm_pool: the Virtual Machine pool to load
2945 +s32 ixgbe_atr_set_vm_pool_82599(struct ixgbe_atr_input *input, u8 vm_pool)
2947 + input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET] = vm_pool;
2953 + * ixgbe_atr_set_l4type_82599 - Sets the layer 4 packet type
2954 + * @input: input stream to modify
2955 + * @l4type: the layer 4 type value to load
2957 +s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, u8 l4type)
2959 + input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET] = l4type;
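Taken together, the setters above let a caller describe a flow without knowing the byte-stream layout. A hypothetical IPv4/TCP example (addresses, ports, and VLAN id are illustrative):

        struct ixgbe_atr_input atr_input;

        memset(&atr_input, 0, sizeof(atr_input));
        ixgbe_atr_set_vlan_id_82599(&atr_input, 100);
        ixgbe_atr_set_src_ipv4_82599(&atr_input, 0xc0a80101);
        ixgbe_atr_set_dst_ipv4_82599(&atr_input, 0xc0a80164);
        ixgbe_atr_set_src_port_82599(&atr_input, 49152);
        ixgbe_atr_set_dst_port_82599(&atr_input, 80);
        ixgbe_atr_set_l4type_82599(&atr_input, IXGBE_ATR_L4TYPE_TCP);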
2965 + * ixgbe_atr_get_vlan_id_82599 - Gets the VLAN id from the ATR input stream
2966 + * @input: input stream to search
2967 + * @vlan: the VLAN id to load
2969 +s32 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input, u16 *vlan)
2971 + *vlan = input->byte_stream[IXGBE_ATR_VLAN_OFFSET];
2972 + *vlan |= input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] << 8;
2978 + * ixgbe_atr_get_src_ipv4_82599 - Gets the source IPv4 address
2979 + * @input: input stream to search
2980 + * @src_addr: the IP address to load
2982 +s32 ixgbe_atr_get_src_ipv4_82599(struct ixgbe_atr_input *input, u32 *src_addr)
2984 + *src_addr = input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET];
2985 + *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] << 8;
2986 + *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] << 16;
2987 + *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] << 24;
2993 + * ixgbe_atr_get_dst_ipv4_82599 - Gets the destination IPv4 address
2994 + * @input: input stream to search
2995 + * @dst_addr: the IP address to load
2997 +s32 ixgbe_atr_get_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 *dst_addr)
2999 + *dst_addr = input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET];
3000 + *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] << 8;
3001 + *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] << 16;
3002 + *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] << 24;
3008 + * ixgbe_atr_get_src_ipv6_82599 - Gets the source IPv6 address
3009 + * @input: input stream to search
3010 + * @src_addr_1: the first 4 bytes of the IP address to load
3011 + * @src_addr_2: the second 4 bytes of the IP address to load
3012 + * @src_addr_3: the third 4 bytes of the IP address to load
3013 + * @src_addr_4: the fourth 4 bytes of the IP address to load
3015 +s32 ixgbe_atr_get_src_ipv6_82599(struct ixgbe_atr_input *input,
3016 + u32 *src_addr_1, u32 *src_addr_2,
3017 + u32 *src_addr_3, u32 *src_addr_4)
3019 + *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12];
3020 + *src_addr_1 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] << 8;
3021 + *src_addr_1 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] << 16;
3022 + *src_addr_1 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] << 24;
3024 + *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8];
3025 + *src_addr_2 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] << 8;
3026 + *src_addr_2 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] << 16;
3027 + *src_addr_2 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] << 24;
3029 + *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4];
3030 + *src_addr_3 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] << 8;
3031 + *src_addr_3 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] << 16;
3032 + *src_addr_3 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] << 24;
3034 + *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET];
3035 + *src_addr_4 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] << 8;
3036 + *src_addr_4 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] << 16;
3037 + *src_addr_4 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] << 24;
3043 + * ixgbe_atr_get_dst_ipv6_82599 - Gets the destination IPv6 address
3044 + * @input: input stream to search
3045 + * @dst_addr_1: the first 4 bytes of the IP address to load
3046 + * @dst_addr_2: the second 4 bytes of the IP address to load
3047 + * @dst_addr_3: the third 4 bytes of the IP address to load
3048 + * @dst_addr_4: the fourth 4 bytes of the IP address to load
3050 +s32 ixgbe_atr_get_dst_ipv6_82599(struct ixgbe_atr_input *input,
3051 + u32 *dst_addr_1, u32 *dst_addr_2,
3052 + u32 *dst_addr_3, u32 *dst_addr_4)
3054 + *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 12];
3055 + *dst_addr_1 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 13] << 8;
3056 + *dst_addr_1 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 14] << 16;
3057 + *dst_addr_1 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 15] << 24;
3059 + *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 8];
3060 + *dst_addr_2 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 9] << 8;
3061 + *dst_addr_2 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 10] << 16;
3062 + *dst_addr_2 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 11] << 24;
3064 + *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 4];
3065 + *dst_addr_3 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 5] << 8;
3066 + *dst_addr_3 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 6] << 16;
3067 + *dst_addr_3 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 7] << 24;
3069 + *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET];
3070 + *dst_addr_4 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] << 8;
3071 + *dst_addr_4 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 2] << 16;
3072 + *dst_addr_4 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 3] << 24;
3078 + * ixgbe_atr_get_src_port_82599 - Gets the source port
3079 + * @input: input stream to search
3080 + * @src_port: the source port to load
3082 + * Even though the input is given in big-endian, the FDIRPORT registers
3083 + * expect the ports to be programmed in little-endian. Hence the need to swap
3084 + * endianness when retrieving the data. This can be confusing since the
3085 + * internal hash engine expects it to be big-endian.
3087 +s32 ixgbe_atr_get_src_port_82599(struct ixgbe_atr_input *input, u16 *src_port)
3089 + *src_port = input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] << 8;
3090 + *src_port |= input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1];
3096 + * ixgbe_atr_get_dst_port_82599 - Gets the destination port
3097 + * @input: input stream to search
3098 + * @dst_port: the destination port to load
3100 + * Even though the input is given in big-endian, the FDIRPORT registers
3101 + * expect the ports to be programmed in little-endian. Hence the need to swap
3102 + * endianness when retrieving the data. This can be confusing since the
3103 + * internal hash engine expects it to be big-endian.
3105 +s32 ixgbe_atr_get_dst_port_82599(struct ixgbe_atr_input *input, u16 *dst_port)
3107 + *dst_port = input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] << 8;
3108 + *dst_port |= input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1];
3114 + * ixgbe_atr_get_flex_byte_82599 - Gets the flexible bytes
3115 + * @input: input stream to search
3116 + * @flex_bytes: the flexible bytes to load
3118 +s32 ixgbe_atr_get_flex_byte_82599(struct ixgbe_atr_input *input, u16 *flex_byte)
3120 + *flex_byte = input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET];
3121 + *flex_byte |= input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] << 8;
3127 + * ixgbe_atr_get_vm_pool_82599 - Gets the Virtual Machine pool
3128 + * @input: input stream to search
3129 + * @vm_pool: the Virtual Machine pool to load
3131 +s32 ixgbe_atr_get_vm_pool_82599(struct ixgbe_atr_input *input, u8 *vm_pool)
3133 + *vm_pool = input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET];
3139 + * ixgbe_atr_get_l4type_82599 - Gets the layer 4 packet type
3140 + * @input: input stream to search
3141 + * @l4type: the layer 4 type value to load
3143 +s32 ixgbe_atr_get_l4type_82599(struct ixgbe_atr_input *input, u8 *l4type)
3145 + *l4type = input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET];
3151 + * ixgbe_fdir_add_signature_filter_82599 - Adds a signature hash filter
3152 + * @hw: pointer to hardware structure
3153 + * @input: input bitstream
3154 + * @queue: queue index to direct traffic to
3156 +s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
3157 + struct ixgbe_atr_input *input,
3163 + u16 bucket_hash, sig_hash;
3166 + bucket_hash = ixgbe_atr_compute_hash_82599(input,
3167 + IXGBE_ATR_BUCKET_HASH_KEY);
3169 + /* bucket_hash is only 15 bits */
3170 + bucket_hash &= IXGBE_ATR_HASH_MASK;
3172 + sig_hash = ixgbe_atr_compute_hash_82599(input,
3173 + IXGBE_ATR_SIGNATURE_HASH_KEY);
3175 + /* Get the l4type in order to program FDIRCMD properly */
3176 + /* lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 */
3177 + ixgbe_atr_get_l4type_82599(input, &l4type);
3180 + * The lower 32 bits of fdirhashcmd are for FDIRHASH, and the upper 32
3181 + * bits are for FDIRCMD. Then do a 64-bit register write from FDIRHASH.
3183 + fdirhash = sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash;
3185 + fdircmd = (IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
3186 + IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN);
3188 + switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
3189 + case IXGBE_ATR_L4TYPE_TCP:
3190 + fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP;
3192 + case IXGBE_ATR_L4TYPE_UDP:
3193 + fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP;
3195 + case IXGBE_ATR_L4TYPE_SCTP:
3196 + fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP;
3199 + hw_dbg(hw, " Error on l4type input\n");
3200 + return IXGBE_ERR_CONFIG;
3203 + if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK)
3204 + fdircmd |= IXGBE_FDIRCMD_IPV6;
3206 + fdircmd |= ((u64)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT);
3207 + fdirhashcmd = ((fdircmd << 32) | fdirhash);
3209 + hw_dbg(hw, "Tx Queue=%x hash=%x\n", queue, fdirhash & 0x7FFF7FFF);
3210 + IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
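Continuing the hypothetical flow built with the setters earlier, steering matches to Rx queue 5 is then a single call, since the hashing and the FDIRHASH/FDIRCMD programming all happen here:

        s32 err = ixgbe_fdir_add_signature_filter_82599(hw, &atr_input,
                                                        5 /* Rx queue */);
        if (err)
                hw_dbg(hw, "signature filter add failed: %d\n", err);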
3216 + * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
3217 + * @hw: pointer to hardware structure
3218 + * @input: input bitstream
3219 + * @input_masks: masks for the input bitstream
3220 + * @soft_id: software index for the filters
3221 + * @queue: queue index to direct traffic to
3223 + * Note that the caller of this function must lock before calling, since the
3224 + * hardware writes must be protected from one another.
3226 +s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
3227 + struct ixgbe_atr_input *input,
3228 + struct ixgbe_atr_input_masks *input_masks,
3229 + u16 soft_id, u8 queue)
3233 + u32 src_ipv4 = 0, dst_ipv4 = 0;
3234 + u32 src_ipv6_1, src_ipv6_2, src_ipv6_3, src_ipv6_4;
3235 + u16 src_port, dst_port, vlan_id, flex_bytes;
3240 + /* Get our input values */
3241 + ixgbe_atr_get_l4type_82599(input, &l4type);
3244 + * Check l4type formatting, and bail out before we touch the hardware
3245 + * if there's a configuration issue
3247 + switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
3248 + case IXGBE_ATR_L4TYPE_TCP:
3249 + fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP;
3251 + case IXGBE_ATR_L4TYPE_UDP:
3252 + fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP;
3254 + case IXGBE_ATR_L4TYPE_SCTP:
3255 + fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP;
3258 + hw_dbg(hw, " Error on l4type input\n");
3259 + return IXGBE_ERR_CONFIG;
3262 + bucket_hash = ixgbe_atr_compute_hash_82599(input,
3263 + IXGBE_ATR_BUCKET_HASH_KEY);
3265 + /* bucket_hash is only 15 bits */
3266 + bucket_hash &= IXGBE_ATR_HASH_MASK;
3268 + ixgbe_atr_get_vlan_id_82599(input, &vlan_id);
3269 + ixgbe_atr_get_src_port_82599(input, &src_port);
3270 + ixgbe_atr_get_dst_port_82599(input, &dst_port);
3271 + ixgbe_atr_get_flex_byte_82599(input, &flex_bytes);
3273 + fdirhash = soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash;
3275 + /* Now figure out if we're IPv4 or IPv6 */
3276 + if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK) {
3278 + ixgbe_atr_get_src_ipv6_82599(input, &src_ipv6_1, &src_ipv6_2,
3279 + &src_ipv6_3, &src_ipv6_4);
3281 + IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), src_ipv6_1);
3282 + IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), src_ipv6_2);
3283 + IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), src_ipv6_3);
3284 + /* The last 4 bytes are in the same register as IPv4 */
3285 + IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv6_4);
3287 + fdircmd |= IXGBE_FDIRCMD_IPV6;
3288 + fdircmd |= IXGBE_FDIRCMD_IPv6DMATCH;
3291 + ixgbe_atr_get_src_ipv4_82599(input, &src_ipv4);
3292 + IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv4);
3295 + ixgbe_atr_get_dst_ipv4_82599(input, &dst_ipv4);
3296 + IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, dst_ipv4);
3298 + IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, (vlan_id |
3299 + (flex_bytes << IXGBE_FDIRVLAN_FLEX_SHIFT)));
3300 + IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, (src_port |
3301 + (dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT)));
3304 + * Program the relevant mask registers. If src/dst_port or src/dst_addr
3305 + * are zero, then assume a full mask for that field. Also assume that
3306 + * a VLAN of 0 is unspecified, so mask that out as well. L4type
3307 + * cannot be masked out in this implementation.
3309 + * This also assumes IPv4 only. IPv6 masking isn't supported at this
3310 + * time.
3312 + if (src_ipv4 == 0)
3313 + IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, 0xffffffff);
3315 + IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, input_masks->src_ip_mask);
3317 + if (dst_ipv4 == 0)
3318 + IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, 0xffffffff);
3320 + IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, input_masks->dst_ip_mask);
3322 + switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
3323 + case IXGBE_ATR_L4TYPE_TCP:
3324 + if (src_port == 0)
3325 + IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xffff);
3327 + IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
3328 + input_masks->src_port_mask);
3330 + if (dst_port == 0)
3331 + IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
3332 + (IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) |
3335 + IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
3336 + (IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) |
3337 + (input_masks->dst_port_mask << 16)));
3339 + case IXGBE_ATR_L4TYPE_UDP:
3340 + if (src_port == 0)
3341 + IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xffff);
3343 + IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
3344 + input_masks->src_port_mask);
3346 + if (dst_port == 0)
3347 + IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
3348 + (IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) |
3351 + IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
3352 + (IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) |
3353 + (input_masks->dst_port_mask << 16)));
3356 + /* this already would have failed above */
3360 + /* Program the last mask register, FDIRM */
3361 + if (input_masks->vlan_id_mask || !vlan_id)
3362 + /* Mask both VLAN and VLANP - bits 0 and 1 */
3363 + fdirm |= (IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP);
3365 + if (input_masks->data_mask || !flex_bytes)
3366 + /* Flex bytes need masking, so mask the whole thing - bit 4 */
3367 + fdirm |= IXGBE_FDIRM_FLEX;
3369 + /* Now mask VM pool and destination IPv6 - bits 5 and 2 */
3370 + fdirm |= (IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6);
3372 + IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
3374 + fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW;
3375 + fdircmd |= IXGBE_FDIRCMD_FILTER_UPDATE;
3376 + fdircmd |= IXGBE_FDIRCMD_LAST;
3377 + fdircmd |= IXGBE_FDIRCMD_QUEUE_EN;
3378 + fdircmd |= queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
3380 + IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
3381 + IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
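A perfect-filter add in the same spirit (values hypothetical; the mask fields are the ones consumed above), remembering that the caller must serialize these register writes:

        struct ixgbe_atr_input_masks masks;
        s32 err;

        memset(&masks, 0, sizeof(masks));
        masks.src_port_mask = 0xffff;    /* example: mask the source port */
        err = ixgbe_fdir_add_perfect_filter_82599(hw, &atr_input, &masks,
                                                  1 /* soft_id */,
                                                  5 /* queue */);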
3387 + * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
3388 + * @hw: pointer to hardware structure
3389 + * @reg: analog register to read
3390 + * @val: read value
3392 + * Performs a read operation on the specified Omer analog register.
3394 +s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
3398 + IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
3400 + IXGBE_WRITE_FLUSH(hw);
3402 + core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
3403 + *val = (u8)core_ctl;
3409 + * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
3410 + * @hw: pointer to hardware structure
3411 + * @reg: analog register to write
3412 + * @val: value to write
3414 + * Performs a write operation on the specified Omer analog register.
3416 +s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
3420 + core_ctl = (reg << 8) | val;
3421 + IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
3422 + IXGBE_WRITE_FLUSH(hw);
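The pair above composes naturally into a read-modify-write; a sketch (register address and bit are purely illustrative):

        u8 val;

        ixgbe_read_analog_reg8_82599(hw, 0x1b, &val);   /* illustrative reg */
        val |= 0x01;                                    /* illustrative bit */
        ixgbe_write_analog_reg8_82599(hw, 0x1b, val);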
3429 + * ixgbe_start_hw_rev_1_82599 - Prepare hardware for Tx/Rx
3430 + * @hw: pointer to hardware structure
3432 + * Starts the hardware using the generic start_hw function, then
3433 + * performs revision-specific operations: clears the rate limiters,
3434 + * disables relaxed ordering, and verifies the firmware version.
3436 +s32 ixgbe_start_hw_rev_1_82599(struct ixgbe_hw *hw)
3442 + ret_val = ixgbe_start_hw_generic(hw);
3444 + /* Clear the rate limiters */
3445 + for (i = 0; i < hw->mac.max_tx_queues; i++) {
3446 + IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
3447 + IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
3449 + IXGBE_WRITE_FLUSH(hw);
3451 + /* Disable relaxed ordering */
3452 + for (i = 0; i < hw->mac.max_tx_queues; i++) {
3453 + regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
3454 + regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
3455 + IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
3458 + for (i = 0; i < hw->mac.max_rx_queues; i++) {
3459 + regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
3460 + regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
3461 + IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
3462 + IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
3465 + /* We need to run link autotry after the driver loads */
3466 + hw->mac.autotry_restart = true;
3469 + ret_val = ixgbe_verify_fw_version_82599(hw);
3474 + * ixgbe_identify_phy_82599 - Get physical layer module
3475 + * @hw: pointer to hardware structure
3477 + * Determines the physical layer module found on the current adapter.
3478 + * If PHY already detected, maintains current PHY type in hw struct,
3479 + * otherwise executes the PHY detection routine.
3481 +s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
3483 + s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
3485 + /* Detect PHY if not unknown - returns success if already detected. */
3486 + status = ixgbe_identify_phy_generic(hw);
3488 + status = ixgbe_identify_sfp_module_generic(hw);
3489 + /* Set PHY type none if no PHY detected */
3490 + if (hw->phy.type == ixgbe_phy_unknown) {
3491 + hw->phy.type = ixgbe_phy_none;
3495 + /* Return error if SFP module has been detected but is not supported */
3496 + if (hw->phy.type == ixgbe_phy_sfp_unsupported)
3497 + status = IXGBE_ERR_SFP_NOT_SUPPORTED;
3503 + * ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
3504 + * @hw: pointer to hardware structure
3506 + * Determines physical layer capabilities of the current configuration.
3508 +u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
3510 + u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
3511 + u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
3512 + u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
3513 + u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
3514 + u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
3515 + u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
3516 + u16 ext_ability = 0;
3517 + u8 comp_codes_10g = 0;
3518 + u8 comp_codes_1g = 0;
3520 + hw->phy.ops.identify(hw);
3522 + if (hw->phy.type == ixgbe_phy_tn ||
3523 + hw->phy.type == ixgbe_phy_aq ||
3524 + hw->phy.type == ixgbe_phy_cu_unknown) {
3525 + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
3526 + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
3527 + if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
3528 + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
3529 + if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
3530 + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
3531 + if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
3532 + physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
3536 + switch (autoc & IXGBE_AUTOC_LMS_MASK) {
3537 + case IXGBE_AUTOC_LMS_1G_AN:
3538 + case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
3539 + if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
3540 + physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
3541 + IXGBE_PHYSICAL_LAYER_1000BASE_BX;
3544 + /* SFI mode so read SFP module */
3547 + case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
3548 + if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
3549 + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
3550 + else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
3551 + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
3552 + else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
3553 + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
3556 + case IXGBE_AUTOC_LMS_10G_SERIAL:
3557 + if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
3558 + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
3560 + } else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
3563 + case IXGBE_AUTOC_LMS_KX4_KX_KR:
3564 + case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
3565 + if (autoc & IXGBE_AUTOC_KX_SUPP)
3566 + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3567 + if (autoc & IXGBE_AUTOC_KX4_SUPP)
3568 + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
3569 + if (autoc & IXGBE_AUTOC_KR_SUPP)
3570 + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
3579 + /* SFP check must be done last since DA modules are sometimes used to
3580 + * test KR mode - we need to id KR mode correctly before SFP module.
3581 + * Call identify_sfp because the pluggable module may have changed */
3582 + hw->phy.ops.identify_sfp(hw);
3583 + if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
3586 + switch (hw->phy.type) {
3587 + case ixgbe_phy_sfp_passive_tyco:
3588 + case ixgbe_phy_sfp_passive_unknown:
3589 + physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
3591 + case ixgbe_phy_sfp_ftl_active:
3592 + case ixgbe_phy_sfp_active_unknown:
3593 + physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA;
3595 + case ixgbe_phy_sfp_avago:
3596 + case ixgbe_phy_sfp_ftl:
3597 + case ixgbe_phy_sfp_intel:
3598 + case ixgbe_phy_sfp_unknown:
3599 + hw->phy.ops.read_i2c_eeprom(hw,
3600 + IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g);
3601 + hw->phy.ops.read_i2c_eeprom(hw,
3602 + IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g);
3603 + if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
3604 + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
3605 + else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
3606 + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
3607 + else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
3608 + physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
3615 + return physical_layer;
3619 + * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
3620 + * @hw: pointer to hardware structure
3621 + * @regval: register value to write to RXCTRL
3623 + * Enables the Rx DMA unit for 82599
3625 +s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
3627 +#define IXGBE_MAX_SECRX_POLL 30
3632 + * Workaround for 82599 silicon errata when enabling the Rx datapath.
3633 + * If traffic is incoming before we enable the Rx unit, it could hang
3634 + * the Rx DMA unit. Therefore, make sure the security engine is
3635 + * completely disabled prior to enabling the Rx unit.
3637 + secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
3638 + secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
3639 + IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
3640 + for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
3641 + secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
3642 + if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
3645 + /* Use interrupt-safe sleep just in case */
3649 + /* For informational purposes only */
3650 + if (i >= IXGBE_MAX_SECRX_POLL)
3651 + hw_dbg(hw, "Rx unit being enabled before security "
3652 + "path fully disabled. Continuing with init.\n");
3654 + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
3655 + secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
3656 + secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
3657 + IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
3658 + IXGBE_WRITE_FLUSH(hw);
3664 + * ixgbe_get_device_caps_82599 - Get additional device capabilities
3665 + * @hw: pointer to hardware structure
3666 + * @device_caps: the EEPROM word with the extra device capabilities
3668 + * This function will read the EEPROM location for the device capabilities,
3669 + * and return the word through device_caps.
3671 +s32 ixgbe_get_device_caps_82599(struct ixgbe_hw *hw, u16 *device_caps)
3673 + hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
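A hypothetical caller then tests feature bits in the returned word (the bit position below is illustrative; the real meanings are defined by the EEPROM layout):

        u16 caps = 0;

        ixgbe_get_device_caps_82599(hw, &caps);
        if (caps & (1 << 1))    /* e.g. a storage-offload capability bit */
                hw_dbg(hw, "device capability bit 1 is set\n");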
3679 + * ixgbe_verify_fw_version_82599 - verify fw version for 82599
3680 + * @hw: pointer to hardware structure
3682 + * Verifies that the installed firmware version is 0.6 or higher
3683 + * for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
3685 + * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
3686 + * if the FW version is not supported.
3688 +static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
3690 + s32 status = IXGBE_ERR_EEPROM_VERSION;
3691 + u16 fw_offset, fw_ptp_cfg_offset;
3692 + u16 fw_version = 0;
3694 + /* firmware check is only necessary for SFI devices */
3695 + if (hw->phy.media_type != ixgbe_media_type_fiber) {
3697 + goto fw_version_out;
3700 + /* get the offset to the Firmware Module block */
3701 + hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
3703 + if ((fw_offset == 0) || (fw_offset == 0xFFFF))
3704 + goto fw_version_out;
3706 + /* get the offset to the Pass Through Patch Configuration block */
3707 + hw->eeprom.ops.read(hw, (fw_offset +
3708 + IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
3709 + &fw_ptp_cfg_offset);
3711 + if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
3712 + goto fw_version_out;
3714 + /* get the firmware version */
3715 + hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
3716 + IXGBE_FW_PATCH_VERSION_4),
3719 + if (fw_version > 0x5)
3726 + * ixgbe_enable_relaxed_ordering_82599 - Enable relaxed ordering
3727 + * @hw: pointer to hardware structure
3730 +void ixgbe_enable_relaxed_ordering_82599(struct ixgbe_hw *hw)
3735 + /* Enable relaxed ordering */
3736 + for (i = 0; i < hw->mac.max_tx_queues; i++) {
3737 + regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
3738 + regval |= IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
3739 + IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
3742 + for (i = 0; i < hw->mac.max_rx_queues; i++) {
3743 + regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
3744 + regval |= (IXGBE_DCA_RXCTRL_DESC_WRO_EN |
3745 + IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
3746 + IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
3750 diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_api.c linux-2.6.22-50/drivers/net/ixgbe/ixgbe_api.c
3751 --- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_api.c 1969-12-31 19:00:00.000000000 -0500
3752 +++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_api.c 2010-08-25 17:56:26.000000000 -0400
3754 +/*******************************************************************************
3756 + Intel 10 Gigabit PCI Express Linux driver
3757 + Copyright(c) 1999 - 2010 Intel Corporation.
3759 + This program is free software; you can redistribute it and/or modify it
3760 + under the terms and conditions of the GNU General Public License,
3761 + version 2, as published by the Free Software Foundation.
3763 + This program is distributed in the hope it will be useful, but WITHOUT
3764 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
3765 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
3768 + You should have received a copy of the GNU General Public License along with
3769 + this program; if not, write to the Free Software Foundation, Inc.,
3770 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
3772 + The full GNU General Public License is included in this distribution in
3773 + the file called "COPYING".
3775 + Contact Information:
3776 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
3777 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
3779 +*******************************************************************************/
3781 +#include "ixgbe_api.h"
3782 +#include "ixgbe_common.h"
3784 +extern s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw);
3785 +extern s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw);
3788 + * ixgbe_init_shared_code - Initialize the shared code
3789 + * @hw: pointer to hardware structure
3791 + * This will assign function pointers and assign the MAC type and PHY code.
3792 + * Does not touch the hardware. This function must be called prior to any
3793 + * other function in the shared code. The ixgbe_hw structure should be
3794 + * memset to 0 prior to calling this function. The following fields in
3795 + * hw structure should be filled in prior to calling this function:
3796 + * hw_addr, back, device_id, vendor_id, subsystem_device_id,
3797 + * subsystem_vendor_id, and revision_id
3799 +s32 ixgbe_init_shared_code(struct ixgbe_hw *hw)
3804 + * Set the mac type
3806 + ixgbe_set_mac_type(hw);
3808 + switch (hw->mac.type) {
3809 + case ixgbe_mac_82598EB:
3810 + status = ixgbe_init_ops_82598(hw);
3812 + case ixgbe_mac_82599EB:
3813 + status = ixgbe_init_ops_82599(hw);
3816 + status = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
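A caller-side sketch of the contract described above (the pci_dev accessors, the adapter struct, and the error label are assumptions about the enclosing driver; only the listed ixgbe_hw fields come from this patch):

        struct ixgbe_hw *hw = &adapter->hw;     /* hypothetical adapter */

        memset(hw, 0, sizeof(*hw));
        hw->back = adapter;
        hw->hw_addr = adapter->io_addr;         /* mapped BAR 0, assumed */
        hw->vendor_id = pdev->vendor;
        hw->device_id = pdev->device;
        hw->subsystem_vendor_id = pdev->subsystem_vendor;
        hw->subsystem_device_id = pdev->subsystem_device;
        pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);

        if (ixgbe_init_shared_code(hw) != 0)
                goto err_sw_init;               /* unsupported device */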
3824 + * ixgbe_set_mac_type - Sets MAC type
3825 + * @hw: pointer to the HW structure
3827 + * This function sets the mac type of the adapter based on the
3828 + * vendor ID and device ID stored in the hw structure.
3830 +s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
3834 + if (hw->vendor_id == IXGBE_INTEL_VENDOR_ID) {
3835 + switch (hw->device_id) {
3836 + case IXGBE_DEV_ID_82598:
3837 + case IXGBE_DEV_ID_82598_BX:
3838 + case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
3839 + case IXGBE_DEV_ID_82598AF_DUAL_PORT:
3840 + case IXGBE_DEV_ID_82598AT:
3841 + case IXGBE_DEV_ID_82598AT2:
3842 + case IXGBE_DEV_ID_82598EB_CX4:
3843 + case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
3844 + case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
3845 + case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
3846 + case IXGBE_DEV_ID_82598EB_XF_LR:
3847 + case IXGBE_DEV_ID_82598EB_SFP_LOM:
3848 + hw->mac.type = ixgbe_mac_82598EB;
3850 + case IXGBE_DEV_ID_82599_KX4:
3851 + case IXGBE_DEV_ID_82599_KX4_MEZZ:
3852 + case IXGBE_DEV_ID_82599_XAUI_LOM:
3853 + case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
3854 + case IXGBE_DEV_ID_82599_KR:
3855 + case IXGBE_DEV_ID_82599_SFP:
3856 + case IXGBE_DEV_ID_82599_SFP_EM:
3857 + case IXGBE_DEV_ID_82599_CX4:
3858 + case IXGBE_DEV_ID_82599_T3_LOM:
3859 + hw->mac.type = ixgbe_mac_82599EB;
3862 + ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
3866 + ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
3869 + hw_dbg(hw, "ixgbe_set_mac_type found mac: %d, returns: %d\n",
3870 + hw->mac.type, ret_val);
3875 + * ixgbe_init_hw - Initialize the hardware
3876 + * @hw: pointer to hardware structure
3878 + * Initializes the hardware by resetting it and then starting it.
3880 +s32 ixgbe_init_hw(struct ixgbe_hw *hw)
3882 + return ixgbe_call_func(hw, hw->mac.ops.init_hw, (hw),
3883 + IXGBE_NOT_IMPLEMENTED);
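All wrappers below route through ixgbe_call_func, which guards against function pointers that a MAC-specific init left NULL. The macro is defined outside this hunk; a minimal sketch of the assumed behavior:

        #define ixgbe_call_func(hw, func, params, error) \
                ((func) != NULL ? (func) params : (error))

With hw->mac.ops.init_hw unset, ixgbe_init_hw() above therefore returns IXGBE_NOT_IMPLEMENTED instead of dereferencing a NULL pointer.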
3887 + * ixgbe_reset_hw - Performs a hardware reset
3888 + * @hw: pointer to hardware structure
3890 + * Resets the hardware by resetting the transmit and receive units, masks and
3891 + * clears all interrupts, performs a PHY reset, and performs a MAC reset
3893 +s32 ixgbe_reset_hw(struct ixgbe_hw *hw)
3895 + return ixgbe_call_func(hw, hw->mac.ops.reset_hw, (hw),
3896 + IXGBE_NOT_IMPLEMENTED);
3900 + * ixgbe_start_hw - Prepares hardware for Rx/Tx
3901 + * @hw: pointer to hardware structure
3903 + * Starts the hardware by filling the bus info structure and media type,
3904 + * clearing all on-chip counters, initializing the receive address registers,
3905 + * multicast table and VLAN filter table, and calling the routine to set up
3906 + * link and flow control settings; leaves the transmit and receive units
3907 + * disabled and uninitialized.
3909 +s32 ixgbe_start_hw(struct ixgbe_hw *hw)
3911 + return ixgbe_call_func(hw, hw->mac.ops.start_hw, (hw),
3912 + IXGBE_NOT_IMPLEMENTED);
3916 + * ixgbe_enable_relaxed_ordering - Enables Tx relaxed ordering,
3917 + * which is disabled by default in ixgbe_start_hw().
3919 + * @hw: pointer to hardware structure
3921 + * Enables relaxed ordering.
3923 +void ixgbe_enable_relaxed_ordering(struct ixgbe_hw *hw)
3925 + if (hw->mac.ops.enable_relaxed_ordering)
3926 + hw->mac.ops.enable_relaxed_ordering(hw);
3930 + * ixgbe_clear_hw_cntrs - Clear hardware counters
3931 + * @hw: pointer to hardware structure
3933 + * Clears all hardware statistics counters by reading them from the hardware.
3934 + * Statistics counters are clear on read.
3936 +s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw)
3938 + return ixgbe_call_func(hw, hw->mac.ops.clear_hw_cntrs, (hw),
3939 + IXGBE_NOT_IMPLEMENTED);
3943 + * ixgbe_get_media_type - Get media type
3944 + * @hw: pointer to hardware structure
3946 + * Returns the media type (fiber, copper, backplane)
3948 +enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw)
3950 + return ixgbe_call_func(hw, hw->mac.ops.get_media_type, (hw),
3951 + ixgbe_media_type_unknown);
3955 + * ixgbe_get_mac_addr - Get MAC address
3956 + * @hw: pointer to hardware structure
3957 + * @mac_addr: Adapter MAC address
3959 + * Reads the adapter's MAC address from the first Receive Address Register
3960 + * (RAR0). A reset of the adapter must have been performed prior to calling
3961 + * this function in order for the MAC address to have been loaded from the
3962 + * EEPROM into RAR0.
3964 +s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr)
3966 + return ixgbe_call_func(hw, hw->mac.ops.get_mac_addr,
3967 + (hw, mac_addr), IXGBE_NOT_IMPLEMENTED);
3971 + * ixgbe_get_san_mac_addr - Get SAN MAC address
3972 + * @hw: pointer to hardware structure
3973 + * @san_mac_addr: SAN MAC address
3975 + * Reads the SAN MAC address from the EEPROM, if it's available. This is
3976 + * per-port, so set_lan_id() must be called before reading the addresses.
3978 +s32 ixgbe_get_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr)
3980 + return ixgbe_call_func(hw, hw->mac.ops.get_san_mac_addr,
3981 + (hw, san_mac_addr), IXGBE_NOT_IMPLEMENTED);
3985 + * ixgbe_set_san_mac_addr - Write a SAN MAC address
3986 + * @hw: pointer to hardware structure
3987 + * @san_mac_addr: SAN MAC address
3989 + * Writes A SAN MAC address to the EEPROM.
3991 +s32 ixgbe_set_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr)
3993 + return ixgbe_call_func(hw, hw->mac.ops.set_san_mac_addr,
3994 + (hw, san_mac_addr), IXGBE_NOT_IMPLEMENTED);
3998 + * ixgbe_get_device_caps - Get additional device capabilities
3999 + * @hw: pointer to hardware structure
4000 + * @device_caps: the EEPROM word for device capabilities
4002 + * Reads the extra device capabilities from the EEPROM
4004 +s32 ixgbe_get_device_caps(struct ixgbe_hw *hw, u16 *device_caps)
4006 + return ixgbe_call_func(hw, hw->mac.ops.get_device_caps,
4007 + (hw, device_caps), IXGBE_NOT_IMPLEMENTED);
4011 + * ixgbe_get_wwn_prefix - Get alternative WWNN/WWPN prefix from the EEPROM
4012 + * @hw: pointer to hardware structure
4013 + * @wwnn_prefix: the alternative WWNN prefix
4014 + * @wwpn_prefix: the alternative WWPN prefix
4016 + * This function will read the EEPROM from the alternative SAN MAC address
4017 + * block to check for alternative WWNN/WWPN prefix support.
4019 +s32 ixgbe_get_wwn_prefix(struct ixgbe_hw *hw, u16 *wwnn_prefix,
4022 + return ixgbe_call_func(hw, hw->mac.ops.get_wwn_prefix,
4023 + (hw, wwnn_prefix, wwpn_prefix),
4024 + IXGBE_NOT_IMPLEMENTED);
4028 + * ixgbe_get_fcoe_boot_status - Get FCoE boot status from EEPROM
4029 + * @hw: pointer to hardware structure
4030 + * @bs: the FCoE boot status
4032 + * This function will read the FCoE boot status from the iSCSI FCoE block
4034 +s32 ixgbe_get_fcoe_boot_status(struct ixgbe_hw *hw, u16 *bs)
4036 + return ixgbe_call_func(hw, hw->mac.ops.get_fcoe_boot_status,
4038 + IXGBE_NOT_IMPLEMENTED);
4042 + * ixgbe_get_bus_info - Set PCI bus info
4043 + * @hw: pointer to hardware structure
4045 + * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure
4047 +s32 ixgbe_get_bus_info(struct ixgbe_hw *hw)
4049 + return ixgbe_call_func(hw, hw->mac.ops.get_bus_info, (hw),
4050 + IXGBE_NOT_IMPLEMENTED);
4054 + * ixgbe_get_num_of_tx_queues - Get Tx queues
4055 + * @hw: pointer to hardware structure
4057 + * Returns the number of transmit queues for the given adapter.
4059 +u32 ixgbe_get_num_of_tx_queues(struct ixgbe_hw *hw)
4061 + return hw->mac.max_tx_queues;
4065 + * ixgbe_get_num_of_rx_queues - Get Rx queues
4066 + * @hw: pointer to hardware structure
4068 + * Returns the number of receive queues for the given adapter.
4070 +u32 ixgbe_get_num_of_rx_queues(struct ixgbe_hw *hw)
4072 + return hw->mac.max_rx_queues;
4076 + * ixgbe_stop_adapter - Disable Rx/Tx units
4077 + * @hw: pointer to hardware structure
4079 + * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
4080 + * disables transmit and receive units. The adapter_stopped flag is used by
4081 + * the shared code and drivers to determine if the adapter is in a stopped
4082 + * state and should not touch the hardware.
4084 +s32 ixgbe_stop_adapter(struct ixgbe_hw *hw)
4086 + return ixgbe_call_func(hw, hw->mac.ops.stop_adapter, (hw),
4087 + IXGBE_NOT_IMPLEMENTED);
4091 + * ixgbe_read_pba_string - Reads part number string from EEPROM
4092 + * @hw: pointer to hardware structure
4093 + * @pba_num: stores the part number string from the EEPROM
4094 + * @pba_num_size: part number string buffer length
4096 + * Reads the part number string from the EEPROM.
4097 + * Returns the expected buffer size in pba_num_size if the passed-in buffer was too small.
4100 +s32 ixgbe_read_pba_string(struct ixgbe_hw *hw, u8 *pba_num, u32 *pba_num_size)
4102 + return ixgbe_read_pba_string_generic(hw, pba_num, pba_num_size);
4106 + * ixgbe_read_pba_num - Reads part number from EEPROM
4107 + * @hw: pointer to hardware structure
4108 + * @pba_num: stores the part number from the EEPROM
4110 + * Reads the part number from the EEPROM.
4112 +s32 ixgbe_read_pba_num(struct ixgbe_hw *hw, u32 *pba_num)
4114 + return ixgbe_read_pba_num_generic(hw, pba_num);
4118 + * ixgbe_identify_phy - Get PHY type
4119 + * @hw: pointer to hardware structure
4121 + * Determines the physical layer module found on the current adapter.
4123 +s32 ixgbe_identify_phy(struct ixgbe_hw *hw)
4127 + if (hw->phy.type == ixgbe_phy_unknown) {
4128 + status = ixgbe_call_func(hw,
4129 + hw->phy.ops.identify,
4131 + IXGBE_NOT_IMPLEMENTED);
4138 + * ixgbe_reset_phy - Perform a PHY reset
4139 + * @hw: pointer to hardware structure
4141 +s32 ixgbe_reset_phy(struct ixgbe_hw *hw)
4145 + if (hw->phy.type == ixgbe_phy_unknown) {
4146 + if (ixgbe_identify_phy(hw) != 0)
4147 + status = IXGBE_ERR_PHY;
4150 + if (status == 0) {
4151 + status = ixgbe_call_func(hw, hw->phy.ops.reset, (hw),
4152 + IXGBE_NOT_IMPLEMENTED);
4158 + * ixgbe_get_phy_firmware_version - Get PHY firmware version
4159 + * @hw: pointer to hardware structure
4160 + * @firmware_version: pointer to firmware version
4162 +s32 ixgbe_get_phy_firmware_version(struct ixgbe_hw *hw, u16 *firmware_version)
4166 + status = ixgbe_call_func(hw, hw->phy.ops.get_firmware_version,
4167 + (hw, firmware_version),
4168 + IXGBE_NOT_IMPLEMENTED);
4173 + * ixgbe_read_phy_reg - Read PHY register
4174 + * @hw: pointer to hardware structure
4175 + * @reg_addr: 32 bit address of PHY register to read
4176 + * @phy_data: Pointer to read data from PHY register
4178 + * Reads a value from a specified PHY register
4180 +s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
4183 + if (hw->phy.id == 0)
4184 + ixgbe_identify_phy(hw);
4186 + return ixgbe_call_func(hw, hw->phy.ops.read_reg, (hw, reg_addr,
4187 + device_type, phy_data), IXGBE_NOT_IMPLEMENTED);
4191 + * ixgbe_write_phy_reg - Write PHY register
4192 + * @hw: pointer to hardware structure
4193 + * @reg_addr: 32 bit PHY register to write
4194 + * @phy_data: Data to write to the PHY register
4196 + * Writes a value to specified PHY register
4198 +s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
4201 + if (hw->phy.id == 0)
4202 + ixgbe_identify_phy(hw);
4204 + return ixgbe_call_func(hw, hw->phy.ops.write_reg, (hw, reg_addr,
4205 + device_type, phy_data), IXGBE_NOT_IMPLEMENTED);
4209 + * ixgbe_setup_phy_link - Restart PHY autoneg
4210 + * @hw: pointer to hardware structure
4212 + * Restarts autonegotiation on the PHY and waits for completion.
4214 +s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw)
4216 + return ixgbe_call_func(hw, hw->phy.ops.setup_link, (hw),
4217 + IXGBE_NOT_IMPLEMENTED);
4221 + * ixgbe_check_phy_link - Determine link and speed status
4222 + * @hw: pointer to hardware structure
4224 + * Reads a PHY register to determine if link is up and the current speed for the PHY.
4227 +s32 ixgbe_check_phy_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
4230 + return ixgbe_call_func(hw, hw->phy.ops.check_link, (hw, speed,
4231 + link_up), IXGBE_NOT_IMPLEMENTED);
4235 + * ixgbe_setup_phy_link_speed - Set auto advertise
4236 + * @hw: pointer to hardware structure
4237 + * @speed: new link speed
4238 + * @autoneg: true if autonegotiation enabled
4240 + * Sets the auto advertised capabilities
4242 +s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed,
4244 + bool autoneg_wait_to_complete)
4246 + return ixgbe_call_func(hw, hw->phy.ops.setup_link_speed, (hw, speed,
4247 + autoneg, autoneg_wait_to_complete),
4248 + IXGBE_NOT_IMPLEMENTED);
4252 + * ixgbe_check_link - Get link and speed status
4253 + * @hw: pointer to hardware structure
4255 + * Reads the LINKS register to determine if link is up and the current speed.
4257 +s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
4258 + bool *link_up, bool link_up_wait_to_complete)
4260 + return ixgbe_call_func(hw, hw->mac.ops.check_link, (hw, speed,
4261 + link_up, link_up_wait_to_complete),
4262 + IXGBE_NOT_IMPLEMENTED);
4266 + * ixgbe_disable_tx_laser - Disable Tx laser
4267 + * @hw: pointer to hardware structure
4269 + * Used when the driver needs to disable the Tx laser on SFI optics.
4271 +void ixgbe_disable_tx_laser(struct ixgbe_hw *hw)
4273 + if (hw->mac.ops.disable_tx_laser)
4274 + hw->mac.ops.disable_tx_laser(hw);
4278 + * ixgbe_enable_tx_laser - Enable Tx laser
4279 + * @hw: pointer to hardware structure
4281 + * Used when the driver needs to enable the Tx laser on SFI optics.
4283 +void ixgbe_enable_tx_laser(struct ixgbe_hw *hw)
4285 + if (hw->mac.ops.enable_tx_laser)
4286 + hw->mac.ops.enable_tx_laser(hw);
4290 + * ixgbe_flap_tx_laser - flap Tx laser to start autotry process
4291 + * @hw: pointer to hardware structure
4293 + * When the driver changes the link speeds that it can support, it flaps
4294 + * the Tx laser to alert the link partner to start the autotry process
4295 + * on its end.
4297 +void ixgbe_flap_tx_laser(struct ixgbe_hw *hw)
4299 + if (hw->mac.ops.flap_tx_laser)
4300 + hw->mac.ops.flap_tx_laser(hw);
4304 + * ixgbe_setup_link - Set link speed
4305 + * @hw: pointer to hardware structure
4306 + * @speed: new link speed
4307 + * @autoneg: true if autonegotiation enabled
4309 + * Configures link settings. Restarts the link.
4310 + * Performs autonegotiation if needed.
4312 +s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed,
4314 + bool autoneg_wait_to_complete)
4316 + return ixgbe_call_func(hw, hw->mac.ops.setup_link, (hw, speed,
4317 + autoneg, autoneg_wait_to_complete),
4318 + IXGBE_NOT_IMPLEMENTED);
4322 + * ixgbe_get_link_capabilities - Returns link capabilities
4323 + * @hw: pointer to hardware structure
4325 + * Determines the link capabilities of the current configuration.
4327 +s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
4330 + return ixgbe_call_func(hw, hw->mac.ops.get_link_capabilities, (hw,
4331 + speed, autoneg), IXGBE_NOT_IMPLEMENTED);
4335 + * ixgbe_led_on - Turn on LEDs
4336 + * @hw: pointer to hardware structure
4337 + * @index: led number to turn on
4339 + * Turns on the software controllable LEDs.
4341 +s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index)
4343 + return ixgbe_call_func(hw, hw->mac.ops.led_on, (hw, index),
4344 + IXGBE_NOT_IMPLEMENTED);
4348 + * ixgbe_led_off - Turn off LEDs
4349 + * @hw: pointer to hardware structure
4350 + * @index: led number to turn off
4352 + * Turns off the software controllable LEDs.
4354 +s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index)
4356 + return ixgbe_call_func(hw, hw->mac.ops.led_off, (hw, index),
4357 + IXGBE_NOT_IMPLEMENTED);
4361 + * ixgbe_blink_led_start - Blink LEDs
4362 + * @hw: pointer to hardware structure
4363 + * @index: led number to blink
4365 + * Blink LED based on index.
4367 +s32 ixgbe_blink_led_start(struct ixgbe_hw *hw, u32 index)
4369 + return ixgbe_call_func(hw, hw->mac.ops.blink_led_start, (hw, index),
4370 + IXGBE_NOT_IMPLEMENTED);
4374 + * ixgbe_blink_led_stop - Stop blinking LEDs
4375 + * @hw: pointer to hardware structure
4377 + * Stop blinking LED based on index.
4379 +s32 ixgbe_blink_led_stop(struct ixgbe_hw *hw, u32 index)
4381 + return ixgbe_call_func(hw, hw->mac.ops.blink_led_stop, (hw, index),
4382 + IXGBE_NOT_IMPLEMENTED);
4386 + * ixgbe_init_eeprom_params - Initialize EEPROM parameters
4387 + * @hw: pointer to hardware structure
4389 + * Initializes the EEPROM parameters ixgbe_eeprom_info within the
4390 + * ixgbe_hw struct in order to set up EEPROM access.
4392 +s32 ixgbe_init_eeprom_params(struct ixgbe_hw *hw)
4394 + return ixgbe_call_func(hw, hw->eeprom.ops.init_params, (hw),
4395 + IXGBE_NOT_IMPLEMENTED);
4400 + * ixgbe_write_eeprom - Write word to EEPROM
4401 + * @hw: pointer to hardware structure
4402 + * @offset: offset within the EEPROM to be written to
4403 + * @data: 16 bit word to be written to the EEPROM
4405 + * Writes 16 bit value to EEPROM. If ixgbe_update_eeprom_checksum is not
4406 + * called after this function, the EEPROM will most likely contain an
4407 + * invalid checksum.
4409 +s32 ixgbe_write_eeprom(struct ixgbe_hw *hw, u16 offset, u16 data)
4411 + return ixgbe_call_func(hw, hw->eeprom.ops.write, (hw, offset, data),
4412 + IXGBE_NOT_IMPLEMENTED);
4416 + * ixgbe_read_eeprom - Read word from EEPROM
4417 + * @hw: pointer to hardware structure
4418 + * @offset: offset within the EEPROM to be read
4419 + * @data: read 16 bit value from EEPROM
4421 + * Reads 16 bit value from EEPROM
4423 +s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data)
4425 + return ixgbe_call_func(hw, hw->eeprom.ops.read, (hw, offset, data),
4426 + IXGBE_NOT_IMPLEMENTED);
4430 + * ixgbe_validate_eeprom_checksum - Validate EEPROM checksum
4431 + * @hw: pointer to hardware structure
4432 + * @checksum_val: calculated checksum
4434 + * Performs checksum calculation and validates the EEPROM checksum
4436 +s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val)
4438 + return ixgbe_call_func(hw, hw->eeprom.ops.validate_checksum,
4439 + (hw, checksum_val), IXGBE_NOT_IMPLEMENTED);
4443 + * ixgbe_update_eeprom_checksum - Updates the EEPROM checksum
4444 + * @hw: pointer to hardware structure
4446 +s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw)
4448 + return ixgbe_call_func(hw, hw->eeprom.ops.update_checksum, (hw),
4449 + IXGBE_NOT_IMPLEMENTED);
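Per the warning in ixgbe_write_eeprom() above, a write should be paired with a checksum update before the NVM is trusted again; a usage sketch (the offset and value are arbitrary examples):

        u16 checksum;

        if (ixgbe_write_eeprom(hw, 0x2F, 0x1234) == 0 &&
            ixgbe_update_eeprom_checksum(hw) == 0) {
                /* confirm the stored checksum is consistent again */
                ixgbe_validate_eeprom_checksum(hw, &checksum);
        }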
4453 + * ixgbe_insert_mac_addr - Find a RAR for this mac address
4454 + * @hw: pointer to hardware structure
4455 + * @addr: Address to put into receive address register
4456 + * @vmdq: VMDq pool to assign
4458 + * Puts an ethernet address into a receive address register, or
4459 + * finds the RAR that it is already in, and adds it to the pool list.
4461 +s32 ixgbe_insert_mac_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
4463 + return ixgbe_call_func(hw, hw->mac.ops.insert_mac_addr,
4465 + IXGBE_NOT_IMPLEMENTED);
4469 + * ixgbe_set_rar - Set Rx address register
4470 + * @hw: pointer to hardware structure
4471 + * @index: Receive address register to write
4472 + * @addr: Address to put into receive address register
4473 + * @vmdq: VMDq "set"
4474 + * @enable_addr: set flag that address is active
4476 + * Puts an ethernet address into a receive address register.
4478 +s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
4481 + return ixgbe_call_func(hw, hw->mac.ops.set_rar, (hw, index, addr, vmdq,
4482 + enable_addr), IXGBE_NOT_IMPLEMENTED);
4486 + * ixgbe_clear_rar - Clear Rx address register
4487 + * @hw: pointer to hardware structure
4488 + * @index: Receive address register to clear
4490 + * Clears an ethernet address from a receive address register.
4492 +s32 ixgbe_clear_rar(struct ixgbe_hw *hw, u32 index)
4494 + return ixgbe_call_func(hw, hw->mac.ops.clear_rar, (hw, index),
4495 + IXGBE_NOT_IMPLEMENTED);
4499 + * ixgbe_set_vmdq - Associate a VMDq index with a receive address
4500 + * @hw: pointer to hardware structure
4501 + * @rar: receive address register index to associate with VMDq index
4502 + * @vmdq: VMDq set or pool index
4504 +s32 ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
4506 + return ixgbe_call_func(hw, hw->mac.ops.set_vmdq, (hw, rar, vmdq),
4507 + IXGBE_NOT_IMPLEMENTED);
4511 + * ixgbe_clear_vmdq - Disassociate a VMDq index from a receive address
4512 + * @hw: pointer to hardware structure
4513 + * @rar: receive address register index to disassociate with VMDq index
4514 + * @vmdq: VMDq set or pool index
4516 +s32 ixgbe_clear_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
4518 + return ixgbe_call_func(hw, hw->mac.ops.clear_vmdq, (hw, rar, vmdq),
4519 + IXGBE_NOT_IMPLEMENTED);
4523 + * ixgbe_init_rx_addrs - Initializes receive address filters.
4524 + * @hw: pointer to hardware structure
4526 + * Places the MAC address in receive address register 0 and clears the rest
4527 + * of the receive address registers. Clears the multicast table. Assumes
4528 + * the receiver is in reset when the routine is called.
4530 +s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw)
4532 + return ixgbe_call_func(hw, hw->mac.ops.init_rx_addrs, (hw),
4533 + IXGBE_NOT_IMPLEMENTED);
4537 + * ixgbe_get_num_rx_addrs - Returns the number of RAR entries.
4538 + * @hw: pointer to hardware structure
4540 +u32 ixgbe_get_num_rx_addrs(struct ixgbe_hw *hw)
4542 + return hw->mac.num_rar_entries;
4546 + * ixgbe_update_uc_addr_list - Updates the MAC's list of secondary addresses
4547 + * @hw: pointer to hardware structure
4548 + * @addr_list: the list of new unicast (secondary) addresses
4549 + * @addr_count: number of addresses
4550 + * @func: iterator function to walk the address list
4552 + * The given list replaces any existing list. Clears the secondary addrs from
4553 + * receive address registers. Uses unused receive address registers for the
4554 + * first secondary addresses, and falls back to promiscuous mode as needed.
4556 +s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list,
4557 + u32 addr_count, ixgbe_mc_addr_itr func)
4559 + return ixgbe_call_func(hw, hw->mac.ops.update_uc_addr_list, (hw,
4560 + addr_list, addr_count, func),
4561 + IXGBE_NOT_IMPLEMENTED);
4565 + * ixgbe_update_mc_addr_list - Updates the MAC's list of multicast addresses
4566 + * @hw: pointer to hardware structure
4567 + * @mc_addr_list: the list of new multicast addresses
4568 + * @mc_addr_count: number of addresses
4569 + * @func: iterator function to walk the multicast address list
4571 + * The given list replaces any existing list. Clears the MC addrs from receive
4572 + * address registers and the multicast table. Uses unused receive address
4573 + * registers for the first multicast addresses, and hashes the rest into the
4574 + * multicast table.
4576 +s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list,
4577 + u32 mc_addr_count, ixgbe_mc_addr_itr func)
4579 + return ixgbe_call_func(hw, hw->mac.ops.update_mc_addr_list, (hw,
4580 + mc_addr_list, mc_addr_count, func),
4581 + IXGBE_NOT_IMPLEMENTED);
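A sketch of an iterator that walks a flat array of 6-byte addresses, assuming the ixgbe_mc_addr_itr signature used by this shared-code release, u8 *(*)(struct ixgbe_hw *, u8 **, u32 *):

        static u8 *mc_array_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
        {
                u8 *addr = *mc_addr_ptr;

                *vmdq = 0;                      /* default pool */
                *mc_addr_ptr = addr + 6;        /* advance to the next entry */
                return addr;
        }

        /* usage: ixgbe_update_mc_addr_list(hw, mc_list, count, mc_array_itr); */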
4585 + * ixgbe_enable_mc - Enable multicast address in RAR
4586 + * @hw: pointer to hardware structure
4588 + * Enables multicast address in RAR and the use of the multicast hash table.
4590 +s32 ixgbe_enable_mc(struct ixgbe_hw *hw)
4592 + return ixgbe_call_func(hw, hw->mac.ops.enable_mc, (hw),
4593 + IXGBE_NOT_IMPLEMENTED);
4597 + * ixgbe_disable_mc - Disable multicast address in RAR
4598 + * @hw: pointer to hardware structure
4600 + * Disables multicast address in RAR and the use of the multicast hash table.
4602 +s32 ixgbe_disable_mc(struct ixgbe_hw *hw)
4604 + return ixgbe_call_func(hw, hw->mac.ops.disable_mc, (hw),
4605 + IXGBE_NOT_IMPLEMENTED);
4609 + * ixgbe_clear_vfta - Clear VLAN filter table
4610 + * @hw: pointer to hardware structure
4612 + * Clears the VLAN filter table, and the VMDq index associated with the filter
4614 +s32 ixgbe_clear_vfta(struct ixgbe_hw *hw)
4616 + return ixgbe_call_func(hw, hw->mac.ops.clear_vfta, (hw),
4617 + IXGBE_NOT_IMPLEMENTED);
4621 + * ixgbe_set_vfta - Set VLAN filter table
4622 + * @hw: pointer to hardware structure
4623 + * @vlan: VLAN id to write to VLAN filter
4624 + * @vind: VMDq output index that maps queue to VLAN id in VFTA
4625 + * @vlan_on: boolean flag to turn on/off VLAN in VFTA
4627 + * Turn on/off specified VLAN in the VLAN filter table.
4629 +s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on)
4631 + return ixgbe_call_func(hw, hw->mac.ops.set_vfta, (hw, vlan, vind,
4632 + vlan_on), IXGBE_NOT_IMPLEMENTED);
4636 + * ixgbe_fc_enable - Enable flow control
4637 + * @hw: pointer to hardware structure
4638 + * @packetbuf_num: packet buffer number (0-7)
4640 + * Configures the flow control settings based on SW configuration.
4642 +s32 ixgbe_fc_enable(struct ixgbe_hw *hw, s32 packetbuf_num)
4644 + return ixgbe_call_func(hw, hw->mac.ops.fc_enable, (hw, packetbuf_num),
4645 + IXGBE_NOT_IMPLEMENTED);
4649 + * ixgbe_read_analog_reg8 - Reads 8 bit analog register
4650 + * @hw: pointer to hardware structure
4651 + * @reg: analog register to read
4652 + * @val: read value
4654 + * Performs read operation on the analog register specified.
4656 +s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val)
4658 + return ixgbe_call_func(hw, hw->mac.ops.read_analog_reg8, (hw, reg,
4659 + val), IXGBE_NOT_IMPLEMENTED);
4663 + * ixgbe_write_analog_reg8 - Writes 8 bit analog register
4664 + * @hw: pointer to hardware structure
4665 + * @reg: analog register to write
4666 + * @val: value to write
4668 + * Performs write operation to Atlas analog register specified.
4670 +s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val)
4672 + return ixgbe_call_func(hw, hw->mac.ops.write_analog_reg8, (hw, reg,
4673 + val), IXGBE_NOT_IMPLEMENTED);
4677 + * ixgbe_init_uta_tables - Initializes Unicast Table Arrays.
4678 + * @hw: pointer to hardware structure
4680 + * Initializes the Unicast Table Arrays to zero on device load. This
4681 + * is part of the Rx init addr execution path.
4683 +s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw)
4685 + return ixgbe_call_func(hw, hw->mac.ops.init_uta_tables, (hw),
4686 + IXGBE_NOT_IMPLEMENTED);
4690 + * ixgbe_read_i2c_byte - Reads 8 bit word over I2C at specified device address
4691 + * @hw: pointer to hardware structure
4692 + * @byte_offset: byte offset to read
4693 + * @data: value read
4695 + * Performs byte read operation to SFP module's EEPROM over I2C interface.
4697 +s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
4700 + return ixgbe_call_func(hw, hw->phy.ops.read_i2c_byte, (hw, byte_offset,
4701 + dev_addr, data), IXGBE_NOT_IMPLEMENTED);
4705 + * ixgbe_write_i2c_byte - Writes 8 bit word over I2C
4706 + * @hw: pointer to hardware structure
4707 + * @byte_offset: byte offset to write
4708 + * @data: value to write
4710 + * Performs byte write operation to SFP module's EEPROM over I2C interface
4711 + * at a specified device address.
4713 +s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
4716 + return ixgbe_call_func(hw, hw->phy.ops.write_i2c_byte, (hw, byte_offset,
4717 + dev_addr, data), IXGBE_NOT_IMPLEMENTED);
4721 + * ixgbe_write_i2c_eeprom - Writes 8 bit EEPROM word over I2C interface
4722 + * @hw: pointer to hardware structure
4723 + * @byte_offset: EEPROM byte offset to write
4724 + * @eeprom_data: value to write
4726 + * Performs byte write operation to SFP module's EEPROM over I2C interface.
4728 +s32 ixgbe_write_i2c_eeprom(struct ixgbe_hw *hw,
4729 + u8 byte_offset, u8 eeprom_data)
4731 + return ixgbe_call_func(hw, hw->phy.ops.write_i2c_eeprom,
4732 + (hw, byte_offset, eeprom_data),
4733 + IXGBE_NOT_IMPLEMENTED);
4737 + * ixgbe_read_i2c_eeprom - Reads 8 bit EEPROM word over I2C interface
4738 + * @hw: pointer to hardware structure
4739 + * @byte_offset: EEPROM byte offset to read
4740 + * @eeprom_data: value read
4742 + * Performs byte read operation to SFP module's EEPROM over I2C interface.
4744 +s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data)
4746 + return ixgbe_call_func(hw, hw->phy.ops.read_i2c_eeprom,
4747 + (hw, byte_offset, eeprom_data),
4748 + IXGBE_NOT_IMPLEMENTED);
4752 + * ixgbe_get_supported_physical_layer - Returns physical layer type
4753 + * @hw: pointer to hardware structure
4755 + * Determines physical layer capabilities of the current configuration.
4757 +u32 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw)
4759 + return ixgbe_call_func(hw, hw->mac.ops.get_supported_physical_layer,
4760 + (hw), IXGBE_PHYSICAL_LAYER_UNKNOWN);
4764 + * ixgbe_enable_rx_dma - Enables Rx DMA unit, dependent on device specifics
4765 + * @hw: pointer to hardware structure
4766 + * @regval: bitfield to write to the Rx DMA register
4768 + * Enables the Rx DMA unit of the device.
4770 +s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval)
4772 + return ixgbe_call_func(hw, hw->mac.ops.enable_rx_dma,
4773 + (hw, regval), IXGBE_NOT_IMPLEMENTED);
4777 + * ixgbe_acquire_swfw_semaphore - Acquire SWFW semaphore
4778 + * @hw: pointer to hardware structure
4779 + * @mask: Mask to specify which semaphore to acquire
4781 + * Acquires the SWFW semaphore through SW_FW_SYNC register for the specified
4782 + * function (CSR, PHY0, PHY1, EEPROM, Flash)
4784 +s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u16 mask)
4786 + return ixgbe_call_func(hw, hw->mac.ops.acquire_swfw_sync,
4787 + (hw, mask), IXGBE_NOT_IMPLEMENTED);
4791 + * ixgbe_release_swfw_semaphore - Release SWFW semaphore
4792 + * @hw: pointer to hardware structure
4793 + * @mask: Mask to specify which semaphore to release
4795 + * Releases the SWFW semaphore through SW_FW_SYNC register for the specified
4796 + * function (CSR, PHY0, PHY1, EEPROM, Flash)
4798 +void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u16 mask)
4800 + if (hw->mac.ops.release_swfw_sync)
4801 + hw->mac.ops.release_swfw_sync(hw, mask);
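Typical usage brackets the shared-resource access between acquire and release; a sketch (IXGBE_GSSR_EEP_SM is assumed to be the EEPROM semaphore mask, as the mask constants live outside this hunk):

        u16 word;

        if (ixgbe_acquire_swfw_semaphore(hw, IXGBE_GSSR_EEP_SM) == 0) {
                ixgbe_read_eeprom(hw, 0x00, &word);
                ixgbe_release_swfw_semaphore(hw, IXGBE_GSSR_EEP_SM);
        }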
4804 diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_api.h linux-2.6.22-50/drivers/net/ixgbe/ixgbe_api.h
4805 --- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_api.h 1969-12-31 19:00:00.000000000 -0500
4806 +++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_api.h 2010-08-25 17:56:26.000000000 -0400
4808 +/*******************************************************************************
4810 + Intel 10 Gigabit PCI Express Linux driver
4811 + Copyright(c) 1999 - 2010 Intel Corporation.
4813 + This program is free software; you can redistribute it and/or modify it
4814 + under the terms and conditions of the GNU General Public License,
4815 + version 2, as published by the Free Software Foundation.
4817 + This program is distributed in the hope it will be useful, but WITHOUT
4818 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
4819 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
4822 + You should have received a copy of the GNU General Public License along with
4823 + this program; if not, write to the Free Software Foundation, Inc.,
4824 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
4826 + The full GNU General Public License is included in this distribution in
4827 + the file called "COPYING".
4829 + Contact Information:
4830 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
4831 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
4833 +*******************************************************************************/
4835 +#ifndef _IXGBE_API_H_
4836 +#define _IXGBE_API_H_
4838 +#include "ixgbe_type.h"
4840 +s32 ixgbe_init_shared_code(struct ixgbe_hw *hw);
4842 +s32 ixgbe_set_mac_type(struct ixgbe_hw *hw);
4843 +s32 ixgbe_init_hw(struct ixgbe_hw *hw);
4844 +s32 ixgbe_reset_hw(struct ixgbe_hw *hw);
4845 +s32 ixgbe_start_hw(struct ixgbe_hw *hw);
4846 +void ixgbe_enable_relaxed_ordering(struct ixgbe_hw *hw);
4847 +s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw);
4848 +enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw);
4849 +s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr);
4850 +s32 ixgbe_get_bus_info(struct ixgbe_hw *hw);
4851 +u32 ixgbe_get_num_of_tx_queues(struct ixgbe_hw *hw);
4852 +u32 ixgbe_get_num_of_rx_queues(struct ixgbe_hw *hw);
4853 +s32 ixgbe_stop_adapter(struct ixgbe_hw *hw);
4854 +s32 ixgbe_read_pba_num(struct ixgbe_hw *hw, u32 *pba_num);
4855 +s32 ixgbe_read_pba_string(struct ixgbe_hw *hw, u8 *pba_num, u32 *pba_num_size);
4857 +s32 ixgbe_identify_phy(struct ixgbe_hw *hw);
4858 +s32 ixgbe_reset_phy(struct ixgbe_hw *hw);
4859 +s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
4861 +s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
4864 +s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw);
4865 +s32 ixgbe_check_phy_link(struct ixgbe_hw *hw,
4866 + ixgbe_link_speed *speed,
4868 +s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw,
4869 + ixgbe_link_speed speed,
4871 + bool autoneg_wait_to_complete);
4872 +void ixgbe_disable_tx_laser(struct ixgbe_hw *hw);
4873 +void ixgbe_enable_tx_laser(struct ixgbe_hw *hw);
4874 +void ixgbe_flap_tx_laser(struct ixgbe_hw *hw);
4875 +s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed,
4876 + bool autoneg, bool autoneg_wait_to_complete);
4877 +s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
4878 + bool *link_up, bool link_up_wait_to_complete);
4879 +s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
4881 +s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index);
4882 +s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index);
4883 +s32 ixgbe_blink_led_start(struct ixgbe_hw *hw, u32 index);
4884 +s32 ixgbe_blink_led_stop(struct ixgbe_hw *hw, u32 index);
4886 +s32 ixgbe_init_eeprom_params(struct ixgbe_hw *hw);
4887 +s32 ixgbe_write_eeprom(struct ixgbe_hw *hw, u16 offset, u16 data);
4888 +s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data);
4889 +s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val);
4890 +s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw);
4892 +s32 ixgbe_insert_mac_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
4893 +s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
4895 +s32 ixgbe_clear_rar(struct ixgbe_hw *hw, u32 index);
4896 +s32 ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
4897 +s32 ixgbe_clear_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
4898 +s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw);
4899 +u32 ixgbe_get_num_rx_addrs(struct ixgbe_hw *hw);
4900 +s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list,
4901 + u32 addr_count, ixgbe_mc_addr_itr func);
4902 +s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list,
4903 + u32 mc_addr_count, ixgbe_mc_addr_itr func);
4904 +void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr_list, u32 vmdq);
4905 +s32 ixgbe_enable_mc(struct ixgbe_hw *hw);
4906 +s32 ixgbe_disable_mc(struct ixgbe_hw *hw);
4907 +s32 ixgbe_clear_vfta(struct ixgbe_hw *hw);
4908 +s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan,
4909 + u32 vind, bool vlan_on);
4911 +s32 ixgbe_fc_enable(struct ixgbe_hw *hw, s32 packetbuf_num);
4913 +void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr);
4914 +s32 ixgbe_get_phy_firmware_version(struct ixgbe_hw *hw,
4915 + u16 *firmware_version);
4916 +s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val);
4917 +s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val);
4918 +s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw);
4919 +s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data);
4920 +u32 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw);
4921 +s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval);
4922 +s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
4923 +s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc);
4924 +s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc);
4925 +s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
4926 + struct ixgbe_atr_input *input,
4928 +s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
4929 + struct ixgbe_atr_input *input,
4930 + struct ixgbe_atr_input_masks *masks,
4933 +u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *input, u32 key);
4934 +s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input, u16 vlan_id);
4935 +s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input, u32 src_addr);
4936 +s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 dst_addr);
4937 +s32 ixgbe_atr_set_src_ipv6_82599(struct ixgbe_atr_input *input, u32 src_addr_1,
4938 + u32 src_addr_2, u32 src_addr_3,
4940 +s32 ixgbe_atr_set_dst_ipv6_82599(struct ixgbe_atr_input *input, u32 dst_addr_1,
4941 + u32 dst_addr_2, u32 dst_addr_3,
4943 +s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input, u16 src_port);
4944 +s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input, u16 dst_port);
4945 +s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, u16 flex_byte);
4946 +s32 ixgbe_atr_set_vm_pool_82599(struct ixgbe_atr_input *input, u8 vm_pool);
4947 +s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, u8 l4type);
4948 +s32 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input, u16 *vlan_id);
4949 +s32 ixgbe_atr_get_src_ipv4_82599(struct ixgbe_atr_input *input, u32 *src_addr);
4950 +s32 ixgbe_atr_get_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 *dst_addr);
4951 +s32 ixgbe_atr_get_src_ipv6_82599(struct ixgbe_atr_input *input, u32 *src_addr_1,
4952 + u32 *src_addr_2, u32 *src_addr_3,
4954 +s32 ixgbe_atr_get_dst_ipv6_82599(struct ixgbe_atr_input *input, u32 *dst_addr_1,
4955 + u32 *dst_addr_2, u32 *dst_addr_3,
4957 +s32 ixgbe_atr_get_src_port_82599(struct ixgbe_atr_input *input, u16 *src_port);
4958 +s32 ixgbe_atr_get_dst_port_82599(struct ixgbe_atr_input *input, u16 *dst_port);
4959 +s32 ixgbe_atr_get_flex_byte_82599(struct ixgbe_atr_input *input,
4961 +s32 ixgbe_atr_get_vm_pool_82599(struct ixgbe_atr_input *input, u8 *vm_pool);
4962 +s32 ixgbe_atr_get_l4type_82599(struct ixgbe_atr_input *input, u8 *l4type);
4963 +s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
4965 +s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
4967 +s32 ixgbe_write_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 eeprom_data);
4968 +s32 ixgbe_get_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr);
4969 +s32 ixgbe_set_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr);
4970 +s32 ixgbe_get_device_caps(struct ixgbe_hw *hw, u16 *device_caps);
4971 +s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u16 mask);
4972 +void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u16 mask);
4973 +s32 ixgbe_get_wwn_prefix(struct ixgbe_hw *hw, u16 *wwnn_prefix,
4974 + u16 *wwpn_prefix);
4975 +s32 ixgbe_get_fcoe_boot_status(struct ixgbe_hw *hw, u16 *bs);
4978 +#endif /* _IXGBE_API_H_ */
4979 diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_common.c linux-2.6.22-50/drivers/net/ixgbe/ixgbe_common.c
4980 --- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_common.c 1969-12-31 19:00:00.000000000 -0500
4981 +++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_common.c 2010-08-25 17:56:26.000000000 -0400
4983 +/*******************************************************************************
4985 + Intel 10 Gigabit PCI Express Linux driver
4986 + Copyright(c) 1999 - 2010 Intel Corporation.
4988 + This program is free software; you can redistribute it and/or modify it
4989 + under the terms and conditions of the GNU General Public License,
4990 + version 2, as published by the Free Software Foundation.
4992 + This program is distributed in the hope it will be useful, but WITHOUT
4993 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
4994 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
4997 + You should have received a copy of the GNU General Public License along with
4998 + this program; if not, write to the Free Software Foundation, Inc.,
4999 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
5001 + The full GNU General Public License is included in this distribution in
5002 + the file called "COPYING".
5004 + Contact Information:
5005 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
5006 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
5008 +*******************************************************************************/
5010 +#include "ixgbe_common.h"
5011 +#include "ixgbe_phy.h"
5012 +#include "ixgbe_api.h"
5014 +static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
5015 +static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
5016 +static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
5017 +static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
5018 +static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
5019 +static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
5021 +static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
5022 +static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
5023 +static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
5024 +static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
5026 +static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
5027 +static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
5028 + u16 *san_mac_offset);
5029 +static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw);
5030 +static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw);
5031 +static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw);
5032 +static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
5033 +static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
5034 + u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
5036 +s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan);
5039 + * ixgbe_init_ops_generic - Inits function ptrs
5040 + * @hw: pointer to the hardware structure
5042 + * Initialize the function pointers.
5044 +s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
5046 + struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
5047 + struct ixgbe_mac_info *mac = &hw->mac;
5048 + u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
5051 + eeprom->ops.init_params = &ixgbe_init_eeprom_params_generic;
5052 + /* If EEPROM is valid (bit 8 = 1), use EERD; otherwise use bit-bang */
5053 + if (eec & (1 << 8))
5054 + eeprom->ops.read = &ixgbe_read_eerd_generic;
5056 + eeprom->ops.read = &ixgbe_read_eeprom_bit_bang_generic;
5057 + eeprom->ops.write = &ixgbe_write_eeprom_generic;
5058 + eeprom->ops.validate_checksum =
5059 + &ixgbe_validate_eeprom_checksum_generic;
5060 + eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_generic;
5061 + eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_generic;
5064 + mac->ops.init_hw = &ixgbe_init_hw_generic;
5065 + mac->ops.reset_hw = NULL;
5066 + mac->ops.start_hw = &ixgbe_start_hw_generic;
5067 + mac->ops.clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic;
5068 + mac->ops.get_media_type = NULL;
5069 + mac->ops.get_supported_physical_layer = NULL;
5070 + mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_generic;
5071 + mac->ops.get_mac_addr = &ixgbe_get_mac_addr_generic;
5072 + mac->ops.stop_adapter = &ixgbe_stop_adapter_generic;
5073 + mac->ops.get_bus_info = &ixgbe_get_bus_info_generic;
5074 + mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie;
5075 + mac->ops.acquire_swfw_sync = &ixgbe_acquire_swfw_sync;
5076 + mac->ops.release_swfw_sync = &ixgbe_release_swfw_sync;
5079 + mac->ops.led_on = &ixgbe_led_on_generic;
5080 + mac->ops.led_off = &ixgbe_led_off_generic;
5081 + mac->ops.blink_led_start = &ixgbe_blink_led_start_generic;
5082 + mac->ops.blink_led_stop = &ixgbe_blink_led_stop_generic;
5084 + /* RAR, Multicast, VLAN */
5085 + mac->ops.set_rar = &ixgbe_set_rar_generic;
5086 + mac->ops.clear_rar = &ixgbe_clear_rar_generic;
5087 + mac->ops.insert_mac_addr = NULL;
5088 + mac->ops.set_vmdq = NULL;
5089 + mac->ops.clear_vmdq = NULL;
5090 + mac->ops.init_rx_addrs = &ixgbe_init_rx_addrs_generic;
5091 + mac->ops.update_uc_addr_list = &ixgbe_update_uc_addr_list_generic;
5092 + mac->ops.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic;
5093 + mac->ops.enable_mc = &ixgbe_enable_mc_generic;
5094 + mac->ops.disable_mc = &ixgbe_disable_mc_generic;
5095 + mac->ops.clear_vfta = NULL;
5096 + mac->ops.set_vfta = NULL;
5097 + mac->ops.init_uta_tables = NULL;
5099 + /* Flow Control */
5100 + mac->ops.fc_enable = &ixgbe_fc_enable_generic;
5103 + mac->ops.get_link_capabilities = NULL;
5104 + mac->ops.setup_link = NULL;
5105 + mac->ops.check_link = NULL;
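The NULL entries above are placeholders for the MAC-specific init routines to override; a sketch of the pattern (the 82599 op names here are assumptions following the file's naming convention, not quoted from this patch):

        s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
        {
                struct ixgbe_mac_info *mac = &hw->mac;
                s32 ret_val;

                /* start from the generic table, then fill the per-MAC ops */
                ret_val = ixgbe_init_ops_generic(hw);
                mac->ops.get_media_type = &ixgbe_get_media_type_82599;
                mac->ops.reset_hw = &ixgbe_reset_hw_82599;
                mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
                return ret_val;
        }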
5111 + * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
5112 + * @hw: pointer to hardware structure
5114 + * Starts the hardware by filling the bus info structure and media type,
5115 + * clearing all on-chip counters, initializing the receive address registers,
5116 + * multicast table and VLAN filter table, and calling the routine to set up
5117 + * link and flow control; leaves transmit and receive units disabled and uninitialized.
5119 +s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
5124 + /* Set the media type */
5125 + hw->phy.media_type = hw->mac.ops.get_media_type(hw);
5127 + /* PHY ops initialization must be done in reset_hw() */
5129 + /* Clear the VLAN filter table */
5130 + hw->mac.ops.clear_vfta(hw);
5132 + /* Clear statistics registers */
5133 + hw->mac.ops.clear_hw_cntrs(hw);
5135 + /* Set No Snoop Disable */
5136 + ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
5137 + ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
5138 + IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
5139 + IXGBE_WRITE_FLUSH(hw);
5141 + /* Setup flow control */
5142 + ixgbe_setup_fc(hw, 0);
5144 + /* Clear adapter stopped flag */
5145 + hw->adapter_stopped = false;
5151 + * ixgbe_init_hw_generic - Generic hardware initialization
5152 + * @hw: pointer to hardware structure
5154 + * Initializes the hardware by resetting it, then fills the bus info
5155 + * structure and media type, clears all on-chip counters, initializes the
5156 + * receive address registers, multicast table and VLAN filter table, calls the
5157 + * routine to set up link and flow control settings, and leaves the transmit
5158 + * and receive units disabled and uninitialized.
5160 +s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
5164 + /* Reset the hardware */
5165 + status = hw->mac.ops.reset_hw(hw);
5167 + if (status == 0) {
5168 + /* Start the HW */
5169 + status = hw->mac.ops.start_hw(hw);
5176 + * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
5177 + * @hw: pointer to hardware structure
5179 + * Clears all hardware statistics counters by reading them from the hardware.
5180 + * Statistics counters are clear on read.
5182 +s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
5186 + IXGBE_READ_REG(hw, IXGBE_CRCERRS);
5187 + IXGBE_READ_REG(hw, IXGBE_ILLERRC);
5188 + IXGBE_READ_REG(hw, IXGBE_ERRBC);
5189 + IXGBE_READ_REG(hw, IXGBE_MSPDC);
5190 + for (i = 0; i < 8; i++)
5191 + IXGBE_READ_REG(hw, IXGBE_MPC(i));
5193 + IXGBE_READ_REG(hw, IXGBE_MLFC);
5194 + IXGBE_READ_REG(hw, IXGBE_MRFC);
5195 + IXGBE_READ_REG(hw, IXGBE_RLEC);
5196 + IXGBE_READ_REG(hw, IXGBE_LXONTXC);
5197 + IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
5198 + if (hw->mac.type >= ixgbe_mac_82599EB) {
5199 + IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
5200 + IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
5202 + IXGBE_READ_REG(hw, IXGBE_LXONRXC);
5203 + IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
5206 + for (i = 0; i < 8; i++) {
5207 + IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
5208 + IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
5209 + if (hw->mac.type >= ixgbe_mac_82599EB) {
5210 + IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
5211 + IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
5213 + IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
5214 + IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
5217 + if (hw->mac.type >= ixgbe_mac_82599EB)
5218 + for (i = 0; i < 8; i++)
5219 + IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
5220 + IXGBE_READ_REG(hw, IXGBE_PRC64);
5221 + IXGBE_READ_REG(hw, IXGBE_PRC127);
5222 + IXGBE_READ_REG(hw, IXGBE_PRC255);
5223 + IXGBE_READ_REG(hw, IXGBE_PRC511);
5224 + IXGBE_READ_REG(hw, IXGBE_PRC1023);
5225 + IXGBE_READ_REG(hw, IXGBE_PRC1522);
5226 + IXGBE_READ_REG(hw, IXGBE_GPRC);
5227 + IXGBE_READ_REG(hw, IXGBE_BPRC);
5228 + IXGBE_READ_REG(hw, IXGBE_MPRC);
5229 + IXGBE_READ_REG(hw, IXGBE_GPTC);
5230 + IXGBE_READ_REG(hw, IXGBE_GORCL);
5231 + IXGBE_READ_REG(hw, IXGBE_GORCH);
5232 + IXGBE_READ_REG(hw, IXGBE_GOTCL);
5233 + IXGBE_READ_REG(hw, IXGBE_GOTCH);
5234 + for (i = 0; i < 8; i++)
5235 + IXGBE_READ_REG(hw, IXGBE_RNBC(i));
5236 + IXGBE_READ_REG(hw, IXGBE_RUC);
5237 + IXGBE_READ_REG(hw, IXGBE_RFC);
5238 + IXGBE_READ_REG(hw, IXGBE_ROC);
5239 + IXGBE_READ_REG(hw, IXGBE_RJC);
5240 + IXGBE_READ_REG(hw, IXGBE_MNGPRC);
5241 + IXGBE_READ_REG(hw, IXGBE_MNGPDC);
5242 + IXGBE_READ_REG(hw, IXGBE_MNGPTC);
5243 + IXGBE_READ_REG(hw, IXGBE_TORL);
5244 + IXGBE_READ_REG(hw, IXGBE_TORH);
5245 + IXGBE_READ_REG(hw, IXGBE_TPR);
5246 + IXGBE_READ_REG(hw, IXGBE_TPT);
5247 + IXGBE_READ_REG(hw, IXGBE_PTC64);
5248 + IXGBE_READ_REG(hw, IXGBE_PTC127);
5249 + IXGBE_READ_REG(hw, IXGBE_PTC255);
5250 + IXGBE_READ_REG(hw, IXGBE_PTC511);
5251 + IXGBE_READ_REG(hw, IXGBE_PTC1023);
5252 + IXGBE_READ_REG(hw, IXGBE_PTC1522);
5253 + IXGBE_READ_REG(hw, IXGBE_MPTC);
5254 + IXGBE_READ_REG(hw, IXGBE_BPTC);
5255 + for (i = 0; i < 16; i++) {
5256 + IXGBE_READ_REG(hw, IXGBE_QPRC(i));
5257 + IXGBE_READ_REG(hw, IXGBE_QPTC(i));
5258 + if (hw->mac.type >= ixgbe_mac_82599EB) {
5259 + IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
5260 + IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
5261 + IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
5262 + IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
5263 + IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
5265 + IXGBE_READ_REG(hw, IXGBE_QBRC(i));
5266 + IXGBE_READ_REG(hw, IXGBE_QBTC(i));
5275 + * ixgbe_read_pba_string_generic - Reads part number string from EEPROM
5276 + * @hw: pointer to hardware structure
5277 + * @pba_num: stores the part number string from the EEPROM
5278 + * @pba_num_size: part number string buffer length
5280 + * Reads the part number string from the EEPROM.
5281 + * Returns the expected buffer size in pba_num_size if the passed-in buffer was too small.
5284 +s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
5285 + u32 *pba_num_size)
5288 + u32 required_pba_num_size;
5294 + if (pba_num_size == NULL) {
5295 + hw_dbg(hw, "PBA string buffer size was null\n");
5296 + return IXGBE_ERR_INVALID_ARGUMENT;
5299 + ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
5301 + hw_dbg(hw, "NVM Read Error\n");
5303 + } else if (data != IXGBE_PBANUM_PTR_GUARD) {
5304 + hw_dbg(hw, "NVM PBA number is not stored as string\n");
5305 + return IXGBE_NOT_IMPLEMENTED;
5308 + ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pointer);
5310 + hw_dbg(hw, "NVM Read Error\n");
5314 + ret_val = hw->eeprom.ops.read(hw, pointer, &length);
5316 + hw_dbg(hw, "NVM Read Error\n");
5320 + if (length == 0xFFFF || length == 0) {
5321 + hw_dbg(hw, "NVM PBA number section invalid length\n");
5322 + return IXGBE_ERR_PBA_SECTION;
5324 + required_pba_num_size = (((u32)length - 1) * 2) + 1;
5326 + /* check if pba_num buffer is big enough */
5327 + if ((pba_num == NULL) || (*pba_num_size < required_pba_num_size)) {
5328 + hw_dbg(hw, "PBA string buffer too small\n");
5329 + *pba_num_size = required_pba_num_size;
5330 + return IXGBE_ERR_NO_SPACE;
5333 + for (offset = 1; offset < length; offset++) {
5334 + ret_val = hw->eeprom.ops.read(hw, pointer + offset, &data);
5336 + hw_dbg(hw, "NVM Read Error\n");
5339 + pba_num[(offset - 1) * 2] = (u8)(data >> 8);
5340 + pba_num[((offset - 1) * 2) + 1] = (u8)(data & 0xFF);
5342 + pba_num[(length - 1) * 2] = '\0';
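The two-call pattern implied by the size handling above, sketched from the caller's side (the buffer size is illustrative):

        u8 pba[16];
        u32 pba_size = sizeof(pba);

        if (ixgbe_read_pba_string(hw, pba, &pba_size) == IXGBE_ERR_NO_SPACE) {
                /* pba_size now holds the required length; retry with a
                 * buffer at least that large (allocation elided) */
        }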
5348 + * ixgbe_read_pba_num_generic - Reads part number from EEPROM
5349 + * @hw: pointer to hardware structure
5350 + * @pba_num: stores the part number from the EEPROM
5352 + * Reads the part number from the EEPROM.
5354 +s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
5359 + ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
5361 + hw_dbg(hw, "NVM Read Error\n");
5363 + } else if (data == IXGBE_PBANUM_PTR_GUARD) {
5364 + hw_dbg(hw, "NVM Not supported\n");
5365 + return IXGBE_NOT_IMPLEMENTED;
5367 + *pba_num = (u32)(data << 16);
5369 + ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
5371 + hw_dbg(hw, "NVM Read Error\n");
5380 + * ixgbe_get_mac_addr_generic - Generic get MAC address
5381 + * @hw: pointer to hardware structure
5382 + * @mac_addr: Adapter MAC address
5384 + * Reads the adapter's MAC address from the first Receive Address Register
5385 + * (RAR0). A reset of the adapter must be performed prior to calling this
5386 + * function in order for the MAC address to have been loaded from the EEPROM into RAR0.
5388 +s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
5394 + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
5395 + rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
5397 + for (i = 0; i < 4; i++)
5398 + mac_addr[i] = (u8)(rar_low >> (i*8));
5400 + for (i = 0; i < 2; i++)
5401 + mac_addr[i+4] = (u8)(rar_high >> (i*8));
5407 + * ixgbe_get_bus_info_generic - Generic set PCI bus info
5408 + * @hw: pointer to hardware structure
5410 + * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure
5412 +s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
5414 + struct ixgbe_mac_info *mac = &hw->mac;
5417 + hw->bus.type = ixgbe_bus_type_pci_express;
5419 + /* Get the negotiated link width and speed from PCI config space */
5420 + link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
5422 + switch (link_status & IXGBE_PCI_LINK_WIDTH) {
5423 + case IXGBE_PCI_LINK_WIDTH_1:
5424 + hw->bus.width = ixgbe_bus_width_pcie_x1;
5426 + case IXGBE_PCI_LINK_WIDTH_2:
5427 + hw->bus.width = ixgbe_bus_width_pcie_x2;
5429 + case IXGBE_PCI_LINK_WIDTH_4:
5430 + hw->bus.width = ixgbe_bus_width_pcie_x4;
5432 + case IXGBE_PCI_LINK_WIDTH_8:
5433 + hw->bus.width = ixgbe_bus_width_pcie_x8;
5436 + hw->bus.width = ixgbe_bus_width_unknown;
5440 + switch (link_status & IXGBE_PCI_LINK_SPEED) {
5441 + case IXGBE_PCI_LINK_SPEED_2500:
5442 + hw->bus.speed = ixgbe_bus_speed_2500;
5444 + case IXGBE_PCI_LINK_SPEED_5000:
5445 + hw->bus.speed = ixgbe_bus_speed_5000;
5448 + hw->bus.speed = ixgbe_bus_speed_unknown;
5452 + mac->ops.set_lan_id(hw);
5458 + * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
5459 + * @hw: pointer to the HW structure
5461 + * Determines the LAN function id by reading memory-mapped registers
5462 + * and swaps the port value if requested.
5464 +void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
5466 + struct ixgbe_bus_info *bus = &hw->bus;
5469 + reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
5470 + bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
5471 + bus->lan_id = bus->func;
5473 + /* check for a port swap */
5474 + reg = IXGBE_READ_REG(hw, IXGBE_FACTPS);
5475 + if (reg & IXGBE_FACTPS_LFS)
5480 + * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
5481 + * @hw: pointer to hardware structure
5483 + * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
5484 + * disables transmit and receive units. The adapter_stopped flag is used by
5485 + * the shared code and drivers to determine if the adapter is in a stopped
5486 + * state and should not touch the hardware.
5488 +s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
5490 + u32 number_of_queues;
5495 + * Set the adapter_stopped flag so other driver functions stop touching
5498 + hw->adapter_stopped = true;
5500 + /* Disable the receive unit */
5501 + reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
5502 + reg_val &= ~(IXGBE_RXCTRL_RXEN);
5503 + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val);
5504 + IXGBE_WRITE_FLUSH(hw);
5507 + /* Clear interrupt mask to stop from interrupts being generated */
5508 + IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
5510 + /* Clear any pending interrupts */
5511 + IXGBE_READ_REG(hw, IXGBE_EICR);
5513 + /* Disable the transmit unit. Each queue must be disabled. */
5514 + number_of_queues = hw->mac.max_tx_queues;
5515 + for (i = 0; i < number_of_queues; i++) {
5516 + reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
5517 + if (reg_val & IXGBE_TXDCTL_ENABLE) {
5518 + reg_val &= ~IXGBE_TXDCTL_ENABLE;
5519 + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), reg_val);
5524 + * Prevent the PCI-E bus from from hanging by disabling PCI-E master
5525 + * access and verify no pending requests
5527 + ixgbe_disable_pcie_master(hw);
5533 + * ixgbe_led_on_generic - Turns on the software controllable LEDs.
5534 + * @hw: pointer to hardware structure
5535 + * @index: led number to turn on
5537 +s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
5539 + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
5541 + /* To turn on the LED, set mode to ON. */
5542 + led_reg &= ~IXGBE_LED_MODE_MASK(index);
5543 + led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
5544 + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
5545 + IXGBE_WRITE_FLUSH(hw);
5551 + * ixgbe_led_off_generic - Turns off the software controllable LEDs.
5552 + * @hw: pointer to hardware structure
5553 + * @index: led number to turn off
5555 +s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
5557 + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
5559 + /* To turn off the LED, set mode to OFF. */
5560 + led_reg &= ~IXGBE_LED_MODE_MASK(index);
5561 + led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
5562 + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
5563 + IXGBE_WRITE_FLUSH(hw);
5569 + * ixgbe_init_eeprom_params_generic - Initialize EEPROM params
5570 + * @hw: pointer to hardware structure
5572 + * Initializes the EEPROM parameters ixgbe_eeprom_info within the
5573 + * ixgbe_hw struct in order to set up EEPROM access.
5575 +s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
5577 + struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
5581 + if (eeprom->type == ixgbe_eeprom_uninitialized) {
5582 + eeprom->type = ixgbe_eeprom_none;
5583 + /* Set default semaphore delay to 10ms which is a well
5585 + eeprom->semaphore_delay = 10;
5588 + * Check for EEPROM present first.
5589 + * If not present leave as none
5591 + eec = IXGBE_READ_REG(hw, IXGBE_EEC);
5592 + if (eec & IXGBE_EEC_PRES) {
5593 + eeprom->type = ixgbe_eeprom_spi;
5596 + * SPI EEPROM is assumed here. This code would need to
5597 + * change if a future EEPROM is not SPI.
5599 + eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
5600 + IXGBE_EEC_SIZE_SHIFT);
5601 + eeprom->word_size = 1 << (eeprom_size +
5602 + IXGBE_EEPROM_WORD_SIZE_BASE_SHIFT);
5605 + if (eec & IXGBE_EEC_ADDR_SIZE)
5606 + eeprom->address_bits = 16;
5608 + eeprom->address_bits = 8;
5609 + hw_dbg(hw, "Eeprom params: type = %d, size = %d, address bits: "
5610 + "%d\n", eeprom->type, eeprom->word_size,
5611 + eeprom->address_bits);
5618 + * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
5619 + * @hw: pointer to hardware structure
5620 + * @offset: offset within the EEPROM to be written to
5621 + * @data: 16 bit word to be written to the EEPROM
5623 + * If ixgbe_eeprom_update_checksum is not called after this function, the
5624 + * EEPROM will most likely contain an invalid checksum.
5626 +s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
5629 + u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
5631 + hw->eeprom.ops.init_params(hw);
5633 + if (offset >= hw->eeprom.word_size) {
5634 + status = IXGBE_ERR_EEPROM;
5638 + /* Prepare the EEPROM for writing */
5639 + status = ixgbe_acquire_eeprom(hw);
5641 + if (status == 0) {
5642 + if (ixgbe_ready_eeprom(hw) != 0) {
5643 + ixgbe_release_eeprom(hw);
5644 + status = IXGBE_ERR_EEPROM;
5648 + if (status == 0) {
5649 + ixgbe_standby_eeprom(hw);
5651 + /* Send the WRITE ENABLE command (8 bit opcode ) */
5652 + ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_WREN_OPCODE_SPI,
5653 + IXGBE_EEPROM_OPCODE_BITS);
5655 + ixgbe_standby_eeprom(hw);
5658 + * Some SPI eeproms use the 8th address bit embedded in the
5661 + if ((hw->eeprom.address_bits == 8) && (offset >= 128))
5662 + write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
5664 + /* Send the Write command (8-bit opcode + addr) */
5665 + ixgbe_shift_out_eeprom_bits(hw, write_opcode,
5666 + IXGBE_EEPROM_OPCODE_BITS);
5667 + ixgbe_shift_out_eeprom_bits(hw, (u16)(offset*2),
5668 + hw->eeprom.address_bits);
5670 + /* Send the data */
5671 + data = (data >> 8) | (data << 8);
5672 + ixgbe_shift_out_eeprom_bits(hw, data, 16);
5673 + ixgbe_standby_eeprom(hw);
5675 + /* Done with writing - release the EEPROM */
5676 + ixgbe_release_eeprom(hw);
5684 + * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
5685 + * @hw: pointer to hardware structure
5686 + * @offset: offset within the EEPROM to be read
5687 + * @data: read 16 bit value from EEPROM
5689 + * Reads 16 bit value from EEPROM through bit-bang method
5691 +s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
5696 + u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
5698 + hw->eeprom.ops.init_params(hw);
5700 + if (offset >= hw->eeprom.word_size) {
5701 + status = IXGBE_ERR_EEPROM;
5705 + /* Prepare the EEPROM for reading */
5706 + status = ixgbe_acquire_eeprom(hw);
5708 + if (status == 0) {
5709 + if (ixgbe_ready_eeprom(hw) != 0) {
5710 + ixgbe_release_eeprom(hw);
5711 + status = IXGBE_ERR_EEPROM;
5715 + if (status == 0) {
5716 + ixgbe_standby_eeprom(hw);
5719 + * Some SPI eeproms use the 8th address bit embedded in the
5722 + if ((hw->eeprom.address_bits == 8) && (offset >= 128))
5723 + read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
5725 + /* Send the READ command (opcode + addr) */
5726 + ixgbe_shift_out_eeprom_bits(hw, read_opcode,
5727 + IXGBE_EEPROM_OPCODE_BITS);
5728 + ixgbe_shift_out_eeprom_bits(hw, (u16)(offset*2),
5729 + hw->eeprom.address_bits);
5731 + /* Read the data. */
5732 + word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
5733 + *data = (word_in >> 8) | (word_in << 8);
5735 + /* End this read operation */
5736 + ixgbe_release_eeprom(hw);
5744 + * ixgbe_read_eerd_generic - Read EEPROM word using EERD
5745 + * @hw: pointer to hardware structure
5746 + * @offset: offset of word in the EEPROM to read
5747 + * @data: word read from the EEPROM
5749 + * Reads a 16 bit word from the EEPROM using the EERD register.
5751 +s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
5756 + hw->eeprom.ops.init_params(hw);
5758 + if (offset >= hw->eeprom.word_size) {
5759 + status = IXGBE_ERR_EEPROM;
5763 + eerd = (offset << IXGBE_EEPROM_RW_ADDR_SHIFT) +
5764 + IXGBE_EEPROM_RW_REG_START;
5766 + IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
5767 + status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
5770 + *data = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
5771 + IXGBE_EEPROM_RW_REG_DATA);
5773 + hw_dbg(hw, "Eeprom read timed out\n");
5780 + * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
5781 + * @hw: pointer to hardware structure
5782 + * @ee_reg: EEPROM flag for polling
5784 + * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
5785 + * read or write is done respectively.
5787 +s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
5791 + s32 status = IXGBE_ERR_EEPROM;
5793 + for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
5794 + if (ee_reg == IXGBE_NVM_POLL_READ)
5795 + reg = IXGBE_READ_REG(hw, IXGBE_EERD);
5797 + reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
5799 + if (reg & IXGBE_EEPROM_RW_REG_DONE) {
5809 + * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
5810 + * @hw: pointer to hardware structure
5812 + * Prepares EEPROM for access using bit-bang method. This function should
5813 + * be called before issuing a command to the EEPROM.
5815 +static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
5821 + if (ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
5822 + status = IXGBE_ERR_SWFW_SYNC;
5824 + if (status == 0) {
5825 + eec = IXGBE_READ_REG(hw, IXGBE_EEC);
5827 + /* Request EEPROM Access */
5828 + eec |= IXGBE_EEC_REQ;
5829 + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
5831 + for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
5832 + eec = IXGBE_READ_REG(hw, IXGBE_EEC);
5833 + if (eec & IXGBE_EEC_GNT)
5838 + /* Release if grant not acquired */
5839 + if (!(eec & IXGBE_EEC_GNT)) {
5840 + eec &= ~IXGBE_EEC_REQ;
5841 + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
5842 + hw_dbg(hw, "Could not acquire EEPROM grant\n");
5844 + ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
5845 + status = IXGBE_ERR_EEPROM;
5849 + /* Setup EEPROM for Read/Write */
5850 + if (status == 0) {
5851 + /* Clear CS and SK */
5852 + eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
5853 + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
5854 + IXGBE_WRITE_FLUSH(hw);
5861 + * ixgbe_get_eeprom_semaphore - Get hardware semaphore
5862 + * @hw: pointer to hardware structure
5864 + * Sets the hardware semaphores so EEPROM access can occur for bit-bang method
5866 +static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
5868 + s32 status = IXGBE_ERR_EEPROM;
5869 + u32 timeout = 2000;
5873 + /* Get SMBI software semaphore between device drivers first */
5874 + for (i = 0; i < timeout; i++) {
5876 + * If the SMBI bit is 0 when we read it, then the bit will be
5877 + * set and we have the semaphore
5879 + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
5880 + if (!(swsm & IXGBE_SWSM_SMBI)) {
5887 + /* Now get the semaphore between SW/FW through the SWESMBI bit */
5888 + if (status == 0) {
5889 + for (i = 0; i < timeout; i++) {
5890 + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
5892 + /* Set the SW EEPROM semaphore bit to request access */
5893 + swsm |= IXGBE_SWSM_SWESMBI;
5894 + IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
5897 + * If we set the bit successfully then we got the
5900 + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
5901 + if (swsm & IXGBE_SWSM_SWESMBI)
5908 + * Release semaphores and return error if SW EEPROM semaphore
5909 + * was not granted because we don't have access to the EEPROM
5911 + if (i >= timeout) {
5912 + hw_dbg(hw, "SWESMBI Software EEPROM semaphore "
5913 + "not granted.\n");
5914 + ixgbe_release_eeprom_semaphore(hw);
5915 + status = IXGBE_ERR_EEPROM;
5918 + hw_dbg(hw, "Software semaphore SMBI between device drivers "
5919 + "not granted.\n");
5926 + * ixgbe_release_eeprom_semaphore - Release hardware semaphore
5927 + * @hw: pointer to hardware structure
5929 + * This function clears hardware semaphore bits.
5931 +static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
5935 + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
5937 + /* Release both semaphores by writing 0 to the bits SWESMBI
5940 + swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
5941 + IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
5942 + IXGBE_WRITE_FLUSH(hw);
5946 + * ixgbe_ready_eeprom - Polls for EEPROM ready
5947 + * @hw: pointer to hardware structure
5949 +static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
5956 + * Read "Status Register" repeatedly until the LSB is cleared. The
5957 + * EEPROM will signal that the command has been completed by clearing
5958 + * bit 0 of the internal status register. If it's not cleared within
5959 + * 5 milliseconds, then error out.
5961 + for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
5962 + ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
5963 + IXGBE_EEPROM_OPCODE_BITS);
5964 + spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
5965 + if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
5969 + ixgbe_standby_eeprom(hw);
5973 + * On some parts, SPI write time could vary from 0-20mSec on 3.3V
5974 + * devices (and only 0-5mSec on 5V devices)
5976 + if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
5977 + hw_dbg(hw, "SPI EEPROM Status error\n");
5978 + status = IXGBE_ERR_EEPROM;
5985 + * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
5986 + * @hw: pointer to hardware structure
5988 +static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
5992 + eec = IXGBE_READ_REG(hw, IXGBE_EEC);
5994 + /* Toggle CS to flush commands */
5995 + eec |= IXGBE_EEC_CS;
5996 + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
5997 + IXGBE_WRITE_FLUSH(hw);
5999 + eec &= ~IXGBE_EEC_CS;
6000 + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
6001 + IXGBE_WRITE_FLUSH(hw);
6006 + * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
6007 + * @hw: pointer to hardware structure
6008 + * @data: data to send to the EEPROM
6009 + * @count: number of bits to shift out
6011 +static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
6018 + eec = IXGBE_READ_REG(hw, IXGBE_EEC);
6021 + * Mask is used to shift "count" bits of "data" out to the EEPROM
6022 + * one bit at a time. Determine the starting bit based on count
6024 + mask = 0x01 << (count - 1);
6026 + for (i = 0; i < count; i++) {
6028 + * A "1" is shifted out to the EEPROM by setting bit "DI" to a
6029 + * "1", and then raising and then lowering the clock (the SK
6030 + * bit controls the clock input to the EEPROM). A "0" is
6031 + * shifted out to the EEPROM by setting "DI" to "0" and then
6032 + * raising and then lowering the clock.
6035 + eec |= IXGBE_EEC_DI;
6037 + eec &= ~IXGBE_EEC_DI;
6039 + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
6040 + IXGBE_WRITE_FLUSH(hw);
6044 + ixgbe_raise_eeprom_clk(hw, &eec);
6045 + ixgbe_lower_eeprom_clk(hw, &eec);
6048 + * Shift mask to signify next bit of data to shift in to the
6054 + /* We leave the "DI" bit set to "0" when we leave this routine. */
6055 + eec &= ~IXGBE_EEC_DI;
6056 + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
6057 + IXGBE_WRITE_FLUSH(hw);
6061 + * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
6062 + * @hw: pointer to hardware structure
6064 +static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
6071 + * In order to read a register from the EEPROM, we need to shift
6072 + * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
6073 + * the clock input to the EEPROM (setting the SK bit), and then reading
6074 + * the value of the "DO" bit. During this "shifting in" process the
6075 + * "DI" bit should always be clear.
6077 + eec = IXGBE_READ_REG(hw, IXGBE_EEC);
6079 + eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
6081 + for (i = 0; i < count; i++) {
6083 + ixgbe_raise_eeprom_clk(hw, &eec);
6085 + eec = IXGBE_READ_REG(hw, IXGBE_EEC);
6087 + eec &= ~(IXGBE_EEC_DI);
6088 + if (eec & IXGBE_EEC_DO)
6091 + ixgbe_lower_eeprom_clk(hw, &eec);
6098 + * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
6099 + * @hw: pointer to hardware structure
6100 + * @eec: EEC register's current value
6102 +static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
6105 + * Raise the clock input to the EEPROM
6106 + * (setting the SK bit), then delay
6108 + *eec = *eec | IXGBE_EEC_SK;
6109 + IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
6110 + IXGBE_WRITE_FLUSH(hw);
6115 + * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
6116 + * @hw: pointer to hardware structure
6117 + * @eecd: EECD's current value
6119 +static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
6122 + * Lower the clock input to the EEPROM (clearing the SK bit), then
6125 + *eec = *eec & ~IXGBE_EEC_SK;
6126 + IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
6127 + IXGBE_WRITE_FLUSH(hw);
6132 + * ixgbe_release_eeprom - Release EEPROM, release semaphores
6133 + * @hw: pointer to hardware structure
6135 +static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
6139 + eec = IXGBE_READ_REG(hw, IXGBE_EEC);
6141 + eec |= IXGBE_EEC_CS; /* Pull CS high */
6142 + eec &= ~IXGBE_EEC_SK; /* Lower SCK */
6144 + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
6145 + IXGBE_WRITE_FLUSH(hw);
6149 + /* Stop requesting EEPROM access */
6150 + eec &= ~IXGBE_EEC_REQ;
6151 + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
6153 + ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
6155 + /* Delay before attempt to obtain semaphore again to allow FW access */
6156 + msleep(hw->eeprom.semaphore_delay);
6160 + * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
6161 + * @hw: pointer to hardware structure
6163 +u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
6172 + /* Include 0x0-0x3F in the checksum */
6173 + for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
6174 + if (hw->eeprom.ops.read(hw, i, &word) != 0) {
6175 + hw_dbg(hw, "EEPROM read failed\n");
6181 + /* Include all data from pointers except for the fw pointer */
6182 + for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
6183 + hw->eeprom.ops.read(hw, i, &pointer);
6185 + /* Make sure the pointer seems valid */
6186 + if (pointer != 0xFFFF && pointer != 0) {
6187 + hw->eeprom.ops.read(hw, pointer, &length);
6189 + if (length != 0xFFFF && length != 0) {
6190 + for (j = pointer+1; j <= pointer+length; j++) {
6191 + hw->eeprom.ops.read(hw, j, &word);
6198 + checksum = (u16)IXGBE_EEPROM_SUM - checksum;
6204 + * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
6205 + * @hw: pointer to hardware structure
6206 + * @checksum_val: calculated checksum
6208 + * Performs checksum calculation and validates the EEPROM checksum. If the
6209 + * caller does not need checksum_val, the value can be NULL.
6211 +s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
6212 + u16 *checksum_val)
6216 + u16 read_checksum = 0;
6219 + * Read the first word from the EEPROM. If this times out or fails, do
6220 + * not continue or we could be in for a very long wait while every
6221 + * EEPROM read fails
6223 + status = hw->eeprom.ops.read(hw, 0, &checksum);
6225 + if (status == 0) {
6226 + checksum = hw->eeprom.ops.calc_checksum(hw);
6228 + hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
6231 + * Verify read checksum from EEPROM is the same as
6232 + * calculated checksum
6234 + if (read_checksum != checksum)
6235 + status = IXGBE_ERR_EEPROM_CHECKSUM;
6237 + /* If the user cares, return the calculated checksum */
6239 + *checksum_val = checksum;
6241 + hw_dbg(hw, "EEPROM read failed\n");
6248 + * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
6249 + * @hw: pointer to hardware structure
6251 +s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
6257 + * Read the first word from the EEPROM. If this times out or fails, do
6258 + * not continue or we could be in for a very long wait while every
6259 + * EEPROM read fails
6261 + status = hw->eeprom.ops.read(hw, 0, &checksum);
6263 + if (status == 0) {
6264 + checksum = hw->eeprom.ops.calc_checksum(hw);
6265 + status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
6268 + hw_dbg(hw, "EEPROM read failed\n");
6275 + * ixgbe_validate_mac_addr - Validate MAC address
6276 + * @mac_addr: pointer to MAC address.
6278 + * Tests a MAC address to ensure it is a valid Individual Address
6280 +s32 ixgbe_validate_mac_addr(u8 *mac_addr)
6284 + /* Make sure it is not a multicast address */
6285 + if (IXGBE_IS_MULTICAST(mac_addr)) {
6286 + hw_dbg(hw, "MAC address is multicast\n");
6287 + status = IXGBE_ERR_INVALID_MAC_ADDR;
6288 + /* Not a broadcast address */
6289 + } else if (IXGBE_IS_BROADCAST(mac_addr)) {
6290 + hw_dbg(hw, "MAC address is broadcast\n");
6291 + status = IXGBE_ERR_INVALID_MAC_ADDR;
6292 + /* Reject the zero address */
6293 + } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
6294 + mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
6295 + hw_dbg(hw, "MAC address is all zeros\n");
6296 + status = IXGBE_ERR_INVALID_MAC_ADDR;
6302 + * ixgbe_set_rar_generic - Set Rx address register
6303 + * @hw: pointer to hardware structure
6304 + * @index: Receive address register to write
6305 + * @addr: Address to put into receive address register
6306 + * @vmdq: VMDq "set" or "pool" index
6307 + * @enable_addr: set flag that address is active
6309 + * Puts an ethernet address into a receive address register.
6311 +s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
6314 + u32 rar_low, rar_high;
6315 + u32 rar_entries = hw->mac.num_rar_entries;
6317 + /* setup VMDq pool selection before this RAR gets enabled */
6318 + hw->mac.ops.set_vmdq(hw, index, vmdq);
6320 + /* Make sure we are using a valid rar index range */
6321 + if (index < rar_entries) {
6323 + * HW expects these in little endian so we reverse the byte
6324 + * order from network order (big endian) to little endian
6326 + rar_low = ((u32)addr[0] |
6327 + ((u32)addr[1] << 8) |
6328 + ((u32)addr[2] << 16) |
6329 + ((u32)addr[3] << 24));
6331 + * Some parts put the VMDq setting in the extra RAH bits,
6332 + * so save everything except the lower 16 bits that hold part
6333 + * of the address and the address valid bit.
6335 + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
6336 + rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
6337 + rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
6339 + if (enable_addr != 0)
6340 + rar_high |= IXGBE_RAH_AV;
6342 + IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
6343 + IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
6345 + hw_dbg(hw, "RAR index %d is out of range.\n", index);
6352 + * ixgbe_clear_rar_generic - Remove Rx address register
6353 + * @hw: pointer to hardware structure
6354 + * @index: Receive address register to write
6356 + * Clears an ethernet address from a receive address register.
6358 +s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
6361 + u32 rar_entries = hw->mac.num_rar_entries;
6363 + /* Make sure we are using a valid rar index range */
6364 + if (index < rar_entries) {
6366 + * Some parts put the VMDq setting in the extra RAH bits,
6367 + * so save everything except the lower 16 bits that hold part
6368 + * of the address and the address valid bit.
6370 + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
6371 + rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
6373 + IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
6374 + IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
6376 + hw_dbg(hw, "RAR index %d is out of range.\n", index);
6379 + /* clear VMDq pool/queue selection for this RAR */
6380 + hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
6386 + * ixgbe_init_rx_addrs_generic - Initializes receive address filters.
6387 + * @hw: pointer to hardware structure
6389 + * Places the MAC address in receive address register 0 and clears the rest
6390 + * of the receive address registers. Clears the multicast table. Assumes
6391 + * the receiver is in reset when the routine is called.
6393 +s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
6396 + u32 rar_entries = hw->mac.num_rar_entries;
6399 + * If the current mac address is valid, assume it is a software override
6400 + * to the permanent address.
6401 + * Otherwise, use the permanent address from the eeprom.
6403 + if (ixgbe_validate_mac_addr(hw->mac.addr) ==
6404 + IXGBE_ERR_INVALID_MAC_ADDR) {
6405 + /* Get the MAC address from the RAR0 for later reference */
6406 + hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
6408 + hw_dbg(hw, " Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
6409 + hw->mac.addr[0], hw->mac.addr[1],
6411 + hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3],
6412 + hw->mac.addr[4], hw->mac.addr[5]);
6414 + /* Setup the receive address. */
6415 + hw_dbg(hw, "Overriding MAC Address in RAR[0]\n");
6416 + hw_dbg(hw, " New MAC Addr =%.2X %.2X %.2X ",
6417 + hw->mac.addr[0], hw->mac.addr[1],
6419 + hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3],
6420 + hw->mac.addr[4], hw->mac.addr[5]);
6422 + hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
6424 + hw->addr_ctrl.overflow_promisc = 0;
6426 + hw->addr_ctrl.rar_used_count = 1;
6428 + /* Zero out the other receive addresses. */
6429 + hw_dbg(hw, "Clearing RAR[1-%d]\n", rar_entries - 1);
6430 + for (i = 1; i < rar_entries; i++) {
6431 + IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
6432 + IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
6435 + /* Clear the MTA */
6436 + hw->addr_ctrl.mta_in_use = 0;
6437 + IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
6439 + hw_dbg(hw, " Clearing MTA\n");
6440 + for (i = 0; i < hw->mac.mcft_size; i++)
6441 + IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
6443 + ixgbe_init_uta_tables(hw);
6449 + * ixgbe_add_uc_addr - Adds a secondary unicast address.
6450 + * @hw: pointer to hardware structure
6451 + * @addr: new address
6453 + * Adds it to unused receive address register or goes into promiscuous mode.
6455 +void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
6457 + u32 rar_entries = hw->mac.num_rar_entries;
6460 + hw_dbg(hw, " UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
6461 + addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
6464 + * Place this address in the RAR if there is room,
6465 + * else put the controller into promiscuous mode
6467 + if (hw->addr_ctrl.rar_used_count < rar_entries) {
6468 + rar = hw->addr_ctrl.rar_used_count;
6469 + hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
6470 + hw_dbg(hw, "Added a secondary address to RAR[%d]\n", rar);
6471 + hw->addr_ctrl.rar_used_count++;
6473 + hw->addr_ctrl.overflow_promisc++;
6476 + hw_dbg(hw, "ixgbe_add_uc_addr Complete\n");
6480 + * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
6481 + * @hw: pointer to hardware structure
6482 + * @addr_list: the list of new addresses
6483 + * @addr_count: number of addresses
6484 + * @next: iterator function to walk the address list
6486 + * The given list replaces any existing list. Clears the secondary addrs from
6487 + * receive address registers. Uses unused receive address registers for the
6488 + * first secondary addresses, and falls back to promiscuous mode as needed.
6490 + * Drivers using secondary unicast addresses must set user_set_promisc when
6491 + * manually putting the device into promiscuous mode.
6493 +s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
6494 + u32 addr_count, ixgbe_mc_addr_itr next)
6498 + u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
6499 + u32 uc_addr_in_use;
6504 + * Clear accounting of old secondary address list,
6505 + * don't count RAR[0]
6507 + uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
6508 + hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
6509 + hw->addr_ctrl.overflow_promisc = 0;
6511 + /* Zero out the other receive addresses */
6512 + hw_dbg(hw, "Clearing RAR[1-%d]\n", uc_addr_in_use+1);
6513 + for (i = 0; i < uc_addr_in_use; i++) {
6514 + IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
6515 + IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
6518 + /* Add the new addresses */
6519 + for (i = 0; i < addr_count; i++) {
6520 + hw_dbg(hw, " Adding the secondary addresses:\n");
6521 + addr = next(hw, &addr_list, &vmdq);
6522 + ixgbe_add_uc_addr(hw, addr, vmdq);
6525 + if (hw->addr_ctrl.overflow_promisc) {
6526 + /* enable promisc if not already in overflow or set by user */
6527 + if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
6528 + hw_dbg(hw, " Entering address overflow promisc mode\n");
6529 + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
6530 + fctrl |= IXGBE_FCTRL_UPE;
6531 + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
6534 + /* only disable if set by overflow, not by user */
6535 + if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
6536 + hw_dbg(hw, " Leaving address overflow promisc mode\n");
6537 + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
6538 + fctrl &= ~IXGBE_FCTRL_UPE;
6539 + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
6543 + hw_dbg(hw, "ixgbe_update_uc_addr_list_generic Complete\n");
6548 + * ixgbe_mta_vector - Determines bit-vector in multicast table to set
6549 + * @hw: pointer to hardware structure
6550 + * @mc_addr: the multicast address
6552 + * Extracts the 12 bits, from a multicast address, to determine which
6553 + * bit-vector to set in the multicast table. The hardware uses 12 bits, from
6554 + * incoming rx multicast addresses, to determine the bit-vector to check in
6555 + * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
6556 + * by the MO field of the MCSTCTRL. The MO field is set during initialization
6557 + * to mc_filter_type.
6559 +static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
6563 + switch (hw->mac.mc_filter_type) {
6564 + case 0: /* use bits [47:36] of the address */
6565 + vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
6567 + case 1: /* use bits [46:35] of the address */
6568 + vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
6570 + case 2: /* use bits [45:34] of the address */
6571 + vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
6573 + case 3: /* use bits [43:32] of the address */
6574 + vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
6576 + default: /* Invalid mc_filter_type */
6577 + hw_dbg(hw, "MC filter type param set incorrectly\n");
6581 + /* vector can only be 12-bits or boundary will be exceeded */
6587 + * ixgbe_set_mta - Set bit-vector in multicast table
6588 + * @hw: pointer to hardware structure
6589 + * @hash_value: Multicast address hash value
6591 + * Sets the bit-vector in the multicast table.
6593 +void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
6599 + hw->addr_ctrl.mta_in_use++;
6601 + vector = ixgbe_mta_vector(hw, mc_addr);
6602 + hw_dbg(hw, " bit-vector = 0x%03X\n", vector);
6605 + * The MTA is a register array of 128 32-bit registers. It is treated
6606 + * like an array of 4096 bits. We want to set bit
6607 + * BitArray[vector_value]. So we figure out what register the bit is
6608 + * in, read it, OR in the new bit, then write back the new value. The
6609 + * register is determined by the upper 7 bits of the vector value and
6610 + * the bit within that register are determined by the lower 5 bits of
6613 + vector_reg = (vector >> 5) & 0x7F;
6614 + vector_bit = vector & 0x1F;
6615 + hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
6619 + * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
6620 + * @hw: pointer to hardware structure
6621 + * @mc_addr_list: the list of new multicast addresses
6622 + * @mc_addr_count: number of addresses
6623 + * @next: iterator function to walk the multicast address list
6625 + * The given list replaces any existing list. Clears the MC addrs from receive
6626 + * address registers and the multicast table. Uses unused receive address
6627 + * registers for the first multicast addresses, and hashes the rest into the
6628 + * multicast table.
6630 +s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
6631 + u32 mc_addr_count, ixgbe_mc_addr_itr next)
6637 + * Set the new number of MC addresses that we are being requested to
6640 + hw->addr_ctrl.num_mc_addrs = mc_addr_count;
6641 + hw->addr_ctrl.mta_in_use = 0;
6643 + /* Clear mta_shadow */
6644 + hw_dbg(hw, " Clearing MTA\n");
6645 + memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
6647 + /* Update mta_shadow */
6648 + for (i = 0; i < mc_addr_count; i++) {
6649 + hw_dbg(hw, " Adding the multicast addresses:\n");
6650 + ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
6654 + for (i = 0; i < hw->mac.mcft_size; i++)
6655 + IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
6656 + hw->mac.mta_shadow[i]);
6658 + if (hw->addr_ctrl.mta_in_use > 0)
6659 + IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
6660 + IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
6662 + hw_dbg(hw, "ixgbe_update_mc_addr_list_generic Complete\n");
6667 + * ixgbe_enable_mc_generic - Enable multicast address in RAR
6668 + * @hw: pointer to hardware structure
6670 + * Enables multicast address in RAR and the use of the multicast hash table.
6672 +s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
6674 + struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
6676 + if (a->mta_in_use > 0)
6677 + IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
6678 + hw->mac.mc_filter_type);
6684 + * ixgbe_disable_mc_generic - Disable multicast address in RAR
6685 + * @hw: pointer to hardware structure
6687 + * Disables multicast address in RAR and the use of the multicast hash table.
6689 +s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
6691 + struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
6693 + if (a->mta_in_use > 0)
6694 + IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
6700 + * ixgbe_fc_enable_generic - Enable flow control
6701 + * @hw: pointer to hardware structure
6702 + * @packetbuf_num: packet buffer number (0-7)
6704 + * Enable flow control according to the current settings.
6706 +s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
6709 + u32 mflcn_reg, fccfg_reg;
6714 + if (hw->fc.requested_mode == ixgbe_fc_pfc)
6717 +#endif /* CONFIG_DCB */
6718 + /* Negotiate the fc mode to use */
6719 + ret_val = ixgbe_fc_autoneg(hw);
6720 + if (ret_val == IXGBE_ERR_FLOW_CONTROL)
6723 + /* Disable any previous flow control settings */
6724 + mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
6725 + mflcn_reg &= ~(IXGBE_MFLCN_RFCE | IXGBE_MFLCN_RPFCE);
6727 + fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
6728 + fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
6731 + * The possible values of fc.current_mode are:
6732 + * 0: Flow control is completely disabled
6733 + * 1: Rx flow control is enabled (we can receive pause frames,
6734 + * but not send pause frames).
6735 + * 2: Tx flow control is enabled (we can send pause frames but
6736 + * we do not support receiving pause frames).
6737 + * 3: Both Rx and Tx flow control (symmetric) are enabled.
6739 + * 4: Priority Flow Control is enabled.
6743 + switch (hw->fc.current_mode) {
6744 + case ixgbe_fc_none:
6745 + /* Flow control is disabled by software override or autoneg.
6746 + * The code below will actually disable it in the HW.
6749 + case ixgbe_fc_rx_pause:
6751 + * Rx Flow control is enabled and Tx Flow control is
6752 + * disabled by software override. Since there really
6753 + * isn't a way to advertise that we are capable of RX
6754 + * Pause ONLY, we will advertise that we support both
6755 + * symmetric and asymmetric Rx PAUSE. Later, we will
6756 + * disable the adapter's ability to send PAUSE frames.
6758 + mflcn_reg |= IXGBE_MFLCN_RFCE;
6760 + case ixgbe_fc_tx_pause:
6762 + * Tx Flow control is enabled, and Rx Flow control is
6763 + * disabled by software override.
6765 + fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
6767 + case ixgbe_fc_full:
6768 + /* Flow control (both Rx and Tx) is enabled by SW override. */
6769 + mflcn_reg |= IXGBE_MFLCN_RFCE;
6770 + fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
6773 + case ixgbe_fc_pfc:
6776 +#endif /* CONFIG_DCB */
6778 + hw_dbg(hw, "Flow control param set incorrectly\n");
6779 + ret_val = IXGBE_ERR_CONFIG;
6784 + /* Set 802.3x based flow control settings. */
6785 + mflcn_reg |= IXGBE_MFLCN_DPF;
6786 + IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
6787 + IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
6789 + reg = IXGBE_READ_REG(hw, IXGBE_MTQC);
6790 + /* Thresholds are different for link flow control when in DCB mode */
6791 + if (reg & IXGBE_MTQC_RT_ENA) {
6792 + rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
6794 + /* Always disable XON for LFC when in DCB mode */
6795 + reg = (rx_pba_size >> 5) & 0xFFE0;
6796 + IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), reg);
6798 + reg = (rx_pba_size >> 2) & 0xFFE0;
6799 + if (hw->fc.current_mode & ixgbe_fc_tx_pause)
6800 + reg |= IXGBE_FCRTH_FCEN;
6801 + IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), reg);
6803 + /* Set up and enable Rx high/low water mark thresholds,
6805 + if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
6806 + if (hw->fc.send_xon) {
6807 + IXGBE_WRITE_REG(hw,
6808 + IXGBE_FCRTL_82599(packetbuf_num),
6809 + (hw->fc.low_water |
6810 + IXGBE_FCRTL_XONE));
6812 + IXGBE_WRITE_REG(hw,
6813 + IXGBE_FCRTL_82599(packetbuf_num),
6814 + hw->fc.low_water);
6817 + IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num),
6818 + (hw->fc.high_water | IXGBE_FCRTH_FCEN));
6822 + /* Configure pause time (2 TCs per register) */
6823 + reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
6824 + if ((packetbuf_num & 1) == 0)
6825 + reg = (reg & 0xFFFF0000) | hw->fc.pause_time;
6827 + reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16);
6828 + IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg);
6830 + IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));
6837 + * ixgbe_fc_autoneg - Configure flow control
6838 + * @hw: pointer to hardware structure
6840 + * Compares our advertised flow control capabilities to those advertised by
6841 + * our link partner, and determines the proper flow control mode to use.
6843 +s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
6845 + s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
6846 + ixgbe_link_speed speed;
6849 + if (hw->fc.disable_fc_autoneg)
6853 + * AN should have completed when the cable was plugged in.
6854 + * Look for reasons to bail out. Bail out if:
6855 + * - FC autoneg is disabled, or if
6856 + * - link is not up.
6858 + * Since we're being called from an LSC, link is already known to be up.
6859 + * So use link_up_wait_to_complete=false.
6861 + hw->mac.ops.check_link(hw, &speed, &link_up, false);
6863 + ret_val = IXGBE_ERR_FLOW_CONTROL;
6867 + switch (hw->phy.media_type) {
6868 + /* Autoneg flow control on fiber adapters */
6869 + case ixgbe_media_type_fiber:
6870 + if (speed == IXGBE_LINK_SPEED_1GB_FULL)
6871 + ret_val = ixgbe_fc_autoneg_fiber(hw);
6874 + /* Autoneg flow control on backplane adapters */
6875 + case ixgbe_media_type_backplane:
6876 + ret_val = ixgbe_fc_autoneg_backplane(hw);
6879 + /* Autoneg flow control on copper adapters */
6880 + case ixgbe_media_type_copper:
6881 + if (ixgbe_device_supports_autoneg_fc(hw) == 0)
6882 + ret_val = ixgbe_fc_autoneg_copper(hw);
6890 + if (ret_val == 0) {
6891 + hw->fc.fc_was_autonegged = true;
6893 + hw->fc.fc_was_autonegged = false;
6894 + hw->fc.current_mode = hw->fc.requested_mode;
6900 + * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
6901 + * @hw: pointer to hardware structure
6905 + * Enable flow control according on 1 gig fiber.
6907 +static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
6909 + u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
6913 + * On multispeed fiber at 1g, bail out if
6914 + * - link is up but AN did not complete, or if
6915 + * - link is up and AN completed but timed out
6918 + linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
6919 + if (((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
6920 + ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
6921 + ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
6925 + pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
6926 + pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
6928 + ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
6929 + pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
6930 + IXGBE_PCS1GANA_ASM_PAUSE,
6931 + IXGBE_PCS1GANA_SYM_PAUSE,
6932 + IXGBE_PCS1GANA_ASM_PAUSE);
6939 + * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
6940 + * @hw: pointer to hardware structure
6942 + * Enable flow control according to IEEE clause 37.
6944 +static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
6946 + u32 links2, anlp1_reg, autoc_reg, links;
6950 + * On backplane, bail out if
6951 + * - backplane autoneg was not completed, or if
6952 + * - we are 82599 and link partner is not AN enabled
6954 + links = IXGBE_READ_REG(hw, IXGBE_LINKS);
6955 + if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
6956 + hw->fc.fc_was_autonegged = false;
6957 + hw->fc.current_mode = hw->fc.requested_mode;
6958 + ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
6962 + if (hw->mac.type == ixgbe_mac_82599EB) {
6963 + links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
6964 + if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
6965 + hw->fc.fc_was_autonegged = false;
6966 + hw->fc.current_mode = hw->fc.requested_mode;
6967 + ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
6972 + * Read the 10g AN autoc and LP ability registers and resolve
6973 + * local flow control settings accordingly
6975 + autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
6976 + anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
6978 + ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
6979 + anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
6980 + IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
6987 + * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
6988 + * @hw: pointer to hardware structure
6990 + * Enable flow control according to IEEE clause 37.
6992 +static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
6994 + u16 technology_ability_reg = 0;
6995 + u16 lp_technology_ability_reg = 0;
6997 + hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
6998 + IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
6999 + &technology_ability_reg);
7000 + hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
7001 + IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
7002 + &lp_technology_ability_reg);
7004 + return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
7005 + (u32)lp_technology_ability_reg,
7006 + IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
7007 + IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
7011 + * ixgbe_negotiate_fc - Negotiate flow control
7012 + * @hw: pointer to hardware structure
7013 + * @adv_reg: flow control advertised settings
7014 + * @lp_reg: link partner's flow control settings
7015 + * @adv_sym: symmetric pause bit in advertisement
7016 + * @adv_asm: asymmetric pause bit in advertisement
7017 + * @lp_sym: symmetric pause bit in link partner advertisement
7018 + * @lp_asm: asymmetric pause bit in link partner advertisement
7020 + * Find the intersection between advertised settings and link partner's
7021 + * advertised settings
7023 +static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
7024 + u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
7026 + if ((!(adv_reg)) || (!(lp_reg)))
7027 + return IXGBE_ERR_FC_NOT_NEGOTIATED;
7029 + if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
7031 + * Now we need to check if the user selected Rx ONLY
7032 + * of pause frames. In this case, we had to advertise
7033 + * FULL flow control because we could not advertise RX
7034 + * ONLY. Hence, we must now check to see if we need to
7035 + * turn OFF the TRANSMISSION of PAUSE frames.
7037 + if (hw->fc.requested_mode == ixgbe_fc_full) {
7038 + hw->fc.current_mode = ixgbe_fc_full;
7039 + hw_dbg(hw, "Flow Control = FULL.\n");
7041 + hw->fc.current_mode = ixgbe_fc_rx_pause;
7042 + hw_dbg(hw, "Flow Control=RX PAUSE frames only\n");
7044 + } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
7045 + (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
7046 + hw->fc.current_mode = ixgbe_fc_tx_pause;
7047 + hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
7048 + } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
7049 + !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
7050 + hw->fc.current_mode = ixgbe_fc_rx_pause;
7051 + hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
7053 + hw->fc.current_mode = ixgbe_fc_none;
7054 + hw_dbg(hw, "Flow Control = NONE.\n");
7060 + * ixgbe_setup_fc - Set up flow control
7061 + * @hw: pointer to hardware structure
7063 + * Called at init time to set up flow control.
7065 +s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
7068 + u32 reg = 0, reg_bp = 0;
7072 + if (hw->fc.requested_mode == ixgbe_fc_pfc) {
7073 + hw->fc.current_mode = hw->fc.requested_mode;
7077 +#endif /* CONFIG_DCB */
7079 + /* Validate the packetbuf configuration */
7080 + if (packetbuf_num < 0 || packetbuf_num > 7) {
7081 + hw_dbg(hw, "Invalid packet buffer number [%d], expected range is"
7082 + " 0-7\n", packetbuf_num);
7083 + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
7088 + * Validate the water mark configuration. Zero water marks are invalid
7089 + * because it causes the controller to just blast out fc packets.
7091 + if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) {
7092 + hw_dbg(hw, "Invalid water mark configuration\n");
7093 + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
7098 + * Validate the requested mode. Strict IEEE mode does not allow
7099 + * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
7101 + if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
7102 + hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
7103 + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
7108 + * 10gig parts do not have a word in the EEPROM to determine the
7109 + * default flow control setting, so we explicitly set it to full.
7111 + if (hw->fc.requested_mode == ixgbe_fc_default)
7112 + hw->fc.requested_mode = ixgbe_fc_full;
7115 + * Set up the 1G and 10G flow control advertisement registers so the
7116 + * HW will be able to do fc autoneg once the cable is plugged in. If
7117 + * we link at 10G, the 1G advertisement is harmless and vice versa.
7120 + switch (hw->phy.media_type) {
7121 + case ixgbe_media_type_fiber:
7122 + case ixgbe_media_type_backplane:
7123 + reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
7124 + reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
7127 + case ixgbe_media_type_copper:
7128 + hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
7129 + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®_cu);
7137 + * The possible values of fc.requested_mode are:
7138 + * 0: Flow control is completely disabled
7139 + * 1: Rx flow control is enabled (we can receive pause frames,
7140 + * but not send pause frames).
7141 + * 2: Tx flow control is enabled (we can send pause frames but
7142 + * we do not support receiving pause frames).
7143 + * 3: Both Rx and Tx flow control (symmetric) are enabled.
7145 + * 4: Priority Flow Control is enabled.
7149 + switch (hw->fc.requested_mode) {
7150 + case ixgbe_fc_none:
7151 + /* Flow control completely disabled by software override. */
7152 + reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
7153 + if (hw->phy.media_type == ixgbe_media_type_backplane)
7154 + reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
7155 + IXGBE_AUTOC_ASM_PAUSE);
7156 + else if (hw->phy.media_type == ixgbe_media_type_copper)
7157 + reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
7159 + case ixgbe_fc_rx_pause:
7161 + * Rx Flow control is enabled and Tx Flow control is
7162 + * disabled by software override. Since there really
7163 + * isn't a way to advertise that we are capable of RX
7164 + * Pause ONLY, we will advertise that we support both
7165 + * symmetric and asymmetric Rx PAUSE. Later, we will
7166 + * disable the adapter's ability to send PAUSE frames.
7168 + reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
7169 + if (hw->phy.media_type == ixgbe_media_type_backplane)
7170 + reg_bp |= (IXGBE_AUTOC_SYM_PAUSE |
7171 + IXGBE_AUTOC_ASM_PAUSE);
7172 + else if (hw->phy.media_type == ixgbe_media_type_copper)
7173 + reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
7175 + case ixgbe_fc_tx_pause:
7177 + * Tx Flow control is enabled, and Rx Flow control is
7178 + * disabled by software override.
7180 + reg |= (IXGBE_PCS1GANA_ASM_PAUSE);
7181 + reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE);
7182 + if (hw->phy.media_type == ixgbe_media_type_backplane) {
7183 + reg_bp |= (IXGBE_AUTOC_ASM_PAUSE);
7184 + reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE);
7185 + } else if (hw->phy.media_type == ixgbe_media_type_copper) {
7186 + reg_cu |= (IXGBE_TAF_ASM_PAUSE);
7187 + reg_cu &= ~(IXGBE_TAF_SYM_PAUSE);
7190 + case ixgbe_fc_full:
7191 + /* Flow control (both Rx and Tx) is enabled by SW override. */
7192 + reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
7193 + if (hw->phy.media_type == ixgbe_media_type_backplane)
7194 + reg_bp |= (IXGBE_AUTOC_SYM_PAUSE |
7195 + IXGBE_AUTOC_ASM_PAUSE);
7196 + else if (hw->phy.media_type == ixgbe_media_type_copper)
7197 + reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
7200 + case ixgbe_fc_pfc:
7203 +#endif /* CONFIG_DCB */
7205 + hw_dbg(hw, "Flow control param set incorrectly\n");
7206 + ret_val = IXGBE_ERR_CONFIG;
7211 + IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
7212 + reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
7214 + /* Disable AN timeout */
7215 + if (hw->fc.strict_ieee)
7216 + reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
7218 + IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
7219 + hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg);
7222 + * AUTOC restart handles negotiation of 1G and 10G. There is
7223 + * no need to set the PCS1GCTL register.
7225 + if (hw->phy.media_type == ixgbe_media_type_backplane) {
7226 + reg_bp |= IXGBE_AUTOC_AN_RESTART;
7227 + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
7228 + } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
7229 + (ixgbe_device_supports_autoneg_fc(hw) == 0)) {
7230 + hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
7231 + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
7234 + hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
7240 + * ixgbe_disable_pcie_master - Disable PCI-express master access
7241 + * @hw: pointer to hardware structure
7243 + * Disables PCI-Express master access and verifies there are no pending
7244 + * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
7245 + * bit hasn't caused the master requests to be disabled, else 0
7246 + * is returned signifying master requests disabled.
7248 +s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
7252 + u32 number_of_queues;
7255 + /* Just jump out if bus mastering is already disabled */
7256 + if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
7259 + /* Disable the receive unit by stopping each queue */
7260 + number_of_queues = hw->mac.max_rx_queues;
7261 + for (i = 0; i < number_of_queues; i++) {
7262 + reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
7263 + if (reg_val & IXGBE_RXDCTL_ENABLE) {
7264 + reg_val &= ~IXGBE_RXDCTL_ENABLE;
7265 + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
7269 + reg_val = IXGBE_READ_REG(hw, IXGBE_CTRL);
7270 + reg_val |= IXGBE_CTRL_GIO_DIS;
7271 + IXGBE_WRITE_REG(hw, IXGBE_CTRL, reg_val);
7273 + for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
7274 + if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
7279 + hw_dbg(hw, "GIO Master Disable bit didn't clear - requesting resets\n");
7280 + status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
7283 + * The GIO Master Disable bit didn't clear. There are multiple reasons
7284 + * for this listed in the datasheet 5.2.5.3.2 Master Disable, and they
7285 + * all require a double reset to recover from. Before proceeding, we
7286 + * first wait a little more to try to ensure that, at a minimum, the
7287 + * PCIe block has no transactions pending.
7289 + for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
7290 + if (!(IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS) &
7291 + IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
7296 + if (i == IXGBE_PCI_MASTER_DISABLE_TIMEOUT)
7297 + hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n");
7300 + * Two consecutive resets are required via CTRL.RST per datasheet
7301 + * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine
7302 + * of this need. The first reset prevents new master requests from
7303 + * being issued by our device. We then must wait 1usec for any
7304 + * remaining completions from the PCIe bus to trickle in, and then reset
7305 + * again to clear out any effects they may have had on our device.
7307 + hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
7315 + * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
7316 + * @hw: pointer to hardware structure
7317 + * @mask: Mask to specify which semaphore to acquire
7319 + * Acquires the SWFW semaphore thought the GSSR register for the specified
7320 + * function (CSR, PHY0, PHY1, EEPROM, Flash)
7322 +s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
7325 + u32 swmask = mask;
7326 + u32 fwmask = mask << 5;
7327 + s32 timeout = 200;
7331 + * SW EEPROM semaphore bit is used for access to all
7332 + * SW_FW_SYNC/GSSR bits (not just EEPROM)
7334 + if (ixgbe_get_eeprom_semaphore(hw))
7335 + return IXGBE_ERR_SWFW_SYNC;
7337 + gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
7338 + if (!(gssr & (fwmask | swmask)))
7342 + * Firmware currently using resource (fwmask) or other software
7343 + * thread currently using resource (swmask)
7345 + ixgbe_release_eeprom_semaphore(hw);
7351 + hw_dbg(hw, "Driver can't access resource, SW_FW_SYNC timeout.\n");
7352 + return IXGBE_ERR_SWFW_SYNC;
7356 + IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
7358 + ixgbe_release_eeprom_semaphore(hw);
7363 + * ixgbe_release_swfw_sync - Release SWFW semaphore
7364 + * @hw: pointer to hardware structure
7365 + * @mask: Mask to specify which semaphore to release
7367 + * Releases the SWFW semaphore thought the GSSR register for the specified
7368 + * function (CSR, PHY0, PHY1, EEPROM, Flash)
7370 +void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
7373 + u32 swmask = mask;
7375 + ixgbe_get_eeprom_semaphore(hw);
7377 + gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
7379 + IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
7381 + ixgbe_release_eeprom_semaphore(hw);
7385 + * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
7386 + * @hw: pointer to hardware structure
7387 + * @regval: register value to write to RXCTRL
7389 + * Enables the Rx DMA unit
7391 +s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
7393 + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
7399 + * ixgbe_blink_led_start_generic - Blink LED based on index.
7400 + * @hw: pointer to hardware structure
7401 + * @index: led number to blink
7403 +s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
7405 + ixgbe_link_speed speed = 0;
7407 + u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
7408 + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
7411 + * Link must be up to auto-blink the LEDs;
7412 + * Force it if link is down.
7414 + hw->mac.ops.check_link(hw, &speed, &link_up, false);
7418 + autoc_reg |= IXGBE_AUTOC_AN_RESTART;
7419 + autoc_reg |= IXGBE_AUTOC_FLU;
7420 + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
7424 + led_reg &= ~IXGBE_LED_MODE_MASK(index);
7425 + led_reg |= IXGBE_LED_BLINK(index);
7426 + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
7427 + IXGBE_WRITE_FLUSH(hw);
7433 + * ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
7434 + * @hw: pointer to hardware structure
7435 + * @index: led number to stop blinking
7437 +s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
7439 + u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
7440 + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
7442 + autoc_reg &= ~IXGBE_AUTOC_FLU;
7443 + autoc_reg |= IXGBE_AUTOC_AN_RESTART;
7444 + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
7446 + led_reg &= ~IXGBE_LED_MODE_MASK(index);
7447 + led_reg &= ~IXGBE_LED_BLINK(index);
7448 + led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
7449 + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
7450 + IXGBE_WRITE_FLUSH(hw);
7456 + * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
7457 + * @hw: pointer to hardware structure
7458 + * @san_mac_offset: SAN MAC address offset
7460 + * This function will read the EEPROM location for the SAN MAC address
7461 + * pointer, and returns the value at that location. This is used in both
7462 + * get and set mac_addr routines.
7464 +static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
7465 + u16 *san_mac_offset)
7468 + * First read the EEPROM pointer to see if the MAC addresses are
7471 + hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset);
7477 + * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
7478 + * @hw: pointer to hardware structure
7479 + * @san_mac_addr: SAN MAC address
7481 + * Reads the SAN MAC address from the EEPROM, if it's available. This is
7482 + * per-port, so set_lan_id() must be called before reading the addresses.
7483 + * set_lan_id() is called by identify_sfp(), but this cannot be relied
7484 + * upon for non-SFP connections, so we must call it here.
7486 +s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
7488 + u16 san_mac_data, san_mac_offset;
7492 + * First read the EEPROM pointer to see if the MAC addresses are
7493 + * available. If they're not, no point in calling set_lan_id() here.
7495 + ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
7497 + if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
7499 + * No addresses available in this EEPROM. It's not an
7500 + * error though, so just wipe the local address and return.
7502 + for (i = 0; i < 6; i++)
7503 + san_mac_addr[i] = 0xFF;
7505 + goto san_mac_addr_out;
7508 + /* make sure we know which port we need to program */
7509 + hw->mac.ops.set_lan_id(hw);
7510 + /* apply the port offset to the address offset */
7511 + (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
7512 + (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
7513 + for (i = 0; i < 3; i++) {
7514 + hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data);
7515 + san_mac_addr[i * 2] = (u8)(san_mac_data);
7516 + san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
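Each eeprom.ops.read() above returns one little-endian 16-bit word, and the
loop splits it low byte first. A self-contained illustration with made-up
word values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t words[3] = { 0x2211, 0x4433, 0x6655 }; /* fake EEPROM words */
	uint8_t mac[6];
	int i;

	for (i = 0; i < 3; i++) {
		mac[i * 2] = (uint8_t)words[i];            /* low byte first */
		mac[i * 2 + 1] = (uint8_t)(words[i] >> 8); /* then high byte */
	}
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0; /* prints 11:22:33:44:55:66 */
}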
7525 + * ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM
7526 + * @hw: pointer to hardware structure
7527 + * @san_mac_addr: SAN MAC address
7529 + * Write a SAN MAC address to the EEPROM.
7531 +s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
7534 + u16 san_mac_data, san_mac_offset;
7537 + /* Look for SAN mac address pointer. If not defined, return */
7538 + ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
7540 + if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
7541 + status = IXGBE_ERR_NO_SAN_ADDR_PTR;
7542 + goto san_mac_addr_out;
7545 + /* Make sure we know which port we need to write */
7546 + hw->mac.ops.set_lan_id(hw);
7547 + /* Apply the port offset to the address offset */
7548 + (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
7549 + (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
7551 + for (i = 0; i < 3; i++) {
7552 + san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
7553 + san_mac_data |= (u16)(san_mac_addr[i * 2]);
7554 + hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);
7563 + * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
7564 + * @hw: pointer to hardware structure
7566 + * Read PCIe configuration space, and get the MSI-X vector count from
7567 + * the capabilities table.
7569 +u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
7571 + u32 msix_count = 64;
7573 + if (hw->mac.msix_vectors_from_pcie) {
7574 + msix_count = IXGBE_READ_PCIE_WORD(hw,
7575 + IXGBE_PCIE_MSIX_82599_CAPS);
7576 + msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
7578 + /* MSI-X count is zero-based in HW, so increment to give
7579 + * proper value */
7580 + msix_count++;
7583 + return msix_count;
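Because the hardware reports the MSI-X table size zero-based, the code masks
the capability word and then increments. A toy decode with a stand-in mask
value (the real IXGBE_PCIE_MSIX_TBL_SZ_MASK is defined elsewhere in the
driver):

#include <stdio.h>

int main(void)
{
	unsigned int caps = 0x003F;             /* fake PCIe config word */
	unsigned int msix = (caps & 0x7FF) + 1; /* zero-based in HW */

	printf("MSI-X vectors: %u\n", msix);    /* 64 */
	return 0;
}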
7587 + * ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
7588 + * @hw: pointer to hardware structure
7589 + * @addr: Address to put into receive address register
7590 + * @vmdq: VMDq pool to assign
7592 + * Puts an ethernet address into a receive address register, or
7593 + * finds the rar that it is already in; adds to the pool list
7595 +s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
7597 + static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
7598 + u32 first_empty_rar = NO_EMPTY_RAR_FOUND;
7600 + u32 rar_low, rar_high;
7601 + u32 addr_low, addr_high;
7603 + /* swap bytes for HW little endian */
7604 + addr_low = addr[0] | (addr[1] << 8)
7605 + | (addr[2] << 16)
7606 + | (addr[3] << 24);
7607 + addr_high = addr[4] | (addr[5] << 8);
7610 + * Either find the mac_id in rar or find the first empty space.
7611 + * rar_highwater points to just after the highest currently used
7612 + * rar in order to shorten the search. It grows when we add a new
7613 + * rar to the top of the table.
7614 + */
7615 + for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
7616 + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
7618 + if (((IXGBE_RAH_AV & rar_high) == 0)
7619 + && first_empty_rar == NO_EMPTY_RAR_FOUND) {
7620 + first_empty_rar = rar;
7621 + } else if ((rar_high & 0xFFFF) == addr_high) {
7622 + rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
7623 + if (rar_low == addr_low)
7624 + break; /* found it already in the rars */
7628 + if (rar < hw->mac.rar_highwater) {
7629 + /* already there so just add to the pool bits */
7630 + ixgbe_set_vmdq(hw, rar, vmdq);
7631 + } else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
7632 + /* stick it into first empty RAR slot we found */
7633 + rar = first_empty_rar;
7634 + ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
7635 + } else if (rar == hw->mac.rar_highwater) {
7636 + /* add it to the top of the list and inc the highwater mark */
7637 + ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
7638 + hw->mac.rar_highwater++;
7639 + } else if (rar >= hw->mac.num_rar_entries) {
7640 + return IXGBE_ERR_INVALID_MAC_ADDR;
7644 + * If we found rar[0], make sure the default pool bit (we use pool 0)
7645 + * remains cleared to be sure default pool packets will get delivered
7648 + ixgbe_clear_vmdq(hw, rar, 0);
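The insert logic above prefers an existing match, then the first hole below
the highwater mark, then grows the used region. A simplified host-side model
of that policy (not driver code; the real table lives in the RAL/RAH
registers):

#include <stdio.h>

#define NUM_RAR 16

struct rar { int valid; unsigned int addr; };

static int insert(struct rar *tab, int *highwater, unsigned int addr)
{
	int first_empty = -1, i;

	for (i = 0; i < *highwater; i++) {
		if (!tab[i].valid && first_empty < 0)
			first_empty = i;            /* remember first hole */
		else if (tab[i].valid && tab[i].addr == addr)
			return i;                   /* already present */
	}
	if (first_empty >= 0)
		i = first_empty;                    /* reuse a hole */
	else if (*highwater < NUM_RAR)
		i = (*highwater)++;                 /* grow the used region */
	else
		return -1;                          /* table full */
	tab[i].valid = 1;
	tab[i].addr = addr;
	return i;
}

int main(void)
{
	struct rar tab[NUM_RAR] = {{0}};
	int hw_mark = 0;

	printf("%d %d %d\n", insert(tab, &hw_mark, 0xAA),
	       insert(tab, &hw_mark, 0xBB),
	       insert(tab, &hw_mark, 0xAA)); /* prints: 0 1 0 */
	return 0;
}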
7654 + * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
7655 + * @hw: pointer to hardware struct
7656 + * @rar: receive address register index to disassociate
7657 + * @vmdq: VMDq pool index to remove from the rar
7659 +s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
7661 + u32 mpsar_lo, mpsar_hi;
7662 + u32 rar_entries = hw->mac.num_rar_entries;
7664 + if (rar < rar_entries) {
7665 + mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
7666 + mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
7668 + if (!mpsar_lo && !mpsar_hi)
7671 + if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
7673 + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
7677 + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
7680 + } else if (vmdq < 32) {
7681 + mpsar_lo &= ~(1 << vmdq);
7682 + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
7684 + mpsar_hi &= ~(1 << (vmdq - 32));
7685 + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
7688 + /* was that the last pool using this rar? */
7689 + if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
7690 + hw->mac.ops.clear_rar(hw, rar);
7692 + hw_dbg(hw, "RAR index %d is out of range.\n", rar);
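The MPSAR pair above holds a 64-bit pool-membership bitmap split across two
32-bit registers: pools 0-31 map to MPSAR_LO and 32-63 to MPSAR_HI. A quick
illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t mpsar_lo = 0, mpsar_hi = 0;
	unsigned int vmdq = 37; /* pool 37 -> bit 5 of the high word */

	if (vmdq < 32)
		mpsar_lo |= 1u << vmdq;
	else
		mpsar_hi |= 1u << (vmdq - 32);
	printf("lo=0x%08X hi=0x%08X\n", (unsigned int)mpsar_lo,
	       (unsigned int)mpsar_hi); /* lo=0x00000000 hi=0x00000020 */
	return 0;
}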
7700 + * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
7701 + * @hw: pointer to hardware struct
7702 + * @rar: receive address register index to associate with a VMDq index
7703 + * @vmdq: VMDq pool index
7705 +s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
7708 + u32 rar_entries = hw->mac.num_rar_entries;
7710 + if (rar < rar_entries) {
7712 + mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
7713 + mpsar |= 1 << vmdq;
7714 + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
7716 + mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
7717 + mpsar |= 1 << (vmdq - 32);
7718 + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
7721 + hw_dbg(hw, "RAR index %d is out of range.\n", rar);
7727 + * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
7728 + * @hw: pointer to hardware structure
7730 +s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
7734 + hw_dbg(hw, " Clearing UTA\n");
7736 + for (i = 0; i < 128; i++)
7737 + IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
7743 + * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
7744 + * @hw: pointer to hardware structure
7745 + * @vlan: VLAN id to write to VLAN filter
7747 + * return the VLVF index where this VLAN id should be placed
7750 +s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
7753 + u32 first_empty_slot = 0;
7756 + /* short cut the special case */
7757 + if (vlan == 0)
7758 + return 0;
7761 + * Search for the vlan id in the VLVF entries. Save off the first empty
7762 + * slot found along the way
7764 + for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
7765 + bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
7766 + if (!bits && !(first_empty_slot))
7767 + first_empty_slot = regindex;
7768 + else if ((bits & 0x0FFF) == vlan)
7773 + * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan
7774 + * in the VLVF. Else use the first empty VLVF register for this
7775 + * vlan id.
7776 + */
7777 + if (regindex >= IXGBE_VLVF_ENTRIES) {
7778 + if (first_empty_slot)
7779 + regindex = first_empty_slot;
7781 + hw_dbg(hw, "No space in VLVF.\n");
7782 + regindex = IXGBE_ERR_NO_SPACE;
7790 + * ixgbe_set_vfta_generic - Set VLAN filter table
7791 + * @hw: pointer to hardware structure
7792 + * @vlan: VLAN id to write to VLAN filter
7793 + * @vind: VMDq output index that maps queue to VLAN id in VFVFB
7794 + * @vlan_on: boolean flag to turn on/off VLAN in VFVF
7796 + * Turn on/off specified VLAN in the VLAN filter table.
7798 +s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
7807 + bool vfta_changed = false;
7809 + if (vlan > 4095)
7810 + return IXGBE_ERR_PARAM;
7813 + * this is a 2 part operation - first the VFTA, then the
7814 + * VLVF and VLVFB if VT Mode is set
7815 + * We don't write the VFTA until we know the VLVF part succeeded.
7819 + * The VFTA is a bitstring made up of 128 32-bit registers
7820 + * that enable the particular VLAN id, much like the MTA:
7821 + * bits[11-5]: which register
7822 + * bits[4-0]: which bit in the register
7824 + regindex = (vlan >> 5) & 0x7F;
7825 + bitindex = vlan & 0x1F;
7826 + targetbit = (1 << bitindex);
7827 + vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
7830 + if (!(vfta & targetbit)) {
7831 + vfta |= targetbit;
7832 + vfta_changed = true;
7835 + if ((vfta & targetbit)) {
7836 + vfta &= ~targetbit;
7837 + vfta_changed = true;
7842 + * If VT Mode is set
7843 + * either vlan_on
7844 + * make sure the vlan is in VLVF
7845 + * set the vind bit in the matching VLVFB
7846 + * or !vlan_on
7847 + * clear the pool bit and possibly the vind
7848 + */
7849 + vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
7850 + if (vt & IXGBE_VT_CTL_VT_ENABLE) {
7853 + vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
7854 + if (vlvf_index < 0)
7855 + return vlvf_index;
7858 + /* set the pool bit */
7860 + bits = IXGBE_READ_REG(hw,
7861 + IXGBE_VLVFB(vlvf_index*2));
7862 + bits |= (1 << vind);
7863 + IXGBE_WRITE_REG(hw,
7864 + IXGBE_VLVFB(vlvf_index*2),
7867 + bits = IXGBE_READ_REG(hw,
7868 + IXGBE_VLVFB((vlvf_index*2)+1));
7869 + bits |= (1 << (vind-32));
7870 + IXGBE_WRITE_REG(hw,
7871 + IXGBE_VLVFB((vlvf_index*2)+1),
7875 + /* clear the pool bit */
7877 + bits = IXGBE_READ_REG(hw,
7878 + IXGBE_VLVFB(vlvf_index*2));
7879 + bits &= ~(1 << vind);
7880 + IXGBE_WRITE_REG(hw,
7881 + IXGBE_VLVFB(vlvf_index*2),
7883 + bits |= IXGBE_READ_REG(hw,
7884 + IXGBE_VLVFB((vlvf_index*2)+1));
7886 + bits = IXGBE_READ_REG(hw,
7887 + IXGBE_VLVFB((vlvf_index*2)+1));
7888 + bits &= ~(1 << (vind-32));
7889 + IXGBE_WRITE_REG(hw,
7890 + IXGBE_VLVFB((vlvf_index*2)+1),
7892 + bits |= IXGBE_READ_REG(hw,
7893 + IXGBE_VLVFB(vlvf_index*2));
7898 + * If there are still bits set in the VLVFB registers
7899 + * for the VLAN ID indicated we need to see if the
7900 + * caller is requesting that we clear the VFTA entry bit.
7901 + * If the caller has requested that we clear the VFTA
7902 + * entry bit but there are still pools/VFs using this VLAN
7903 + * ID entry then ignore the request. We're not worried
7904 + * about the case where we're turning the VFTA VLAN ID
7905 + * entry bit on, only when requested to turn it off as
7906 + * there may be multiple pools and/or VFs using the
7907 + * VLAN ID entry. In that case we cannot clear the
7908 + * VFTA bit until all pools/VFs using that VLAN ID have also
7909 + * been cleared. This will be indicated by "bits" being
7910 + * zero.
7911 + */
7912 + if (bits) {
7913 + IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
7914 + (IXGBE_VLVF_VIEN | vlan));
7915 + if (!vlan_on) {
7916 + /* someone wants to clear the vfta entry
7917 + * but some pools/VFs are still using it.
7918 + */
7919 + vfta_changed = false;
7923 + IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
7927 + IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);
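The regindex/bitindex split above locates a VLAN id inside the VFTA
bitstring of 128 32-bit registers. A worked example:

#include <stdio.h>

int main(void)
{
	unsigned int vlan = 2882; /* 0xB42 */
	unsigned int regindex = (vlan >> 5) & 0x7F; /* which register */
	unsigned int bitindex = vlan & 0x1F;        /* which bit in it */

	printf("VFTA[%u] bit %u\n", regindex, bitindex); /* VFTA[90] bit 2 */
	return 0;
}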
7933 + * ixgbe_clear_vfta_generic - Clear VLAN filter table
7934 + * @hw: pointer to hardware structure
7936 + * Clears the VLAN filter table, and the VMDq index associated with the filter
7938 +s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
7942 + for (offset = 0; offset < hw->mac.vft_size; offset++)
7943 + IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
7945 + for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
7946 + IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
7947 + IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset*2), 0);
7948 + IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset*2)+1), 0);
7955 + * ixgbe_check_mac_link_generic - Determine link and speed status
7956 + * @hw: pointer to hardware structure
7957 + * @speed: pointer to link speed
7958 + * @link_up: true when link is up
7959 + * @link_up_wait_to_complete: bool used to wait for link up or not
7961 + * Reads the links register to determine if link is up and the current speed
7963 +s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
7964 + bool *link_up, bool link_up_wait_to_complete)
7966 + u32 links_reg, links_orig;
7969 + /* clear the old state */
7970 + links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
7972 + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
7974 + if (links_orig != links_reg) {
7975 + hw_dbg(hw, "LINKS changed from %08X to %08X\n",
7976 + links_orig, links_reg);
7979 + if (link_up_wait_to_complete) {
7980 + for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
7981 + if (links_reg & IXGBE_LINKS_UP) {
7988 + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
7991 + if (links_reg & IXGBE_LINKS_UP)
7997 + if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
7998 + IXGBE_LINKS_SPEED_10G_82599)
7999 + *speed = IXGBE_LINK_SPEED_10GB_FULL;
8000 + else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
8001 + IXGBE_LINKS_SPEED_1G_82599)
8002 + *speed = IXGBE_LINK_SPEED_1GB_FULL;
8003 + else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
8004 + IXGBE_LINKS_SPEED_100_82599)
8005 + *speed = IXGBE_LINK_SPEED_100_FULL;
8007 + *speed = IXGBE_LINK_SPEED_UNKNOWN;
8009 + /* if link is down, zero out the current_mode */
8010 + if (*link_up == false) {
8011 + hw->fc.current_mode = ixgbe_fc_none;
8012 + hw->fc.fc_was_autonegged = false;
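The speed decode above compares a masked field of the LINKS register against
per-speed encodings. A standalone sketch with stand-in constants (the real
IXGBE_LINKS_* masks are defined in ixgbe_type.h, not in this hunk):

#include <stdio.h>
#include <stdint.h>

#define LINKS_UP         (1u << 30) /* stand-in */
#define LINKS_SPEED_MASK (3u << 28) /* stand-in 2-bit speed field */
#define SPEED_10G        (3u << 28) /* stand-in encodings */
#define SPEED_1G         (2u << 28)
#define SPEED_100        (1u << 28)

int main(void)
{
	uint32_t links = LINKS_UP | SPEED_10G; /* pretend register read */
	const char *speed = "unknown";

	if ((links & LINKS_SPEED_MASK) == SPEED_10G)
		speed = "10G";
	else if ((links & LINKS_SPEED_MASK) == SPEED_1G)
		speed = "1G";
	else if ((links & LINKS_SPEED_MASK) == SPEED_100)
		speed = "100M";
	printf("link %s, speed %s\n",
	       (links & LINKS_UP) ? "up" : "down", speed);
	return 0;
}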
8019 + * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
8020 + * the EEPROM
8021 + * @hw: pointer to hardware structure
8022 + * @wwnn_prefix: the alternative WWNN prefix
8023 + * @wwpn_prefix: the alternative WWPN prefix
8025 + * This function will read the EEPROM from the alternative SAN MAC address
8026 + * block to check for alternative WWNN/WWPN prefix support.
8028 +s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
8032 + u16 alt_san_mac_blk_offset;
8034 + /* clear output first */
8035 + *wwnn_prefix = 0xFFFF;
8036 + *wwpn_prefix = 0xFFFF;
8038 + /* check if alternative SAN MAC is supported */
8039 + hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
8040 + &alt_san_mac_blk_offset);
8042 + if ((alt_san_mac_blk_offset == 0) ||
8043 + (alt_san_mac_blk_offset == 0xFFFF))
8044 + goto wwn_prefix_out;
8046 + /* check capability in alternative san mac address block */
8047 + offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
8048 + hw->eeprom.ops.read(hw, offset, &caps);
8049 + if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
8050 + goto wwn_prefix_out;
8052 + /* get the corresponding prefix for WWNN/WWPN */
8053 + offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
8054 + hw->eeprom.ops.read(hw, offset, wwnn_prefix);
8056 + offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
8057 + hw->eeprom.ops.read(hw, offset, wwpn_prefix);
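This helper and the FCoE one below share a pointer-block idiom: read a
pointer word, treat 0/0xFFFF as "block absent", then index into the block
and test a capability bit. A toy version over a fake EEPROM image (all
offsets and bit positions here are invented):

#include <stdio.h>
#include <stdint.h>

static const uint16_t eeprom[16] = {
	[0x3] = 0x8,    /* hypothetical block pointer word */
	[0x8] = 0x0001, /* hypothetical caps word: bit 0 = alt WWN */
	[0x9] = 0x2000, /* hypothetical WWNN prefix */
};

int main(void)
{
	uint16_t ptr = eeprom[0x3];

	if (ptr == 0 || ptr == 0xFFFF)
		return 1;                /* block absent: not an error */
	if (!(eeprom[ptr] & 0x0001))     /* capability bit (stand-in) */
		return 1;
	printf("WWNN prefix: 0x%04X\n", (unsigned int)eeprom[ptr + 1]);
	return 0;
}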
8064 + * ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM
8065 + * @hw: pointer to hardware structure
8066 + * @bs: the fcoe boot status
8068 + * This function will read the FCOE boot status from the iSCSI FCOE block
8070 +s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs)
8072 + u16 offset, caps, flags;
8075 + /* clear output first */
8076 + *bs = ixgbe_fcoe_bootstatus_unavailable;
8078 + /* check if FCOE IBA block is present */
8079 + offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR;
8080 + status = hw->eeprom.ops.read(hw, offset, &caps);
8084 + if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE))
8087 + /* check if iSCSI FCOE block is populated */
8088 + status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset);
8092 + if ((offset == 0) || (offset == 0xFFFF))
8095 + /* read fcoe flags in iSCSI FCOE block */
8096 + offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET;
8097 + status = hw->eeprom.ops.read(hw, offset, &flags);
8101 + if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE)
8102 + *bs = ixgbe_fcoe_bootstatus_enabled;
8104 + *bs = ixgbe_fcoe_bootstatus_disabled;
8111 + * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
8112 + * control
8113 + * @hw: pointer to hardware structure
8115 + * There are several phys that do not support autoneg flow control. This
8116 + * function checks the device id to see if the associated phy supports
8117 + * autoneg flow control.
8119 +static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
8122 + switch (hw->device_id) {
8123 + case IXGBE_DEV_ID_82599_T3_LOM:
8126 + return IXGBE_ERR_FC_NOT_SUPPORTED;
8129 diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_common.h linux-2.6.22-50/drivers/net/ixgbe/ixgbe_common.h
8130 --- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_common.h 1969-12-31 19:00:00.000000000 -0500
8131 +++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_common.h 2010-08-25 17:56:26.000000000 -0400
8133 +/*******************************************************************************
8135 + Intel 10 Gigabit PCI Express Linux driver
8136 + Copyright(c) 1999 - 2010 Intel Corporation.
8138 + This program is free software; you can redistribute it and/or modify it
8139 + under the terms and conditions of the GNU General Public License,
8140 + version 2, as published by the Free Software Foundation.
8142 + This program is distributed in the hope it will be useful, but WITHOUT
8143 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8144 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
8147 + You should have received a copy of the GNU General Public License along with
8148 + this program; if not, write to the Free Software Foundation, Inc.,
8149 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
8151 + The full GNU General Public License is included in this distribution in
8152 + the file called "COPYING".
8154 + Contact Information:
8155 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
8156 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
8158 +*******************************************************************************/
8160 +#ifndef _IXGBE_COMMON_H_
8161 +#define _IXGBE_COMMON_H_
8163 +#include "ixgbe_type.h"
8165 +u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
8167 +s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
8168 +s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
8169 +s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
8170 +s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
8171 +s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num);
8172 +s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
8173 + u32 *pba_num_size);
8174 +s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
8175 +s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
8176 +void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw);
8177 +s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw);
8179 +s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index);
8180 +s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index);
8182 +s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw);
8183 +s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
8184 +s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
8185 +s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
8187 +u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
8188 +s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
8189 + u16 *checksum_val);
8190 +s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
8191 +s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
8193 +s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
8195 +s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
8196 +s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
8197 +s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
8198 + u32 mc_addr_count,
8199 + ixgbe_mc_addr_itr func);
8200 +s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
8201 + u32 addr_count, ixgbe_mc_addr_itr func);
8202 +s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
8203 +s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
8204 +s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
8206 +s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num);
8207 +s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num);
8208 +s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw);
8210 +s32 ixgbe_validate_mac_addr(u8 *mac_addr);
8211 +s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
8212 +void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask);
8213 +s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
8215 +s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
8216 +s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
8218 +s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
8219 +s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
8221 +s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
8222 +s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
8223 +s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
8224 +s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw);
8225 +s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan,
8226 + u32 vind, bool vlan_on);
8227 +s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
8229 +s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
8230 + ixgbe_link_speed *speed,
8231 + bool *link_up, bool link_up_wait_to_complete);
8233 +s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
8234 + u16 *wwpn_prefix);
8236 +s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs);
8237 +#endif /* _IXGBE_COMMON_H_ */
8238 diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_dcb_82598.c linux-2.6.22-50/drivers/net/ixgbe/ixgbe_dcb_82598.c
8239 --- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_dcb_82598.c 1969-12-31 19:00:00.000000000 -0500
8240 +++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_dcb_82598.c 2010-08-25 17:56:26.000000000 -0400
8242 +/*******************************************************************************
8244 + Intel 10 Gigabit PCI Express Linux driver
8245 + Copyright(c) 1999 - 2010 Intel Corporation.
8247 + This program is free software; you can redistribute it and/or modify it
8248 + under the terms and conditions of the GNU General Public License,
8249 + version 2, as published by the Free Software Foundation.
8251 + This program is distributed in the hope it will be useful, but WITHOUT
8252 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8253 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
8256 + You should have received a copy of the GNU General Public License along with
8257 + this program; if not, write to the Free Software Foundation, Inc.,
8258 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
8260 + The full GNU General Public License is included in this distribution in
8261 + the file called "COPYING".
8263 + Contact Information:
8264 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
8265 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
8267 +*******************************************************************************/
8270 +#include "ixgbe_type.h"
8271 +#include "ixgbe_dcb.h"
8272 +#include "ixgbe_dcb_82598.h"
8275 + * ixgbe_dcb_get_tc_stats_82598 - Return status data for each traffic class
8276 + * @hw: pointer to hardware structure
8277 + * @stats: pointer to statistics structure
8278 + * @tc_count: Number of elements in bwg_array.
8280 + * This function returns the status data for each of the Traffic Classes in use.
8282 +s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *hw,
8283 + struct ixgbe_hw_stats *stats,
8288 + if (tc_count > MAX_TRAFFIC_CLASS)
8289 + return DCB_ERR_PARAM;
8290 + /* Statistics pertaining to each traffic class */
8291 + for (tc = 0; tc < tc_count; tc++) {
8292 + /* Transmitted Packets */
8293 + stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc));
8294 + /* Transmitted Bytes */
8295 + stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC(tc));
8296 + /* Received Packets */
8297 + stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc));
8298 + /* Received Bytes */
8299 + stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC(tc));
8302 + /* Can we get rid of these?? If so, we could also get rid
8303 + * of the tc_stats structure.
8305 + tc_stats_array[up]->in_overflow_discards = 0;
8306 + tc_stats_array[up]->out_overflow_discards = 0;
8314 + * ixgbe_dcb_get_pfc_stats_82598 - Returns CBFC status data
8315 + * @hw: pointer to hardware structure
8316 + * @stats: pointer to statistics structure
8317 + * @tc_count: Number of elements in bwg_array.
8319 + * This function returns the CBFC status data for each of the Traffic Classes.
8321 +s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *hw,
8322 + struct ixgbe_hw_stats *stats,
8327 + if (tc_count > MAX_TRAFFIC_CLASS)
8328 + return DCB_ERR_PARAM;
8329 + for (tc = 0; tc < tc_count; tc++) {
8330 + /* Priority XOFF Transmitted */
8331 + stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc));
8332 + /* Priority XOFF Received */
8333 + stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(tc));
8340 + * ixgbe_dcb_config_packet_buffers_82598 - Configure packet buffers
8341 + * @hw: pointer to hardware structure
8342 + * @dcb_config: pointer to ixgbe_dcb_config structure
8344 + * Configure packet buffers for DCB mode.
8346 +s32 ixgbe_dcb_config_packet_buffers_82598(struct ixgbe_hw *hw,
8347 + struct ixgbe_dcb_config *dcb_config)
8350 + u32 value = IXGBE_RXPBSIZE_64KB;
8353 + /* Setup Rx packet buffer sizes */
8354 + switch (dcb_config->rx_pba_cfg) {
8356 + /* Setup the first four at 80KB */
8357 + value = IXGBE_RXPBSIZE_80KB;
8358 + for (; i < 4; i++)
8359 + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), value);
8360 + /* Setup the last four at 48KB...don't re-init i */
8361 + value = IXGBE_RXPBSIZE_48KB;
8362 + /* Fall Through */
8365 + for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
8366 + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), value);
8368 + /* Setup Tx packet buffer sizes */
8369 + for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
8370 + IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i),
8371 + IXGBE_TXPBSIZE_40KB);
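The switch above relies on fall-through: the pba_80_48 case writes the first
four buffers at 80KB, then drops into the shared tail loop that fills the
rest at 48KB (4*80 + 4*48 = 512KB, the full Rx packet buffer). The same
shape in miniature:

#include <stdio.h>

int main(void)
{
	int i = 0, kb[8], total = 0;

	/* 80/48 case: first four buffers at 80KB... */
	for (; i < 4; i++)
		kb[i] = 80;
	/* ...then fall through to the shared tail at 48KB; i is kept */
	for (; i < 8; i++)
		kb[i] = 48;

	for (i = 0; i < 8; i++)
		total += kb[i];
	printf("total = %dKB\n", total); /* 512KB */
	return 0;
}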
8380 + * ixgbe_dcb_config_rx_arbiter_82598 - Config Rx data arbiter
8381 + * @hw: pointer to hardware structure
8382 + * @dcb_config: pointer to ixgbe_dcb_config structure
8384 + * Configure Rx Data Arbiter and credits for each traffic class.
8386 +s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
8387 + struct ixgbe_dcb_config *dcb_config)
8389 + struct tc_bw_alloc *p;
8391 + u32 credit_refill = 0;
8392 + u32 credit_max = 0;
8395 + reg = IXGBE_READ_REG(hw, IXGBE_RUPPBMR) | IXGBE_RUPPBMR_MQA;
8396 + IXGBE_WRITE_REG(hw, IXGBE_RUPPBMR, reg);
8398 + reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
8399 + /* Enable Arbiter */
8400 + reg &= ~IXGBE_RMCS_ARBDIS;
8401 + /* Enable Receive Recycle within the BWG */
8402 + reg |= IXGBE_RMCS_RRM;
8403 + /* Enable Deficit Fixed Priority arbitration*/
8404 + reg |= IXGBE_RMCS_DFP;
8406 + IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
8408 + /* Configure traffic class credits and priority */
8409 + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
8410 + p = &dcb_config->tc_config[i].path[DCB_RX_CONFIG];
8411 + credit_refill = p->data_credits_refill;
8412 + credit_max = p->data_credits_max;
8414 + reg = credit_refill | (credit_max << IXGBE_RT2CR_MCL_SHIFT);
8416 + if (p->prio_type == prio_link)
8417 + reg |= IXGBE_RT2CR_LSP;
8419 + IXGBE_WRITE_REG(hw, IXGBE_RT2CR(i), reg);
8422 + reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
8423 + reg |= IXGBE_RDRXCTL_RDMTS_1_2;
8424 + reg |= IXGBE_RDRXCTL_MPBEN;
8425 + reg |= IXGBE_RDRXCTL_MCEN;
8426 + IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
8428 + reg = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
8429 + /* Make sure there are enough descriptors before arbitration */
8430 + reg &= ~IXGBE_RXCTRL_DMBYPS;
8431 + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg);
8437 + * ixgbe_dcb_config_tx_desc_arbiter_82598 - Config Tx Desc. arbiter
8438 + * @hw: pointer to hardware structure
8439 + * @dcb_config: pointer to ixgbe_dcb_config structure
8441 + * Configure Tx Descriptor Arbiter and credits for each traffic class.
8443 +s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
8444 + struct ixgbe_dcb_config *dcb_config)
8446 + struct tc_bw_alloc *p;
8447 + u32 reg, max_credits;
8450 + reg = IXGBE_READ_REG(hw, IXGBE_DPMCS);
8452 + /* Enable arbiter */
8453 + reg &= ~IXGBE_DPMCS_ARBDIS;
8454 + if (!(dcb_config->round_robin_enable)) {
8455 + /* Enable DFP and Recycle mode */
8456 + reg |= (IXGBE_DPMCS_TDPAC | IXGBE_DPMCS_TRM);
8458 + reg |= IXGBE_DPMCS_TSOEF;
8459 + /* Configure Max TSO packet size 34KB including payload and headers */
8460 + reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT);
8462 + IXGBE_WRITE_REG(hw, IXGBE_DPMCS, reg);
8464 + /* Configure traffic class credits and priority */
8465 + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
8466 + p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG];
8467 + max_credits = dcb_config->tc_config[i].desc_credits_max;
8468 + reg = max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT;
8469 + reg |= p->data_credits_refill;
8470 + reg |= (u32)(p->bwg_id) << IXGBE_TDTQ2TCCR_BWG_SHIFT;
8472 + if (p->prio_type == prio_group)
8473 + reg |= IXGBE_TDTQ2TCCR_GSP;
8475 + if (p->prio_type == prio_link)
8476 + reg |= IXGBE_TDTQ2TCCR_LSP;
8478 + IXGBE_WRITE_REG(hw, IXGBE_TDTQ2TCCR(i), reg);
8485 + * ixgbe_dcb_config_tx_data_arbiter_82598 - Config Tx data arbiter
8486 + * @hw: pointer to hardware structure
8487 + * @dcb_config: pointer to ixgbe_dcb_config structure
8489 + * Configure Tx Data Arbiter and credits for each traffic class.
8491 +s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
8492 + struct ixgbe_dcb_config *dcb_config)
8494 + struct tc_bw_alloc *p;
8498 + reg = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
8499 + /* Enable Data Plane Arbiter */
8500 + reg &= ~IXGBE_PDPMCS_ARBDIS;
8501 + /* Enable DFP and Transmit Recycle Mode */
8502 + reg |= (IXGBE_PDPMCS_TPPAC | IXGBE_PDPMCS_TRM);
8504 + IXGBE_WRITE_REG(hw, IXGBE_PDPMCS, reg);
8506 + /* Configure traffic class credits and priority */
8507 + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
8508 + p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG];
8509 + reg = p->data_credits_refill;
8510 + reg |= (u32)(p->data_credits_max) << IXGBE_TDPT2TCCR_MCL_SHIFT;
8511 + reg |= (u32)(p->bwg_id) << IXGBE_TDPT2TCCR_BWG_SHIFT;
8513 + if (p->prio_type == prio_group)
8514 + reg |= IXGBE_TDPT2TCCR_GSP;
8516 + if (p->prio_type == prio_link)
8517 + reg |= IXGBE_TDPT2TCCR_LSP;
8519 + IXGBE_WRITE_REG(hw, IXGBE_TDPT2TCCR(i), reg);
8522 + /* Enable Tx packet buffer division */
8523 + reg = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
8524 + reg |= IXGBE_DTXCTL_ENDBUBD;
8525 + IXGBE_WRITE_REG(hw, IXGBE_DTXCTL, reg);
8531 + * ixgbe_dcb_config_pfc_82598 - Config priority flow control
8532 + * @hw: pointer to hardware structure
8533 + * @dcb_config: pointer to ixgbe_dcb_config structure
8535 + * Configure Priority Flow Control for each traffic class.
8537 +s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw,
8538 + struct ixgbe_dcb_config *dcb_config)
8540 + u32 reg, rx_pba_size;
8543 + if (!dcb_config->pfc_mode_enable)
8546 + /* Enable Transmit Priority Flow Control */
8547 + reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
8548 + reg &= ~IXGBE_RMCS_TFCE_802_3X;
8549 + /* correct the reporting of our flow control status */
8550 + reg |= IXGBE_RMCS_TFCE_PRIORITY;
8551 + IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
8553 + /* Enable Receive Priority Flow Control */
8554 + reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
8555 + reg &= ~IXGBE_FCTRL_RFCE;
8556 + reg |= IXGBE_FCTRL_RPFCE;
8557 + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg);
8560 + * Configure flow control thresholds and enable priority flow control
8561 + * for each traffic class.
8563 + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
8564 + if (dcb_config->rx_pba_cfg == pba_equal) {
8565 + rx_pba_size = IXGBE_RXPBSIZE_64KB;
8567 + rx_pba_size = (i < 4) ? IXGBE_RXPBSIZE_80KB
8568 + : IXGBE_RXPBSIZE_48KB;
8571 + reg = ((rx_pba_size >> 5) & 0xFFF0);
8572 + if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx ||
8573 + dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full)
8574 + reg |= IXGBE_FCRTL_XONE;
8576 + IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), reg);
8578 + reg = ((rx_pba_size >> 2) & 0xFFF0);
8579 + if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx ||
8580 + dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full)
8581 + reg |= IXGBE_FCRTH_FCEN;
8583 + IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg);
8586 + /* Configure pause time */
8587 + for (i = 0; i < (MAX_TRAFFIC_CLASS >> 1); i++)
8588 + IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), 0x68006800);
8590 + /* Configure flow control refresh threshold value */
8591 + IXGBE_WRITE_REG(hw, IXGBE_FCRTV, 0x3400);
8598 + * ixgbe_dcb_config_tc_stats_82598 - Configure traffic class statistics
8599 + * @hw: pointer to hardware structure
8601 + * Configure queue statistics registers; all queues belonging to the same
8602 + * traffic class use a single set of queue statistics counters.
8604 +s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
8610 + /* Receive Queues stats setting - 8 queues per statistics reg */
8611 + for (i = 0, j = 0; i < 15 && j < 8; i = i + 2, j++) {
8612 + reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i));
8613 + reg |= ((0x1010101) * j);
8614 + IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
8615 + reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i + 1));
8616 + reg |= ((0x1010101) * j);
8617 + IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i + 1), reg);
8619 + /* Transmit Queues stats setting - 4 queues per statistics reg */
8620 + for (i = 0; i < 8; i++) {
8621 + reg = IXGBE_READ_REG(hw, IXGBE_TQSMR(i));
8622 + reg |= ((0x1010101) * i);
8623 + IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i), reg);
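The 0x1010101 multiplier above replicates a one-byte stat index into all
four queue slots of a QSMR register, so every queue in the group reports to
the same counter. For example:

#include <stdio.h>

int main(void)
{
	unsigned int j = 3; /* stat counter index for this queue group */

	printf("QSMR value: 0x%08X\n", 0x01010101u * j); /* 0x03030303 */
	return 0;
}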
8630 + * ixgbe_dcb_hw_config_82598 - Config and enable DCB
8631 + * @hw: pointer to hardware structure
8632 + * @dcb_config: pointer to ixgbe_dcb_config structure
8634 + * Configure dcb settings and enable dcb mode.
8636 +s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw,
8637 + struct ixgbe_dcb_config *dcb_config)
8640 + ixgbe_dcb_config_packet_buffers_82598(hw, dcb_config);
8641 + ixgbe_dcb_config_rx_arbiter_82598(hw, dcb_config);
8642 + ixgbe_dcb_config_tx_desc_arbiter_82598(hw, dcb_config);
8643 + ixgbe_dcb_config_tx_data_arbiter_82598(hw, dcb_config);
8644 + ixgbe_dcb_config_pfc_82598(hw, dcb_config);
8645 + ixgbe_dcb_config_tc_stats_82598(hw);
8650 diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_dcb_82598.h linux-2.6.22-50/drivers/net/ixgbe/ixgbe_dcb_82598.h
8651 --- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_dcb_82598.h 1969-12-31 19:00:00.000000000 -0500
8652 +++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_dcb_82598.h 2010-08-25 17:56:26.000000000 -0400
8654 +/*******************************************************************************
8656 + Intel 10 Gigabit PCI Express Linux driver
8657 + Copyright(c) 1999 - 2010 Intel Corporation.
8659 + This program is free software; you can redistribute it and/or modify it
8660 + under the terms and conditions of the GNU General Public License,
8661 + version 2, as published by the Free Software Foundation.
8663 + This program is distributed in the hope it will be useful, but WITHOUT
8664 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8665 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
8668 + You should have received a copy of the GNU General Public License along with
8669 + this program; if not, write to the Free Software Foundation, Inc.,
8670 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
8672 + The full GNU General Public License is included in this distribution in
8673 + the file called "COPYING".
8675 + Contact Information:
8676 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
8677 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
8679 +*******************************************************************************/
8681 +#ifndef _DCB_82598_CONFIG_H_
8682 +#define _DCB_82598_CONFIG_H_
8684 +/* DCB register definitions */
8686 +#define IXGBE_DPMCS_MTSOS_SHIFT 16
8687 +#define IXGBE_DPMCS_TDPAC 0x00000001 /* 0 Round Robin,
8688 + * 1 DFP - Deficit Fixed Priority */
8689 +#define IXGBE_DPMCS_TRM 0x00000010 /* Transmit Recycle Mode */
8690 +#define IXGBE_DPMCS_ARBDIS 0x00000040 /* DCB arbiter disable */
8691 +#define IXGBE_DPMCS_TSOEF 0x00080000 /* TSO Expand Factor: 0=x4, 1=x2 */
8693 +#define IXGBE_RUPPBMR_MQA 0x80000000 /* Enable UP to queue mapping */
8695 +#define IXGBE_RT2CR_MCL_SHIFT 12 /* Offset to Max Credit Limit setting */
8696 +#define IXGBE_RT2CR_LSP 0x80000000 /* LSP enable bit */
8698 +#define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet
8699 + * buffers enable */
8700 +#define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores
8703 +#define IXGBE_TDTQ2TCCR_MCL_SHIFT 12
8704 +#define IXGBE_TDTQ2TCCR_BWG_SHIFT 9
8705 +#define IXGBE_TDTQ2TCCR_GSP 0x40000000
8706 +#define IXGBE_TDTQ2TCCR_LSP 0x80000000
8708 +#define IXGBE_TDPT2TCCR_MCL_SHIFT 12
8709 +#define IXGBE_TDPT2TCCR_BWG_SHIFT 9
8710 +#define IXGBE_TDPT2TCCR_GSP 0x40000000
8711 +#define IXGBE_TDPT2TCCR_LSP 0x80000000
8713 +#define IXGBE_PDPMCS_TPPAC 0x00000020 /* 0 Round Robin,
8714 + * 1 DFP - Deficit Fixed Priority */
8715 +#define IXGBE_PDPMCS_ARBDIS 0x00000040 /* Arbiter disable */
8716 +#define IXGBE_PDPMCS_TRM 0x00000100 /* Transmit Recycle Mode enable */
8718 +#define IXGBE_DTXCTL_ENDBUBD 0x00000004 /* Enable DBU buffer division */
8720 +#define IXGBE_TXPBSIZE_40KB 0x0000A000 /* 40KB Packet Buffer */
8721 +#define IXGBE_RXPBSIZE_48KB 0x0000C000 /* 48KB Packet Buffer */
8722 +#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */
8723 +#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */
8725 +/* DCB hardware-specific driver APIs */
8727 +/* DCB PFC functions */
8728 +s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw,
8729 + struct ixgbe_dcb_config *dcb_config);
8730 +s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *hw,
8731 + struct ixgbe_hw_stats *stats,
8734 +/* DCB traffic class stats */
8735 +s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw);
8736 +s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *hw,
8737 + struct ixgbe_hw_stats *stats,
8740 +/* DCB config arbiters */
8741 +s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
8742 + struct ixgbe_dcb_config *dcb_config);
8743 +s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
8744 + struct ixgbe_dcb_config *dcb_config);
8745 +s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
8746 + struct ixgbe_dcb_config *dcb_config);
8748 +/* DCB hw initialization */
8749 +s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw,
8750 + struct ixgbe_dcb_config *config);
8752 +#endif /* _DCB_82598_CONFIG_H_ */
8753 diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_dcb_82599.c linux-2.6.22-50/drivers/net/ixgbe/ixgbe_dcb_82599.c
8754 --- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_dcb_82599.c 1969-12-31 19:00:00.000000000 -0500
8755 +++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_dcb_82599.c 2010-08-25 17:56:26.000000000 -0400
8757 +/*******************************************************************************
8759 + Intel 10 Gigabit PCI Express Linux driver
8760 + Copyright(c) 1999 - 2010 Intel Corporation.
8762 + This program is free software; you can redistribute it and/or modify it
8763 + under the terms and conditions of the GNU General Public License,
8764 + version 2, as published by the Free Software Foundation.
8766 + This program is distributed in the hope it will be useful, but WITHOUT
8767 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8768 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
8771 + You should have received a copy of the GNU General Public License along with
8772 + this program; if not, write to the Free Software Foundation, Inc.,
8773 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
8775 + The full GNU General Public License is included in this distribution in
8776 + the file called "COPYING".
8778 + Contact Information:
8779 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
8780 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
8782 +*******************************************************************************/
8785 +#include "ixgbe_type.h"
8786 +#include "ixgbe_dcb.h"
8787 +#include "ixgbe_dcb_82599.h"
8790 + * ixgbe_dcb_get_tc_stats_82599 - Returns status for each traffic class
8791 + * @hw: pointer to hardware structure
8792 + * @stats: pointer to statistics structure
8793 + * @tc_count: Number of elements in bwg_array.
8795 + * This function returns the status data for each of the Traffic Classes in use.
8797 +s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *hw,
8798 + struct ixgbe_hw_stats *stats,
8803 + if (tc_count > MAX_TRAFFIC_CLASS)
8804 + return DCB_ERR_PARAM;
8805 + /* Statistics pertaining to each traffic class */
8806 + for (tc = 0; tc < tc_count; tc++) {
8807 + /* Transmitted Packets */
8808 + stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc));
8809 + /* Transmitted Bytes (read low first to prevent missed carry) */
8810 + stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(tc));
8811 + stats->qbtc[tc] +=
8812 + (((u64)(IXGBE_READ_REG(hw, IXGBE_QBTC_H(tc)))) << 32);
8813 + /* Received Packets */
8814 + stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc));
8815 + /* Received Bytes (read low first to prevent missed carry) */
8816 + stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(tc));
8817 + stats->qbrc[tc] +=
8818 + (((u64)(IXGBE_READ_REG(hw, IXGBE_QBRC_H(tc)))) << 32);
8820 + /* Received Dropped Packet */
8821 + stats->qprdc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRDC(tc));
8828 + * ixgbe_dcb_get_pfc_stats_82599 - Return CBFC status data
8829 + * @hw: pointer to hardware structure
8830 + * @stats: pointer to statistics structure
8831 + * @tc_count: Number of elements in bwg_array.
8833 + * This function returns the CBFC status data for each of the Traffic Classes.
8835 +s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *hw,
8836 + struct ixgbe_hw_stats *stats,
8841 + if (tc_count > MAX_TRAFFIC_CLASS)
8842 + return DCB_ERR_PARAM;
8843 + for (tc = 0; tc < tc_count; tc++) {
8844 + /* Priority XOFF Transmitted */
8845 + stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc));
8846 + /* Priority XOFF Received */
8847 + stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(tc));
8854 + * ixgbe_dcb_config_packet_buffers_82599 - Configure DCB packet buffers
8855 + * @hw: pointer to hardware structure
8856 + * @dcb_config: pointer to ixgbe_dcb_config structure
8858 + * Configure packet buffers for DCB mode.
8860 +s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw,
8861 + struct ixgbe_dcb_config *dcb_config)
8865 + u32 maxtxpktsize = IXGBE_TXPBSIZE_MAX;
8870 + num_tcs = dcb_config->num_tcs.pg_tcs;
8871 + /* Setup Rx packet buffer sizes */
8872 + if (dcb_config->rx_pba_cfg == pba_80_48) {
8874 + * This really means configure the first half of the TCs
8875 + * (Traffic Classes) to use 5/8 of the Rx packet buffer
8876 + * space. To determine the size of the buffer for each TC,
8877 + * multiply the size of the entire packet buffer by 5/8
8878 + * then divide by half of the number of TCs.
8880 + rxpktsize = (hw->mac.rx_pb_size * 5 / 8) / (num_tcs / 2);
8881 + for (i = 0; i < (num_tcs / 2); i++)
8882 + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i),
8883 + rxpktsize << IXGBE_RXPBSIZE_SHIFT);
8886 + * The second half of the TCs use the remaining 3/8
8887 + * of the Rx packet buffer space.
8889 + rxpktsize = (hw->mac.rx_pb_size * 3 / 8) / (num_tcs / 2);
8890 + for (; i < num_tcs; i++)
8891 + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i),
8892 + rxpktsize << IXGBE_RXPBSIZE_SHIFT);
8894 + /* Divide the Rx packet buffer evenly among the TCs */
8895 + rxpktsize = hw->mac.rx_pb_size / num_tcs;
8896 + for (i = 0; i < num_tcs; i++)
8897 + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i),
8898 + rxpktsize << IXGBE_RXPBSIZE_SHIFT);
8900 + /* Setup remaining TCs, if any, to zero buffer size */
8901 + for (; i < MAX_TRAFFIC_CLASS; i++)
8902 + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
8904 + /* Setup Tx packet buffer and threshold equally for all TCs */
8905 + txpktsize = maxtxpktsize / num_tcs;
8906 + for (i = 0; i < num_tcs; i++) {
8907 + IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
8908 + IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i),
8909 + ((txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX));
8912 + /* Setup remaining TCs, if any, to zero buffer size */
8913 + for (; i < MAX_TRAFFIC_CLASS; i++) {
8914 + IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
8915 + IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
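A quick check of the 5/8-3/8 arithmetic above, assuming a hypothetical 512KB
Rx packet buffer and 8 TCs (rx_pb_size is chip-specific and not shown in
this hunk):

#include <stdio.h>

int main(void)
{
	int rx_pb_kb = 512, num_tcs = 8; /* hypothetical sizes */
	int big = (rx_pb_kb * 5 / 8) / (num_tcs / 2);
	int small = (rx_pb_kb * 3 / 8) / (num_tcs / 2);

	/* 80KB / 48KB: the same layout as the 82598 pba_80_48 case */
	printf("first half: %dKB, second half: %dKB\n", big, small);
	return 0;
}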
8922 + * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter
8923 + * @hw: pointer to hardware structure
8924 + * @dcb_config: pointer to ixgbe_dcb_config structure
8926 + * Configure Rx Packet Arbiter and credits for each traffic class.
8928 +s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
8929 + struct ixgbe_dcb_config *dcb_config)
8931 + struct tc_bw_alloc *p;
8933 + u32 credit_refill = 0;
8934 + u32 credit_max = 0;
8939 + * Disable the arbiter before changing parameters
8940 + * (always enable recycle mode; WSP)
8942 + reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
8943 + IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
8946 + * map all UPs to TCs. up_to_tc_bitmap for each TC has the corresponding
8947 + * bits set for the UPs that need to be mapped to that TC.
8948 + * e.g. if priorities 6 and 7 are to be mapped to a TC then the
8949 + * up_to_tc_bitmap value for that TC will be 11000000 in binary.
8952 + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
8953 + p = &dcb_config->tc_config[i].path[DCB_RX_CONFIG];
8954 + for (j = 0; j < MAX_USER_PRIORITY; j++) {
8955 + if (p->up_to_tc_bitmap & (1 << j))
8956 + reg |= (i << (j * IXGBE_RTRUP2TC_UP_SHIFT));
8959 + IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
8961 + /* Configure traffic class credits and priority */
8962 + for (i = 0; i < dcb_config->num_tcs.pg_tcs; i++) {
8963 + p = &dcb_config->tc_config[i].path[DCB_RX_CONFIG];
8965 + credit_refill = p->data_credits_refill;
8966 + credit_max = p->data_credits_max;
8967 + reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT);
8969 + reg |= (u32)(p->bwg_id) << IXGBE_RTRPT4C_BWG_SHIFT;
8971 + if (p->prio_type == prio_link)
8972 + reg |= IXGBE_RTRPT4C_LSP;
8974 + IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg);
8978 + * Configure Rx packet plane (recycle mode; WSP) and
8981 + reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
8982 + IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
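The mapping loop above packs one 3-bit TC index per user priority into
RTRUP2TC. With priorities 6 and 7 assigned to TC 1 (up_to_tc_bitmap = 0xC0):

#include <stdio.h>

#define UP_SHIFT 3 /* matches IXGBE_RTRUP2TC_UP_SHIFT */

int main(void)
{
	unsigned int reg = 0, tc = 1, bitmap = 0xC0, up;

	for (up = 0; up < 8; up++)
		if (bitmap & (1u << up))
			reg |= tc << (up * UP_SHIFT);
	printf("RTRUP2TC = 0x%08X\n", reg); /* 0x00240000: bits 18 and 21 */
	return 0;
}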
8988 + * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter
8989 + * @hw: pointer to hardware structure
8990 + * @dcb_config: pointer to ixgbe_dcb_config structure
8992 + * Configure Tx Descriptor Arbiter and credits for each traffic class.
8994 +s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
8995 + struct ixgbe_dcb_config *dcb_config)
8997 + struct tc_bw_alloc *p;
8998 + u32 reg, max_credits;
9001 + /* Clear the per-Tx queue credits; we use per-TC instead */
9002 + for (i = 0; i < 128; i++) {
9003 + IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
9004 + IXGBE_WRITE_REG(hw, IXGBE_RTTDT1C, 0);
9007 + /* Configure traffic class credits and priority */
9008 + for (i = 0; i < dcb_config->num_tcs.pg_tcs; i++) {
9009 + p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG];
9010 + max_credits = dcb_config->tc_config[i].desc_credits_max;
9011 + reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT;
9012 + reg |= p->data_credits_refill;
9013 + reg |= (u32)(p->bwg_id) << IXGBE_RTTDT2C_BWG_SHIFT;
9015 + if (p->prio_type == prio_group)
9016 + reg |= IXGBE_RTTDT2C_GSP;
9018 + if (p->prio_type == prio_link)
9019 + reg |= IXGBE_RTTDT2C_LSP;
9021 + IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg);
9025 + * Configure Tx descriptor plane (recycle mode; WSP) and
9028 + reg = IXGBE_RTTDCS_TDPAC | IXGBE_RTTDCS_TDRM;
9029 + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
9035 + * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter
9036 + * @hw: pointer to hardware structure
9037 + * @dcb_config: pointer to ixgbe_dcb_config structure
9039 + * Configure Tx Packet Arbiter and credits for each traffic class.
9041 +s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
9042 + struct ixgbe_dcb_config *dcb_config)
9044 + struct tc_bw_alloc *p;
9049 + * Disable the arbiter before changing parameters
9050 + * (always enable recycle mode; SP; arb delay)
9052 + reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
9053 + (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT) |
9054 + IXGBE_RTTPCS_ARBDIS;
9055 + IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);
9058 + * map all UPs to TCs. up_to_tc_bitmap for each TC has the corresponding
9059 + * bits set for the UPs that need to be mapped to that TC.
9060 + * e.g. if priorities 6 and 7 are to be mapped to a TC then the
9061 + * up_to_tc_bitmap value for that TC will be 11000000 in binary.
9064 + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
9065 + p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG];
9066 + for (j = 0; j < MAX_USER_PRIORITY; j++)
9067 + if (p->up_to_tc_bitmap & (1 << j))
9068 + reg |= (i << (j * IXGBE_RTTUP2TC_UP_SHIFT));
9070 + IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg);
9072 + /* Configure traffic class credits and priority */
9073 + for (i = 0; i < dcb_config->num_tcs.pg_tcs; i++) {
9074 + p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG];
9075 + reg = p->data_credits_refill;
9076 + reg |= (u32)(p->data_credits_max) << IXGBE_RTTPT2C_MCL_SHIFT;
9077 + reg |= (u32)(p->bwg_id) << IXGBE_RTTPT2C_BWG_SHIFT;
9079 + if (p->prio_type == prio_group)
9080 + reg |= IXGBE_RTTPT2C_GSP;
9082 + if (p->prio_type == prio_link)
9083 + reg |= IXGBE_RTTPT2C_LSP;
9085 + IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg);
9089 + * Configure Tx packet plane (recycle mode; SP; arb delay) and
9092 + reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
9093 + (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT);
9094 + IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);
9100 + * ixgbe_dcb_config_pfc_82599 - Configure priority flow control
9101 + * @hw: pointer to hardware structure
9102 + * @dcb_config: pointer to ixgbe_dcb_config structure
9104 + * Configure Priority Flow Control (PFC) for each traffic class.
9106 +s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw,
9107 + struct ixgbe_dcb_config *dcb_config)
9109 + u32 i, reg, rx_pba_size;
9111 + /* If PFC is disabled globally then fall back to LFC. */
9112 + if (!dcb_config->pfc_mode_enable) {
9113 + for (i = 0; i < dcb_config->num_tcs.pg_tcs; i++)
9114 + hw->mac.ops.fc_enable(hw, i);
9118 + /* Configure PFC Tx thresholds per TC */
9119 + for (i = 0; i < dcb_config->num_tcs.pg_tcs; i++) {
9120 + rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
9122 + reg = ((rx_pba_size >> 5) & 0xFFE0);
9123 + if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full ||
9124 + dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx)
9125 + reg |= IXGBE_FCRTL_XONE;
9126 + IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg);
9128 + reg = ((rx_pba_size >> 2) & 0xFFE0);
9129 + if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full ||
9130 + dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx)
9131 + reg |= IXGBE_FCRTH_FCEN;
9132 + IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
9135 + /* Configure pause time (2 TCs per register) */
9136 + reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
9137 + for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
9138 + IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
9140 + /* Configure flow control refresh threshold value */
9141 + IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
9143 + /* Enable Transmit PFC */
9144 + reg = IXGBE_FCCFG_TFCE_PRIORITY;
9145 + IXGBE_WRITE_REG(hw, IXGBE_FCCFG, reg);
9148 + * Enable Receive PFC
9149 + * We will always honor XOFF frames we receive when
9150 + * we are in PFC mode.
9152 + reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
9153 + reg &= ~IXGBE_MFLCN_RFCE;
9154 + reg |= IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_DPF;
9155 + IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);
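The threshold math above derives XON/XOFF watermarks from the per-TC Rx
buffer size: low water at 1/32 of the buffer, high water at 1/4, both
rounded down to 32-byte granularity by the 0xFFE0 mask. For a 64KB buffer:

#include <stdio.h>

int main(void)
{
	unsigned int rx_pba_size = 0x00010000; /* 64KB, hypothetical */
	unsigned int xon = (rx_pba_size >> 5) & 0xFFE0;  /* 1/32 of buffer */
	unsigned int xoff = (rx_pba_size >> 2) & 0xFFE0; /* 1/4 of buffer */

	/* XON low water 0x800 (2KB), XOFF high water 0x4000 (16KB) */
	printf("XON 0x%X, XOFF 0x%X\n", xon, xoff);
	return 0;
}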
9161 + * ixgbe_dcb_config_tc_stats_82599 - Config traffic class statistics
9162 + * @hw: pointer to hardware structure
9164 + * Configure queue statistics registers; all queues belonging to the same
9165 + * traffic class use a single set of queue statistics counters.
9167 +s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw)
9173 + * Receive Queues stats setting
9174 + * 32 RQSMR registers, each configuring 4 queues.
9175 + * Set all 16 queues of each TC to the same stat
9176 + * with TC 'n' going to stat 'n'.
9178 + for (i = 0; i < 32; i++) {
9179 + reg = 0x01010101 * (i / 4);
9180 + IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
9183 + * Transmit Queues stats setting
9184 + * 32 TQSM registers, each controlling 4 queues.
9185 + * Set all queues of each TC to the same stat
9186 + * with TC 'n' going to stat 'n'.
9187 + * Tx queues are allocated non-uniformly to TCs:
9188 + * 32, 32, 16, 16, 8, 8, 8, 8.
9190 + for (i = 0; i < 32; i++) {
9207 + IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg);
9214 + * ixgbe_dcb_config_82599 - Configure general DCB parameters
9215 + * @hw: pointer to hardware structure
9216 + * @dcb_config: pointer to ixgbe_dcb_config structure
9218 + * Configure general DCB parameters.
9220 +s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw,
9221 + struct ixgbe_dcb_config *dcb_config)
9226 + /* Disable the Tx desc arbiter so that MTQC can be changed */
9227 + reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
9228 + reg |= IXGBE_RTTDCS_ARBDIS;
9229 + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
9231 + /* Enable DCB for Rx with 8 TCs */
9232 + reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
9233 + switch (reg & IXGBE_MRQC_MRQE_MASK) {
9235 + case IXGBE_MRQC_RT4TCEN:
9236 + /* RSS disabled cases */
9237 + reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | IXGBE_MRQC_RT8TCEN;
9239 + case IXGBE_MRQC_RSSEN:
9240 + case IXGBE_MRQC_RTRSS4TCEN:
9241 + /* RSS enabled cases */
9242 + reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | IXGBE_MRQC_RTRSS8TCEN;
9245 + /* Unsupported value, assume stale data, overwrite no RSS */
9246 + reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | IXGBE_MRQC_RT8TCEN;
9248 + if (dcb_config->num_tcs.pg_tcs == 4) {
9249 + /* Enable DCB for Rx with 4 TCs and VT Mode*/
9250 + reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | IXGBE_MRQC_VMDQRT4TCEN;
9252 + IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);
9254 + /* Enable DCB for Tx with 8 TCs */
9255 + if (dcb_config->num_tcs.pg_tcs == 8)
9256 + reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
9257 + else /* Enable DCB for Tx with 4 TCs and VT Mode*/
9258 + reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_VT_ENA
9259 + | IXGBE_MTQC_4TC_4TQ;
9260 + IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
9262 + /* Disable drop for all queues */
9263 + for (q = 0; q < 128; q++) {
9264 + IXGBE_WRITE_REG(hw, IXGBE_QDE, q << IXGBE_QDE_IDX_SHIFT);
9267 + /* Enable the Tx desc arbiter */
9268 + reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
9269 + reg &= ~IXGBE_RTTDCS_ARBDIS;
9270 + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
9276 + * ixgbe_dcb_hw_config_82599 - Configure and enable DCB
9277 + * @hw: pointer to hardware structure
9278 + * @dcb_config: pointer to ixgbe_dcb_config structure
9280 + * Configure dcb settings and enable dcb mode.
9282 +s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw,
9283 + struct ixgbe_dcb_config *dcb_config)
9286 + ixgbe_dcb_config_packet_buffers_82599(hw, dcb_config);
9287 + ixgbe_dcb_config_82599(hw, dcb_config);
9288 + ixgbe_dcb_config_rx_arbiter_82599(hw, dcb_config);
9289 + ixgbe_dcb_config_tx_desc_arbiter_82599(hw, dcb_config);
9290 + ixgbe_dcb_config_tx_data_arbiter_82599(hw, dcb_config);
9291 + ixgbe_dcb_config_pfc_82599(hw, dcb_config);
9292 + ixgbe_dcb_config_tc_stats_82599(hw);
9298 diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_dcb_82599.h linux-2.6.22-50/drivers/net/ixgbe/ixgbe_dcb_82599.h
9299 --- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_dcb_82599.h 1969-12-31 19:00:00.000000000 -0500
9300 +++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_dcb_82599.h 2010-08-25 17:56:26.000000000 -0400
9302 +/*******************************************************************************
9304 + Intel 10 Gigabit PCI Express Linux driver
9305 + Copyright(c) 1999 - 2010 Intel Corporation.
9307 + This program is free software; you can redistribute it and/or modify it
9308 + under the terms and conditions of the GNU General Public License,
9309 + version 2, as published by the Free Software Foundation.
9311 + This program is distributed in the hope it will be useful, but WITHOUT
9312 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
9313 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
9316 + You should have received a copy of the GNU General Public License along with
9317 + this program; if not, write to the Free Software Foundation, Inc.,
9318 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
9320 + The full GNU General Public License is included in this distribution in
9321 + the file called "COPYING".
9323 + Contact Information:
9324 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
9325 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
9327 +*******************************************************************************/
9329 +#ifndef _DCB_82599_CONFIG_H_
9330 +#define _DCB_82599_CONFIG_H_
9332 +/* DCB register definitions */
9333 +#define IXGBE_RTTDCS_TDPAC 0x00000001 /* 0 Round Robin,
9334 + * 1 WSP - Weighted Strict Priority
9335 + */
9336 +#define IXGBE_RTTDCS_VMPAC 0x00000002 /* 0 Round Robin,
9337 + * 1 WRR - Weighted Round Robin
9338 + */
9339 +#define IXGBE_RTTDCS_TDRM 0x00000010 /* Transmit Recycle Mode */
9340 +#define IXGBE_RTTDCS_BDPM 0x00400000 /* Bypass Data Pipe - must clear! */
9341 +#define IXGBE_RTTDCS_BPBFSM 0x00800000 /* Bypass PB Free Space - must
9342 + * clear! */
9344 +#define IXGBE_RTTDCS_SPEED_CHG 0x80000000 /* Link speed change */
9346 +/* Receive UP2TC mapping */
9347 +#define IXGBE_RTRUP2TC_UP_SHIFT 3
9348 +/* Transmit UP2TC mapping */
9349 +#define IXGBE_RTTUP2TC_UP_SHIFT 3
9351 +#define IXGBE_RTRPT4C_MCL_SHIFT 12 /* Offset to Max Credit Limit setting */
9352 +#define IXGBE_RTRPT4C_BWG_SHIFT 9 /* Offset to BWG index */
9353 +#define IXGBE_RTRPT4C_GSP 0x40000000 /* GSP enable bit */
9354 +#define IXGBE_RTRPT4C_LSP 0x80000000 /* LSP enable bit */
9356 +#define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet
9357 + * buffers enable */
9359 +#define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores
9360 + * (RSS) enable */
9363 +/* RTRPCS Bit Masks */
9364 +#define IXGBE_RTRPCS_RRM 0x00000002 /* Receive Recycle Mode enable */
9365 +/* Receive Arbitration Control: 0 Round Robin, 1 DFP */
9366 +#define IXGBE_RTRPCS_RAC 0x00000004
9367 +#define IXGBE_RTRPCS_ARBDIS 0x00000040 /* Arbitration disable bit */
9369 +/* RTTDT2C Bit Masks */
9370 +#define IXGBE_RTTDT2C_MCL_SHIFT 12
9371 +#define IXGBE_RTTDT2C_BWG_SHIFT 9
9372 +#define IXGBE_RTTDT2C_GSP 0x40000000
9373 +#define IXGBE_RTTDT2C_LSP 0x80000000
9375 +#define IXGBE_RTTPT2C_MCL_SHIFT 12
9376 +#define IXGBE_RTTPT2C_BWG_SHIFT 9
9377 +#define IXGBE_RTTPT2C_GSP 0x40000000
9378 +#define IXGBE_RTTPT2C_LSP 0x80000000
9380 +/* RTTPCS Bit Masks */
9381 +#define IXGBE_RTTPCS_TPPAC 0x00000020 /* 0 Round Robin,
9382 + * 1 SP - Strict Priority
9383 + */
9384 +#define IXGBE_RTTPCS_ARBDIS 0x00000040 /* Arbiter disable */
9385 +#define IXGBE_RTTPCS_TPRM 0x00000100 /* Transmit Recycle Mode enable */
9386 +#define IXGBE_RTTPCS_ARBD_SHIFT 22
9387 +#define IXGBE_RTTPCS_ARBD_DCB 0x4 /* Arbitration delay in DCB mode */
9389 +#define IXGBE_TXPBSIZE_20KB 0x00005000 /* 20KB Packet Buffer */
9390 +#define IXGBE_TXPBSIZE_40KB 0x0000A000 /* 40KB Packet Buffer */
9391 +#define IXGBE_RXPBSIZE_48KB 0x0000C000 /* 48KB Packet Buffer */
9392 +#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */
9393 +#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */
9394 +#define IXGBE_RXPBSIZE_128KB 0x00020000 /* 128KB Packet Buffer */
9395 +#define IXGBE_RXPBSIZE_MAX 0x00080000 /* 512KB Packet Buffer */
9396 +#define IXGBE_TXPBSIZE_MAX 0x00028000 /* 160KB Packet Buffer */
9398 +#define IXGBE_TXPBTHRESH_DCB 0xA /* THRESH value for DCB mode */
9399 +#define IXGBE_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */
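The packet-buffer size constants above encode plain byte counts (0x00014000 is 80 * 1024, 0x0000C000 is 48 * 1024), which is what makes the 80KB/48KB split used in DCB mode line up. A compile-time sanity check, illustrative only (the typedef names are hypothetical):

/* illustrative compile-time checks: the PBSIZE values are byte counts */
typedef char pbsize_check_80kb[(0x00014000 == 80 * 1024) ? 1 : -1];
typedef char pbsize_check_48kb[(0x0000C000 == 48 * 1024) ? 1 : -1];
typedef char pbsize_check_max[(0x00080000 == 512 * 1024) ? 1 : -1];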
9402 +/* DCB hardware-specific driver APIs */
9404 +/* DCB PFC functions */
9405 +s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw,
9406 + struct ixgbe_dcb_config *dcb_config);
9407 +s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *hw,
9408 + struct ixgbe_hw_stats *stats,
9409 + u8 tc_count);
9411 +/* DCB traffic class stats */
9412 +s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw);
9413 +s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *hw,
9414 + struct ixgbe_hw_stats *stats,
9415 + u8 tc_count);
9417 +/* DCB config arbiters */
9418 +s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
9419 + struct ixgbe_dcb_config *dcb_config);
9420 +s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
9421 + struct ixgbe_dcb_config *dcb_config);
9422 +s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
9423 + struct ixgbe_dcb_config *dcb_config);
9425 +/* DCB hw initialization */
9426 +s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw,
9427 + struct ixgbe_dcb_config *config);
9429 +#endif /* _DCB_82599_CONFIG_H_ */
9430 diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_dcb.c linux-2.6.22-50/drivers/net/ixgbe/ixgbe_dcb.c
9431 --- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_dcb.c 1969-12-31 19:00:00.000000000 -0500
9432 +++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_dcb.c 2010-08-25 17:56:26.000000000 -0400
9434 +/*******************************************************************************
9436 + Intel 10 Gigabit PCI Express Linux driver
9437 + Copyright(c) 1999 - 2010 Intel Corporation.
9439 + This program is free software; you can redistribute it and/or modify it
9440 + under the terms and conditions of the GNU General Public License,
9441 + version 2, as published by the Free Software Foundation.
9443 + This program is distributed in the hope it will be useful, but WITHOUT
9444 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
9445 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
9448 + You should have received a copy of the GNU General Public License along with
9449 + this program; if not, write to the Free Software Foundation, Inc.,
9450 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
9452 + The full GNU General Public License is included in this distribution in
9453 + the file called "COPYING".
9455 + Contact Information:
9456 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
9457 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
9459 +*******************************************************************************/
9462 +#include "ixgbe_type.h"
9463 +#include "ixgbe_dcb.h"
9464 +#include "ixgbe_dcb_82598.h"
9465 +#include "ixgbe_dcb_82599.h"
9467 +/**
9468 + * ixgbe_dcb_check_config - Check DCB rules for a valid configuration
9469 + * @dcb_config: Pointer to DCB config structure
9471 + * This function checks DCB rules for DCB settings.
9472 + * The following rules are checked:
9473 + * 1. The sum of bandwidth percentages of all Bandwidth Groups must total 100%.
9474 + * 2. The sum of bandwidth percentages of all Traffic Classes within a Bandwidth
9475 + * Group must total 100.
9476 + * 3. A Traffic Class should not be set to both Link Strict Priority
9477 + * and Group Strict Priority.
9478 + * 4. Link strict Bandwidth Groups can only have link strict traffic classes
9479 + * with zero bandwidth.
9480 + */
9481 +s32 ixgbe_dcb_check_config(struct ixgbe_dcb_config *dcb_config)
9482 +{
9483 + struct tc_bw_alloc *p;
9484 + s32 ret_val = 0;
9485 + u8 i, j, bw = 0, bw_id;
9486 + u8 bw_sum[2][MAX_BW_GROUP];
9487 + bool link_strict[2][MAX_BW_GROUP];
9489 + memset(bw_sum, 0, sizeof(bw_sum));
9490 + memset(link_strict, 0, sizeof(link_strict));
9492 + /* First Tx, then Rx */
9493 + for (i = 0; i < 2; i++) {
9494 + /* Check each traffic class for rule violation */
9495 + for (j = 0; j < dcb_config->num_tcs.pg_tcs; j++) {
9496 + p = &dcb_config->tc_config[j].path[i];
9498 + bw = p->bwg_percent;
9499 + bw_id = p->bwg_id;
9501 + if (bw_id >= MAX_BW_GROUP) {
9502 + ret_val = DCB_ERR_CONFIG;
9503 + goto err_config;
9504 + }
9505 + if (p->prio_type == prio_link) {
9506 + link_strict[i][bw_id] = true;
9507 + /* Link strict should have zero bandwidth */
9508 + if (bw) {
9509 + ret_val = DCB_ERR_LS_BW_NONZERO;
9510 + goto err_config;
9511 + }
9512 + } else if (!bw) {
9513 + /*
9514 + * Traffic classes without link strict
9515 + * should have non-zero bandwidth.
9516 + */
9517 + ret_val = DCB_ERR_TC_BW_ZERO;
9518 + goto err_config;
9519 + }
9520 + bw_sum[i][bw_id] += bw;
9521 + }
9523 + bw = 0;
9525 + /* Check each bandwidth group for rule violation */
9526 + for (j = 0; j < MAX_BW_GROUP; j++) {
9527 + bw += dcb_config->bw_percentage[i][j];
9528 + /*
9529 + * Sum of bandwidth percentages of all traffic classes
9530 + * within a Bandwidth Group must total 100 except for
9531 + * link strict group (zero bandwidth).
9532 + */
9533 + if (link_strict[i][j]) {
9534 + if (bw_sum[i][j]) {
9535 + /*
9536 + * Link strict group should have zero
9537 + * bandwidth.
9538 + */
9539 + ret_val = DCB_ERR_LS_BWG_NONZERO;
9540 + goto err_config;
9541 + }
9542 + } else if (bw_sum[i][j] != BW_PERCENT &&
9543 + bw_sum[i][j] != 0) {
9544 + ret_val = DCB_ERR_TC_BW;
9545 + goto err_config;
9546 + }
9547 + }
9549 + if (bw != BW_PERCENT) {
9550 + ret_val = DCB_ERR_BW_GROUP;
9551 + goto err_config;
9552 + }
9553 + }
9555 + return DCB_SUCCESS;
9557 +err_config:
9558 + hw_dbg(hw, "DCB error code %d while checking %s settings.\n",
9559 + ret_val, (i == DCB_TX_CONFIG) ? "Tx" : "Rx");
9561 + return ret_val;
9562 +}
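A configuration that satisfies all four rules, as an illustrative sketch (the helper name is hypothetical; it assumes cfg was zeroed first, so every prio_type defaults to prio_none and rules 3 and 4 hold trivially):

/* illustrative only: two BWGs split the link 60/40 (rule 1), and the
 * TC percentages within each BWG total 100 (rule 2) */
static void dcb_example_fill(struct ixgbe_dcb_config *cfg)
{
	int dir;

	cfg->num_tcs.pg_tcs = 3;
	for (dir = DCB_TX_CONFIG; dir <= DCB_RX_CONFIG; dir++) {
		cfg->bw_percentage[dir][0] = 60;	/* 60 + 40 = 100 */
		cfg->bw_percentage[dir][1] = 40;

		cfg->tc_config[0].path[dir].bwg_id = 0;
		cfg->tc_config[0].path[dir].bwg_percent = 70;	/* 70 + 30 */
		cfg->tc_config[1].path[dir].bwg_id = 0;
		cfg->tc_config[1].path[dir].bwg_percent = 30;
		cfg->tc_config[2].path[dir].bwg_id = 1;
		cfg->tc_config[2].path[dir].bwg_percent = 100;
	}
}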
9564 +/**
9565 + * ixgbe_dcb_calculate_tc_credits - Calculates traffic class credits
9566 + * @dcb_config: Struct containing DCB settings.
9567 + * @direction: Configuring either Tx or Rx.
9569 + * This function calculates the credits allocated to each traffic class.
9570 + * It should be called only after the rules are checked by
9571 + * ixgbe_dcb_check_config().
9572 + */
9573 +s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw,
9574 + struct ixgbe_dcb_config *dcb_config,
9575 + u32 max_frame_size,
9576 + u8 direction)
9577 +{
9578 + struct tc_bw_alloc *p;
9579 + s32 ret_val = 0;
9580 + /* Initialization values default for Tx settings */
9581 + u32 credit_refill = 0;
9582 + u32 credit_max = 0;
9583 + u32 minimal_credit_max = 0;
9584 + u16 link_percentage = 0;
9585 + u8 bw_percent = 0;
9586 + u8 i;
9588 + if (dcb_config == NULL) {
9589 + ret_val = DCB_ERR_CONFIG;
9590 + goto out;
9591 + }
9593 + /* Find out the link percentage for each TC first */
9594 + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
9595 + p = &dcb_config->tc_config[i].path[direction];
9596 + bw_percent = dcb_config->bw_percentage[direction][p->bwg_id];
9598 + link_percentage = p->bwg_percent;
9599 + /* Must be careful of integer division for very small numbers */
9600 + link_percentage = (link_percentage * bw_percent) / 100;
9601 + if (p->bwg_percent > 0 && link_percentage == 0)
9602 + link_percentage = 1;
9604 + /* Save link_percentage for reference */
9605 + p->link_percent = (u8)link_percentage;
9607 + /* Calculate credit refill and save it */
9608 + credit_refill = link_percentage * MINIMUM_CREDIT_REFILL;
9609 + p->data_credits_refill = (u16)credit_refill;
9611 + /* Calculate maximum credit for the TC */
9612 + credit_max = (link_percentage * MAX_CREDIT) / 100;
9614 + /*
9615 + * Adjustment based on rule checking, if the percentage
9616 + * of a TC is too small, the maximum credit may not be
9617 + * enough to send out a jumbo frame in data plane arbitration.
9618 + */
9621 + minimal_credit_max = (max_frame_size +
9622 + (DCB_CREDIT_QUANTUM - 1)) /
9623 + DCB_CREDIT_QUANTUM;
9625 + if (credit_max < minimal_credit_max)
9626 + credit_max = minimal_credit_max;
9629 + if (direction == DCB_TX_CONFIG) {
9630 + /*
9631 + * Adjustment based on rule checking, if the
9632 + * percentage of a TC is too small, the maximum
9633 + * credit may not be enough to send out a TSO
9634 + * packet in descriptor plane arbitration.
9635 + */
9636 + if (credit_max && (credit_max < MINIMUM_CREDIT_FOR_TSO)
9637 + && (hw->mac.type == ixgbe_mac_82598EB))
9638 + credit_max = MINIMUM_CREDIT_FOR_TSO;
9640 + dcb_config->tc_config[i].desc_credits_max =
9641 + (u16)credit_max;
9642 + }
9644 + p->data_credits_max = (u16)credit_max;
9645 + }
9647 +out:
9648 + return ret_val;
9649 +}
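To make the arithmetic above concrete, a worked example under the defaults from ixgbe_dcb.h (MINIMUM_CREDIT_REFILL = 2, MAX_CREDIT = 400, DCB_CREDIT_QUANTUM = 64); the numbers below are illustrative, not from the patch:

/*
 * Worked example: a TC with bwg_percent = 25 inside a BWG carrying
 * bw_percent = 40 of the link:
 *   link_percentage     = (25 * 40) / 100             = 10
 *   data_credits_refill = 10 * MINIMUM_CREDIT_REFILL  = 20  (20 * 64B = 1280B)
 *   credit_max          = (10 * MAX_CREDIT) / 100     = 40
 * With 9216-byte jumbo frames the quantum floor dominates:
 *   minimal_credit_max  = (9216 + 63) / 64            = 144
 * so credit_max is raised to 144 (144 * 64B = 9216B, one full jumbo frame).
 */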
9651 +/**
9652 + * ixgbe_dcb_get_tc_stats - Returns status of each traffic class
9653 + * @hw: pointer to hardware structure
9654 + * @stats: pointer to statistics structure
9655 + * @tc_count: Number of traffic classes to report
9657 + * This function returns the status data for each of the Traffic Classes in use.
9658 + */
9659 +s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats,
9660 + u8 tc_count)
9661 +{
9662 + s32 ret = DCB_NOT_IMPLEMENTED;
9663 + if (hw->mac.type == ixgbe_mac_82598EB)
9664 + ret = ixgbe_dcb_get_tc_stats_82598(hw, stats, tc_count);
9665 + else if (hw->mac.type >= ixgbe_mac_82599EB)
9666 + ret = ixgbe_dcb_get_tc_stats_82599(hw, stats, tc_count);
9667 + return ret;
9668 +}
9670 +/**
9671 + * ixgbe_dcb_get_pfc_stats - Returns CBFC status of each traffic class
9672 + * @hw: pointer to hardware structure
9673 + * @stats: pointer to statistics structure
9674 + * @tc_count: Number of traffic classes to report
9676 + * This function returns the CBFC status data for each of the Traffic Classes.
9677 + */
9678 +s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats,
9679 + u8 tc_count)
9680 +{
9681 + s32 ret = DCB_NOT_IMPLEMENTED;
9682 + if (hw->mac.type == ixgbe_mac_82598EB)
9683 + ret = ixgbe_dcb_get_pfc_stats_82598(hw, stats, tc_count);
9684 + else if (hw->mac.type >= ixgbe_mac_82599EB)
9685 + ret = ixgbe_dcb_get_pfc_stats_82599(hw, stats, tc_count);
9686 + return ret;
9687 +}
9689 +/**
9690 + * ixgbe_dcb_config_rx_arbiter - Config Rx arbiter
9691 + * @hw: pointer to hardware structure
9692 + * @dcb_config: pointer to ixgbe_dcb_config structure
9694 + * Configure Rx Data Arbiter and credits for each traffic class.
9695 + */
9696 +s32 ixgbe_dcb_config_rx_arbiter(struct ixgbe_hw *hw,
9697 + struct ixgbe_dcb_config *dcb_config)
9698 +{
9699 + s32 ret = DCB_NOT_IMPLEMENTED;
9700 + if (hw->mac.type == ixgbe_mac_82598EB)
9701 + ret = ixgbe_dcb_config_rx_arbiter_82598(hw, dcb_config);
9702 + else if (hw->mac.type >= ixgbe_mac_82599EB)
9703 + ret = ixgbe_dcb_config_rx_arbiter_82599(hw, dcb_config);
9704 + return ret;
9705 +}
9707 +/**
9708 + * ixgbe_dcb_config_tx_desc_arbiter - Config Tx Desc arbiter
9709 + * @hw: pointer to hardware structure
9710 + * @dcb_config: pointer to ixgbe_dcb_config structure
9712 + * Configure Tx Descriptor Arbiter and credits for each traffic class.
9713 + */
9714 +s32 ixgbe_dcb_config_tx_desc_arbiter(struct ixgbe_hw *hw,
9715 + struct ixgbe_dcb_config *dcb_config)
9716 +{
9717 + s32 ret = DCB_NOT_IMPLEMENTED;
9718 + if (hw->mac.type == ixgbe_mac_82598EB)
9719 + ret = ixgbe_dcb_config_tx_desc_arbiter_82598(hw, dcb_config);
9720 + else if (hw->mac.type >= ixgbe_mac_82599EB)
9721 + ret = ixgbe_dcb_config_tx_desc_arbiter_82599(hw, dcb_config);
9722 + return ret;
9723 +}
9725 +/**
9726 + * ixgbe_dcb_config_tx_data_arbiter - Config Tx data arbiter
9727 + * @hw: pointer to hardware structure
9728 + * @dcb_config: pointer to ixgbe_dcb_config structure
9730 + * Configure Tx Data Arbiter and credits for each traffic class.
9731 + */
9732 +s32 ixgbe_dcb_config_tx_data_arbiter(struct ixgbe_hw *hw,
9733 + struct ixgbe_dcb_config *dcb_config)
9734 +{
9735 + s32 ret = DCB_NOT_IMPLEMENTED;
9736 + if (hw->mac.type == ixgbe_mac_82598EB)
9737 + ret = ixgbe_dcb_config_tx_data_arbiter_82598(hw, dcb_config);
9738 + else if (hw->mac.type >= ixgbe_mac_82599EB)
9739 + ret = ixgbe_dcb_config_tx_data_arbiter_82599(hw, dcb_config);
9740 + return ret;
9741 +}
9743 +/**
9744 + * ixgbe_dcb_config_pfc - Config priority flow control
9745 + * @hw: pointer to hardware structure
9746 + * @dcb_config: pointer to ixgbe_dcb_config structure
9748 + * Configure Priority Flow Control for each traffic class.
9749 + */
9750 +s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *hw,
9751 + struct ixgbe_dcb_config *dcb_config)
9752 +{
9753 + s32 ret = DCB_NOT_IMPLEMENTED;
9754 + if (hw->mac.type == ixgbe_mac_82598EB)
9755 + ret = ixgbe_dcb_config_pfc_82598(hw, dcb_config);
9756 + else if (hw->mac.type >= ixgbe_mac_82599EB)
9757 + ret = ixgbe_dcb_config_pfc_82599(hw, dcb_config);
9758 + return ret;
9759 +}
9761 +/**
9762 + * ixgbe_dcb_config_tc_stats - Config traffic class statistics
9763 + * @hw: pointer to hardware structure
9765 + * Configure queue statistics registers; all queues belonging to the same
9766 + * traffic class use a single set of queue statistics counters.
9767 + */
9768 +s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *hw)
9769 +{
9770 + s32 ret = DCB_NOT_IMPLEMENTED;
9771 + if (hw->mac.type == ixgbe_mac_82598EB)
9772 + ret = ixgbe_dcb_config_tc_stats_82598(hw);
9773 + else if (hw->mac.type >= ixgbe_mac_82599EB)
9774 + ret = ixgbe_dcb_config_tc_stats_82599(hw);
9775 + return ret;
9776 +}
9778 +/**
9779 + * ixgbe_dcb_hw_config - Config and enable DCB
9780 + * @hw: pointer to hardware structure
9781 + * @dcb_config: pointer to ixgbe_dcb_config structure
9783 + * Configure DCB settings and enable DCB mode.
9784 + */
9785 +s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
9786 + struct ixgbe_dcb_config *dcb_config)
9787 +{
9788 + s32 ret = DCB_NOT_IMPLEMENTED;
9789 + if (hw->mac.type == ixgbe_mac_82598EB)
9790 + ret = ixgbe_dcb_hw_config_82598(hw, dcb_config);
9791 + else if (hw->mac.type >= ixgbe_mac_82599EB)
9792 + ret = ixgbe_dcb_hw_config_82599(hw, dcb_config);
9793 + return ret;
9794 +}
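All of these generic entry points dispatch on hw->mac.type and fall back to DCB_NOT_IMPLEMENTED for unrecognized parts, so a caller only needs one check. Illustrative sketch (the helper name is hypothetical):

/* illustrative only: one call covers 82598 and 82599 alike */
static inline bool dcb_apply_example(struct ixgbe_hw *hw,
                                     struct ixgbe_dcb_config *cfg)
{
	return ixgbe_dcb_hw_config(hw, cfg) != DCB_NOT_IMPLEMENTED;
}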
9795 diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_dcb.h linux-2.6.22-50/drivers/net/ixgbe/ixgbe_dcb.h
9796 --- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_dcb.h 1969-12-31 19:00:00.000000000 -0500
9797 +++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_dcb.h 2010-08-25 17:56:26.000000000 -0400
9799 +/*******************************************************************************
9801 + Intel 10 Gigabit PCI Express Linux driver
9802 + Copyright(c) 1999 - 2010 Intel Corporation.
9804 + This program is free software; you can redistribute it and/or modify it
9805 + under the terms and conditions of the GNU General Public License,
9806 + version 2, as published by the Free Software Foundation.
9808 + This program is distributed in the hope it will be useful, but WITHOUT
9809 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
9810 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
9813 + You should have received a copy of the GNU General Public License along with
9814 + this program; if not, write to the Free Software Foundation, Inc.,
9815 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
9817 + The full GNU General Public License is included in this distribution in
9818 + the file called "COPYING".
9820 + Contact Information:
9821 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
9822 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
9824 +*******************************************************************************/
9826 +#ifndef _DCB_CONFIG_H_
9827 +#define _DCB_CONFIG_H_
9829 +#include "ixgbe_type.h"
9831 +/* DCB data structures */
9833 +#define IXGBE_MAX_PACKET_BUFFERS 8
9834 +#define MAX_USER_PRIORITY 8
9835 +#define MAX_TRAFFIC_CLASS 8
9836 +#define MAX_BW_GROUP 8
9837 +#define BW_PERCENT 100
9839 +#define DCB_TX_CONFIG 0
9840 +#define DCB_RX_CONFIG 1
9842 +/* DCB error Codes */
9843 +#define DCB_SUCCESS 0
9844 +#define DCB_ERR_CONFIG -1
9845 +#define DCB_ERR_PARAM -2
9847 +/* Transmit and receive Errors */
9848 +/* Error in bandwidth group allocation */
9849 +#define DCB_ERR_BW_GROUP -3
9850 +/* Error in traffic class bandwidth allocation */
9851 +#define DCB_ERR_TC_BW -4
9852 +/* Traffic class has both link strict and group strict enabled */
9853 +#define DCB_ERR_LS_GS -5
9854 +/* Link strict traffic class has non zero bandwidth */
9855 +#define DCB_ERR_LS_BW_NONZERO -6
9856 +/* Link strict bandwidth group has non zero bandwidth */
9857 +#define DCB_ERR_LS_BWG_NONZERO -7
9858 +/* Traffic class has zero bandwidth */
9859 +#define DCB_ERR_TC_BW_ZERO -8
9861 +#define DCB_NOT_IMPLEMENTED 0x7FFFFFFF
9863 +struct dcb_pfc_tc_debug {
9864 + u8 tc;
9865 + u8 pause_status;
9866 + u64 pause_quanta;
9867 +};
9869 +enum strict_prio_type {
9870 + prio_none = 0,
9871 + prio_group,
9872 + prio_link
9873 +};
9875 +/* DCB capability definitions */
9876 +#define IXGBE_DCB_PG_SUPPORT 0x00000001
9877 +#define IXGBE_DCB_PFC_SUPPORT 0x00000002
9878 +#define IXGBE_DCB_BCN_SUPPORT 0x00000004
9879 +#define IXGBE_DCB_UP2TC_SUPPORT 0x00000008
9880 +#define IXGBE_DCB_GSP_SUPPORT 0x00000010
9882 +#define IXGBE_DCB_8_TC_SUPPORT 0x80
9884 +struct dcb_support {
9885 + /* DCB capabilities */
9886 + u32 capabilities;
9888 + /* Each bit represents a number of TCs configurable in the hw.
9889 + * If 8 traffic classes can be configured, the value is 0x80.
9890 + */
9891 + u8 traffic_classes;
9892 + u8 pfc_traffic_classes;
9893 +};
9895 +/* Traffic class bandwidth allocation per direction */
9896 +struct tc_bw_alloc {
9897 + u8 bwg_id; /* Bandwidth Group (BWG) ID */
9898 + u8 bwg_percent; /* % of BWG's bandwidth */
9899 + u8 link_percent; /* % of link bandwidth */
9900 + u8 up_to_tc_bitmap; /* User Priority to Traffic Class mapping */
9901 + u16 data_credits_refill; /* Credit refill amount in 64B granularity */
9902 + u16 data_credits_max; /* Max credits for a configured packet buffer
9903 + * in 64B granularity.*/
9904 + enum strict_prio_type prio_type; /* Link or Group Strict Priority */
9905 +};
9907 +enum dcb_pfc_type {
9908 + pfc_disabled = 0,
9909 + pfc_enabled_full,
9910 + pfc_enabled_tx,
9911 + pfc_enabled_rx
9912 +};
9914 +/* Traffic class configuration */
9915 +struct tc_configuration {
9916 + struct tc_bw_alloc path[2]; /* One each for Tx/Rx */
9917 + enum dcb_pfc_type dcb_pfc; /* Class based flow control setting */
9919 + u16 desc_credits_max; /* For Tx Descriptor arbitration */
9920 + u8 tc; /* Traffic class (TC) */
9921 +};
9923 +enum dcb_rx_pba_cfg {
9924 + pba_equal, /* PBA[0-7] each use 64KB FIFO */
9925 + pba_80_48 /* PBA[0-3] each use 80KB, PBA[4-7] each use 48KB */
9926 +};
9928 +struct dcb_num_tcs {
9929 + u8 pg_tcs;
9930 + u8 pfc_tcs;
9931 +};
9933 +struct ixgbe_dcb_config {
9934 + struct tc_configuration tc_config[MAX_TRAFFIC_CLASS];
9935 + struct dcb_support support;
9936 + struct dcb_num_tcs num_tcs;
9937 + u8 bw_percentage[2][MAX_BW_GROUP]; /* One each for Tx/Rx */
9938 + bool pfc_mode_enable;
9939 + bool round_robin_enable;
9941 + enum dcb_rx_pba_cfg rx_pba_cfg;
9943 + u32 dcb_cfg_version; /* Not used...OS-specific? */
9944 + u32 link_speed; /* For bandwidth allocation validation purposes */
9945 +};
9947 +/* DCB driver APIs */
9949 +/* DCB rule checking function.*/
9950 +s32 ixgbe_dcb_check_config(struct ixgbe_dcb_config *config);
9952 +/* DCB credits calculation */
9953 +s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw,
9954 + struct ixgbe_dcb_config *config,
9955 + u32 max_frame_size,
9956 + u8 direction);
9958 +/* DCB PFC functions */
9959 +s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *hw,
9960 + struct ixgbe_dcb_config *dcb_config);
9961 +s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats,
9962 + u8 tc_count);
9964 +/* DCB traffic class stats */
9965 +s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *hw);
9966 +s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats,
9967 + u8 tc_count);
9969 +/* DCB config arbiters */
9970 +s32 ixgbe_dcb_config_tx_desc_arbiter(struct ixgbe_hw *hw,
9971 + struct ixgbe_dcb_config *dcb_config);
9972 +s32 ixgbe_dcb_config_tx_data_arbiter(struct ixgbe_hw *hw,
9973 + struct ixgbe_dcb_config *dcb_config);
9974 +s32 ixgbe_dcb_config_rx_arbiter(struct ixgbe_hw *hw,
9975 + struct ixgbe_dcb_config *dcb_config);
9977 +/* DCB hw initialization */
9978 +s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw, struct ixgbe_dcb_config *config);
9981 +/* DCB definitions for credit calculation */
9982 +#define DCB_CREDIT_QUANTUM 64
9983 +#define MAX_CREDIT_REFILL 200 /* 200 * 64B = 12800B */
9984 +#define MINIMUM_CREDIT_REFILL 2 /* 2 * 64B = 128B */
9985 +#define DCB_MAX_TSO_SIZE (32 * 1024) /* MAX TSO packet size supported
9986 + */
9987 +/* 513 for 32KB TSO packet */
9988 +#define MINIMUM_CREDIT_FOR_TSO ((DCB_MAX_TSO_SIZE / DCB_CREDIT_QUANTUM) + 1)
9989 +#define MAX_CREDIT (2 * MAX_CREDIT_REFILL)
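As a quick check of the credit constants (illustrative): MINIMUM_CREDIT_FOR_TSO works out to (32 * 1024) / 64 + 1 = 513 credits, i.e. 513 * 64B = 32832B, just over one maximal 32KB TSO packet, and MAX_CREDIT to 2 * 200 = 400 credits = 25600B. The typedef names below are hypothetical:

/* illustrative compile-time checks of the derived credit values */
typedef char tso_credit_check[(MINIMUM_CREDIT_FOR_TSO == 513) ? 1 : -1];
typedef char max_credit_check[(MAX_CREDIT == 400) ? 1 : -1];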
9991 +#endif /* _DCB_CONFIG_H */
9992 diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_dcb_nl.c linux-2.6.22-50/drivers/net/ixgbe/ixgbe_dcb_nl.c
9993 --- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_dcb_nl.c 1969-12-31 19:00:00.000000000 -0500
9994 +++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_dcb_nl.c 2010-08-25 17:56:26.000000000 -0400
9996 +/*******************************************************************************
9998 + Intel 10 Gigabit PCI Express Linux driver
9999 + Copyright(c) 1999 - 2010 Intel Corporation.
10001 + This program is free software; you can redistribute it and/or modify it
10002 + under the terms and conditions of the GNU General Public License,
10003 + version 2, as published by the Free Software Foundation.
10005 + This program is distributed in the hope it will be useful, but WITHOUT
10006 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10007 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
10010 + You should have received a copy of the GNU General Public License along with
10011 + this program; if not, write to the Free Software Foundation, Inc.,
10012 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
10014 + The full GNU General Public License is included in this distribution in
10015 + the file called "COPYING".
10017 + Contact Information:
10018 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
10019 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
10021 +*******************************************************************************/
10023 +#include "ixgbe.h"
10026 +#include <linux/dcbnl.h>
10027 +#include "ixgbe_dcb_82598.h"
10028 +#include "ixgbe_dcb_82599.h"
10030 +#include <linux/netlink.h>
10031 +#include <linux/genetlink.h>
10032 +#include <net/genetlink.h>
10033 +#include <linux/netdevice.h>
10036 +/* Callbacks for DCB netlink in the kernel */
10037 +#define BIT_DCB_MODE 0x01
10038 +#define BIT_PFC 0x02
10039 +#define BIT_PG_RX 0x04
10040 +#define BIT_PG_TX 0x08
10041 +#define BIT_APP_UPCHG 0x10
10042 +#define BIT_RESETLINK 0x40
10043 +#define BIT_LINKSPEED 0x80
10045 +/* Responses for the DCB_C_SET_ALL command */
10046 +#define DCB_HW_CHG_RST 0 /* DCB configuration changed with reset */
10047 +#define DCB_NO_HW_CHG 1 /* DCB configuration did not change */
10048 +#define DCB_HW_CHG 2 /* DCB configuration changed, no reset */
10050 +#ifndef CONFIG_DCB
10051 +/* DCB configuration commands */
10067 + DCB_C_GPERM_HWADDR,
10068 + __DCB_C_ENUM_MAX,
10071 +#define IXGBE_DCB_C_MAX (__DCB_C_ENUM_MAX - 1)
10073 +/* DCB configuration attributes */
10075 + DCB_A_UNDEFINED = 0,
10084 + DCB_A_PERM_HWADDR,
10085 + __DCB_A_ENUM_MAX,
10088 +#define IXGBE_DCB_A_MAX (__DCB_A_ENUM_MAX - 1)
10090 +/* PERM HWADDR attributes */
10092 + PERM_HW_A_UNDEFINED,
10100 + __PERM_HW_A_ENUM_MAX,
10103 +#define IXGBE_DCB_PERM_HW_A_MAX (__PERM_HW_A_ENUM_MAX - 1)
10105 +/* PFC configuration attributes */
10107 + PFC_A_UP_UNDEFINED,
10116 + PFC_A_UP_MAX, /* Used as an iterator cap */
10118 + __PFC_A_UP_ENUM_MAX,
10121 +#define IXGBE_DCB_PFC_A_UP_MAX (__PFC_A_UP_ENUM_MAX - 1)
10123 +/* Priority Group Traffic Class and Bandwidth Group
10124 + * configuration attributes
10125 + */
10136 + PG_A_TC_MAX, /* Used as an iterator cap */
10146 + PG_A_BWG_MAX, /* Used as an iterator cap */
10151 +#define IXGBE_DCB_PG_A_MAX (__PG_A_ENUM_MAX - 1)
10154 + TC_A_PARAM_UNDEFINED,
10155 + TC_A_PARAM_STRICT_PRIO,
10156 + TC_A_PARAM_BW_GROUP_ID,
10157 + TC_A_PARAM_BW_PCT_IN_GROUP,
10158 + TC_A_PARAM_UP_MAPPING,
10159 + TC_A_PARAM_MAX, /* Used as an iterator cap */
10161 + __TC_A_PARAM_ENUM_MAX,
10164 +#define IXGBE_DCB_TC_A_PARAM_MAX (__TC_A_PARAM_ENUM_MAX - 1)
10166 +#define DCB_PROTO_VERSION 0x1
10167 +#define is_pci_device(dev) ((dev)->bus == &pci_bus_type)
10169 +static struct genl_family dcb_family = {
10170 + .id = GENL_ID_GENERATE,
10171 + .hdrsize = 0,
10172 + .name = "IXGBE_DCB",
10173 + .version = DCB_PROTO_VERSION,
10174 + .maxattr = IXGBE_DCB_A_MAX,
10175 +};
10177 +/* DCB NETLINK attributes policy */
10178 +static struct nla_policy dcb_genl_policy[IXGBE_DCB_A_MAX + 1] = {
10179 + [DCB_A_IFNAME] = {.type = NLA_STRING, .len = IFNAMSIZ - 1},
10180 + [DCB_A_STATE] = {.type = NLA_U8},
10181 + [DCB_A_PG_CFG] = {.type = NLA_NESTED},
10182 + [DCB_A_PFC_CFG] = {.type = NLA_NESTED},
10183 + [DCB_A_PFC_STATS] = {.type = NLA_NESTED},
10184 + [DCB_A_PG_STATS] = {.type = NLA_NESTED},
10185 + [DCB_A_LINK_SPD] = {.type = NLA_U8},
10186 + [DCB_A_SET_ALL] = {.type = NLA_U8},
10187 + [DCB_A_PERM_HWADDR] = {.type = NLA_NESTED},
10188 +};
10190 +/* DCB_A_PERM_HWADDR nested attributes... an array. */
10191 +static struct nla_policy dcb_perm_hwaddr_nest[IXGBE_DCB_PERM_HW_A_MAX + 1] = {
10192 + [PERM_HW_A_0] = {.type = NLA_U8},
10193 + [PERM_HW_A_1] = {.type = NLA_U8},
10194 + [PERM_HW_A_2] = {.type = NLA_U8},
10195 + [PERM_HW_A_3] = {.type = NLA_U8},
10196 + [PERM_HW_A_4] = {.type = NLA_U8},
10197 + [PERM_HW_A_5] = {.type = NLA_U8},
10198 + [PERM_HW_A_ALL] = {.type = NLA_FLAG},
10199 +};
10201 +/* DCB_A_PFC_CFG nested attributes...like an array. */
10202 +static struct nla_policy dcb_pfc_up_nest[IXGBE_DCB_PFC_A_UP_MAX + 1] = {
10203 + [PFC_A_UP_0] = {.type = NLA_U8},
10204 + [PFC_A_UP_1] = {.type = NLA_U8},
10205 + [PFC_A_UP_2] = {.type = NLA_U8},
10206 + [PFC_A_UP_3] = {.type = NLA_U8},
10207 + [PFC_A_UP_4] = {.type = NLA_U8},
10208 + [PFC_A_UP_5] = {.type = NLA_U8},
10209 + [PFC_A_UP_6] = {.type = NLA_U8},
10210 + [PFC_A_UP_7] = {.type = NLA_U8},
10211 + [PFC_A_UP_ALL] = {.type = NLA_FLAG},
10212 +};
10214 +/* DCB_A_PG_CFG nested attributes...like a struct. */
10215 +static struct nla_policy dcb_pg_nest[IXGBE_DCB_PG_A_MAX + 1] = {
10216 + [PG_A_TC_0] = {.type = NLA_NESTED},
10217 + [PG_A_TC_1] = {.type = NLA_NESTED},
10218 + [PG_A_TC_2] = {.type = NLA_NESTED},
10219 + [PG_A_TC_3] = {.type = NLA_NESTED},
10220 + [PG_A_TC_4] = {.type = NLA_NESTED},
10221 + [PG_A_TC_5] = {.type = NLA_NESTED},
10222 + [PG_A_TC_6] = {.type = NLA_NESTED},
10223 + [PG_A_TC_7] = {.type = NLA_NESTED},
10224 + [PG_A_TC_ALL] = {.type = NLA_NESTED},
10225 + [PG_A_BWG_0] = {.type = NLA_U8},
10226 + [PG_A_BWG_1] = {.type = NLA_U8},
10227 + [PG_A_BWG_2] = {.type = NLA_U8},
10228 + [PG_A_BWG_3] = {.type = NLA_U8},
10229 + [PG_A_BWG_4] = {.type = NLA_U8},
10230 + [PG_A_BWG_5] = {.type = NLA_U8},
10231 + [PG_A_BWG_6] = {.type = NLA_U8},
10232 + [PG_A_BWG_7] = {.type = NLA_U8},
10233 + [PG_A_BWG_ALL] = {.type = NLA_FLAG},
10234 +};
10236 +/* TC_A_CLASS_X nested attributes. */
10237 +static struct nla_policy dcb_tc_param_nest[IXGBE_DCB_TC_A_PARAM_MAX + 1] = {
10238 + [TC_A_PARAM_STRICT_PRIO] = {.type = NLA_U8},
10239 + [TC_A_PARAM_BW_GROUP_ID] = {.type = NLA_U8},
10240 + [TC_A_PARAM_BW_PCT_IN_GROUP] = {.type = NLA_U8},
10241 + [TC_A_PARAM_UP_MAPPING] = {.type = NLA_U8},
10242 + [TC_A_PARAM_ALL] = {.type = NLA_FLAG},
10243 +};
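The ops table and family registration are outside this hunk. With the genetlink API of this kernel era, a family and its command handlers would be registered roughly as below; dcb_nl_init() and the single op shown are hypothetical sketches (the real driver registers one genl_ops per DCB_C_* command):

/* illustrative sketch only, not part of this patch */
static struct genl_ops dcb_gstate_op = {
	.cmd = DCB_C_GSTATE,
	.flags = GENL_ADMIN_PERM,
	.policy = dcb_genl_policy,
	.doit = ixgbe_dcb_gstate,	/* defined later in this file */
};

static int dcb_nl_init(void)
{
	int ret = genl_register_family(&dcb_family);

	if (ret)
		return ret;
	ret = genl_register_ops(&dcb_family, &dcb_gstate_op);
	if (ret)
		genl_unregister_family(&dcb_family);
	return ret;
}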
10245 +static int ixgbe_dcb_check_adapter(struct net_device *netdev)
10246 +{
10247 + struct device *busdev;
10248 + struct pci_dev *pcidev;
10250 + busdev = netdev->dev.parent;
10251 + if (!busdev)
10252 + return -EINVAL;
10254 + if (!is_pci_device(busdev))
10255 + return -EINVAL;
10257 + pcidev = to_pci_dev(busdev);
10258 + if (!pcidev)
10259 + return -EINVAL;
10261 + if (ixgbe_is_ixgbe(pcidev))
10262 + return 0;
10263 + else
10264 + return -EINVAL;
10265 +}
10269 +int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
10270 + struct ixgbe_dcb_config *dst_dcb_cfg, int tc_max)
10272 + struct tc_configuration *src_tc_cfg = NULL;
10273 + struct tc_configuration *dst_tc_cfg = NULL;
10276 + if (!src_dcb_cfg || !dst_dcb_cfg)
10279 + for (i = DCB_PG_ATTR_TC_0; i < tc_max + DCB_PG_ATTR_TC_0; i++) {
10280 + src_tc_cfg = &src_dcb_cfg->tc_config[i - DCB_PG_ATTR_TC_0];
10281 + dst_tc_cfg = &dst_dcb_cfg->tc_config[i - DCB_PG_ATTR_TC_0];
10283 + dst_tc_cfg->path[DCB_TX_CONFIG].prio_type =
10284 + src_tc_cfg->path[DCB_TX_CONFIG].prio_type;
10286 + dst_tc_cfg->path[DCB_TX_CONFIG].bwg_id =
10287 + src_tc_cfg->path[DCB_TX_CONFIG].bwg_id;
10289 + dst_tc_cfg->path[DCB_TX_CONFIG].bwg_percent =
10290 + src_tc_cfg->path[DCB_TX_CONFIG].bwg_percent;
10292 + dst_tc_cfg->path[DCB_TX_CONFIG].up_to_tc_bitmap =
10293 + src_tc_cfg->path[DCB_TX_CONFIG].up_to_tc_bitmap;
10295 + dst_tc_cfg->path[DCB_RX_CONFIG].prio_type =
10296 + src_tc_cfg->path[DCB_RX_CONFIG].prio_type;
10298 + dst_tc_cfg->path[DCB_RX_CONFIG].bwg_id =
10299 + src_tc_cfg->path[DCB_RX_CONFIG].bwg_id;
10301 + dst_tc_cfg->path[DCB_RX_CONFIG].bwg_percent =
10302 + src_tc_cfg->path[DCB_RX_CONFIG].bwg_percent;
10304 + dst_tc_cfg->path[DCB_RX_CONFIG].up_to_tc_bitmap =
10305 + src_tc_cfg->path[DCB_RX_CONFIG].up_to_tc_bitmap;
10308 + for (i = DCB_PG_ATTR_BW_ID_0; i < DCB_PG_ATTR_BW_ID_MAX; i++) {
10309 + dst_dcb_cfg->bw_percentage[DCB_TX_CONFIG]
10310 + [i-DCB_PG_ATTR_BW_ID_0] = src_dcb_cfg->bw_percentage
10311 + [DCB_TX_CONFIG][i-DCB_PG_ATTR_BW_ID_0];
10312 + dst_dcb_cfg->bw_percentage[DCB_RX_CONFIG]
10313 + [i-DCB_PG_ATTR_BW_ID_0] = src_dcb_cfg->bw_percentage
10314 + [DCB_RX_CONFIG][i-DCB_PG_ATTR_BW_ID_0];
10317 + for (i = DCB_PFC_UP_ATTR_0; i < DCB_PFC_UP_ATTR_MAX; i++) {
10318 + dst_dcb_cfg->tc_config[i - DCB_PFC_UP_ATTR_0].dcb_pfc =
10319 + src_dcb_cfg->tc_config[i - DCB_PFC_UP_ATTR_0].dcb_pfc;
10320 + }
10321 + dst_dcb_cfg->pfc_mode_enable = src_dcb_cfg->pfc_mode_enable;
10323 + return 0;
10324 +}
10325 +#else /* CONFIG_DCB */
10326 +int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
10327 + struct ixgbe_dcb_config *dst_dcb_cfg, int tc_max)
10329 + struct tc_configuration *src_tc_cfg = NULL;
10330 + struct tc_configuration *dst_tc_cfg = NULL;
10333 + if (!src_dcb_cfg || !dst_dcb_cfg)
10336 + dst_dcb_cfg->link_speed = src_dcb_cfg->link_speed;
10338 + for (i = PG_A_TC_0; i < tc_max + PG_A_TC_0; i++) {
10339 + src_tc_cfg = &src_dcb_cfg->tc_config[i - PG_A_TC_0];
10340 + dst_tc_cfg = &dst_dcb_cfg->tc_config[i - PG_A_TC_0];
10342 + dst_tc_cfg->path[DCB_TX_CONFIG].prio_type =
10343 + src_tc_cfg->path[DCB_TX_CONFIG].prio_type;
10345 + dst_tc_cfg->path[DCB_TX_CONFIG].bwg_id =
10346 + src_tc_cfg->path[DCB_TX_CONFIG].bwg_id;
10348 + dst_tc_cfg->path[DCB_TX_CONFIG].bwg_percent =
10349 + src_tc_cfg->path[DCB_TX_CONFIG].bwg_percent;
10351 + dst_tc_cfg->path[DCB_TX_CONFIG].up_to_tc_bitmap =
10352 + src_tc_cfg->path[DCB_TX_CONFIG].up_to_tc_bitmap;
10354 + dst_tc_cfg->path[DCB_RX_CONFIG].prio_type =
10355 + src_tc_cfg->path[DCB_RX_CONFIG].prio_type;
10357 + dst_tc_cfg->path[DCB_RX_CONFIG].bwg_id =
10358 + src_tc_cfg->path[DCB_RX_CONFIG].bwg_id;
10360 + dst_tc_cfg->path[DCB_RX_CONFIG].bwg_percent =
10361 + src_tc_cfg->path[DCB_RX_CONFIG].bwg_percent;
10363 + dst_tc_cfg->path[DCB_RX_CONFIG].up_to_tc_bitmap =
10364 + src_tc_cfg->path[DCB_RX_CONFIG].up_to_tc_bitmap;
10367 + for (i = PG_A_BWG_0; i < PG_A_BWG_MAX; i++) {
10368 + dst_dcb_cfg->bw_percentage[DCB_TX_CONFIG][i - PG_A_BWG_0] =
10369 + src_dcb_cfg->bw_percentage[DCB_TX_CONFIG][i - PG_A_BWG_0];
10370 + dst_dcb_cfg->bw_percentage[DCB_RX_CONFIG][i - PG_A_BWG_0] =
10371 + src_dcb_cfg->bw_percentage[DCB_RX_CONFIG][i - PG_A_BWG_0];
10374 + for (i = PFC_A_UP_0; i < PFC_A_UP_MAX; i++) {
10375 + dst_dcb_cfg->tc_config[i - PFC_A_UP_0].dcb_pfc =
10376 + src_dcb_cfg->tc_config[i - PFC_A_UP_0].dcb_pfc;
10377 + }
10379 + return 0;
10380 +}
10381 +#endif /* CONFIG_DCB */
10382 +static int ixgbe_nl_reply(u8 value, u8 cmd, u8 attr, struct genl_info *info)
10383 +{
10384 + struct sk_buff *dcb_skb = NULL;
10385 + void *data;
10386 + int ret;
10388 + dcb_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
10389 + if (!dcb_skb)
10390 + return -ENOMEM;
10392 + data = genlmsg_put_reply(dcb_skb, info, &dcb_family, 0, cmd);
10393 + if (!data)
10394 + goto err;
10396 + ret = nla_put_u8(dcb_skb, attr, value);
10397 + if (ret)
10398 + goto err;
10400 + /* end the message, assign the nlmsg_len. */
10401 + genlmsg_end(dcb_skb, data);
10402 + ret = genlmsg_reply(dcb_skb, info);
10403 + if (ret)
10404 + goto err;
10406 + return 0;
10408 +err:
10409 + kfree_skb(dcb_skb);
10410 + return -EINVAL;
10411 +}
10415 +static u8 ixgbe_dcbnl_get_state(struct net_device *netdev)
10417 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
10419 + return !!(adapter->flags & IXGBE_FLAG_DCB_ENABLED);
10422 +static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
10425 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
10428 + /* Turn on DCB */
10429 + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
10432 + if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
10433 + DPRINTK(DRV, ERR, "Enable failed, needs MSI-X\n");
10438 + if (netif_running(netdev))
10439 +#ifdef HAVE_NET_DEVICE_OPS
10440 + netdev->netdev_ops->ndo_stop(netdev);
10441 +#else
10442 + netdev->stop(netdev);
10443 +#endif
10444 + ixgbe_clear_interrupt_scheme(adapter);
10445 + if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
10446 + adapter->last_lfc_mode = adapter->hw.fc.current_mode;
10447 + adapter->hw.fc.requested_mode = ixgbe_fc_none;
10449 + adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
10450 + if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
10451 + DPRINTK(DRV, INFO, "DCB enabled, "
10452 + "disabling Flow Director\n");
10453 + adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
10454 + adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
10456 + adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
10457 + ixgbe_init_interrupt_scheme(adapter);
10458 + if (netif_running(netdev))
10459 +#ifdef HAVE_NET_DEVICE_OPS
10460 + netdev->netdev_ops->ndo_open(netdev);
10461 +#else
10462 + netdev->open(netdev);
10463 +#endif
10465 + /* Turn off DCB */
10466 + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
10467 + if (netif_running(netdev))
10468 +#ifdef HAVE_NET_DEVICE_OPS
10469 + netdev->netdev_ops->ndo_stop(netdev);
10470 +#else
10471 + netdev->stop(netdev);
10472 +#endif
10473 + ixgbe_clear_interrupt_scheme(adapter);
10474 + adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
10475 + adapter->temp_dcb_cfg.pfc_mode_enable = false;
10476 + adapter->dcb_cfg.pfc_mode_enable = false;
10477 + adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
10478 + adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
10479 + if (adapter->hw.mac.type == ixgbe_mac_82599EB)
10480 + adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
10481 + ixgbe_init_interrupt_scheme(adapter);
10482 + if (netif_running(netdev))
10483 +#ifdef HAVE_NET_DEVICE_OPS
10484 + netdev->netdev_ops->ndo_open(netdev);
10485 +#else
10486 + netdev->open(netdev);
10487 +#endif
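The #ifdef HAVE_NET_DEVICE_OPS pairs repeated throughout this file could be folded into small compat helpers. Illustrative sketch only, not part of the patch (the helper names are hypothetical):

static inline void ixgbe_netdev_stop_compat(struct net_device *dev)
{
#ifdef HAVE_NET_DEVICE_OPS
	dev->netdev_ops->ndo_stop(dev);	/* net_device_ops kernels */
#else
	dev->stop(dev);			/* pre-net_device_ops kernels */
#endif
}

static inline void ixgbe_netdev_open_compat(struct net_device *dev)
{
#ifdef HAVE_NET_DEVICE_OPS
	dev->netdev_ops->ndo_open(dev);
#else
	dev->open(dev);
#endif
}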
10494 +static int ixgbe_dcb_gstate(struct sk_buff *skb, struct genl_info *info)
10496 + int ret = -ENOMEM;
10497 + struct net_device *netdev = NULL;
10498 + struct ixgbe_adapter *adapter = NULL;
10500 + if (!info->attrs[DCB_A_IFNAME])
10503 + netdev = dev_get_by_name(&init_net,
10504 + nla_data(info->attrs[DCB_A_IFNAME]));
10508 + ret = ixgbe_dcb_check_adapter(netdev);
10512 + adapter = netdev_priv(netdev);
10514 + ret = ixgbe_nl_reply(!!(adapter->flags & IXGBE_FLAG_DCB_ENABLED),
10515 + DCB_C_GSTATE, DCB_A_STATE, info);
10524 +static int ixgbe_dcb_sstate(struct sk_buff *skb, struct genl_info *info)
10526 + struct net_device *netdev = NULL;
10527 + struct ixgbe_adapter *adapter = NULL;
10528 + int ret = -EINVAL;
10531 + if (!info->attrs[DCB_A_IFNAME] || !info->attrs[DCB_A_STATE])
10534 + netdev = dev_get_by_name(&init_net,
10535 + nla_data(info->attrs[DCB_A_IFNAME]));
10539 + ret = ixgbe_dcb_check_adapter(netdev);
10543 + adapter = netdev_priv(netdev);
10545 + value = nla_get_u8(info->attrs[DCB_A_STATE]);
10546 + if ((value & 1) != value) {
10547 + DPRINTK(DRV, ERR, "Value is not 1 or 0, it is %d.\n", value);
10551 + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
10552 + if (netdev->flags & IFF_UP)
10553 +#ifdef HAVE_NET_DEVICE_OPS
10554 + netdev->netdev_ops->ndo_stop(netdev);
10555 +#else
10556 + netdev->stop(netdev);
10557 +#endif
10558 + ixgbe_clear_interrupt_scheme(adapter);
10560 + adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
10561 + if (adapter->flags & IXGBE_FLAG_RSS_CAPABLE)
10562 + adapter->flags |=
10563 + IXGBE_FLAG_RSS_ENABLED;
10564 + ixgbe_init_interrupt_scheme(adapter);
10565 + ixgbe_reset(adapter);
10566 + if (netdev->flags & IFF_UP)
10567 +#ifdef HAVE_NET_DEVICE_OPS
10568 + netdev->netdev_ops->ndo_open(netdev);
10569 +#else
10570 + netdev->open(netdev);
10571 +#endif
10574 + /* Nothing to do, already off */
10578 + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
10579 + /* Nothing to do, already on */
10581 + } else if (!(adapter->flags & IXGBE_FLAG_DCB_CAPABLE)) {
10582 + DPRINTK(DRV, ERR, "Enable failed. Make sure "
10583 + "the driver can enable MSI-X.\n");
10587 + if (netdev->flags & IFF_UP)
10588 +#ifdef HAVE_NET_DEVICE_OPS
10589 + netdev->netdev_ops->ndo_stop(netdev);
10590 +#else
10591 + netdev->stop(netdev);
10592 +#endif
10593 + ixgbe_clear_interrupt_scheme(adapter);
10595 + adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
10596 + adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
10597 + adapter->dcb_cfg.support.capabilities =
10598 + (IXGBE_DCB_PG_SUPPORT | IXGBE_DCB_PFC_SUPPORT |
10599 + IXGBE_DCB_GSP_SUPPORT);
10600 + if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
10601 + DPRINTK(DRV, INFO, "DCB enabled, "
10602 + "disabling Flow Director\n");
10603 + adapter->flags &=
10604 + ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
10605 + adapter->flags &=
10606 + ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
10607 + adapter->dcb_cfg.support.capabilities |=
10608 + IXGBE_DCB_UP2TC_SUPPORT;
10610 + adapter->ring_feature[RING_F_DCB].indices = 8;
10611 + ixgbe_init_interrupt_scheme(adapter);
10612 + ixgbe_reset(adapter);
10613 + if (netdev->flags & IFF_UP)
10614 +#ifdef HAVE_NET_DEVICE_OPS
10615 + netdev->netdev_ops->ndo_open(netdev);
10616 +#else
10617 + netdev->open(netdev);
10618 +#endif
10625 + ret = ixgbe_nl_reply(0, DCB_C_SSTATE, DCB_A_STATE, info);
10635 +static int ixgbe_dcb_glink_spd(struct sk_buff *skb, struct genl_info *info)
10637 + int ret = -ENOMEM;
10638 + struct net_device *netdev = NULL;
10639 + struct ixgbe_adapter *adapter = NULL;
10641 + if (!info->attrs[DCB_A_IFNAME])
10644 + netdev = dev_get_by_name(&init_net,
10645 + nla_data(info->attrs[DCB_A_IFNAME]));
10649 + ret = ixgbe_dcb_check_adapter(netdev);
10653 + adapter = netdev_priv(netdev);
10655 + ret = ixgbe_nl_reply(adapter->dcb_cfg.link_speed & 0xff,
10656 + DCB_C_GLINK_SPD, DCB_A_LINK_SPD, info);
10665 +static int ixgbe_dcb_slink_spd(struct sk_buff *skb, struct genl_info *info)
10667 + struct net_device *netdev = NULL;
10668 + struct ixgbe_adapter *adapter = NULL;
10669 + int ret = -EINVAL;
10672 + if (!info->attrs[DCB_A_IFNAME] || !info->attrs[DCB_A_LINK_SPD])
10675 + netdev = dev_get_by_name(&init_net,
10676 + nla_data(info->attrs[DCB_A_IFNAME]));
10680 + ret = ixgbe_dcb_check_adapter(netdev);
10684 + adapter = netdev_priv(netdev);
10686 + value = nla_get_u8(info->attrs[DCB_A_LINK_SPD]);
10688 + DPRINTK(DRV, ERR, "Value is not 0 thru 9, it is %d.\n", value);
10690 + if (!adapter->dcb_set_bitmap &&
10691 + ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
10692 + adapter->ring_feature[RING_F_DCB].indices)) {
10697 + adapter->temp_dcb_cfg.link_speed = value;
10698 + adapter->dcb_set_bitmap |= BIT_LINKSPEED;
10701 + ret = ixgbe_nl_reply(0, DCB_C_SLINK_SPD, DCB_A_LINK_SPD, info);
10713 +static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
10716 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
10719 + memset(perm_addr, 0xff, MAX_ADDR_LEN);
10721 + for (i = 0; i < netdev->addr_len; i++)
10722 + perm_addr[i] = adapter->hw.mac.perm_addr[i];
10724 + if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
10725 + for (j = 0; j < netdev->addr_len; j++, i++)
10726 + perm_addr[i] = adapter->hw.mac.san_addr[j];
10730 +static int ixgbe_dcb_gperm_hwaddr(struct sk_buff *skb, struct genl_info *info)
10733 + struct sk_buff *dcb_skb = NULL;
10734 + struct nlattr *tb[IXGBE_DCB_PERM_HW_A_MAX + 1], *nest;
10735 + struct net_device *netdev = NULL;
10736 + struct ixgbe_adapter *adapter = NULL;
10737 + struct ixgbe_hw *hw = NULL;
10738 + int ret = -ENOMEM;
10741 + if (!info->attrs[DCB_A_IFNAME] || !info->attrs[DCB_A_PERM_HWADDR])
10744 + netdev = dev_get_by_name(&init_net,
10745 + nla_data(info->attrs[DCB_A_IFNAME]));
10749 + ret = ixgbe_dcb_check_adapter(netdev);
10753 + adapter = netdev_priv(netdev);
10755 + hw = &adapter->hw;
10757 + ret = nla_parse_nested(tb, IXGBE_DCB_PERM_HW_A_MAX,
10758 + info->attrs[DCB_A_PERM_HWADDR],
10759 + dcb_perm_hwaddr_nest);
10763 + dcb_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
10767 + data = genlmsg_put_reply(dcb_skb, info, &dcb_family, 0,
10768 + DCB_C_GPERM_HWADDR);
10772 + nest = nla_nest_start(dcb_skb, DCB_A_PERM_HWADDR);
10776 + for (i = 0; i < netdev->addr_len; i++) {
10777 + if (!tb[i+PERM_HW_A_0] && !tb[PERM_HW_A_ALL])
10780 + ret = nla_put_u8(dcb_skb, DCB_A_PERM_HWADDR,
10781 + hw->mac.perm_addr[i]);
10784 + nla_nest_cancel(dcb_skb, nest);
10789 + nla_nest_end(dcb_skb, nest);
10791 + genlmsg_end(dcb_skb, data);
10793 + ret = genlmsg_reply(dcb_skb, info);
10801 + DPRINTK(DRV, ERR, "Error in get permanent hwaddr.\n");
10810 +static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
10811 + u8 prio, u8 bwg_id, u8 bw_pct,
10814 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
10816 + if (prio != DCB_ATTR_VALUE_UNDEFINED)
10817 + adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type = prio;
10818 + if (bwg_id != DCB_ATTR_VALUE_UNDEFINED)
10819 + adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_id = bwg_id;
10820 + if (bw_pct != DCB_ATTR_VALUE_UNDEFINED)
10821 + adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent =
10823 + if (up_map != DCB_ATTR_VALUE_UNDEFINED)
10824 + adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap =
10827 + if ((adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type !=
10828 + adapter->dcb_cfg.tc_config[tc].path[0].prio_type) ||
10829 + (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_id !=
10830 + adapter->dcb_cfg.tc_config[tc].path[0].bwg_id) ||
10831 + (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent !=
10832 + adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent) ||
10833 + (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap !=
10834 + adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap)) {
10835 + adapter->dcb_set_bitmap |= BIT_PG_TX;
10836 + adapter->dcb_set_bitmap |= BIT_RESETLINK;
10840 +static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
10843 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
10845 + adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct;
10847 + if (adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] !=
10848 + adapter->dcb_cfg.bw_percentage[0][bwg_id]) {
10849 + adapter->dcb_set_bitmap |= BIT_PG_TX;
10850 + adapter->dcb_set_bitmap |= BIT_RESETLINK;
10854 +static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
10855 + u8 prio, u8 bwg_id, u8 bw_pct,
10858 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
10860 + if (prio != DCB_ATTR_VALUE_UNDEFINED)
10861 + adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type = prio;
10862 + if (bwg_id != DCB_ATTR_VALUE_UNDEFINED)
10863 + adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_id = bwg_id;
10864 + if (bw_pct != DCB_ATTR_VALUE_UNDEFINED)
10865 + adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent =
10867 + if (up_map != DCB_ATTR_VALUE_UNDEFINED)
10868 + adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap =
10871 + if ((adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type !=
10872 + adapter->dcb_cfg.tc_config[tc].path[1].prio_type) ||
10873 + (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_id !=
10874 + adapter->dcb_cfg.tc_config[tc].path[1].bwg_id) ||
10875 + (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent !=
10876 + adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent) ||
10877 + (adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap !=
10878 + adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap)) {
10879 + adapter->dcb_set_bitmap |= BIT_PG_RX;
10880 + adapter->dcb_set_bitmap |= BIT_RESETLINK;
10884 +static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
10887 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
10889 + adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct;
10891 + if (adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] !=
10892 + adapter->dcb_cfg.bw_percentage[1][bwg_id]) {
10893 + adapter->dcb_set_bitmap |= BIT_PG_RX;
10894 + adapter->dcb_set_bitmap |= BIT_RESETLINK;
10898 +static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
10899 + u8 *prio, u8 *bwg_id, u8 *bw_pct,
10902 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
10904 + *prio = adapter->dcb_cfg.tc_config[tc].path[0].prio_type;
10905 + *bwg_id = adapter->dcb_cfg.tc_config[tc].path[0].bwg_id;
10906 + *bw_pct = adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent;
10907 + *up_map = adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap;
10910 +static void ixgbe_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
10913 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
10915 + *bw_pct = adapter->dcb_cfg.bw_percentage[0][bwg_id];
10918 +static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc,
10919 + u8 *prio, u8 *bwg_id, u8 *bw_pct,
10922 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
10924 + *prio = adapter->dcb_cfg.tc_config[tc].path[1].prio_type;
10925 + *bwg_id = adapter->dcb_cfg.tc_config[tc].path[1].bwg_id;
10926 + *bw_pct = adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent;
10927 + *up_map = adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap;
10930 +static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
10933 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
10935 + *bw_pct = adapter->dcb_cfg.bw_percentage[1][bwg_id];
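Note the two-copy model running through these callbacks: the set paths stage changes in temp_dcb_cfg and mark dcb_set_bitmap, the get paths read the committed dcb_cfg, and nothing reaches the hardware until ixgbe_dcbnl_set_all() commits the staged copy. Illustrative sketch only (the wrapper name is hypothetical):

static void dcb_stage_and_commit_example(struct net_device *netdev)
{
	u8 status;

	ixgbe_dcbnl_set_pg_bwg_cfg_tx(netdev, 0, 60);	/* stage BWG0 = 60% */
	ixgbe_dcbnl_set_pg_bwg_cfg_tx(netdev, 1, 40);	/* stage BWG1 = 40% */

	status = ixgbe_dcbnl_set_all(netdev);	/* commit; may reset the link */
	(void)status;	/* DCB_HW_CHG_RST, DCB_NO_HW_CHG or DCB_HW_CHG */
}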
10938 +static int ixgbe_dcb_pg_scfg(struct sk_buff *skb, struct genl_info *info,
10941 + struct net_device *netdev = NULL;
10942 + struct ixgbe_adapter *adapter = NULL;
10943 + struct tc_configuration *tc_config = NULL;
10944 + struct tc_configuration *tc_tmpcfg = NULL;
10945 + struct nlattr *pg_tb[IXGBE_DCB_PG_A_MAX + 1];
10946 + struct nlattr *param_tb[IXGBE_DCB_TC_A_PARAM_MAX + 1];
10947 + int i, ret, tc_max;
10951 + if (!info->attrs[DCB_A_IFNAME] || !info->attrs[DCB_A_PG_CFG])
10954 + netdev = dev_get_by_name(&init_net,
10955 + nla_data(info->attrs[DCB_A_IFNAME]));
10959 + ret = ixgbe_dcb_check_adapter(netdev);
10963 + adapter = netdev_priv(netdev);
10965 + ret = nla_parse_nested(pg_tb, IXGBE_DCB_PG_A_MAX,
10966 + info->attrs[DCB_A_PG_CFG], dcb_pg_nest);
10970 + if (!adapter->dcb_set_bitmap &&
10971 + ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
10972 + adapter->ring_feature[RING_F_DCB].indices))
10975 + tc_max = adapter->ring_feature[RING_F_DCB].indices;
10976 + for (i = PG_A_TC_0; i < tc_max + PG_A_TC_0; i++) {
10980 + ret = nla_parse_nested(param_tb, IXGBE_DCB_TC_A_PARAM_MAX,
10981 + pg_tb[i], dcb_tc_param_nest);
10985 + tc_config = &adapter->dcb_cfg.tc_config[i - PG_A_TC_0];
10986 + tc_tmpcfg = &adapter->temp_dcb_cfg.tc_config[i - PG_A_TC_0];
10987 + if (param_tb[TC_A_PARAM_STRICT_PRIO]) {
10988 + value = nla_get_u8(param_tb[TC_A_PARAM_STRICT_PRIO]);
10989 + tc_tmpcfg->path[dir].prio_type = value;
10990 + if (tc_tmpcfg->path[dir].prio_type !=
10991 + tc_config->path[dir].prio_type)
10994 + if (param_tb[TC_A_PARAM_BW_GROUP_ID]) {
10995 + value = nla_get_u8(param_tb[TC_A_PARAM_BW_GROUP_ID]);
10996 + tc_tmpcfg->path[dir].bwg_id = value;
10997 + if (tc_tmpcfg->path[dir].bwg_id !=
10998 + tc_config->path[dir].bwg_id)
11001 + if (param_tb[TC_A_PARAM_BW_PCT_IN_GROUP]) {
11002 + value = nla_get_u8(param_tb[TC_A_PARAM_BW_PCT_IN_GROUP]);
11003 + tc_tmpcfg->path[dir].bwg_percent = value;
11004 + if (tc_tmpcfg->path[dir].bwg_percent !=
11005 + tc_config->path[dir].bwg_percent)
11008 + if (param_tb[TC_A_PARAM_UP_MAPPING]) {
11009 + value = nla_get_u8(param_tb[TC_A_PARAM_UP_MAPPING]);
11010 + tc_tmpcfg->path[dir].up_to_tc_bitmap = value;
11011 + if (tc_tmpcfg->path[dir].up_to_tc_bitmap !=
11012 + tc_config->path[dir].up_to_tc_bitmap)
11017 + for (i = PG_A_BWG_0; i < PG_A_BWG_MAX; i++) {
11021 + value = nla_get_u8(pg_tb[i]);
11022 + adapter->temp_dcb_cfg.bw_percentage[dir][i-PG_A_BWG_0] = value;
11024 + if (adapter->temp_dcb_cfg.bw_percentage[dir][i-PG_A_BWG_0] !=
11025 + adapter->dcb_cfg.bw_percentage[dir][i-PG_A_BWG_0])
11029 + adapter->temp_dcb_cfg.round_robin_enable = false;
11032 + if (dir == DCB_TX_CONFIG)
11033 + adapter->dcb_set_bitmap |= BIT_PG_TX;
11035 + adapter->dcb_set_bitmap |= BIT_PG_RX;
11037 + adapter->dcb_set_bitmap |= BIT_RESETLINK;
11040 + ret = ixgbe_nl_reply(0, (dir? DCB_C_PGRX_SCFG : DCB_C_PGTX_SCFG),
11041 + DCB_A_PG_CFG, info);
11050 +static int ixgbe_dcb_pgtx_scfg(struct sk_buff *skb, struct genl_info *info)
11052 + return ixgbe_dcb_pg_scfg(skb, info, DCB_TX_CONFIG);
11055 +static int ixgbe_dcb_pgrx_scfg(struct sk_buff *skb, struct genl_info *info)
11057 + return ixgbe_dcb_pg_scfg(skb, info, DCB_RX_CONFIG);
11060 +static int ixgbe_dcb_pg_gcfg(struct sk_buff *skb, struct genl_info *info,
11064 + struct sk_buff *dcb_skb = NULL;
11065 + struct nlattr *pg_nest, *param_nest, *tb;
11066 + struct nlattr *pg_tb[IXGBE_DCB_PG_A_MAX + 1];
11067 + struct nlattr *param_tb[IXGBE_DCB_TC_A_PARAM_MAX + 1];
11068 + struct net_device *netdev = NULL;
11069 + struct ixgbe_adapter *adapter = NULL;
11070 + struct tc_configuration *tc_config = NULL;
11071 + struct tc_bw_alloc *tc = NULL;
11072 + int ret = -ENOMEM;
11075 + if (!info->attrs[DCB_A_IFNAME] || !info->attrs[DCB_A_PG_CFG])
11078 + netdev = dev_get_by_name(&init_net,
11079 + nla_data(info->attrs[DCB_A_IFNAME]));
11083 + ret = ixgbe_dcb_check_adapter(netdev);
11087 + adapter = netdev_priv(netdev);
11089 + ret = nla_parse_nested(pg_tb, IXGBE_DCB_PG_A_MAX,
11090 + info->attrs[DCB_A_PG_CFG], dcb_pg_nest);
11094 + dcb_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
11098 + data = genlmsg_put_reply(dcb_skb, info, &dcb_family, 0,
11099 + (dir) ? DCB_C_PGRX_GCFG : DCB_C_PGTX_GCFG);
11104 + pg_nest = nla_nest_start(dcb_skb, DCB_A_PG_CFG);
11108 + tc_max = adapter->ring_feature[RING_F_DCB].indices;
11109 + for (i = PG_A_TC_0; i < tc_max + PG_A_TC_0; i++) {
11110 + if (!pg_tb[i] && !pg_tb[PG_A_TC_ALL])
11113 + if (pg_tb[PG_A_TC_ALL])
11114 + tb = pg_tb[PG_A_TC_ALL];
11117 + ret = nla_parse_nested(param_tb, IXGBE_DCB_TC_A_PARAM_MAX,
11118 + tb, dcb_tc_param_nest);
11122 + param_nest = nla_nest_start(dcb_skb, i);
11126 + tc_config = &adapter->dcb_cfg.tc_config[i - PG_A_TC_0];
11127 + tc = &adapter->dcb_cfg.tc_config[i - PG_A_TC_0].path[dir];
11129 + if (param_tb[TC_A_PARAM_STRICT_PRIO] ||
11130 + param_tb[TC_A_PARAM_ALL]) {
11131 + ret = nla_put_u8(dcb_skb, TC_A_PARAM_STRICT_PRIO,
11136 + if (param_tb[TC_A_PARAM_BW_GROUP_ID] ||
11137 + param_tb[TC_A_PARAM_ALL]) {
11138 + ret = nla_put_u8(dcb_skb, TC_A_PARAM_BW_GROUP_ID,
11143 + if (param_tb[TC_A_PARAM_BW_PCT_IN_GROUP] ||
11144 + param_tb[TC_A_PARAM_ALL]) {
11145 + ret = nla_put_u8(dcb_skb, TC_A_PARAM_BW_PCT_IN_GROUP,
11146 + tc->bwg_percent);
11150 + if (param_tb[TC_A_PARAM_UP_MAPPING] ||
11151 + param_tb[TC_A_PARAM_ALL]) {
11152 + ret = nla_put_u8(dcb_skb, TC_A_PARAM_UP_MAPPING,
11153 + tc->up_to_tc_bitmap);
11157 + nla_nest_end(dcb_skb, param_nest);
11160 + for (i = PG_A_BWG_0; i < PG_A_BWG_MAX; i++) {
11161 + if (!pg_tb[i] && !pg_tb[PG_A_BWG_ALL])
11164 + ret = nla_put_u8(dcb_skb, i,
11165 + adapter->dcb_cfg.bw_percentage[dir][i-PG_A_BWG_0]);
11171 + nla_nest_end(dcb_skb, pg_nest);
11173 + genlmsg_end(dcb_skb, data);
11174 + ret = genlmsg_reply(dcb_skb, info);
11182 + DPRINTK(DRV, ERR, "Error in get pg %s.\n", dir?"rx":"tx");
11183 + nla_nest_cancel(dcb_skb, param_nest);
11185 + nla_nest_cancel(dcb_skb, pg_nest);
11193 +static int ixgbe_dcb_pgtx_gcfg(struct sk_buff *skb, struct genl_info *info)
11195 + return ixgbe_dcb_pg_gcfg(skb, info, DCB_TX_CONFIG);
11198 +static int ixgbe_dcb_pgrx_gcfg(struct sk_buff *skb, struct genl_info *info)
11200 + return ixgbe_dcb_pg_gcfg(skb, info, DCB_RX_CONFIG);
11205 +static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
11208 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
11210 + adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc = setting;
11211 + if (adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc !=
11212 + adapter->dcb_cfg.tc_config[priority].dcb_pfc) {
11213 + adapter->dcb_set_bitmap |= BIT_PFC;
11217 +static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
11220 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
11222 + *setting = adapter->dcb_cfg.tc_config[priority].dcb_pfc;
11225 +static int ixgbe_dcb_spfccfg(struct sk_buff *skb, struct genl_info *info)
11227 + struct nlattr *tb[IXGBE_DCB_PFC_A_UP_MAX + 1];
11228 + struct net_device *netdev = NULL;
11229 + struct ixgbe_adapter *adapter = NULL;
11230 + int i, ret = -ENOMEM;
11234 + netdev = dev_get_by_name(&init_net,
11235 + nla_data(info->attrs[DCB_A_IFNAME]));
11239 + adapter = netdev_priv(netdev);
11241 + if (!info->attrs[DCB_A_IFNAME] || !info->attrs[DCB_A_PFC_CFG])
11244 + ret = ixgbe_dcb_check_adapter(netdev);
11248 + adapter = netdev_priv(netdev);
11250 + ret = nla_parse_nested(tb, IXGBE_DCB_PFC_A_UP_MAX,
11251 + info->attrs[DCB_A_PFC_CFG],
11252 + dcb_pfc_up_nest);
11256 + if (!adapter->dcb_set_bitmap &&
11257 + ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
11258 + adapter->ring_feature[RING_F_DCB].indices)) {
11263 + for (i = PFC_A_UP_0; i < PFC_A_UP_MAX; i++) {
11267 + setting = nla_get_u8(tb[i]);
11268 + adapter->temp_dcb_cfg.tc_config[i-PFC_A_UP_0].dcb_pfc = setting;
11270 + if (adapter->temp_dcb_cfg.tc_config[i-PFC_A_UP_0].dcb_pfc !=
11271 + adapter->dcb_cfg.tc_config[i-PFC_A_UP_0].dcb_pfc)
11276 + adapter->dcb_set_bitmap |= BIT_PFC;
11278 + ret = ixgbe_nl_reply(0, DCB_C_PFC_SCFG, DCB_A_PFC_CFG, info);
11287 +static int ixgbe_dcb_gpfccfg(struct sk_buff *skb, struct genl_info *info)
11290 + struct sk_buff *dcb_skb = NULL;
11291 + struct nlattr *tb[IXGBE_DCB_PFC_A_UP_MAX + 1], *nest;
11292 + struct net_device *netdev = NULL;
11293 + struct ixgbe_adapter *adapter = NULL;
11294 + int ret = -ENOMEM;
11297 + if (!info->attrs[DCB_A_IFNAME] || !info->attrs[DCB_A_PFC_CFG])
11300 + netdev = dev_get_by_name(&init_net,
11301 + nla_data(info->attrs[DCB_A_IFNAME]));
11305 + ret = ixgbe_dcb_check_adapter(netdev);
11309 + adapter = netdev_priv(netdev);
11311 + ret = nla_parse_nested(tb, IXGBE_DCB_PFC_A_UP_MAX,
11312 + info->attrs[DCB_A_PFC_CFG], dcb_pfc_up_nest);
11316 + dcb_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
11320 + data = genlmsg_put_reply(dcb_skb, info, &dcb_family, 0,
11325 + nest = nla_nest_start(dcb_skb, DCB_A_PFC_CFG);
11329 + for (i = PFC_A_UP_0; i < PFC_A_UP_MAX; i++) {
11330 + if (!tb[i] && !tb[PFC_A_UP_ALL])
11333 + ret = nla_put_u8(dcb_skb, i,
11334 + adapter->dcb_cfg.tc_config[i-PFC_A_UP_0].dcb_pfc);
11336 + nla_nest_cancel(dcb_skb, nest);
11341 + nla_nest_end(dcb_skb, nest);
11343 + genlmsg_end(dcb_skb, data);
11345 + ret = genlmsg_reply(dcb_skb, info);
11353 + DPRINTK(DRV, ERR, "Error in get pfc stats.\n");
11362 +static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
11364 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
11367 + if (!adapter->dcb_set_bitmap)
11368 + return DCB_NO_HW_CHG;
11370 + ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
11371 + adapter->ring_feature[RING_F_DCB].indices);
11373 + return DCB_NO_HW_CHG;
11375 + /* Only take down the adapter if the configuration change
11376 + * requires a reset.
11377 + */
11378 + if (adapter->dcb_set_bitmap & BIT_RESETLINK) {
11379 + while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
11382 + if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
11383 + if (netif_running(netdev))
11384 +#ifdef HAVE_NET_DEVICE_OPS
11385 + netdev->netdev_ops->ndo_stop(netdev);
11386 +#else
11387 + netdev->stop(netdev);
11388 +#endif
11389 + ixgbe_clear_interrupt_scheme(adapter);
11391 + if (netif_running(netdev))
11392 + ixgbe_down(adapter);
11396 + if (adapter->dcb_cfg.pfc_mode_enable) {
11397 + switch (adapter->hw.mac.type) {
11398 + case ixgbe_mac_82599EB:
11399 + if (adapter->hw.fc.current_mode != ixgbe_fc_pfc)
11400 + adapter->last_lfc_mode = adapter->hw.fc.current_mode;
11401 + break;
11402 + default:
11403 + break;
11404 + }
11405 + adapter->hw.fc.requested_mode = ixgbe_fc_pfc;
11406 + } else {
11407 + switch (adapter->hw.mac.type) {
11408 + case ixgbe_mac_82598EB:
11409 + adapter->hw.fc.requested_mode = ixgbe_fc_none;
11410 + break;
11411 + case ixgbe_mac_82599EB:
11412 + adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
11413 + break;
11414 + default:
11415 + break;
11416 + }
11417 + }
11419 + if (adapter->dcb_set_bitmap & BIT_RESETLINK) {
11420 + if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
11421 + ixgbe_init_interrupt_scheme(adapter);
11422 + if (netif_running(netdev))
11423 +#ifdef HAVE_NET_DEVICE_OPS
11424 + netdev->netdev_ops->ndo_open(netdev);
11426 + netdev->open(netdev);
11429 + if (netif_running(netdev))
11430 + ixgbe_up(adapter);
11432 + ret = DCB_HW_CHG_RST;
11433 + } else if (adapter->dcb_set_bitmap & BIT_PFC) {
11434 + if (adapter->hw.mac.type == ixgbe_mac_82598EB)
11435 + ixgbe_dcb_config_pfc_82598(&adapter->hw,
11436 + &adapter->dcb_cfg);
11437 + else if (adapter->hw.mac.type == ixgbe_mac_82599EB)
11438 + ixgbe_dcb_config_pfc_82599(&adapter->hw,
11439 + &adapter->dcb_cfg);
11440 + ret = DCB_HW_CHG;
11442 + if (adapter->dcb_cfg.pfc_mode_enable)
11443 + adapter->hw.fc.current_mode = ixgbe_fc_pfc;
11445 + if (adapter->dcb_set_bitmap & BIT_RESETLINK)
11446 + clear_bit(__IXGBE_RESETTING, &adapter->state);
11447 + adapter->dcb_set_bitmap = 0x00;
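
The commit path above is driven by a dirty bitmap: each setter stages its change in temp_dcb_cfg and ORs in a BIT_* flag, and the commit applies only what changed, quiescing the link only when BIT_RESETLINK is staged. A stripped-down sketch of that stage-then-commit pattern, with illustrative names and flag values (not driver code):

/* Values and names illustrative only. */
#define BIT_PFC		0x02
#define BIT_RESETLINK	0x40

struct dcb_state {
	int live_pfc_enable;
	int staged_pfc_enable;
	unsigned long dirty;		/* pending-change bitmap */
};

/* Setters only stage; nothing touches hardware yet. */
static void stage_pfc(struct dcb_state *s, int enable)
{
	s->staged_pfc_enable = enable;
	if (s->staged_pfc_enable != s->live_pfc_enable)
		s->dirty |= BIT_PFC;
}

/* Commit applies staged state and clears the bitmap in one shot. */
static int commit(struct dcb_state *s)
{
	if (!s->dirty)
		return 0;			/* no hardware change */
	if (s->dirty & BIT_RESETLINK) {
		/* quiesce the device before reconfiguring, as above */
	}
	s->live_pfc_enable = s->staged_pfc_enable;
	s->dirty = 0;
	return 1;
}
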
11451 +static int ixgbe_dcb_set_all(struct sk_buff *skb, struct genl_info *info)
11453 + struct net_device *netdev = NULL;
11454 + struct ixgbe_adapter *adapter = NULL;
11455 + int ret = -ENOMEM;
11459 + if (!info->attrs[DCB_A_IFNAME] || !info->attrs[DCB_A_SET_ALL])
11462 + netdev = dev_get_by_name(&init_net,
11463 + nla_data(info->attrs[DCB_A_IFNAME]));
11467 + ret = ixgbe_dcb_check_adapter(netdev);
11471 + adapter = netdev_priv(netdev);
11473 + if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE)) {
11478 + value = nla_get_u8(info->attrs[DCB_A_SET_ALL]);
11479 + if ((value & 1) != value) {
11480 + DPRINTK(DRV, ERR, "Value must be 0 or 1, got %d.\n", value);
11482 + if (!adapter->dcb_set_bitmap) {
11487 + while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
11490 + ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg,
11491 + &adapter->dcb_cfg,
11492 + adapter->ring_feature[RING_F_DCB].indices);
11494 + clear_bit(__IXGBE_RESETTING, &adapter->state);
11498 + ixgbe_down(adapter);
11499 + ixgbe_up(adapter);
11500 + adapter->dcb_set_bitmap = 0x00;
11501 + clear_bit(__IXGBE_RESETTING, &adapter->state);
11505 + ret = ixgbe_nl_reply(retval, DCB_C_SET_ALL, DCB_A_SET_ALL, info);
11517 +static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap)
11519 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
11522 + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
11524 + case DCB_CAP_ATTR_PG:
11527 + case DCB_CAP_ATTR_PFC:
11530 + case DCB_CAP_ATTR_UP2TC:
11533 + case DCB_CAP_ATTR_PG_TCS:
11536 + case DCB_CAP_ATTR_PFC_TCS:
11539 + case DCB_CAP_ATTR_GSP:
11542 + case DCB_CAP_ATTR_BCN:
11556 +static u8 ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
11558 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
11561 + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
11563 + case DCB_NUMTCS_ATTR_PG:
11564 + *num = adapter->dcb_cfg.num_tcs.pg_tcs;
11566 + case DCB_NUMTCS_ATTR_PFC:
11567 + *num = adapter->dcb_cfg.num_tcs.pfc_tcs;
11580 +static u8 ixgbe_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num)
11582 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
11585 + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
11587 + case DCB_NUMTCS_ATTR_PG:
11588 + adapter->dcb_cfg.num_tcs.pg_tcs = num;
11590 + case DCB_NUMTCS_ATTR_PFC:
11591 + adapter->dcb_cfg.num_tcs.pfc_tcs = num;
11604 +static u8 ixgbe_dcbnl_getpfcstate(struct net_device *netdev)
11606 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
11608 + return adapter->dcb_cfg.pfc_mode_enable;
11611 +static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
11613 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
11615 + adapter->temp_dcb_cfg.pfc_mode_enable = state;
11616 + if (adapter->temp_dcb_cfg.pfc_mode_enable !=
11617 + adapter->dcb_cfg.pfc_mode_enable)
11618 + adapter->dcb_set_bitmap |= BIT_PFC;
11622 +#ifdef HAVE_DCBNL_OPS_GETAPP
11624 + * ixgbe_dcbnl_getapp - retrieve the DCBX application user priority
11625 + * @netdev: the corresponding netdev
11626 + * @idtype: identifies the id as ether type or TCP/UDP port number
11627 + * @id: id is either ether type or TCP/UDP port number
11629 + * Returns: on success, a non-zero 802.1p user priority bitmap;
11630 + * otherwise returns 0 as the invalid user priority bitmap to indicate an
11633 +static u8 ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
11637 + switch (idtype) {
11638 + case DCB_APP_IDTYPE_ETHTYPE:
11640 + if (id == ETH_P_FCOE)
11641 + rval = ixgbe_fcoe_getapp(netdev_priv(netdev));
11644 + case DCB_APP_IDTYPE_PORTNUM:
11653 + * ixgbe_dcbnl_setapp - set the DCBX application user priority
11654 + * @netdev: the corresponding netdev
11655 + * @idtype: identifies the id as ether type or TCP/UDP port number
11656 + * @id: id is either ether type or TCP/UDP port number
11657 + * @up: the 802.1p user priority bitmap
11659 + * Returns: 0 on success or 1 on error
11661 +static u8 ixgbe_dcbnl_setapp(struct net_device *netdev,
11662 + u8 idtype, u16 id, u8 up)
11666 + switch (idtype) {
11667 + case DCB_APP_IDTYPE_ETHTYPE:
11669 + if (id == ETH_P_FCOE) {
11671 + struct ixgbe_adapter *adapter;
11673 + adapter = netdev_priv(netdev);
11674 + tc = adapter->fcoe.tc;
11675 + rval = ixgbe_fcoe_setapp(adapter, up);
11676 + if ((!rval) && (tc != adapter->fcoe.tc) &&
11677 + (adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
11678 + (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
11679 + adapter->dcb_set_bitmap |= BIT_APP_UPCHG;
11680 + adapter->dcb_set_bitmap |= BIT_RESETLINK;
11685 + case DCB_APP_IDTYPE_PORTNUM:
11692 +#endif /* HAVE_DCBNL_OPS_GETAPP */
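
The getapp/setapp pair above trades in 802.1p user-priority bitmaps rather than raw priority values: bit N set means priority N carries the application's traffic, and 0 is the invalid bitmap. A minimal sketch of that mapping, assuming priorities 0-7 (illustrative helper, not part of the patch):

/* Illustrative: convert a priority 0..7 to its one-hot bitmap. */
static u8 up_to_bitmap(u8 up)
{
	return (up < 8) ? (u8)(1 << up) : 0;	/* 0 == invalid */
}
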
11698 +struct dcbnl_rtnl_ops dcbnl_ops = {
11699 + .getstate = ixgbe_dcbnl_get_state,
11700 + .setstate = ixgbe_dcbnl_set_state,
11701 + .getpermhwaddr = ixgbe_dcbnl_get_perm_hw_addr,
11702 + .setpgtccfgtx = ixgbe_dcbnl_set_pg_tc_cfg_tx,
11703 + .setpgbwgcfgtx = ixgbe_dcbnl_set_pg_bwg_cfg_tx,
11704 + .setpgtccfgrx = ixgbe_dcbnl_set_pg_tc_cfg_rx,
11705 + .setpgbwgcfgrx = ixgbe_dcbnl_set_pg_bwg_cfg_rx,
11706 + .getpgtccfgtx = ixgbe_dcbnl_get_pg_tc_cfg_tx,
11707 + .getpgbwgcfgtx = ixgbe_dcbnl_get_pg_bwg_cfg_tx,
11708 + .getpgtccfgrx = ixgbe_dcbnl_get_pg_tc_cfg_rx,
11709 + .getpgbwgcfgrx = ixgbe_dcbnl_get_pg_bwg_cfg_rx,
11710 + .setpfccfg = ixgbe_dcbnl_set_pfc_cfg,
11711 + .getpfccfg = ixgbe_dcbnl_get_pfc_cfg,
11712 + .setall = ixgbe_dcbnl_set_all,
11713 + .getcap = ixgbe_dcbnl_getcap,
11714 + .getnumtcs = ixgbe_dcbnl_getnumtcs,
11715 + .setnumtcs = ixgbe_dcbnl_setnumtcs,
11716 + .getpfcstate = ixgbe_dcbnl_getpfcstate,
11717 + .setpfcstate = ixgbe_dcbnl_setpfcstate,
11718 +#ifdef HAVE_DCBNL_OPS_GETAPP
11719 + .getapp = ixgbe_dcbnl_getapp,
11720 + .setapp = ixgbe_dcbnl_setapp,
11724 +/* DCB Generic NETLINK command Definitions */
11725 +/* Get DCB Admin Mode */
11726 +static struct genl_ops ixgbe_dcb_genl_c_gstate = {
11727 + .cmd = DCB_C_GSTATE,
11728 + .flags = GENL_ADMIN_PERM,
11729 + .policy = dcb_genl_policy,
11730 + .doit = ixgbe_dcb_gstate,
11734 +/* Set DCB Admin Mode */
11735 +static struct genl_ops ixgbe_dcb_genl_c_sstate = {
11736 + .cmd = DCB_C_SSTATE,
11737 + .flags = GENL_ADMIN_PERM,
11738 + .policy = dcb_genl_policy,
11739 + .doit = ixgbe_dcb_sstate,
11743 +/* Set TX Traffic Attributes */
11744 +static struct genl_ops ixgbe_dcb_genl_c_spgtx = {
11745 + .cmd = DCB_C_PGTX_SCFG,
11746 + .flags = GENL_ADMIN_PERM,
11747 + .policy = dcb_genl_policy,
11748 + .doit = ixgbe_dcb_pgtx_scfg,
11752 +/* Set RX Traffic Attributes */
11753 +static struct genl_ops ixgbe_dcb_genl_c_spgrx = {
11754 + .cmd = DCB_C_PGRX_SCFG,
11755 + .flags = GENL_ADMIN_PERM,
11756 + .policy = dcb_genl_policy,
11757 + .doit = ixgbe_dcb_pgrx_scfg,
11762 +static struct genl_ops ixgbe_dcb_genl_c_spfc = {
11763 + .cmd = DCB_C_PFC_SCFG,
11764 + .flags = GENL_ADMIN_PERM,
11765 + .policy = dcb_genl_policy,
11766 + .doit = ixgbe_dcb_spfccfg,
11770 +/* Get TX Traffic Attributes */
11771 +static struct genl_ops ixgbe_dcb_genl_c_gpgtx = {
11772 + .cmd = DCB_C_PGTX_GCFG,
11773 + .flags = GENL_ADMIN_PERM,
11774 + .policy = dcb_genl_policy,
11775 + .doit = ixgbe_dcb_pgtx_gcfg,
11779 +/* Get RX Traffic Attributes */
11780 +static struct genl_ops ixgbe_dcb_genl_c_gpgrx = {
11781 + .cmd = DCB_C_PGRX_GCFG,
11782 + .flags = GENL_ADMIN_PERM,
11783 + .policy = dcb_genl_policy,
11784 + .doit = ixgbe_dcb_pgrx_gcfg,
11789 +static struct genl_ops ixgbe_dcb_genl_c_gpfc = {
11790 + .cmd = DCB_C_PFC_GCFG,
11791 + .flags = GENL_ADMIN_PERM,
11792 + .policy = dcb_genl_policy,
11793 + .doit = ixgbe_dcb_gpfccfg,
11798 +/* Get Link Speed setting */
11799 +static struct genl_ops ixgbe_dcb_genl_c_glink_spd = {
11800 + .cmd = DCB_C_GLINK_SPD,
11801 + .flags = GENL_ADMIN_PERM,
11802 + .policy = dcb_genl_policy,
11803 + .doit = ixgbe_dcb_glink_spd,
11807 +/* Set Link Speed setting */
11808 +static struct genl_ops ixgbe_dcb_genl_c_slink_spd = {
11809 + .cmd = DCB_C_SLINK_SPD,
11810 + .flags = GENL_ADMIN_PERM,
11811 + .policy = dcb_genl_policy,
11812 + .doit = ixgbe_dcb_slink_spd,
11816 +/* Commit all staged "set" changes */
11817 +static struct genl_ops ixgbe_dcb_genl_c_set_all = {
11818 + .cmd = DCB_C_SET_ALL,
11819 + .flags = GENL_ADMIN_PERM,
11820 + .policy = dcb_genl_policy,
11821 + .doit = ixgbe_dcb_set_all,
11825 +/* Get permanent HW address */
11826 +static struct genl_ops ixgbe_dcb_genl_c_gperm_hwaddr = {
11827 + .cmd = DCB_C_GPERM_HWADDR,
11828 + .flags = GENL_ADMIN_PERM,
11829 + .policy = dcb_genl_policy,
11830 + .doit = ixgbe_dcb_gperm_hwaddr,
11835 + * ixgbe_dcb_netlink_register - Initialize the NETLINK communication channel
11838 + * Call out to the DCB components so they can register their families and
11839 + * commands with Generic NETLINK mechanism. Return zero on success and
11840 + * non-zero on failure.
11843 +int ixgbe_dcb_netlink_register(void)
11847 + /* consider writing as:
11848 + * ret = genl_register_family(aaa)
11849 + * || genl_register_ops(bbb, bbb)
11850 + * || genl_register_ops(ccc, ccc);
11854 + ret = genl_register_family(&dcb_family);
11858 + ret = genl_register_ops(&dcb_family, &ixgbe_dcb_genl_c_gstate);
11862 + ret = genl_register_ops(&dcb_family, &ixgbe_dcb_genl_c_sstate);
11866 + ret = genl_register_ops(&dcb_family, &ixgbe_dcb_genl_c_spgtx);
11870 + ret = genl_register_ops(&dcb_family, &ixgbe_dcb_genl_c_spgrx);
11874 + ret = genl_register_ops(&dcb_family, &ixgbe_dcb_genl_c_spfc);
11878 + ret = genl_register_ops(&dcb_family, &ixgbe_dcb_genl_c_gpfc);
11882 + ret = genl_register_ops(&dcb_family, &ixgbe_dcb_genl_c_gpgtx);
11886 + ret = genl_register_ops(&dcb_family, &ixgbe_dcb_genl_c_gpgrx);
11891 + ret = genl_register_ops(&dcb_family, &ixgbe_dcb_genl_c_glink_spd);
11895 + ret = genl_register_ops(&dcb_family, &ixgbe_dcb_genl_c_slink_spd);
11899 + ret = genl_register_ops(&dcb_family, &ixgbe_dcb_genl_c_set_all);
11903 + ret = genl_register_ops(&dcb_family, &ixgbe_dcb_genl_c_gperm_hwaddr);
11910 + genl_unregister_family(&dcb_family);
11914 +int ixgbe_dcb_netlink_unregister(void)
11916 + return genl_unregister_family(&dcb_family);
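
For reference, the registration sequence above follows the two-step generic netlink API of this kernel era: register the family once, then attach each command with its own genl_register_ops() call, unwinding with genl_unregister_family() on any failure. A minimal self-contained sketch of the same pattern (illustrative family and command names, not part of the patch):

static struct genl_family example_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = 0,
	.name = "EXAMPLE",
	.version = 1,
	.maxattr = 0,
};

static int example_doit(struct sk_buff *skb, struct genl_info *info)
{
	return 0;
}

static struct genl_ops example_op = {
	.cmd = 1,
	.flags = GENL_ADMIN_PERM,
	.doit = example_doit,
};

static int example_register(void)
{
	int ret = genl_register_family(&example_family);

	if (ret)
		return ret;
	ret = genl_register_ops(&example_family, &example_op);
	if (ret)
		genl_unregister_family(&example_family);
	return ret;
}
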
11919 diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_ethtool.c linux-2.6.22-50/drivers/net/ixgbe/ixgbe_ethtool.c
11920 --- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_ethtool.c 1969-12-31 19:00:00.000000000 -0500
11921 +++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_ethtool.c 2010-08-25 17:56:26.000000000 -0400
11923 +/*******************************************************************************
11925 + Intel 10 Gigabit PCI Express Linux driver
11926 + Copyright(c) 1999 - 2010 Intel Corporation.
11928 + This program is free software; you can redistribute it and/or modify it
11929 + under the terms and conditions of the GNU General Public License,
11930 + version 2, as published by the Free Software Foundation.
11932 + This program is distributed in the hope it will be useful, but WITHOUT
11933 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11934 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11937 + You should have received a copy of the GNU General Public License along with
11938 + this program; if not, write to the Free Software Foundation, Inc.,
11939 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
11941 + The full GNU General Public License is included in this distribution in
11942 + the file called "COPYING".
11944 + Contact Information:
11945 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
11946 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
11948 +*******************************************************************************/
11950 +/* ethtool support for ixgbe */
11952 +#include <linux/types.h>
11953 +#include <linux/module.h>
11954 +#include <linux/pci.h>
11955 +#include <linux/netdevice.h>
11956 +#include <linux/ethtool.h>
11957 +#include <linux/vmalloc.h>
11958 +#ifdef SIOCETHTOOL
11959 +#include <asm/uaccess.h>
11961 +#include "ixgbe.h"
11963 +#ifndef ETH_GSTRING_LEN
11964 +#define ETH_GSTRING_LEN 32
11967 +#define IXGBE_ALL_RAR_ENTRIES 16
11969 +#ifdef ETHTOOL_OPS_COMPAT
11970 +#include "kcompat_ethtool.c"
11972 +#ifdef ETHTOOL_GSTATS
11973 +struct ixgbe_stats {
11974 + char stat_string[ETH_GSTRING_LEN];
11979 +#define IXGBE_STAT(m) sizeof(((struct ixgbe_adapter *)0)->m), \
11980 + offsetof(struct ixgbe_adapter, m)
11981 +static struct ixgbe_stats ixgbe_gstrings_stats[] = {
11982 + {"rx_packets", IXGBE_STAT(net_stats.rx_packets)},
11983 + {"tx_packets", IXGBE_STAT(net_stats.tx_packets)},
11984 + {"rx_bytes", IXGBE_STAT(net_stats.rx_bytes)},
11985 + {"tx_bytes", IXGBE_STAT(net_stats.tx_bytes)},
11986 + {"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
11987 + {"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
11988 + {"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
11989 + {"tx_bytes_nic", IXGBE_STAT(stats.gotc)},
11990 + {"lsc_int", IXGBE_STAT(lsc_int)},
11991 + {"tx_busy", IXGBE_STAT(tx_busy)},
11992 + {"non_eop_descs", IXGBE_STAT(non_eop_descs)},
11993 + {"rx_errors", IXGBE_STAT(net_stats.rx_errors)},
11994 + {"tx_errors", IXGBE_STAT(net_stats.tx_errors)},
11995 + {"rx_dropped", IXGBE_STAT(net_stats.rx_dropped)},
11996 +#ifndef CONFIG_IXGBE_NAPI
11997 + {"rx_dropped_backlog", IXGBE_STAT(rx_dropped_backlog)},
11999 + {"tx_dropped", IXGBE_STAT(net_stats.tx_dropped)},
12000 + {"multicast", IXGBE_STAT(net_stats.multicast)},
12001 + {"broadcast", IXGBE_STAT(stats.bprc)},
12002 + {"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) },
12003 + {"collisions", IXGBE_STAT(net_stats.collisions)},
12004 + {"rx_over_errors", IXGBE_STAT(net_stats.rx_over_errors)},
12005 + {"rx_crc_errors", IXGBE_STAT(net_stats.rx_crc_errors)},
12006 + {"rx_frame_errors", IXGBE_STAT(net_stats.rx_frame_errors)},
12007 + {"rx_fifo_errors", IXGBE_STAT(net_stats.rx_fifo_errors)},
12008 + {"rx_missed_errors", IXGBE_STAT(net_stats.rx_missed_errors)},
12009 + {"tx_aborted_errors", IXGBE_STAT(net_stats.tx_aborted_errors)},
12010 + {"tx_carrier_errors", IXGBE_STAT(net_stats.tx_carrier_errors)},
12011 + {"tx_fifo_errors", IXGBE_STAT(net_stats.tx_fifo_errors)},
12012 + {"tx_heartbeat_errors", IXGBE_STAT(net_stats.tx_heartbeat_errors)},
12013 + {"tx_timeout_count", IXGBE_STAT(tx_timeout_count)},
12014 + {"tx_restart_queue", IXGBE_STAT(restart_queue)},
12015 + {"rx_long_length_errors", IXGBE_STAT(stats.roc)},
12016 + {"rx_short_length_errors", IXGBE_STAT(stats.ruc)},
12017 + {"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)},
12018 + {"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)},
12019 + {"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)},
12020 + {"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)},
12021 + {"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)},
12022 +#ifndef IXGBE_NO_LLI
12023 + {"low_latency_interrupt", IXGBE_STAT(lli_int)},
12025 + {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
12026 + {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
12027 +#ifndef IXGBE_NO_LRO
12028 + {"lro_aggregated", IXGBE_STAT(lro_stats.coal)},
12029 + {"lro_flushed", IXGBE_STAT(lro_stats.flushed)},
12030 + {"lro_recycled", IXGBE_STAT(lro_stats.recycled)},
12031 +#endif /* IXGBE_NO_LRO */
12032 + {"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
12033 + {"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)},
12034 + {"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)},
12035 + {"rx_flm", IXGBE_STAT(flm)},
12037 + {"fdir_match", IXGBE_STAT(stats.fdirmatch)},
12038 + {"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
12039 +#endif /* HAVE_TX_MQ */
12041 + {"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)},
12042 + {"fcoe_last_errors", IXGBE_STAT(stats.fclast)},
12043 + {"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
12044 + {"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)},
12045 + {"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)},
12046 + {"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)},
12047 + {"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)},
12048 +#endif /* IXGBE_FCOE */
12051 +#define IXGBE_QUEUE_STATS_LEN \
12052 + ((((struct ixgbe_adapter *)netdev_priv(netdev))->num_tx_queues + \
12053 + ((struct ixgbe_adapter *)netdev_priv(netdev))->num_rx_queues) * \
12054 + (sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
12055 +#define IXGBE_VF_STATS_LEN \
12056 + ((((struct ixgbe_adapter *)netdev_priv(netdev))->num_vfs) * \
12057 + (sizeof(struct vf_stats) / sizeof(u64)))
12058 +#define IXGBE_PB_STATS_LEN ( \
12059 + (((struct ixgbe_adapter *)netdev_priv(netdev))->flags & \
12060 + IXGBE_FLAG_DCB_ENABLED) ? \
12061 + (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
12062 + sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
12063 + sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
12064 + sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
12065 + / sizeof(u64) : 0)
12066 +#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_PB_STATS_LEN + IXGBE_QUEUE_STATS_LEN + IXGBE_VF_STATS_LEN)
12067 +#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
12068 +#endif /* ETHTOOL_GSTATS */
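
The IXGBE_STAT() initializer above records each counter's size and offset within struct ixgbe_adapter so the stats dump can read every field generically, dereferencing it as a u64 or u32 by size (the loop in ixgbe_get_ethtool_stats() below does exactly that). A self-contained userspace sketch of the same sizeof/offsetof technique, with hypothetical struct and field names:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical counters struct standing in for struct ixgbe_adapter. */
struct stats {
	uint64_t rx_packets;
	uint32_t lsc_int;
};

struct stat_desc {
	const char *name;
	size_t size;	/* sizeof() of the field */
	size_t off;	/* offsetof() of the field */
};

#define STAT(m) { #m, sizeof(((struct stats *)0)->m), \
		  offsetof(struct stats, m) }

static const struct stat_desc descs[] = { STAT(rx_packets), STAT(lsc_int) };

/* Read one counter generically, widening u32 fields to u64. */
static uint64_t read_stat(const struct stats *s, const struct stat_desc *d)
{
	const char *p = (const char *)s + d->off;

	return (d->size == sizeof(uint64_t)) ? *(const uint64_t *)p
					     : *(const uint32_t *)p;
}
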
12069 +#ifdef ETHTOOL_TEST
12070 +static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
12071 + "Register test (offline)", "Eeprom test (offline)",
12072 + "Interrupt test (offline)", "Loopback test (offline)",
12073 + "Link test (on/offline)"
12075 +#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
12076 +#endif /* ETHTOOL_TEST */
12078 +static int ixgbe_get_settings(struct net_device *netdev,
12079 + struct ethtool_cmd *ecmd)
12081 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
12082 + struct ixgbe_hw *hw = &adapter->hw;
12083 + u32 link_speed = 0;
12086 + ecmd->supported = SUPPORTED_10000baseT_Full;
12087 + ecmd->autoneg = AUTONEG_ENABLE;
12088 + ecmd->transceiver = XCVR_EXTERNAL;
12089 + if ((hw->phy.media_type == ixgbe_media_type_copper) ||
12090 + (hw->phy.multispeed_fiber)) {
12091 + ecmd->supported |= (SUPPORTED_1000baseT_Full |
12092 + SUPPORTED_Autoneg);
12094 + ecmd->advertising = ADVERTISED_Autoneg;
12095 + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
12096 + ecmd->advertising |= ADVERTISED_10000baseT_Full;
12097 + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
12098 + ecmd->advertising |= ADVERTISED_1000baseT_Full;
12100 + * It's possible that phy.autoneg_advertised may not be
12101 + * set yet. If so display what the default would be -
12102 + * both 1G and 10G supported.
12104 + if (!(ecmd->advertising & (ADVERTISED_1000baseT_Full |
12105 + ADVERTISED_10000baseT_Full)))
12106 + ecmd->advertising |= (ADVERTISED_10000baseT_Full |
12107 + ADVERTISED_1000baseT_Full);
12109 + if (hw->phy.media_type == ixgbe_media_type_copper) {
12110 + ecmd->supported |= SUPPORTED_TP;
12111 + ecmd->advertising |= ADVERTISED_TP;
12112 + ecmd->port = PORT_TP;
12114 + ecmd->supported |= SUPPORTED_FIBRE;
12115 + ecmd->advertising |= ADVERTISED_FIBRE;
12116 + ecmd->port = PORT_FIBRE;
12118 + } else if (hw->phy.media_type == ixgbe_media_type_backplane) {
12119 + /* Set as FIBRE until SERDES is defined in kernel */
12120 + if (hw->device_id == IXGBE_DEV_ID_82598_BX) {
12121 + ecmd->supported = (SUPPORTED_1000baseT_Full |
12122 + SUPPORTED_FIBRE);
12123 + ecmd->advertising = (ADVERTISED_1000baseT_Full |
12124 + ADVERTISED_FIBRE);
12125 + ecmd->port = PORT_FIBRE;
12126 + ecmd->autoneg = AUTONEG_DISABLE;
12127 + } else if ((hw->device_id == IXGBE_DEV_ID_82599_COMBO_BACKPLANE) ||
12128 + (hw->device_id == IXGBE_DEV_ID_82599_KX4_MEZZ)) {
12129 + ecmd->supported |= (SUPPORTED_1000baseT_Full |
12130 + SUPPORTED_Autoneg |
12131 + SUPPORTED_FIBRE);
12132 + ecmd->advertising = (ADVERTISED_10000baseT_Full |
12133 + ADVERTISED_1000baseT_Full |
12134 + ADVERTISED_Autoneg |
12135 + ADVERTISED_FIBRE);
12136 + ecmd->port = PORT_FIBRE;
12138 + ecmd->supported |= (SUPPORTED_1000baseT_Full |
12139 + SUPPORTED_FIBRE);
12140 + ecmd->advertising = (ADVERTISED_10000baseT_Full |
12141 + ADVERTISED_1000baseT_Full |
12142 + ADVERTISED_FIBRE);
12143 + ecmd->port = PORT_FIBRE;
12146 + ecmd->supported |= SUPPORTED_FIBRE;
12147 + ecmd->advertising = (ADVERTISED_10000baseT_Full |
12148 + ADVERTISED_FIBRE);
12149 + ecmd->port = PORT_FIBRE;
12150 + ecmd->autoneg = AUTONEG_DISABLE;
12153 +#ifdef HAVE_ETHTOOL_SFP_DISPLAY_PORT
12154 + /* Get PHY type */
12155 + switch (adapter->hw.phy.type) {
12156 + case ixgbe_phy_tn:
12157 + case ixgbe_phy_cu_unknown:
12158 + /* Copper 10G-BASET */
12159 + ecmd->port = PORT_TP;
12161 + case ixgbe_phy_qt:
12162 + ecmd->port = PORT_FIBRE;
12164 + case ixgbe_phy_nl:
12165 + case ixgbe_phy_sfp_passive_tyco:
12166 + case ixgbe_phy_sfp_passive_unknown:
12167 + case ixgbe_phy_sfp_ftl:
12168 + case ixgbe_phy_sfp_avago:
12169 + case ixgbe_phy_sfp_intel:
12170 + case ixgbe_phy_sfp_unknown:
12171 + switch (adapter->hw.phy.sfp_type) {
12172 + /* SFP+ devices, further checking needed */
12173 + case ixgbe_sfp_type_da_cu:
12174 + case ixgbe_sfp_type_da_cu_core0:
12175 + case ixgbe_sfp_type_da_cu_core1:
12176 + ecmd->port = PORT_DA;
12178 + case ixgbe_sfp_type_sr:
12179 + case ixgbe_sfp_type_lr:
12180 + case ixgbe_sfp_type_srlr_core0:
12181 + case ixgbe_sfp_type_srlr_core1:
12182 + ecmd->port = PORT_FIBRE;
12184 + case ixgbe_sfp_type_not_present:
12185 + ecmd->port = PORT_NONE;
12187 + case ixgbe_sfp_type_1g_cu_core0:
12188 + case ixgbe_sfp_type_1g_cu_core1:
12189 + ecmd->port = PORT_TP;
12190 + ecmd->supported = SUPPORTED_TP;
12191 + ecmd->advertising = (ADVERTISED_1000baseT_Full |
12194 + case ixgbe_sfp_type_unknown:
12196 + ecmd->port = PORT_OTHER;
12200 + case ixgbe_phy_xaui:
12201 + ecmd->port = PORT_NONE;
12203 + case ixgbe_phy_unknown:
12204 + case ixgbe_phy_generic:
12205 + case ixgbe_phy_sfp_unsupported:
12207 + ecmd->port = PORT_OTHER;
12212 + if (!in_interrupt()) {
12213 + hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
12216 + * this case is a special workaround for RHEL5 bonding
12217 + * that calls this routine from interrupt context
12219 + link_speed = adapter->link_speed;
12220 + link_up = adapter->link_up;
12224 + ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
12225 + SPEED_10000 : SPEED_1000;
12226 + ecmd->duplex = DUPLEX_FULL;
12228 + ecmd->speed = -1;
12229 + ecmd->duplex = -1;
12235 +static int ixgbe_set_settings(struct net_device *netdev,
12236 + struct ethtool_cmd *ecmd)
12238 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
12239 + struct ixgbe_hw *hw = &adapter->hw;
12240 + u32 advertised, old;
12243 + if ((hw->phy.media_type == ixgbe_media_type_copper) ||
12244 + (hw->phy.multispeed_fiber)) {
12245 + /* 10000/copper and 1000/copper must autoneg;
12246 + * this function does not support any duplex forcing, but can
12247 + * limit the advertising of the adapter to only 10000 or 1000 */
12248 + if (ecmd->autoneg == AUTONEG_DISABLE)
12251 + old = hw->phy.autoneg_advertised;
12253 + if (ecmd->advertising & ADVERTISED_10000baseT_Full)
12254 + advertised |= IXGBE_LINK_SPEED_10GB_FULL;
12256 + if (ecmd->advertising & ADVERTISED_1000baseT_Full)
12257 + advertised |= IXGBE_LINK_SPEED_1GB_FULL;
12259 + if (old == advertised)
12261 + /* this sets the link speed and restarts auto-neg */
12262 + hw->mac.autotry_restart = true;
12263 + err = hw->mac.ops.setup_link(hw, advertised, true, true);
12265 + DPRINTK(PROBE, INFO,
12266 + "setup link failed with code %d\n", err);
12267 + hw->mac.ops.setup_link(hw, old, true, true);
12270 + /* in this case we currently only support 10Gb/FULL */
12271 + if ((ecmd->autoneg == AUTONEG_ENABLE) ||
12272 + (ecmd->advertising != ADVERTISED_10000baseT_Full) ||
12273 + (ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
12280 +static void ixgbe_get_pauseparam(struct net_device *netdev,
12281 + struct ethtool_pauseparam *pause)
12283 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
12284 + struct ixgbe_hw *hw = &adapter->hw;
12287 + * Flow Control Autoneg isn't on if
12288 + * - we didn't ask for it, OR
12289 + * - it failed, which we detect by tx & rx both being off
12291 + if (hw->fc.disable_fc_autoneg || (hw->fc.current_mode == ixgbe_fc_none))
12292 + pause->autoneg = 0;
12294 + pause->autoneg = 1;
12297 + if (hw->fc.current_mode == ixgbe_fc_pfc) {
12298 + pause->rx_pause = 0;
12299 + pause->tx_pause = 0;
12304 + if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
12305 + pause->rx_pause = 1;
12306 + } else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
12307 + pause->tx_pause = 1;
12308 + } else if (hw->fc.current_mode == ixgbe_fc_full) {
12309 + pause->rx_pause = 1;
12310 + pause->tx_pause = 1;
12314 +static int ixgbe_set_pauseparam(struct net_device *netdev,
12315 + struct ethtool_pauseparam *pause)
12317 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
12318 + struct ixgbe_hw *hw = &adapter->hw;
12319 + struct ixgbe_fc_info fc;
12321 + if (adapter->dcb_cfg.pfc_mode_enable ||
12322 + ((hw->mac.type == ixgbe_mac_82598EB) &&
12323 + (adapter->flags & IXGBE_FLAG_DCB_ENABLED)))
12328 + if (pause->autoneg != AUTONEG_ENABLE)
12329 + fc.disable_fc_autoneg = true;
12331 + fc.disable_fc_autoneg = false;
12333 + if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
12334 + fc.requested_mode = ixgbe_fc_full;
12335 + else if (pause->rx_pause && !pause->tx_pause)
12336 + fc.requested_mode = ixgbe_fc_rx_pause;
12337 + else if (!pause->rx_pause && pause->tx_pause)
12338 + fc.requested_mode = ixgbe_fc_tx_pause;
12339 + else if (!pause->rx_pause && !pause->tx_pause)
12340 + fc.requested_mode = ixgbe_fc_none;
12344 + adapter->last_lfc_mode = fc.requested_mode;
12346 + /* if the flow control config changed, apply it and rerun autoneg */
12347 + if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
12349 + if (netif_running(netdev))
12350 + ixgbe_reinit_locked(adapter);
12352 + ixgbe_reset(adapter);
12358 +static u32 ixgbe_get_rx_csum(struct net_device *netdev)
12360 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
12361 + return (adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED);
12364 +static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data)
12366 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
12368 + adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
12370 + adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
12375 +static u32 ixgbe_get_tx_csum(struct net_device *netdev)
12377 + return (netdev->features & NETIF_F_IP_CSUM) != 0;
12380 +static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data)
12383 +#ifdef NETIF_F_IPV6_CSUM
12384 + netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
12386 + netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
12388 + netdev->features |= NETIF_F_IP_CSUM;
12390 + netdev->features &= ~NETIF_F_IP_CSUM;
12396 +#ifdef NETIF_F_TSO
12397 +static int ixgbe_set_tso(struct net_device *netdev, u32 data)
12399 +#ifndef HAVE_NETDEV_VLAN_FEATURES
12400 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
12401 +#endif /* HAVE_NETDEV_VLAN_FEATURES */
12403 + netdev->features |= NETIF_F_TSO;
12404 +#ifdef NETIF_F_TSO6
12405 + netdev->features |= NETIF_F_TSO6;
12408 + netdev->features &= ~NETIF_F_TSO;
12409 +#ifdef NETIF_F_TSO6
12410 + netdev->features &= ~NETIF_F_TSO6;
12412 +#ifndef HAVE_NETDEV_VLAN_FEATURES
12413 +#ifdef NETIF_F_HW_VLAN_TX
12414 + /* disable TSO on all VLANs if they're present */
12415 + if (adapter->vlgrp) {
12417 + struct net_device *v_netdev;
12418 + for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
12420 + vlan_group_get_device(adapter->vlgrp, i);
12422 + v_netdev->features &= ~NETIF_F_TSO;
12423 +#ifdef NETIF_F_TSO6
12424 + v_netdev->features &= ~NETIF_F_TSO6;
12426 + vlan_group_set_device(adapter->vlgrp, i,
12432 +#endif /* HAVE_NETDEV_VLAN_FEATURES */
12436 +#endif /* NETIF_F_TSO */
12438 +static u32 ixgbe_get_msglevel(struct net_device *netdev)
12440 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
12441 + return adapter->msg_enable;
12444 +static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
12446 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
12447 + adapter->msg_enable = data;
12450 +static int ixgbe_get_regs_len(struct net_device *netdev)
12452 +#define IXGBE_REGS_LEN 1128
12453 + return IXGBE_REGS_LEN * sizeof(u32);
12456 +#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_
12459 +static void ixgbe_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
12462 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
12463 + struct ixgbe_hw *hw = &adapter->hw;
12464 + u32 *regs_buff = p;
12467 + memset(p, 0, IXGBE_REGS_LEN * sizeof(u32));
12469 + regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id;
12471 + /* General Registers */
12472 + regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL);
12473 + regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS);
12474 + regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
12475 + regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP);
12476 + regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP);
12477 + regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
12478 + regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER);
12479 + regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);
12481 + /* NVM Register */
12482 + regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC);
12483 + regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD);
12484 + regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA);
12485 + regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL);
12486 + regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA);
12487 + regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL);
12488 + regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA);
12489 + regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT);
12490 + regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP);
12491 + regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC);
12494 + /* don't read EICR because it can clear interrupt causes, instead
12495 + * read EICS which is a shadow but doesn't clear EICR */
12496 + regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS);
12497 + regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS);
12498 + regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS);
12499 + regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC);
12500 + regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC);
12501 + regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM);
12502 + regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0));
12503 + regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
12504 + regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
12505 + regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
12506 + regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0));
12507 + regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);
12509 + /* Flow Control */
12510 + regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP);
12511 + regs_buff[31] = IXGBE_READ_REG(hw, IXGBE_FCTTV(0));
12512 + regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1));
12513 + regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2));
12514 + regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3));
12515 + for (i = 0; i < 8; i++)
12516 + switch (hw->mac.type) {
12517 + case ixgbe_mac_82598EB:
12518 + regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
12519 + regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
12521 + case ixgbe_mac_82599EB:
12522 + regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
12523 + regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
12528 + regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
12529 + regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);
12531 + /* Receive DMA */
12532 + for (i = 0; i < 64; i++)
12533 + regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
12534 + for (i = 0; i < 64; i++)
12535 + regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
12536 + for (i = 0; i < 64; i++)
12537 + regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
12538 + for (i = 0; i < 64; i++)
12539 + regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
12540 + for (i = 0; i < 64; i++)
12541 + regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
12542 + for (i = 0; i < 64; i++)
12543 + regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
12544 + for (i = 0; i < 16; i++)
12545 + regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
12546 + for (i = 0; i < 16; i++)
12547 + regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
12548 + regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
12549 + for (i = 0; i < 8; i++)
12550 + regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
12551 + regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
12552 + regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN);
12555 + regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
12556 + regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL);
12557 + for (i = 0; i < 16; i++)
12558 + regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i));
12559 + for (i = 0; i < 16; i++)
12560 + regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i));
12561 + regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0));
12562 + regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL);
12563 + regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
12564 + regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL);
12565 + regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC);
12566 + regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
12567 + for (i = 0; i < 8; i++)
12568 + regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i));
12569 + for (i = 0; i < 8; i++)
12570 + regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i));
12571 + regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP);
12574 + for (i = 0; i < 32; i++)
12575 + regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
12576 + for (i = 0; i < 32; i++)
12577 + regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
12578 + for (i = 0; i < 32; i++)
12579 + regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
12580 + for (i = 0; i < 32; i++)
12581 + regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
12582 + for (i = 0; i < 32; i++)
12583 + regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
12584 + for (i = 0; i < 32; i++)
12585 + regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
12586 + for (i = 0; i < 32; i++)
12587 + regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i));
12588 + for (i = 0; i < 32; i++)
12589 + regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i));
12590 + regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
12591 + for (i = 0; i < 16; i++)
12592 + regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
12593 + regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG);
12594 + for (i = 0; i < 8; i++)
12595 + regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i));
12596 + regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP);
12599 + regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC);
12600 + regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC);
12601 + regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS);
12602 + regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV);
12603 + regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT);
12604 + regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT);
12605 + regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL);
12606 + regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
12607 + regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));
12610 + regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);
12611 + regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
12612 + regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
12613 + regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
12614 + for (i = 0; i < 8; i++)
12615 + regs_buff[833 + i] = IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
12616 + for (i = 0; i < 8; i++)
12617 + regs_buff[841 + i] = IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
12618 + for (i = 0; i < 8; i++)
12619 + regs_buff[849 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
12620 + for (i = 0; i < 8; i++)
12621 + regs_buff[857 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
12622 + for (i = 0; i < 8; i++)
12623 + regs_buff[865 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i));
12624 + for (i = 0; i < 8; i++)
12625 + regs_buff[873 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i));
12628 + regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
12629 + regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc);
12630 + regs_buff[883] = IXGBE_GET_STAT(adapter, errbc);
12631 + regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc);
12632 + for (i = 0; i < 8; i++)
12633 + regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]);
12634 + regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc);
12635 + regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc);
12636 + regs_buff[895] = IXGBE_GET_STAT(adapter, rlec);
12637 + regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc);
12638 + regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc);
12639 + regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc);
12640 + regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc);
12641 + for (i = 0; i < 8; i++)
12642 + regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]);
12643 + for (i = 0; i < 8; i++)
12644 + regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]);
12645 + for (i = 0; i < 8; i++)
12646 + regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]);
12647 + for (i = 0; i < 8; i++)
12648 + regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]);
12649 + regs_buff[932] = IXGBE_GET_STAT(adapter, prc64);
12650 + regs_buff[933] = IXGBE_GET_STAT(adapter, prc127);
12651 + regs_buff[934] = IXGBE_GET_STAT(adapter, prc255);
12652 + regs_buff[935] = IXGBE_GET_STAT(adapter, prc511);
12653 + regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023);
12654 + regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522);
12655 + regs_buff[938] = IXGBE_GET_STAT(adapter, gprc);
12656 + regs_buff[939] = IXGBE_GET_STAT(adapter, bprc);
12657 + regs_buff[940] = IXGBE_GET_STAT(adapter, mprc);
12658 + regs_buff[941] = IXGBE_GET_STAT(adapter, gptc);
12659 + regs_buff[942] = IXGBE_GET_STAT(adapter, gorc);
12660 + regs_buff[944] = IXGBE_GET_STAT(adapter, gotc);
12661 + for (i = 0; i < 8; i++)
12662 + regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]);
12663 + regs_buff[954] = IXGBE_GET_STAT(adapter, ruc);
12664 + regs_buff[955] = IXGBE_GET_STAT(adapter, rfc);
12665 + regs_buff[956] = IXGBE_GET_STAT(adapter, roc);
12666 + regs_buff[957] = IXGBE_GET_STAT(adapter, rjc);
12667 + regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc);
12668 + regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc);
12669 + regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc);
12670 + regs_buff[961] = IXGBE_GET_STAT(adapter, tor);
12671 + regs_buff[963] = IXGBE_GET_STAT(adapter, tpr);
12672 + regs_buff[964] = IXGBE_GET_STAT(adapter, tpt);
12673 + regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64);
12674 + regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127);
12675 + regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255);
12676 + regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511);
12677 + regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023);
12678 + regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522);
12679 + regs_buff[971] = IXGBE_GET_STAT(adapter, mptc);
12680 + regs_buff[972] = IXGBE_GET_STAT(adapter, bptc);
12681 + regs_buff[973] = IXGBE_GET_STAT(adapter, xec);
12682 + for (i = 0; i < 16; i++)
12683 + regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]);
12684 + for (i = 0; i < 16; i++)
12685 + regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]);
12686 + for (i = 0; i < 16; i++)
12687 + regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]);
12688 + for (i = 0; i < 16; i++)
12689 + regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]);
12692 + regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG);
12693 + regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
12694 + regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
12695 + regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0);
12696 + regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1);
12697 + regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
12698 + regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
12699 + regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP);
12700 + regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP);
12701 + regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0);
12702 + regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1);
12703 + regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP);
12704 + regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA);
12705 + regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE);
12706 + regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD);
12707 + regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS);
12708 + regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA);
12709 + regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD);
12710 + regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD);
12711 + regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD);
12712 + regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG);
12713 + regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1);
12714 + regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2);
12715 + regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS);
12716 + regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC);
12717 + regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS);
12718 + regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC);
12719 + regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS);
12720 + regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
12721 + regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3);
12722 + regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1);
12723 + regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2);
12724 + regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
12727 + regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL);
12728 + for (i = 0; i < 8; i++)
12729 + regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
12730 + regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN);
12731 + for (i = 0; i < 4; i++)
12732 + regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i));
12733 + regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE);
12734 + regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL);
12735 + for (i = 0; i < 8; i++)
12736 + regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
12737 + regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN);
12738 + for (i = 0; i < 4; i++)
12739 + regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
12740 + regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
12741 + regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
12742 + regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0);
12743 + regs_buff[1103] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA1);
12744 + regs_buff[1104] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA2);
12745 + regs_buff[1105] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA3);
12746 + regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL);
12747 + regs_buff[1107] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA0);
12748 + regs_buff[1108] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA1);
12749 + regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2);
12750 + regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3);
12751 + for (i = 0; i < 8; i++)
12752 + regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
12753 + regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
12754 + regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1);
12755 + regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2);
12756 + regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1);
12757 + regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2);
12758 + regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS);
12759 + regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL);
12760 + regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC);
12761 + regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC);
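
The regs->version word assembled near the top of this function packs a format byte, the PCI revision id, and the device id into 8:8:16 bits. Decoding it back, for reference (hypothetical helper, not driver code):

/* Hypothetical decode of the version word: (1 << 24) | rev << 16 | dev */
static void decode_regs_version(u32 v, u8 *fmt, u8 *rev, u16 *dev)
{
	*fmt = v >> 24;
	*rev = (v >> 16) & 0xFF;
	*dev = v & 0xFFFF;
}
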
12764 +static int ixgbe_get_eeprom_len(struct net_device *netdev)
12766 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
12767 + return adapter->hw.eeprom.word_size * 2;
12770 +static int ixgbe_get_eeprom(struct net_device *netdev,
12771 + struct ethtool_eeprom *eeprom, u8 *bytes)
12773 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
12774 + struct ixgbe_hw *hw = &adapter->hw;
12775 + u16 *eeprom_buff;
12776 + int first_word, last_word, eeprom_len;
12780 + if (eeprom->len == 0)
12783 + eeprom->magic = hw->vendor_id | (hw->device_id << 16);
12785 + first_word = eeprom->offset >> 1;
12786 + last_word = (eeprom->offset + eeprom->len - 1) >> 1;
12787 + eeprom_len = last_word - first_word + 1;
12789 + eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL);
12790 + if (!eeprom_buff)
12793 + for (i = 0; i < eeprom_len; i++) {
12794 + if ((ret_val = ixgbe_read_eeprom(hw, first_word + i,
12795 + &eeprom_buff[i])))
12799 + /* Device's eeprom is always little-endian, word addressable */
12800 + for (i = 0; i < eeprom_len; i++)
12801 + le16_to_cpus(&eeprom_buff[i]);
12803 + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
12804 + kfree(eeprom_buff);
12809 +static int ixgbe_set_eeprom(struct net_device *netdev,
12810 + struct ethtool_eeprom *eeprom, u8 *bytes)
12812 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
12813 + struct ixgbe_hw *hw = &adapter->hw;
12814 + u16 *eeprom_buff;
12816 + int max_len, first_word, last_word, ret_val = 0;
12819 + if (eeprom->len == 0)
12820 + return -EOPNOTSUPP;
12822 + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
12825 + max_len = hw->eeprom.word_size * 2;
12827 + first_word = eeprom->offset >> 1;
12828 + last_word = (eeprom->offset + eeprom->len - 1) >> 1;
12829 + eeprom_buff = kmalloc(max_len, GFP_KERNEL);
12830 + if (!eeprom_buff)
12833 + ptr = (void *)eeprom_buff;
12835 + if (eeprom->offset & 1) {
12836 + /* need read/modify/write of first changed EEPROM word */
12837 + /* only the second byte of the word is being modified */
12838 + ret_val = ixgbe_read_eeprom(hw, first_word, &eeprom_buff[0]);
12841 + if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
12842 + /* need read/modify/write of last changed EEPROM word */
12843 + /* only the first byte of the word is being modified */
12844 + ret_val = ixgbe_read_eeprom(hw, last_word,
12845 + &eeprom_buff[last_word - first_word]);
12848 + /* Device's eeprom is always little-endian, word addressable */
12849 + for (i = 0; i < last_word - first_word + 1; i++)
12850 + le16_to_cpus(&eeprom_buff[i]);
12852 + memcpy(ptr, bytes, eeprom->len);
12854 + for (i = 0; i <= (last_word - first_word); i++)
12855 + ret_val |= ixgbe_write_eeprom(hw, first_word + i, eeprom_buff[i]);
12857 + /* Update the checksum */
12858 + ixgbe_update_eeprom_checksum(hw);
12860 + kfree(eeprom_buff);
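
The word arithmetic in both EEPROM paths is worth spelling out: the device is 16-bit word addressable, so a byte range [offset, offset + len) spans words offset >> 1 through (offset + len - 1) >> 1, and an odd start or end forces a read-modify-write of the boundary word. A worked instance (illustrative, not driver code):

/* Worked instance: byte offset 3, length 4 touches bytes 3..6. */
static void eeprom_word_span(void)
{
	unsigned int offset = 3, len = 4;
	unsigned int first_word = offset >> 1;			/* 1 */
	unsigned int last_word = (offset + len - 1) >> 1;	/* 3 */

	/* offset & 1 and (offset + len) & 1 are both nonzero, so words
	 * 1 and 3 are read first and only one byte of each is changed. */
	(void)first_word;
	(void)last_word;
}
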
12864 +static void ixgbe_get_drvinfo(struct net_device *netdev,
12865 + struct ethtool_drvinfo *drvinfo)
12867 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
12868 + char firmware_version[32];
12870 + strncpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
12871 + strncpy(drvinfo->version, ixgbe_driver_version,
12872 + sizeof(drvinfo->version));
12874 + snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d",
12875 + (adapter->eeprom_version & 0xF000) >> 12,
12876 + (adapter->eeprom_version & 0x0FF0) >> 4,
12877 + adapter->eeprom_version & 0x000F);
12879 + strncpy(drvinfo->fw_version, firmware_version,
12880 + sizeof(drvinfo->fw_version));
12881 + strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
12882 + sizeof(drvinfo->bus_info));
12883 + drvinfo->n_stats = IXGBE_STATS_LEN;
12884 + drvinfo->testinfo_len = IXGBE_TEST_LEN;
12885 + drvinfo->regdump_len = ixgbe_get_regs_len(netdev);
12888 +static void ixgbe_get_ringparam(struct net_device *netdev,
12889 + struct ethtool_ringparam *ring)
12891 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
12892 + struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
12893 + struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
12895 + ring->rx_max_pending = IXGBE_MAX_RXD;
12896 + ring->tx_max_pending = IXGBE_MAX_TXD;
12897 + ring->rx_mini_max_pending = 0;
12898 + ring->rx_jumbo_max_pending = 0;
12899 + ring->rx_pending = rx_ring->count;
12900 + ring->tx_pending = tx_ring->count;
12901 + ring->rx_mini_pending = 0;
12902 + ring->rx_jumbo_pending = 0;
12905 +static int ixgbe_set_ringparam(struct net_device *netdev,
12906 + struct ethtool_ringparam *ring)
12908 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
12909 + struct ixgbe_ring *temp_tx_ring, *temp_rx_ring;
12911 + u32 new_rx_count, new_tx_count;
12912 + bool need_update = false;
12914 + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
12917 + new_rx_count = max(ring->rx_pending, (u32)IXGBE_MIN_RXD);
12918 + new_rx_count = min(new_rx_count, (u32)IXGBE_MAX_RXD);
12919 + new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
12921 + new_tx_count = max(ring->tx_pending, (u32)IXGBE_MIN_TXD);
12922 + new_tx_count = min(new_tx_count, (u32)IXGBE_MAX_TXD);
12923 + new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
12925 + if ((new_tx_count == adapter->tx_ring[0]->count) &&
12926 + (new_rx_count == adapter->rx_ring[0]->count)) {
12927 + /* nothing to do */
12931 + while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
12934 + if (!netif_running(adapter->netdev)) {
12935 + for (i = 0; i < adapter->num_tx_queues; i++)
12936 + adapter->tx_ring[i]->count = new_tx_count;
12937 + for (i = 0; i < adapter->num_rx_queues; i++)
12938 + adapter->rx_ring[i]->count = new_rx_count;
12939 + adapter->tx_ring_count = new_tx_count;
12940 + adapter->rx_ring_count = new_rx_count;
12941 + goto clear_reset;
12944 + temp_tx_ring = vmalloc(adapter->num_tx_queues * sizeof(struct ixgbe_ring));
12945 + if (!temp_tx_ring) {
12947 + goto clear_reset;
12950 + if (new_tx_count != adapter->tx_ring_count) {
12951 + for (i = 0; i < adapter->num_tx_queues; i++) {
12952 + memcpy(&temp_tx_ring[i], adapter->tx_ring[i],
12953 + sizeof(struct ixgbe_ring));
12954 + temp_tx_ring[i].count = new_tx_count;
12955 + err = ixgbe_setup_tx_resources(&temp_tx_ring[i]);
12959 + ixgbe_free_tx_resources(&temp_tx_ring[i]);
12961 + goto clear_reset;
12964 + need_update = true;
12967 + temp_rx_ring = vmalloc(adapter->num_rx_queues * sizeof(struct ixgbe_ring));
12968 + if (!temp_rx_ring) {
12973 + if (new_rx_count != adapter->rx_ring_count) {
12974 + for (i = 0; i < adapter->num_rx_queues; i++) {
12975 + memcpy(&temp_rx_ring[i], adapter->rx_ring[i],
12976 + sizeof(struct ixgbe_ring));
12977 + temp_rx_ring[i].count = new_rx_count;
12978 + err = ixgbe_setup_rx_resources(&temp_rx_ring[i]);
12982 + ixgbe_free_rx_resources(&temp_rx_ring[i]);
12987 + need_update = true;
12990 + /* if rings need to be updated, here's the place to do it in one shot */
12991 + if (need_update) {
12992 + ixgbe_down(adapter);
12995 + if (new_tx_count != adapter->tx_ring_count) {
12996 + for (i = 0; i < adapter->num_tx_queues; i++) {
12997 + ixgbe_free_tx_resources(adapter->tx_ring[i]);
12998 + memcpy(adapter->tx_ring[i], &temp_tx_ring[i],
12999 + sizeof(struct ixgbe_ring));
13001 + adapter->tx_ring_count = new_tx_count;
13005 + if (new_rx_count != adapter->rx_ring_count) {
13006 + for (i = 0; i < adapter->num_rx_queues; i++) {
13007 + ixgbe_free_rx_resources(adapter->rx_ring[i]);
13008 + memcpy(adapter->rx_ring[i], &temp_rx_ring[i],
13009 + sizeof(struct ixgbe_ring));
13011 + adapter->rx_ring_count = new_rx_count;
13013 + ixgbe_up(adapter);
13016 + vfree(temp_rx_ring);
13018 + vfree(temp_tx_ring);
13020 + clear_bit(__IXGBE_RESETTING, &adapter->state);
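
The requested descriptor counts above are clamped to the supported range and then rounded up to the hardware's required multiple via ALIGN(). A minimal sketch of that clamp-then-align step, assuming the maximum is itself a multiple of the alignment (as the IXGBE_MAX_* values are):

/* Illustrative clamp-then-align; mirrors the max()/min()/ALIGN() above. */
static unsigned int fix_ring_count(unsigned int req, unsigned int lo,
				   unsigned int hi, unsigned int mult)
{
	unsigned int n = (req < lo) ? lo : req;

	if (n > hi)
		n = hi;
	return (n + mult - 1) / mult * mult;	/* round up */
}
/* e.g. fix_ring_count(500, 64, 4096, 8) == 504 */
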
13024 +#ifndef HAVE_ETHTOOL_GET_SSET_COUNT
13025 +static int ixgbe_get_stats_count(struct net_device *netdev)
13027 + return IXGBE_STATS_LEN;
13030 +#else /* HAVE_ETHTOOL_GET_SSET_COUNT */
13031 +static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
13034 + case ETH_SS_TEST:
13035 + return IXGBE_TEST_LEN;
13036 + case ETH_SS_STATS:
13037 + return IXGBE_STATS_LEN;
13038 +#ifdef NETIF_F_NTUPLE
13039 + case ETH_SS_NTUPLE_FILTERS:
13040 + return (ETHTOOL_MAX_NTUPLE_LIST_ENTRY *
13041 + ETHTOOL_MAX_NTUPLE_STRING_PER_ENTRY);
13042 +#endif /* NETIF_F_NTUPLE */
13044 + return -EOPNOTSUPP;
13048 +#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
13049 +static void ixgbe_get_ethtool_stats(struct net_device *netdev,
13050 + struct ethtool_stats *stats, u64 *data)
13052 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
13054 + int stat_count = sizeof(struct ixgbe_queue_stats) / sizeof(u64);
13058 + ixgbe_update_stats(adapter);
13059 + for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
13060 + char *p = (char *)adapter + ixgbe_gstrings_stats[i].stat_offset;
13061 + data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
13062 + sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
13064 + for (j = 0; j < adapter->num_tx_queues; j++) {
13065 + queue_stat = (u64 *)&adapter->tx_ring[j]->stats;
13066 + for (k = 0; k < stat_count; k++)
13067 + data[i + k] = queue_stat[k];
13070 + for (j = 0; j < adapter->num_rx_queues; j++) {
13071 + queue_stat = (u64 *)&adapter->rx_ring[j]->stats;
13072 + for (k = 0; k < stat_count; k++)
13073 + data[i + k] = queue_stat[k];
13076 + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
13077 + for (j = 0; j < MAX_TX_PACKET_BUFFERS; j++) {
13078 + data[i++] = adapter->stats.pxontxc[j];
13079 + data[i++] = adapter->stats.pxofftxc[j];
13081 + for (j = 0; j < MAX_RX_PACKET_BUFFERS; j++) {
13082 + data[i++] = adapter->stats.pxonrxc[j];
13083 + data[i++] = adapter->stats.pxoffrxc[j];
13086 + stat_count = sizeof(struct vf_stats) / sizeof(u64);
13087 + for (j = 0; j < adapter->num_vfs; j++) {
13088 + queue_stat = (u64 *)&adapter->vfinfo[j].vfstats;
13089 + for (k = 0; k < stat_count; k++) {
13090 + data[i + k] = queue_stat[k];
13092 + queue_stat = (u64 *)&adapter->vfinfo[j].saved_rst_vfstats;
13093 + for (k = 0; k < stat_count; k++) {
13094 + data[i + k] += queue_stat[k];
13100 +static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
13103 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
13104 + char *p = (char *)data;
13107 + switch (stringset) {
13108 + case ETH_SS_TEST:
13109 + memcpy(data, *ixgbe_gstrings_test,
13110 + IXGBE_TEST_LEN * ETH_GSTRING_LEN);
13112 + case ETH_SS_STATS:
13113 + for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
13114 + memcpy(p, ixgbe_gstrings_stats[i].stat_string,
13115 + ETH_GSTRING_LEN);
13116 + p += ETH_GSTRING_LEN;
13118 + for (i = 0; i < adapter->num_tx_queues; i++) {
13119 + sprintf(p, "tx_queue_%u_packets", i);
13120 + p += ETH_GSTRING_LEN;
13121 + sprintf(p, "tx_queue_%u_bytes", i);
13122 + p += ETH_GSTRING_LEN;
13124 + for (i = 0; i < adapter->num_rx_queues; i++) {
13125 + sprintf(p, "rx_queue_%u_packets", i);
13126 + p += ETH_GSTRING_LEN;
13127 + sprintf(p, "rx_queue_%u_bytes", i);
13128 + p += ETH_GSTRING_LEN;
13130 + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
13131 + for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
13132 + sprintf(p, "tx_pb_%u_pxon", i);
13133 + p += ETH_GSTRING_LEN;
13134 + sprintf(p, "tx_pb_%u_pxoff", i);
13135 + p += ETH_GSTRING_LEN;
13137 + for (i = 0; i < MAX_RX_PACKET_BUFFERS; i++) {
13138 + sprintf(p, "rx_pb_%u_pxon", i);
13139 + p += ETH_GSTRING_LEN;
13140 + sprintf(p, "rx_pb_%u_pxoff", i);
13141 + p += ETH_GSTRING_LEN;
13144 + for (i = 0; i < adapter->num_vfs; i++) {
13145 + sprintf(p, "VF %d Rx Packets", i);
13146 + p += ETH_GSTRING_LEN;
13147 + sprintf(p, "VF %d Rx Bytes", i);
13148 + p += ETH_GSTRING_LEN;
13149 + sprintf(p, "VF %d Tx Packets", i);
13150 + p += ETH_GSTRING_LEN;
13151 + sprintf(p, "VF %d Tx Bytes", i);
13152 + p += ETH_GSTRING_LEN;
13153 + sprintf(p, "VF %d MC Packets", i);
13154 + p += ETH_GSTRING_LEN;
13156 + /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
13161 +static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
13163 + struct ixgbe_hw *hw = &adapter->hw;
13165 + u32 link_speed = 0;
13168 + hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
13176 +/* ethtool register test data */
13177 +struct ixgbe_reg_test {
13185 +/* In the hardware, registers are laid out either singly, in arrays
13186 + * spaced 0x40 bytes apart, or in contiguous tables. We assume
13187 + * most tests take place on arrays or single registers (handled
13188 + * as a single-element array) and special-case the tables.
13189 + * Table tests are always pattern tests.
13191 + * We also make provision for some required setup steps by specifying
13192 + * registers to be written without any read-back testing.
13195 +#define PATTERN_TEST 1
13196 +#define SET_READ_TEST 2
13197 +#define WRITE_NO_TEST 3
13198 +#define TABLE32_TEST 4
13199 +#define TABLE64_TEST_LO 5
13200 +#define TABLE64_TEST_HI 6
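+ /* Each entry in the tables below is { reg, array_len, test_type, mask,
+  * write }; the test loop applies the mask to read-back values and treats
+  * a zero reg as the end-of-table sentinel.
+  */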
13202 +/* default 82599 register test */
13203 +static struct ixgbe_reg_test reg_test_82599[] = {
13204 + { IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
13205 + { IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
13206 + { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
13207 + { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
13208 + { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
13209 + { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
13210 + { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
13211 + { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
13212 + { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
13213 + { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
13214 + { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
13215 + { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
13216 + { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
13217 + { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
13218 + { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
13219 + { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 },
13220 + { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
13221 + { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
13222 + { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
13226 +/* default 82598 register test */
13227 +static struct ixgbe_reg_test reg_test_82598[] = {
13228 + { IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
13229 + { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
13230 + { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
13231 + { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
13232 + { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
13233 + { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
13234 + { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
13235 + /* Enable all four RX queues before testing. */
13236 + { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
13237 + /* RDH is read-only for 82598, only test RDT. */
13238 + { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
13239 + { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
13240 + { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
13241 + { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
13242 + { IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF },
13243 + { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
13244 + { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
13245 + { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
13246 + { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 },
13247 + { IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 },
13248 + { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
13249 + { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF },
13250 + { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
13254 +#define REG_PATTERN_TEST(R, M, W) \
13256 + u32 pat, val, before; \
13257 + const u32 _test[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \
13258 + for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { \
13259 + before = readl(adapter->hw.hw_addr + R); \
13260 + writel((_test[pat] & W), (adapter->hw.hw_addr + R)); \
13261 + val = readl(adapter->hw.hw_addr + R); \
13262 + if (val != (_test[pat] & W & M)) { \
13263 + DPRINTK(DRV, ERR, "pattern test reg %04X failed: got "\
13264 + "0x%08X expected 0x%08X\n", \
13265 + R, val, (_test[pat] & W & M)); \
13267 + writel(before, adapter->hw.hw_addr + R); \
13270 + writel(before, adapter->hw.hw_addr + R); \
13274 +#define REG_SET_AND_CHECK(R, M, W) \
13276 + u32 val, before; \
13277 + before = readl(adapter->hw.hw_addr + R); \
13278 + writel((W & M), (adapter->hw.hw_addr + R)); \
13279 + val = readl(adapter->hw.hw_addr + R); \
13280 + if ((W & M) != (val & M)) { \
13281 + DPRINTK(DRV, ERR, "set/check reg %04X test failed: got 0x%08X "\
13282 + "expected 0x%08X\n", R, (val & M), (W & M)); \
13284 + writel(before, (adapter->hw.hw_addr + R)); \
13287 + writel(before, (adapter->hw.hw_addr + R)); \
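+ /* Note: both macros snapshot the register in 'before' and restore it on
+  * the success and failure paths alike, so a failed check does not leave
+  * the register in a modified state.
+  */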
13290 +static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
13292 + struct ixgbe_reg_test *test;
13293 + u32 value, before, after;
13296 + switch (adapter->hw.mac.type) {
13297 + case ixgbe_mac_82598EB:
13298 + toggle = 0x7FFFF3FF;
13299 + test = reg_test_82598;
13301 + case ixgbe_mac_82599EB:
13302 + toggle = 0x7FFFF30F;
13303 + test = reg_test_82599;
13312 + * Because the status register is such a special case,
13313 + * we handle it separately from the rest of the register
13314 + * tests. Some bits are read-only, some toggle, and some
13315 + * are writeable on newer MACs.
13317 + before = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS);
13318 + value = (IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle);
13319 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, toggle);
13320 + after = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle;
13321 + if (value != after) {
13322 + DPRINTK(DRV, ERR, "failed STATUS register test got: "
13323 + "0x%08X expected: 0x%08X\n", after, value);
13327 + /* restore previous status */
13328 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, before);
13331 + * Perform the remainder of the register test, looping through
13332 + * the test table until we either fail or reach the null entry.
13334 + while (test->reg) {
13335 + for (i = 0; i < test->array_len; i++) {
13336 + switch (test->test_type) {
13337 + case PATTERN_TEST:
13338 + REG_PATTERN_TEST(test->reg + (i * 0x40),
13342 + case SET_READ_TEST:
13343 + REG_SET_AND_CHECK(test->reg + (i * 0x40),
13347 + case WRITE_NO_TEST:
13348 + writel(test->write,
13349 + (adapter->hw.hw_addr + test->reg)
13352 + case TABLE32_TEST:
13353 + REG_PATTERN_TEST(test->reg + (i * 4),
13357 + case TABLE64_TEST_LO:
13358 + REG_PATTERN_TEST(test->reg + (i * 8),
13362 + case TABLE64_TEST_HI:
13363 + REG_PATTERN_TEST((test->reg + 4) + (i * 8),
13376 +static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data)
13378 + if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL))
13385 +static irqreturn_t ixgbe_test_intr(int irq, void *data)
13387 + struct net_device *netdev = (struct net_device *) data;
13388 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
13390 + adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);
13392 + return IRQ_HANDLED;
13395 +static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
13397 + struct net_device *netdev = adapter->netdev;
13398 + u32 mask, i = 0, shared_int = true;
13399 + u32 irq = adapter->pdev->irq;
13403 + /* Hook up test interrupt handler just for this test */
13404 + if (adapter->msix_entries) {
13405 + /* NOTE: we don't test MSI-X interrupts here, yet */
13407 + } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
13408 + shared_int = false;
13409 + if (request_irq(irq, &ixgbe_test_intr, 0, netdev->name,
13414 + } else if (!request_irq(irq, &ixgbe_test_intr, IRQF_PROBE_SHARED,
13415 + netdev->name, netdev)) {
13416 + shared_int = false;
13417 + } else if (request_irq(irq, &ixgbe_test_intr, IRQF_SHARED,
13418 + netdev->name, netdev)) {
13422 + DPRINTK(HW, INFO, "testing %s interrupt\n",
13423 + (shared_int ? "shared" : "unshared"));
13425 + /* Disable all the interrupts */
13426 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
13429 + /* Test each interrupt */
13430 + for (; i < 10; i++) {
13431 + /* Interrupt to test */
13434 + if (!shared_int) {
13436 + * Disable the interrupts to be reported in
13437 + * the cause register and then force the same
13438 + * interrupt and see if one gets posted. If
13439 + * an interrupt was posted to the bus, the
13442 + adapter->test_icr = 0;
13443 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
13444 + ~mask & 0x00007FFF);
13445 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
13446 + ~mask & 0x00007FFF);
13449 + if (adapter->test_icr & mask) {
13456 + * Enable the interrupt to be reported in the cause
13457 + * register and then force the same interrupt and see
13458 + * if one gets posted. If an interrupt was not posted
13459 + * to the bus, the test failed.
13461 + adapter->test_icr = 0;
13462 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
13463 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
13466 + if (!(adapter->test_icr & mask)) {
13471 + if (!shared_int) {
13473 + * Disable the other interrupts to be reported in
13474 + * the cause register and then force the other
13475 + * interrupts and see if any get posted. If
13476 + * an interrupt was posted to the bus, the
13479 + adapter->test_icr = 0;
13480 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
13481 + ~mask & 0x00007FFF);
13482 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
13483 + ~mask & 0x00007FFF);
13486 + if (adapter->test_icr) {
13493 + /* Disable all the interrupts */
13494 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
13497 + /* Unhook test interrupt handler */
13498 + free_irq(irq, netdev);
13503 +static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
13505 + struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
13506 + struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
13507 + struct ixgbe_hw *hw = &adapter->hw;
13510 + /* shut down the DMA engines now so they can be reinitialized later */
13513 + reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
13514 + reg_ctl &= ~IXGBE_RXCTRL_RXEN;
13515 + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl);
13516 + reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->reg_idx));
13517 + reg_ctl &= ~IXGBE_RXDCTL_ENABLE;
13518 + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->reg_idx), reg_ctl);
13521 + reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx));
13522 + reg_ctl &= ~IXGBE_TXDCTL_ENABLE;
13523 + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl);
13525 + switch (hw->mac.type) {
13526 + case ixgbe_mac_82599EB:
13527 + reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
13528 + reg_ctl &= ~IXGBE_DMATXCTL_TE;
13529 + IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
13535 + ixgbe_reset(adapter);
13537 + ixgbe_free_tx_resources(&adapter->test_tx_ring);
13538 + ixgbe_free_rx_resources(&adapter->test_rx_ring);
13541 +static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
13543 + struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
13544 + struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
13545 + u32 rctl, reg_data;
13549 + /* Setup Tx descriptor ring and Tx buffers */
13550 + tx_ring->count = IXGBE_DEFAULT_TXD;
13551 + tx_ring->queue_index = 0;
13552 + tx_ring->dev = pci_dev_to_dev(adapter->pdev);
13553 + tx_ring->netdev = adapter->netdev;
13554 + tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;
13555 + tx_ring->numa_node = adapter->node;
13557 + err = ixgbe_setup_tx_resources(tx_ring);
13561 + switch (adapter->hw.mac.type) {
13562 + case ixgbe_mac_82599EB:
13563 + reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
13564 + reg_data |= IXGBE_DMATXCTL_TE;
13565 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
13571 + ixgbe_configure_tx_ring(adapter, tx_ring);
13573 + /* Setup Rx Descriptor ring and Rx buffers */
13574 + rx_ring->count = IXGBE_DEFAULT_RXD;
13575 + rx_ring->queue_index = 0;
13576 + rx_ring->dev = pci_dev_to_dev(adapter->pdev);
13577 + rx_ring->netdev = adapter->netdev;
13578 + rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;
13579 + rx_ring->rx_buf_len = IXGBE_RXBUFFER_2048;
13580 + rx_ring->numa_node = adapter->node;
13582 + err = ixgbe_setup_rx_resources(rx_ring);
13588 + rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
13589 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl & ~IXGBE_RXCTRL_RXEN);
13591 + ixgbe_configure_rx_ring(adapter, rx_ring);
13593 + rctl |= IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS;
13594 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);
13599 + ixgbe_free_desc_rings(adapter);
13603 +static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
13605 + struct ixgbe_hw *hw = &adapter->hw;
13608 + /* right now we only support MAC loopback in the driver */
13609 + reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
13610 + /* Setup MAC loopback */
13611 + reg_data |= IXGBE_HLREG0_LPBK;
13612 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
13614 + reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
13615 + reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
13616 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_data);
13618 + reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_AUTOC);
13619 + reg_data &= ~IXGBE_AUTOC_LMS_MASK;
13620 + reg_data |= IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU;
13621 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_AUTOC, reg_data);
13622 + IXGBE_WRITE_FLUSH(&adapter->hw);
13625 + /* Disable Atlas Tx lanes; re-enabled in reset path */
13626 + if (hw->mac.type == ixgbe_mac_82598EB) {
13629 + ixgbe_read_analog_reg8(&adapter->hw,
13630 + IXGBE_ATLAS_PDN_LPBK, &atlas);
13631 + atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
13632 + ixgbe_write_analog_reg8(&adapter->hw,
13633 + IXGBE_ATLAS_PDN_LPBK, atlas);
13635 + ixgbe_read_analog_reg8(&adapter->hw,
13636 + IXGBE_ATLAS_PDN_10G, &atlas);
13637 + atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
13638 + ixgbe_write_analog_reg8(&adapter->hw,
13639 + IXGBE_ATLAS_PDN_10G, atlas);
13641 + ixgbe_read_analog_reg8(&adapter->hw,
13642 + IXGBE_ATLAS_PDN_1G, &atlas);
13643 + atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
13644 + ixgbe_write_analog_reg8(&adapter->hw,
13645 + IXGBE_ATLAS_PDN_1G, atlas);
13647 + ixgbe_read_analog_reg8(&adapter->hw,
13648 + IXGBE_ATLAS_PDN_AN, &atlas);
13649 + atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
13650 + ixgbe_write_analog_reg8(&adapter->hw,
13651 + IXGBE_ATLAS_PDN_AN, atlas);
13657 +static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter)
13661 + reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
13662 + reg_data &= ~IXGBE_HLREG0_LPBK;
13663 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
13666 +static void ixgbe_create_lbtest_frame(struct sk_buff *skb,
13667 + unsigned int frame_size)
13669 + memset(skb->data, 0xFF, frame_size);
13670 + frame_size &= ~1;
13671 + memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
13672 + memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
13673 + memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
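+ /* For the size of 1024 used by the loopback test this yields bytes
+  * 0-511 = 0xFF, bytes 512-1022 = 0xAA, a 0xBE marker at offset 522 and
+  * 0xAF at offset 524; the checker below samples byte 3 and both markers.
+  */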
13676 +static int ixgbe_check_lbtest_frame(struct sk_buff *skb,
13677 + unsigned int frame_size)
13679 + frame_size &= ~1;
13680 + if (*(skb->data + 3) == 0xFF) {
13681 + if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
13682 + (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
13689 +static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
13690 + struct ixgbe_ring *tx_ring,
13691 + unsigned int size)
13693 + union ixgbe_adv_rx_desc *rx_desc;
13694 + struct ixgbe_rx_buffer *rx_buffer_info;
13695 + struct ixgbe_tx_buffer *tx_buffer_info;
13696 + const int bufsz = rx_ring->rx_buf_len;
13698 + u16 rx_ntc, tx_ntc, count = 0;
13700 + /* initialize next to clean and descriptor values */
13701 + rx_ntc = rx_ring->next_to_clean;
13702 + tx_ntc = tx_ring->next_to_clean;
13703 + rx_desc = IXGBE_RX_DESC_ADV(rx_ring, rx_ntc);
13704 + staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
13706 + while (staterr & IXGBE_RXD_STAT_DD) {
13707 + /* check Rx buffer */
13708 + rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
13710 + /* unmap Rx buffer, will be remapped by alloc_rx_buffers */
13711 + dma_unmap_single(rx_ring->dev,
13712 + rx_buffer_info->dma,
13714 + DMA_FROM_DEVICE);
13715 + rx_buffer_info->dma = 0;
13717 + /* verify contents of skb */
13718 + if (!ixgbe_check_lbtest_frame(rx_buffer_info->skb, size))
13721 + /* unmap buffer on Tx side */
13722 + tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
13723 + ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
13725 + /* increment Rx/Tx next to clean counters */
13727 + if (rx_ntc == rx_ring->count)
13730 + if (tx_ntc == tx_ring->count)
13733 + /* fetch next descriptor */
13734 + rx_desc = IXGBE_RX_DESC_ADV(rx_ring, rx_ntc);
13735 + staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
13738 + /* re-map buffers to ring, store next to clean values */
13739 + ixgbe_alloc_rx_buffers(rx_ring, count);
13740 + rx_ring->next_to_clean = rx_ntc;
13741 + tx_ring->next_to_clean = tx_ntc;
13746 +static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
13748 + struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
13749 + struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
13750 + int i, j, lc, good_cnt, ret_val = 0;
13751 + unsigned int size = 1024;
13752 + netdev_tx_t tx_ret_val;
13753 + struct sk_buff *skb;
13755 + /* allocate test skb */
13756 + skb = alloc_skb(size, GFP_KERNEL);
13760 + /* place data into test skb */
13761 + ixgbe_create_lbtest_frame(skb, size);
13762 + skb_put(skb, size);
13765 + * Calculate the loop count based on the largest descriptor ring.
13766 + * The idea is to wrap the largest ring a number of times using 64
13767 + * send/receive pairs during each loop
13770 + if (rx_ring->count <= tx_ring->count)
13771 + lc = ((tx_ring->count / 64) * 2) + 1;
13773 + lc = ((rx_ring->count / 64) * 2) + 1;
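+ /* e.g. the default 512-entry rings give lc = (512 / 64) * 2 + 1 = 17
+  * iterations of 64 frames, enough to wrap the ring twice
+  */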
13775 + for (j = 0; j <= lc; j++) {
13776 + /* reset count of good packets */
13779 + /* place 64 packets on the transmit queue */
13780 + for (i = 0; i < 64; i++) {
13782 + tx_ret_val = ixgbe_xmit_frame_ring(skb,
13785 + if (tx_ret_val == NETDEV_TX_OK)
13789 + if (good_cnt != 64) {
13794 + /* allow 200 milliseconds for packets to go from Tx to Rx */
13797 + good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size);
13798 + if (good_cnt != 64) {
13804 + /* free the original skb */
13810 +static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data)
13812 + *data = ixgbe_setup_desc_rings(adapter);
13815 + *data = ixgbe_setup_loopback_test(adapter);
13817 + goto err_loopback;
13818 + *data = ixgbe_run_loopback_test(adapter);
13819 + ixgbe_loopback_cleanup(adapter);
13822 + ixgbe_free_desc_rings(adapter);
13827 +#ifndef HAVE_ETHTOOL_GET_SSET_COUNT
13828 +static int ixgbe_diag_test_count(struct net_device *netdev)
13830 + return IXGBE_TEST_LEN;
13833 +#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
13834 +static void ixgbe_diag_test(struct net_device *netdev,
13835 + struct ethtool_test *eth_test, u64 *data)
13837 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
13838 + bool if_running = netif_running(netdev);
13840 + set_bit(__IXGBE_TESTING, &adapter->state);
13841 + if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
13842 + /* Offline tests */
13844 + DPRINTK(HW, INFO, "offline testing starting\n");
13846 + /* Link test performed before hardware reset so autoneg doesn't
13847 + * interfere with test result */
13848 + if (ixgbe_link_test(adapter, &data[4]))
13849 + eth_test->flags |= ETH_TEST_FL_FAILED;
13851 + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
13853 + for (i = 0; i < adapter->num_vfs; i++) {
13854 + if (adapter->vfinfo[i].clear_to_send) {
13855 + DPRINTK(DRV, WARNING, "Please take "
13856 + "active VFs offline and "
13857 + "restart the adapter before "
13858 + "running NIC diagnostics\n");
13863 + clear_bit(__IXGBE_TESTING,
13864 + &adapter->state);
13865 + goto skip_ol_tests;
13871 + /* indicate we're in test mode */
13872 + dev_close(netdev);
13874 + ixgbe_reset(adapter);
13876 + DPRINTK(HW, INFO, "register testing starting\n");
13877 + if (ixgbe_reg_test(adapter, &data[0]))
13878 + eth_test->flags |= ETH_TEST_FL_FAILED;
13880 + ixgbe_reset(adapter);
13881 + DPRINTK(HW, INFO, "eeprom testing starting\n");
13882 + if (ixgbe_eeprom_test(adapter, &data[1]))
13883 + eth_test->flags |= ETH_TEST_FL_FAILED;
13885 + ixgbe_reset(adapter);
13886 + DPRINTK(HW, INFO, "interrupt testing starting\n");
13887 + if (ixgbe_intr_test(adapter, &data[2]))
13888 + eth_test->flags |= ETH_TEST_FL_FAILED;
13890 + /* If SRIOV or VMDq is enabled then skip MAC
13891 + * loopback diagnostic. */
13892 + if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
13893 + IXGBE_FLAG_VMDQ_ENABLED)) {
13894 + DPRINTK(HW, INFO, "skip MAC loopback diagnostic in VT "
13897 + goto skip_loopback;
13900 + ixgbe_reset(adapter);
13901 + DPRINTK(HW, INFO, "loopback testing starting\n");
13902 + if (ixgbe_loopback_test(adapter, &data[3]))
13903 + eth_test->flags |= ETH_TEST_FL_FAILED;
13906 + ixgbe_reset(adapter);
13908 + clear_bit(__IXGBE_TESTING, &adapter->state);
13910 + dev_open(netdev);
13912 + DPRINTK(HW, INFO, "online testing starting\n");
13913 + /* Online tests */
13914 + if (ixgbe_link_test(adapter, &data[4]))
13915 + eth_test->flags |= ETH_TEST_FL_FAILED;
13917 + /* Online tests aren't run; pass by default */
13923 + clear_bit(__IXGBE_TESTING, &adapter->state);
13926 + msleep_interruptible(4 * 1000);
13929 +static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
13930 + struct ethtool_wolinfo *wol)
13932 + struct ixgbe_hw *hw = &adapter->hw;
13935 + switch (hw->device_id) {
13936 + case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
13937 + /* All except this subdevice support WOL */
13938 + if (hw->subsystem_device_id ==
13939 + IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) {
13940 + wol->supported = 0;
13943 + case IXGBE_DEV_ID_82599_KX4:
13947 + wol->supported = 0;
13953 +static void ixgbe_get_wol(struct net_device *netdev,
13954 + struct ethtool_wolinfo *wol)
13956 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
13958 + wol->supported = WAKE_UCAST | WAKE_MCAST |
13959 + WAKE_BCAST | WAKE_MAGIC;
13960 + wol->wolopts = 0;
13962 + if (ixgbe_wol_exclusion(adapter, wol) ||
13963 + !device_can_wakeup(&adapter->pdev->dev))
13966 + if (adapter->wol & IXGBE_WUFC_EX)
13967 + wol->wolopts |= WAKE_UCAST;
13968 + if (adapter->wol & IXGBE_WUFC_MC)
13969 + wol->wolopts |= WAKE_MCAST;
13970 + if (adapter->wol & IXGBE_WUFC_BC)
13971 + wol->wolopts |= WAKE_BCAST;
13972 + if (adapter->wol & IXGBE_WUFC_MAG)
13973 + wol->wolopts |= WAKE_MAGIC;
13976 +static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
13978 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
13980 + if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
13981 + return -EOPNOTSUPP;
13983 + if (ixgbe_wol_exclusion(adapter, wol))
13984 + return wol->wolopts ? -EOPNOTSUPP : 0;
13986 + adapter->wol = 0;
13988 + if (wol->wolopts & WAKE_UCAST)
13989 + adapter->wol |= IXGBE_WUFC_EX;
13990 + if (wol->wolopts & WAKE_MCAST)
13991 + adapter->wol |= IXGBE_WUFC_MC;
13992 + if (wol->wolopts & WAKE_BCAST)
13993 + adapter->wol |= IXGBE_WUFC_BC;
13994 + if (wol->wolopts & WAKE_MAGIC)
13995 + adapter->wol |= IXGBE_WUFC_MAG;
13997 + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
14002 +static int ixgbe_nway_reset(struct net_device *netdev)
14004 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
14006 + if (netif_running(netdev))
14007 + ixgbe_reinit_locked(adapter);
14012 +static int ixgbe_phys_id(struct net_device *netdev, u32 data)
14014 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
14015 + u32 led_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_LEDCTL);
14018 + if (!data || data > 300)
14021 + for (i = 0; i < (data * 1000); i += 400) {
14022 + ixgbe_led_on(&adapter->hw, IXGBE_LED_ON);
14023 + msleep_interruptible(200);
14024 + ixgbe_led_off(&adapter->hw, IXGBE_LED_ON);
14025 + msleep_interruptible(200);
14028 + /* Restore LED settings */
14029 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, led_reg);
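+ /* this backs "ethtool -p ethX <seconds>": the LED blinks at roughly
+  * 2.5Hz until the requested time (capped at 300 seconds) elapses,
+  * then LEDCTL is restored
+  */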
14034 +static int ixgbe_get_coalesce(struct net_device *netdev,
14035 + struct ethtool_coalesce *ec)
14037 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
14039 + ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0]->work_limit;
14040 +#ifndef CONFIG_IXGBE_NAPI
14041 + ec->rx_max_coalesced_frames_irq = adapter->rx_ring[0]->work_limit;
14044 + /* only valid if in constant ITR mode */
14045 + switch (adapter->rx_itr_setting) {
14047 + /* throttling disabled */
14048 + ec->rx_coalesce_usecs = 0;
14051 + /* dynamic ITR mode */
14052 + ec->rx_coalesce_usecs = 1;
14055 + /* fixed interrupt rate mode */
14056 + ec->rx_coalesce_usecs = 1000000/adapter->rx_eitr_param;
14060 + /* if in mixed tx/rx queues per vector mode, report only rx settings */
14061 + if (adapter->q_vector[0]->txr_count && adapter->q_vector[0]->rxr_count)
14064 + /* only valid if in constant ITR mode */
14065 + switch (adapter->tx_itr_setting) {
14067 + /* throttling disabled */
14068 + ec->tx_coalesce_usecs = 0;
14071 + /* dynamic ITR mode */
14072 + ec->tx_coalesce_usecs = 1;
14075 + ec->tx_coalesce_usecs = 1000000/adapter->tx_eitr_param;
14083 + * this function must be called before setting the new value of
14086 +static bool ixgbe_reenable_rsc(struct ixgbe_adapter *adapter,
14087 + struct ethtool_coalesce *ec)
14089 + /* check the old value and enable RSC if necessary */
14090 + if ((adapter->rx_itr_setting == 0) &&
14091 + (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) {
14092 + adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
14093 + adapter->netdev->features |= NETIF_F_LRO;
14094 + DPRINTK(PROBE, INFO, "rx-usecs set to %d, re-enabling RSC\n",
14095 + ec->rx_coalesce_usecs);
14101 +static int ixgbe_set_coalesce(struct net_device *netdev,
14102 + struct ethtool_coalesce *ec)
14104 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
14105 + struct ixgbe_q_vector *q_vector;
14107 + bool need_reset = false;
14109 + /* don't accept tx specific changes if we've got mixed RxTx vectors */
14110 + if (adapter->q_vector[0]->txr_count && adapter->q_vector[0]->rxr_count
14111 + && ec->tx_coalesce_usecs)
14114 + if (ec->tx_max_coalesced_frames_irq)
14115 + adapter->tx_ring[0]->work_limit = ec->tx_max_coalesced_frames_irq;
14117 +#ifndef CONFIG_IXGBE_NAPI
14118 + if (ec->rx_max_coalesced_frames_irq)
14119 + adapter->rx_ring[0]->work_limit = ec->rx_max_coalesced_frames_irq;
14122 + if (ec->rx_coalesce_usecs > 1) {
14124 + if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
14125 + max_int = IXGBE_MAX_RSC_INT_RATE;
14127 + max_int = IXGBE_MAX_INT_RATE;
14129 + /* check the limits */
14130 + if ((1000000/ec->rx_coalesce_usecs > max_int) ||
14131 + (1000000/ec->rx_coalesce_usecs < IXGBE_MIN_INT_RATE))
14134 + /* check the old value and enable RSC if necessary */
14135 + need_reset = ixgbe_reenable_rsc(adapter, ec);
14137 + /* store the value in ints/second */
14138 + adapter->rx_eitr_param = 1000000/ec->rx_coalesce_usecs;
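+ /* e.g. "ethtool -C ethX rx-usecs 125" stores 1000000 / 125 = 8000
+  * interrupts/second here
+  */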
14140 + /* static value of interrupt rate */
14141 + adapter->rx_itr_setting = adapter->rx_eitr_param;
14142 + /* clear the lower bit as its used for dynamic state */
14143 + adapter->rx_itr_setting &= ~1;
14144 + } else if (ec->rx_coalesce_usecs == 1) {
14145 + /* check the old value and enable RSC if necessary */
14146 + need_reset = ixgbe_reenable_rsc(adapter, ec);
14148 + /* 1 means dynamic mode */
14149 + adapter->rx_eitr_param = 20000;
14150 + adapter->rx_itr_setting = 1;
14153 + * any other value means disable eitr, which is best
14154 + * served by setting the interrupt rate very high
14156 + adapter->rx_eitr_param = IXGBE_MAX_INT_RATE;
14157 + adapter->rx_itr_setting = 0;
14160 + * if hardware RSC is enabled, disable it when entering
14161 + * low latency mode to avoid an errata; assume that a user
14162 + * who sets low latency mode wants it at the cost of
14163 + * everything else
14165 + if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
14166 + adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
14167 + netdev->features &= ~NETIF_F_LRO;
14168 + DPRINTK(PROBE, INFO,
14169 + "rx-usecs set to 0, disabling RSC\n");
14170 + need_reset = true;
14174 + if (ec->tx_coalesce_usecs > 1) {
14176 + * don't have to worry about max_int as above because
14177 + * tx vectors don't do hardware RSC (an rx function)
14179 + /* check the limits */
14180 + if ((1000000/ec->tx_coalesce_usecs > IXGBE_MAX_INT_RATE) ||
14181 + (1000000/ec->tx_coalesce_usecs < IXGBE_MIN_INT_RATE))
14184 + /* store the value in ints/second */
14185 + adapter->tx_eitr_param = 1000000/ec->tx_coalesce_usecs;
14187 + /* static value of interrupt rate */
14188 + adapter->tx_itr_setting = adapter->tx_eitr_param;
14190 + /* clear the lower bit as it's used for dynamic state */
14191 + adapter->tx_itr_setting &= ~1;
14192 + } else if (ec->tx_coalesce_usecs == 1) {
14193 + /* 1 means dynamic mode */
14194 + adapter->tx_eitr_param = 10000;
14195 + adapter->tx_itr_setting = 1;
14197 + adapter->tx_eitr_param = IXGBE_MAX_INT_RATE;
14198 + adapter->tx_itr_setting = 0;
14201 + /* MSI/MSIx Interrupt Mode */
14202 + if (adapter->flags &
14203 + (IXGBE_FLAG_MSIX_ENABLED | IXGBE_FLAG_MSI_ENABLED)) {
14204 + int num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
14205 + for (i = 0; i < num_vectors; i++) {
14206 + q_vector = adapter->q_vector[i];
14207 + if (q_vector->txr_count && !q_vector->rxr_count)
14209 + q_vector->eitr = adapter->tx_eitr_param;
14211 + /* rx only or mixed */
14212 + q_vector->eitr = adapter->rx_eitr_param;
14213 + ixgbe_write_eitr(q_vector);
14215 + /* Legacy Interrupt Mode */
14217 + q_vector = adapter->q_vector[0];
14218 + q_vector->eitr = adapter->rx_eitr_param;
14219 + ixgbe_write_eitr(q_vector);
14223 + * do the reset here at the end to make sure the EITR==0 case is
14224 + * handled correctly w.r.t. stopping tx and changing TXDCTL.WTHRESH
14225 + * settings; this also locks in RSC enable/disable, which requires a reset
14227 + if (need_reset) {
14228 + if (netif_running(netdev))
14229 + ixgbe_reinit_locked(adapter);
14231 + ixgbe_reset(adapter);
14237 +#ifdef ETHTOOL_GFLAGS
14238 +static int ixgbe_set_flags(struct net_device *netdev, u32 data)
14240 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
14241 + bool need_reset = false;
14244 + rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_LRO | ETH_FLAG_NTUPLE);
14248 + /* if state changes we need to update adapter->flags and reset */
14249 + if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) {
14251 + * cast both to bool and verify if they are set the same
14252 + * but only enable RSC if itr is non-zero, as
14253 + * itr=0 and RSC are mutually exclusive
14255 + if (((!!(data & ETH_FLAG_LRO)) !=
14256 + (!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) &&
14257 + adapter->rx_itr_setting &&
14258 + (adapter->rx_itr_setting <= IXGBE_MAX_RSC_INT_RATE)) {
14259 + adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
14260 + switch (adapter->hw.mac.type) {
14261 + case ixgbe_mac_82599EB:
14262 + need_reset = true;
14267 + } else if (!adapter->rx_itr_setting ||
14268 + (adapter->rx_itr_setting > IXGBE_MAX_RSC_INT_RATE)) {
14269 + netdev->features &= ~NETIF_F_LRO;
14273 +#ifndef IXGBE_NO_LRO
14275 + * Cast both to bool and verify if they are set the same
14276 + * and don't set LRO if RSC enabled.
14278 + if (((!!(data & ETH_FLAG_LRO)) !=
14279 + (!!(adapter->flags2 & IXGBE_FLAG2_SWLRO_ENABLED))) &&
14280 + (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) {
14282 + adapter->flags2 ^= IXGBE_FLAG2_SWLRO_ENABLED;
14283 + for (i = 0; i < adapter->num_rx_queues; i++) {
14284 + if (adapter->flags2 & IXGBE_FLAG2_SWLRO_ENABLED)
14285 + set_ring_lro_enabled(adapter->rx_ring[i]);
14287 + clear_ring_lro_enabled(adapter->rx_ring[i]);
14291 +#endif /* IXGBE_NO_LRO */
14294 +#ifdef NETIF_F_NTUPLE
14296 + * Check if Flow Director n-tuple support was enabled or disabled. If
14297 + * the state changed, we need to reset.
14299 + if ((adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) &&
14300 + (!(data & ETH_FLAG_NTUPLE))) {
14301 + /* turn off Flow Director perfect, set hash and reset */
14303 + adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
14304 + adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
14305 + for (i = 0; i < adapter->num_tx_queues; i++)
14306 + adapter->tx_ring[i]->atr_sample_rate = 20;
14307 + need_reset = true;
14308 + } else if ((!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) &&
14309 + (data & ETH_FLAG_NTUPLE)) {
14310 + /* turn off Flow Director hash, enable perfect and reset */
14312 + adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
14313 + adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
14314 + for (i = 0; i < adapter->num_tx_queues; i++)
14315 + adapter->tx_ring[i]->atr_sample_rate = 0;
14316 + need_reset = true;
14318 + /* no state change */
14321 +#endif /* NETIF_F_NTUPLE */
14322 + if (need_reset) {
14323 + if (netif_running(netdev))
14324 + ixgbe_reinit_locked(adapter);
14326 + ixgbe_reset(adapter);
14331 +#endif /* ETHTOOL_GFLAGS */
14333 +#ifdef NETIF_F_NTUPLE
14334 +static int ixgbe_set_rx_ntuple(struct net_device *dev,
14335 + struct ethtool_rx_ntuple *cmd)
14337 + struct ixgbe_adapter *adapter = netdev_priv(dev);
14338 + struct ethtool_rx_ntuple_flow_spec fs = cmd->fs;
14339 + struct ixgbe_atr_input input_struct;
14340 + struct ixgbe_atr_input_masks input_masks;
14341 + int target_queue;
14343 + if (adapter->hw.mac.type == ixgbe_mac_82598EB)
14344 + return -EOPNOTSUPP;
14347 + * Don't allow programming if the action is a queue greater than
14348 + * the number of online Tx queues.
14350 + if ((fs.action >= adapter->num_tx_queues) ||
14351 + (fs.action < ETHTOOL_RXNTUPLE_ACTION_DROP))
14354 + memset(&input_struct, 0, sizeof(struct ixgbe_atr_input));
14355 + memset(&input_masks, 0, sizeof(struct ixgbe_atr_input_masks));
14357 + input_masks.src_ip_mask = fs.m_u.tcp_ip4_spec.ip4src;
14358 + input_masks.dst_ip_mask = fs.m_u.tcp_ip4_spec.ip4dst;
14359 + input_masks.src_port_mask = fs.m_u.tcp_ip4_spec.psrc;
14360 + input_masks.dst_port_mask = fs.m_u.tcp_ip4_spec.pdst;
14361 + input_masks.vlan_id_mask = fs.vlan_tag_mask;
14362 + /* only use the lowest 2 bytes for flex bytes */
14363 + input_masks.data_mask = (fs.data_mask & 0xffff);
14365 + switch (fs.flow_type) {
14366 + case TCP_V4_FLOW:
14367 + ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_TCP);
14369 + case UDP_V4_FLOW:
14370 + ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_UDP);
14372 + case SCTP_V4_FLOW:
14373 + ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_SCTP);
14379 + /* Mask bits from the inputs based on user-supplied mask */
14380 + ixgbe_atr_set_src_ipv4_82599(&input_struct,
14381 + (fs.h_u.tcp_ip4_spec.ip4src & ~fs.m_u.tcp_ip4_spec.ip4src));
14382 + ixgbe_atr_set_dst_ipv4_82599(&input_struct,
14383 + (fs.h_u.tcp_ip4_spec.ip4dst & ~fs.m_u.tcp_ip4_spec.ip4dst));
14384 + /* 82599 expects these to be byte-swapped for perfect filtering */
14385 + ixgbe_atr_set_src_port_82599(&input_struct,
14386 + ((ntohs(fs.h_u.tcp_ip4_spec.psrc)) & ~fs.m_u.tcp_ip4_spec.psrc));
14387 + ixgbe_atr_set_dst_port_82599(&input_struct,
14388 + ((ntohs(fs.h_u.tcp_ip4_spec.pdst)) & ~fs.m_u.tcp_ip4_spec.pdst));
14390 + /* VLAN and Flex bytes are either completely masked or not */
14391 + if (!fs.vlan_tag_mask)
14392 + ixgbe_atr_set_vlan_id_82599(&input_struct, fs.vlan_tag);
14394 + if (!input_masks.data_mask)
14395 + /* make sure we only use the first 2 bytes of user data */
14396 + ixgbe_atr_set_flex_byte_82599(&input_struct,
14397 + (fs.data & 0xffff));
14399 + /* determine if we need to drop or route the packet */
14400 + if (fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP)
14401 + target_queue = MAX_RX_QUEUES - 1;
14403 + target_queue = fs.action;
14405 + spin_lock(&adapter->fdir_perfect_lock);
14406 + ixgbe_fdir_add_perfect_filter_82599(&adapter->hw, &input_struct,
14407 + &input_masks, 0, target_queue);
14408 + spin_unlock(&adapter->fdir_perfect_lock);
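+ /* exercised from userspace via e.g. "ethtool -U ethX flow-type tcp4
+  * ... action N"; a negative action (ETHTOOL_RXNTUPLE_ACTION_DROP)
+  * maps to the MAX_RX_QUEUES - 1 drop queue above
+  */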
14413 +#endif /* NETIF_F_NTUPLE */
14414 +static struct ethtool_ops ixgbe_ethtool_ops = {
14415 + .get_settings = ixgbe_get_settings,
14416 + .set_settings = ixgbe_set_settings,
14417 + .get_drvinfo = ixgbe_get_drvinfo,
14418 + .get_regs_len = ixgbe_get_regs_len,
14419 + .get_regs = ixgbe_get_regs,
14420 + .get_wol = ixgbe_get_wol,
14421 + .set_wol = ixgbe_set_wol,
14422 + .nway_reset = ixgbe_nway_reset,
14423 + .get_link = ethtool_op_get_link,
14424 + .get_eeprom_len = ixgbe_get_eeprom_len,
14425 + .get_eeprom = ixgbe_get_eeprom,
14426 + .set_eeprom = ixgbe_set_eeprom,
14427 + .get_ringparam = ixgbe_get_ringparam,
14428 + .set_ringparam = ixgbe_set_ringparam,
14429 + .get_pauseparam = ixgbe_get_pauseparam,
14430 + .set_pauseparam = ixgbe_set_pauseparam,
14431 + .get_rx_csum = ixgbe_get_rx_csum,
14432 + .set_rx_csum = ixgbe_set_rx_csum,
14433 + .get_tx_csum = ixgbe_get_tx_csum,
14434 + .set_tx_csum = ixgbe_set_tx_csum,
14435 + .get_sg = ethtool_op_get_sg,
14436 + .set_sg = ethtool_op_set_sg,
14437 + .get_msglevel = ixgbe_get_msglevel,
14438 + .set_msglevel = ixgbe_set_msglevel,
14439 +#ifdef NETIF_F_TSO
14440 + .get_tso = ethtool_op_get_tso,
14441 + .set_tso = ixgbe_set_tso,
14443 +#ifndef HAVE_ETHTOOL_GET_SSET_COUNT
14444 + .self_test_count = ixgbe_diag_test_count,
14445 +#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
14446 + .self_test = ixgbe_diag_test,
14447 + .get_strings = ixgbe_get_strings,
14448 + .phys_id = ixgbe_phys_id,
14449 +#ifndef HAVE_ETHTOOL_GET_SSET_COUNT
14450 + .get_stats_count = ixgbe_get_stats_count,
14451 +#else /* HAVE_ETHTOOL_GET_SSET_COUNT */
14452 + .get_sset_count = ixgbe_get_sset_count,
14453 +#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
14454 + .get_ethtool_stats = ixgbe_get_ethtool_stats,
14455 +#ifdef ETHTOOL_GPERMADDR
14456 + .get_perm_addr = ethtool_op_get_perm_addr,
14458 + .get_coalesce = ixgbe_get_coalesce,
14459 + .set_coalesce = ixgbe_set_coalesce,
14460 +#ifdef ETHTOOL_GFLAGS
14461 + .get_flags = ethtool_op_get_flags,
14462 + .set_flags = ixgbe_set_flags,
14464 +#ifdef NETIF_F_NTUPLE
14465 + .set_rx_ntuple = ixgbe_set_rx_ntuple,
14466 +#endif /* NETIF_F_NTUPLE */
14469 +void ixgbe_set_ethtool_ops(struct net_device *netdev)
14471 + SET_ETHTOOL_OPS(netdev, &ixgbe_ethtool_ops);
14473 +#endif /* SIOCETHTOOL */
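For reference, the offline self-test implemented by ixgbe_diag_test() above is
normally driven from userspace with "ethtool -t ethX offline". The following is
a minimal sketch of the underlying SIOCETHTOOL ioctl, not part of the patch;
the interface name and the result labels are illustrative (ixgbe fills
data[0..4] with the register, eeprom, interrupt, loopback and link results, in
that order), and error handling is abbreviated.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(int argc, char **argv)
{
        const char *ifname = argc > 1 ? argv[1] : "eth0"; /* illustrative */
        static const char *labels[] = { "register", "eeprom", "interrupt",
                                        "loopback", "link" };
        struct ethtool_test *test;
        struct ifreq ifr;
        unsigned int i;
        int fd;

        /* reserve room for the five ixgbe results in data[] */
        test = calloc(1, sizeof(*test) + 5 * sizeof(__u64));
        if (!test)
                return 1;
        test->cmd = ETHTOOL_TEST;
        test->flags = ETH_TEST_FL_OFFLINE;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
        ifr.ifr_data = (char *)test;

        fd = socket(AF_INET, SOCK_DGRAM, 0);
        if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
                perror("SIOCETHTOOL");
                return 1;
        }
        for (i = 0; i < 5 && i < test->len; i++)
                printf("%-9s test: %llu\n", labels[i],
                       (unsigned long long)test->data[i]);
        printf("overall: %s\n",
               (test->flags & ETH_TEST_FL_FAILED) ? "FAILED" : "PASSED");
        close(fd);
        free(test);
        return 0;
}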
14474 diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_fcoe.c linux-2.6.22-50/drivers/net/ixgbe/ixgbe_fcoe.c
14475 --- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_fcoe.c 1969-12-31 19:00:00.000000000 -0500
14476 +++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_fcoe.c 2010-08-25 17:56:26.000000000 -0400
14478 +/*******************************************************************************
14480 + Intel 10 Gigabit PCI Express Linux driver
14481 + Copyright(c) 1999 - 2010 Intel Corporation.
14483 + This program is free software; you can redistribute it and/or modify it
14484 + under the terms and conditions of the GNU General Public License,
14485 + version 2, as published by the Free Software Foundation.
14487 + This program is distributed in the hope it will be useful, but WITHOUT
14488 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14489 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14492 + You should have received a copy of the GNU General Public License along with
14493 + this program; if not, write to the Free Software Foundation, Inc.,
14494 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
14496 + The full GNU General Public License is included in this distribution in
14497 + the file called "COPYING".
14499 + Contact Information:
14500 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
14501 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
14503 +*******************************************************************************/
14505 +#include "ixgbe.h"
14510 +#include "ixgbe_dcb_82599.h"
14511 +#endif /* CONFIG_DCB */
14512 +#include <linux/if_ether.h>
14513 +#include <scsi/scsi_cmnd.h>
14514 +#include <scsi/scsi_device.h>
14515 +#include <scsi/fc/fc_fs.h>
14516 +#include <scsi/fc/fc_fcoe.h>
14517 +#include <scsi/libfc.h>
14518 +#include <scsi/libfcoe.h>
14521 + * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
14522 + * @rx_desc: advanced rx descriptor
14524 + * Returns : true if it is an FCoE pkt
14526 +static inline bool ixgbe_rx_is_fcoe(union ixgbe_adv_rx_desc *rx_desc)
14530 + p = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info);
14531 + if (p & IXGBE_RXDADV_PKTTYPE_ETQF) {
14532 + p &= IXGBE_RXDADV_PKTTYPE_ETQF_MASK;
14533 + p >>= IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT;
14534 + return p == IXGBE_ETQF_FILTER_FCOE;
14540 + * ixgbe_fcoe_clear_ddp - clear the given ddp context
14541 + * @ddp: ptr to the ixgbe_fcoe_ddp
14546 +static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp)
14557 + * ixgbe_fcoe_ddp_put - free the ddp context for a given xid
14558 + * @netdev: the corresponding net_device
14559 + * @xid: the xid whose corresponding ddp context will be freed
14561 + * This is the implementation of net_device_ops.ndo_fcoe_ddp_done
14562 + * and it is expected to be called by ULD, i.e., FCP layer of libfc
14563 + * to release the corresponding ddp context when the I/O is done.
14565 + * Returns : data length already ddp-ed in bytes
14567 +int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
14570 + struct ixgbe_fcoe *fcoe;
14571 + struct ixgbe_adapter *adapter;
14572 + struct ixgbe_fcoe_ddp *ddp;
14575 + goto out_ddp_put;
14577 + if (xid >= IXGBE_FCOE_DDP_MAX)
14578 + goto out_ddp_put;
14580 + adapter = netdev_priv(netdev);
14581 + fcoe = &adapter->fcoe;
14582 + ddp = &fcoe->ddp[xid];
14584 + goto out_ddp_put;
14587 + /* if there is an error, force invalidation of the ddp context */
14589 + spin_lock_bh(&fcoe->lock);
14590 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLT, 0);
14591 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLTRW,
14592 + (xid | IXGBE_FCFLTRW_WE));
14593 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCBUFF, 0);
14594 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
14595 + (xid | IXGBE_FCDMARW_WE));
14596 + spin_unlock_bh(&fcoe->lock);
14599 + pci_unmap_sg(adapter->pdev, ddp->sgl, ddp->sgc,
14600 + DMA_FROM_DEVICE);
14601 + pci_pool_free(fcoe->pool, ddp->udl, ddp->udp);
14602 + ixgbe_fcoe_clear_ddp(ddp);
14609 + * ixgbe_fcoe_ddp_get - called to set up ddp context
14610 + * @netdev: the corresponding net_device
14611 + * @xid: the exchange id requesting ddp
14612 + * @sgl: the scatter-gather list for this request
14613 + * @sgc: the number of scatter-gather items
14615 + * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup
14616 + * and is expected to be called from ULD, e.g., FCP layer of libfc
14617 + * to set up ddp for the corresponding xid of the given sglist for
14618 + * the corresponding I/O.
14620 + * Returns : 1 for success and 0 for no ddp
14622 +int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
14623 + struct scatterlist *sgl, unsigned int sgc)
14625 + struct ixgbe_adapter *adapter;
14626 + struct ixgbe_hw *hw;
14627 + struct ixgbe_fcoe *fcoe;
14628 + struct ixgbe_fcoe_ddp *ddp;
14629 + struct scatterlist *sg;
14630 + unsigned int i, j, dmacount;
14631 + unsigned int len;
14632 + static const unsigned int bufflen = 4096;
14633 + unsigned int firstoff = 0;
14634 + unsigned int lastsize;
14635 + unsigned int thisoff = 0;
14636 + unsigned int thislen = 0;
14637 + u32 fcbuff, fcdmarw, fcfltrw;
14640 + if (!netdev || !sgl || !sgc)
14643 + adapter = netdev_priv(netdev);
14644 + if (xid >= IXGBE_FCOE_DDP_MAX) {
14645 + DPRINTK(DRV, WARNING, "xid=0x%x out-of-range\n", xid);
14649 + fcoe = &adapter->fcoe;
14650 + if (!fcoe->pool) {
14651 + DPRINTK(DRV, WARNING, "xid=0x%x no ddp pool for fcoe\n", xid);
14655 + ddp = &fcoe->ddp[xid];
14657 + DPRINTK(DRV, ERR, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
14658 + xid, ddp->sgl, ddp->sgc);
14661 + ixgbe_fcoe_clear_ddp(ddp);
14663 + /* setup dma from scsi command sgl */
14664 + dmacount = pci_map_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE);
14665 + if (dmacount == 0) {
14666 + DPRINTK(DRV, ERR, "xid 0x%x DMA map error\n", xid);
14670 + /* alloc the udl from our ddp pool */
14671 + ddp->udl = pci_pool_alloc(fcoe->pool, GFP_ATOMIC, &ddp->udp);
14673 + DPRINTK(DRV, ERR, "failed to allocate ddp context\n");
14674 + goto out_noddp_unmap;
14680 + for_each_sg(sgl, sg, dmacount, i) {
14681 + addr = sg_dma_address(sg);
14682 + len = sg_dma_len(sg);
14684 + /* max number of buffers allowed in one DDP context */
14685 + if (j >= IXGBE_BUFFCNT_MAX) {
14686 + DPRINTK(DRV, ERR, "xid=%x:%d,%d,%d:addr=%llx "
14687 + "not enough descriptors\n",
14688 + xid, i, j, dmacount, (u64)addr);
14689 + goto out_noddp_free;
14692 + /* get the offset of length of current buffer */
14693 + thisoff = addr & ((dma_addr_t)bufflen - 1);
14694 + thislen = min((bufflen - thisoff), len);
14696 + * all but the 1st buffer (j == 0)
14697 + * must be aligned on bufflen
14699 + if ((j != 0) && (thisoff))
14700 + goto out_noddp_free;
14702 + * all but the last buffer
14703 + * ((i == (dmacount - 1)) && (thislen == len))
14704 + * must end at bufflen
14706 + if (((i != (dmacount - 1)) || (thislen != len))
14707 + && ((thislen + thisoff) != bufflen))
14708 + goto out_noddp_free;
14710 + ddp->udl[j] = (u64)(addr - thisoff);
14711 + /* only the first buffer may have a non-zero offset */
14713 + firstoff = thisoff;
14719 + /* only the last buffer may have non-full bufflen */
14720 + lastsize = thisoff + thislen;
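+ /* e.g. a 14KB request starting 0x200 into a 4KB buffer maps to four
+  * buffers: firstoff = 0x200, a 3584-byte first chunk, two full 4KB
+  * middle buffers, and lastsize = 2560 bytes in the last one
+  */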
14722 + fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
14723 + fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
14724 + fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT);
14725 + fcbuff |= (IXGBE_FCBUFF_VALID);
14728 + fcdmarw |= IXGBE_FCDMARW_WE;
14729 + fcdmarw |= (lastsize << IXGBE_FCDMARW_LASTSIZE_SHIFT);
14732 + fcfltrw |= IXGBE_FCFLTRW_WE;
14734 + /* program DMA context */
14735 + hw = &adapter->hw;
14736 + spin_lock_bh(&fcoe->lock);
14737 + IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32));
14738 + IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32);
14739 + IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff);
14740 + IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, fcdmarw);
14741 + /* program filter context */
14742 + IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0);
14743 + IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID);
14744 + IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw);
14745 + spin_unlock_bh(&fcoe->lock);
14750 + pci_pool_free(fcoe->pool, ddp->udl, ddp->udp);
14751 + ixgbe_fcoe_clear_ddp(ddp);
14754 + pci_unmap_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE);
14759 + * ixgbe_fcoe_ddp - check ddp status and mark it done
14760 + * @adapter: ixgbe adapter
14761 + * @rx_desc: advanced rx descriptor
14762 + * @skb: the skb holding the received data
14764 + * This checks ddp status.
14766 + * Returns : < 0 indicates an error or not an FCoE ddp, 0 indicates
14767 + * not passing the skb to ULD, > 0 indicates the length of data
14770 +int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
14771 + union ixgbe_adv_rx_desc *rx_desc,
14772 + struct sk_buff *skb)
14776 + u32 sterr, fceofe, fcerr, fcstat;
14777 + int rc = -EINVAL;
14778 + struct ixgbe_fcoe *fcoe;
14779 + struct ixgbe_fcoe_ddp *ddp;
14780 + struct fc_frame_header *fh;
14782 + if (!ixgbe_rx_is_fcoe(rx_desc))
14785 + skb->ip_summed = CHECKSUM_UNNECESSARY;
14786 + sterr = le32_to_cpu(rx_desc->wb.upper.status_error);
14787 + fcerr = (sterr & IXGBE_RXDADV_ERR_FCERR);
14788 + fceofe = (sterr & IXGBE_RXDADV_ERR_FCEOFE);
14789 + if (fcerr == IXGBE_FCERR_BADCRC)
14790 + skb->ip_summed = CHECKSUM_NONE;
14792 + if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
14793 + fh = (struct fc_frame_header *)(skb->data +
14794 + sizeof(struct vlan_hdr) + sizeof(struct fcoe_hdr));
14796 + fh = (struct fc_frame_header *)(skb->data +
14797 + sizeof(struct fcoe_hdr));
14798 + fctl = ntoh24(fh->fh_f_ctl);
14799 + if (fctl & FC_FC_EX_CTX)
14800 + xid = be16_to_cpu(fh->fh_ox_id);
14802 + xid = be16_to_cpu(fh->fh_rx_id);
14804 + if (xid >= IXGBE_FCOE_DDP_MAX)
14807 + fcoe = &adapter->fcoe;
14808 + ddp = &fcoe->ddp[xid];
14812 + ddp->err = (fcerr | fceofe);
14816 + fcstat = (sterr & IXGBE_RXDADV_STAT_FCSTAT);
14818 + /* update length of DDPed data */
14819 + ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
14820 + /* unmap the sg list when FCP_RSP is received */
14821 + if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_FCPRSP) {
14822 + pci_unmap_sg(adapter->pdev, ddp->sgl,
14823 + ddp->sgc, DMA_FROM_DEVICE);
14827 + /* return 0 to bypass going to ULD for DDPed data */
14828 + if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_DDP)
14830 + else if (ddp->len)
14839 + * ixgbe_fso - ixgbe FCoE Sequence Offload (FSO)
14840 + * @tx_ring: tx desc ring
14841 + * @skb: associated skb
14842 + * @tx_flags: tx flags
14843 + * @hdr_len: hdr_len to be returned
14845 + * This sets up large send offload for FCoE
14847 + * Returns : 0 indicates no FSO, > 0 for FSO, < 0 for error
14849 +int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
14850 + u32 tx_flags, u8 *hdr_len)
14852 + struct fc_frame_header *fh;
14853 + u32 vlan_macip_lens;
14854 + u32 fcoe_sof_eof = 0;
14855 + u32 mss_l4len_idx;
14858 +#ifdef NETIF_F_FSO
14859 + if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type != SKB_GSO_FCOE) {
14860 + dev_err(tx_ring->dev, "Wrong gso type %d: expecting SKB_GSO_FCOE\n",
14861 + skb_shinfo(skb)->gso_type);
14866 + /* reset the headers to point at the fcoe/fc headers */
14867 + skb_set_network_header(skb, skb->mac_len);
14868 + skb_set_transport_header(skb, skb->mac_len +
14869 + sizeof(struct fcoe_hdr));
14871 + /* sets up SOF and ORIS */
14872 + sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof;
14875 + fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_ORIS;
14878 + fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF |
14879 + IXGBE_ADVTXD_FCOEF_ORIS;
14884 + fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF;
14887 + dev_warn(tx_ring->dev, "unknown sof = 0x%x\n", sof);
14891 + /* the first byte of the last dword is EOF */
14892 + skb_copy_bits(skb, skb->len - 4, &eof, 1);
14893 + /* sets up EOF and ORIE */
14896 + fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N;
14899 + /* lso needs ORIE */
14900 + if (skb_is_gso(skb))
14901 + fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N |
14902 + IXGBE_ADVTXD_FCOEF_ORIE;
14904 + fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_T;
14907 + fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_NI;
14910 + fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A;
14913 + dev_warn(tx_ring->dev, "unknown eof = 0x%x\n", eof);
14917 + /* sets up PARINC indicating data offset */
14918 + fh = (struct fc_frame_header *)skb_transport_header(skb);
14919 + if (fh->fh_f_ctl[2] & FC_FC_REL_OFF)
14920 + fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_PARINC;
14922 + /* include trailer in headlen as it is replicated per frame */
14923 + *hdr_len = sizeof(struct fcoe_crc_eof);
14925 + /* hdr_len includes fc_hdr if FCoE lso is enabled */
14926 + if (skb_is_gso(skb))
14927 + *hdr_len += skb_transport_offset(skb) +
14928 + sizeof(struct fc_frame_header);
14930 + /* mss_l4len_id: use 1 for FSO as TSO, no need for L4LEN */
14931 + mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
14932 + mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
14934 + /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
14935 + vlan_macip_lens = skb_transport_offset(skb) +
14936 + sizeof(struct fc_frame_header);
14937 + vlan_macip_lens |= (skb_transport_offset(skb) - 4)
14938 + << IXGBE_ADVTXD_MACLEN_SHIFT;
14939 + vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
14941 + /* write context desc */
14942 + ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fcoe_sof_eof,
14943 + IXGBE_ADVTXT_TUCMD_FCOE, mss_l4len_idx);
14945 + return skb_is_gso(skb);
14949 + * ixgbe_configure_fcoe - configures registers for fcoe at start
14950 + * @adapter: ptr to ixgbe adapter
14952 + * This sets up FCoE related registers
14956 +void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
14958 + int i, fcoe_q, fcoe_i;
14959 + struct ixgbe_hw *hw = &adapter->hw;
14960 + struct ixgbe_fcoe *fcoe = &adapter->fcoe;
14961 + struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
14966 +#endif /* CONFIG_DCB */
14967 + /* create the pool for ddp if not created yet */
14968 + if (!fcoe->pool) {
14969 + /* allocate ddp pool */
14970 + fcoe->pool = pci_pool_create("ixgbe_fcoe_ddp",
14971 + adapter->pdev, IXGBE_FCPTR_MAX,
14972 + IXGBE_FCPTR_ALIGN, PAGE_SIZE);
14974 + DPRINTK(DRV, ERR,
14975 + "failed to allocate FCoE DDP pool\n");
14977 + spin_lock_init(&fcoe->lock);
14980 + /* Enable L2 eth type filter for FCoE */
14981 + IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE),
14982 + (ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN));
14983 + /* Enable L2 eth type filter for FIP */
14984 + IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP),
14985 + (ETH_P_FIP | IXGBE_ETQF_FILTER_EN));
14986 + if (adapter->ring_feature[RING_F_FCOE].indices) {
14987 + /* Use multiple rx queues for FCoE by redirection table */
14988 + for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
14989 + fcoe_i = f->mask + i % f->indices;
14990 + fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
14991 + fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
14992 + IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
14994 + IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);
14995 + IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);
14997 + /* Use single rx queue for FCoE */
14998 + fcoe_i = f->mask;
14999 + fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
15000 + IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, 0);
15001 + IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE),
15002 + IXGBE_ETQS_QUEUE_EN |
15003 + (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
15005 + /* Enable L2 eth type filter for FIP */
15006 + IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP),
15007 + (ETH_P_FIP | IXGBE_ETQF_FILTER_EN));
15008 + /* send FIP frames to the first FCoE queue */
15009 + fcoe_i = f->mask;
15010 + fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
15011 + IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
15012 + IXGBE_ETQS_QUEUE_EN |
15013 + (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
15015 + IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL,
15016 + IXGBE_FCRXCTRL_FCOELLI |
15017 + IXGBE_FCRXCTRL_FCCRCBO |
15018 + (FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT));
15021 + up2tc = IXGBE_READ_REG(&adapter->hw, IXGBE_RTTUP2TC);
15022 + for (i = 0; i < MAX_USER_PRIORITY; i++) {
15023 + tc = (u8)(up2tc >> (i * IXGBE_RTTUP2TC_UP_SHIFT));
15024 + tc &= (MAX_TRAFFIC_CLASS - 1);
15025 + if (fcoe->tc == tc) {
15030 +#endif /* CONFIG_DCB */
15034 + * ixgbe_cleanup_fcoe - release all fcoe ddp context resources
15035 + * @adapter : ixgbe adapter
15037 + * Cleans up outstanding ddp context resources
15041 +void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter)
15044 + struct ixgbe_fcoe *fcoe = &adapter->fcoe;
15046 + /* release ddp resource */
15047 + if (fcoe->pool) {
15048 + for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++)
15049 + ixgbe_fcoe_ddp_put(adapter->netdev, i);
15050 + pci_pool_destroy(fcoe->pool);
15051 + fcoe->pool = NULL;
15055 +#ifdef HAVE_NETDEV_OPS_FCOE_ENABLE
15057 + * ixgbe_fcoe_enable - turn on FCoE offload feature
15058 + * @netdev: the corresponding netdev
15060 + * Turns on FCoE offload feature in 82599.
15062 + * Returns : 0 indicates success or -EINVAL on failure
15064 +int ixgbe_fcoe_enable(struct net_device *netdev)
15066 + int rc = -EINVAL;
15067 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
15068 + struct ixgbe_fcoe *fcoe = &adapter->fcoe;
15071 + if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
15074 + atomic_inc(&fcoe->refcnt);
15075 + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
15078 + DPRINTK(DRV, INFO, "Enabling FCoE offload features.\n");
15079 + if (netif_running(netdev))
15080 + netdev->netdev_ops->ndo_stop(netdev);
15082 + ixgbe_clear_interrupt_scheme(adapter);
15084 + adapter->flags |= IXGBE_FLAG_FCOE_ENABLED;
15085 + adapter->ring_feature[RING_F_FCOE].indices = IXGBE_FCRETA_SIZE;
15086 + netdev->features |= NETIF_F_FCOE_CRC;
15087 + netdev->features |= NETIF_F_FSO;
15088 + netdev->features |= NETIF_F_FCOE_MTU;
15089 + netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
15091 + ixgbe_init_interrupt_scheme(adapter);
15092 + netdev_features_change(netdev);
15094 + if (netif_running(netdev))
15095 + netdev->netdev_ops->ndo_open(netdev);
15103 + * ixgbe_fcoe_disable - turn off FCoE offload feature
15104 + * @netdev: the corresponding netdev
15106 + * Turns off FCoE offload feature in 82599.
15108 + * Returns : 0 indicates success or -EINVAL on failure
15110 +int ixgbe_fcoe_disable(struct net_device *netdev)
15112 + int rc = -EINVAL;
15113 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
15114 + struct ixgbe_fcoe *fcoe = &adapter->fcoe;
15116 + if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) ||
15117 + !(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
15118 + goto out_disable;
15120 + if (!atomic_dec_and_test(&fcoe->refcnt))
15121 + goto out_disable;
15123 + DPRINTK(DRV, INFO, "Disabling FCoE offload features.\n");
15124 + netdev->features &= ~NETIF_F_FCOE_CRC;
15125 + netdev->features &= ~NETIF_F_FSO;
15126 + netdev->features &= ~NETIF_F_FCOE_MTU;
15127 + netdev->fcoe_ddp_xid = 0;
15128 + netdev_features_change(netdev);
15130 + if (netif_running(netdev))
15131 + netdev->netdev_ops->ndo_stop(netdev);
15133 + ixgbe_clear_interrupt_scheme(adapter);
15134 + adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
15135 + adapter->ring_feature[RING_F_FCOE].indices = 0;
15136 + ixgbe_cleanup_fcoe(adapter);
15137 + ixgbe_init_interrupt_scheme(adapter);
15139 + if (netif_running(netdev))
15140 + netdev->netdev_ops->ndo_open(netdev);
15146 +#endif /* HAVE_NETDEV_OPS_FCOE_ENABLE */
15149 +#ifdef HAVE_DCBNL_OPS_GETAPP
15151 + * ixgbe_fcoe_getapp - retrieves current user priority bitmap for FCoE
15152 + * @adapter : ixgbe adapter
15154 + * Finds out the corresponding user priority bitmap from the current
15155 + * traffic class that FCoE belongs to. Returns 0 as the invalid user
15156 + * priority bitmap to indicate an error.
15158 + * Returns : 802.1p user priority bitmap for FCoE
15160 +u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter)
15162 + return 1 << adapter->fcoe.up;
15166 + * ixgbe_fcoe_setapp - sets the user priority bitmap for FCoE
15167 + * @adapter : ixgbe adapter
15168 + * @up : 802.1p user priority bitmap
15170 + * Finds out the traffic class from the input user priority
15171 + * bitmap for FCoE.
15173 + * Returns : 0 on success otherwise returns 1 on error
15175 +u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up)
15180 + /* valid user priority bitmap must not be 0 */
15182 + /* from user priority to the corresponding traffic class */
15183 + up2tc = IXGBE_READ_REG(&adapter->hw, IXGBE_RTTUP2TC);
15184 + for (i = 0; i < MAX_USER_PRIORITY; i++) {
15185 + if (up & (1 << i)) {
15186 + up2tc >>= (i * IXGBE_RTTUP2TC_UP_SHIFT);
15187 + up2tc &= (MAX_TRAFFIC_CLASS - 1);
15188 + adapter->fcoe.tc = (u8)up2tc;
15189 + adapter->fcoe.up = i;
15197 +#endif /* HAVE_DCBNL_OPS_GETAPP */
15198 +#endif /* CONFIG_DCB */
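
Both DCB app paths above decode the RTTUP2TC register the same way: each
802.1p user priority owns a small field whose value is the traffic class that
priority maps to. A minimal sketch of that decode, assuming
IXGBE_RTTUP2TC_UP_SHIFT is the per-UP field width and MAX_TRAFFIC_CLASS is a
power of two (neither value is shown in this hunk):

	/* Illustrative only: extract the TC assigned to user priority 'up'
	 * from a previously read RTTUP2TC value. */
	static u8 up_to_tc(u32 up2tc, u8 up)
	{
		return (u8)((up2tc >> (up * IXGBE_RTTUP2TC_UP_SHIFT)) &
			    (MAX_TRAFFIC_CLASS - 1));
	}

With eight 3-bit fields, a register value of 0x00000008 maps UP 1 to TC 1 and
every other UP to TC 0.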
15200 +#ifdef HAVE_NETDEV_OPS_FCOE_GETWWN
15202 + * ixgbe_fcoe_get_wwn - get world wide name for the node or the port
15203 + * @netdev : the corresponding netdev
15204 + * @wwn : the world wide name
15205 + * @type: the type of world wide name
15207 + * Returns the node or port world wide name if both the prefix and the SAN
15208 + * MAC address are valid; the wwn is then formed based on the NAA-2 IEEE
15209 + * Extended name identifier (ref. to T10 FC-LS Spec., Sec. 15.3).
15211 + * Returns : 0 on success
15213 +int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
15215 + int rc = -EINVAL;
15216 + u16 prefix = 0xffff;
15217 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
15218 + struct ixgbe_mac_info *mac = &adapter->hw.mac;
15221 + case NETDEV_FCOE_WWNN:
15222 + prefix = mac->wwnn_prefix;
15224 + case NETDEV_FCOE_WWPN:
15225 + prefix = mac->wwpn_prefix;
15231 + if ((prefix != 0xffff) &&
15232 + is_valid_ether_addr(mac->san_addr)) {
15233 + *wwn = ((u64) prefix << 48) |
15234 + ((u64) mac->san_addr[0] << 40) |
15235 + ((u64) mac->san_addr[1] << 32) |
15236 + ((u64) mac->san_addr[2] << 24) |
15237 + ((u64) mac->san_addr[3] << 16) |
15238 + ((u64) mac->san_addr[4] << 8) |
15239 + ((u64) mac->san_addr[5]);
15244 +#endif /* HAVE_NETDEV_OPS_FCOE_GETWWN */
15245 +#endif /* IXGBE_FCOE */
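
The WWN construction in ixgbe_fcoe_get_wwn above is simply a 16-bit NAA
prefix concatenated with the 48-bit SAN MAC address. A worked example with a
hypothetical prefix of 0x2000 and SAN MAC 00:1b:21:aa:bb:cc (both values are
illustrative, not read from any hardware):

	/* the prefix occupies the top 16 bits, the MAC the low 48 */
	u64 wwn = ((u64)0x2000 << 48) |
		  ((u64)0x00 << 40) | ((u64)0x1b << 32) |
		  ((u64)0x21 << 24) | ((u64)0xaa << 16) |
		  ((u64)0xbb << 8)  |  (u64)0xcc;
	/* wwn == 0x2000001b21aabbcc */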
15246 diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_fcoe.h linux-2.6.22-50/drivers/net/ixgbe/ixgbe_fcoe.h
15247 --- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_fcoe.h 1969-12-31 19:00:00.000000000 -0500
15248 +++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_fcoe.h 2010-08-25 17:56:26.000000000 -0400
15250 +/*******************************************************************************
15252 + Intel 10 Gigabit PCI Express Linux driver
15253 + Copyright(c) 1999 - 2010 Intel Corporation.
15255 + This program is free software; you can redistribute it and/or modify it
15256 + under the terms and conditions of the GNU General Public License,
15257 + version 2, as published by the Free Software Foundation.
15259 + This program is distributed in the hope it will be useful, but WITHOUT
15260 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15261 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15264 + You should have received a copy of the GNU General Public License along with
15265 + this program; if not, write to the Free Software Foundation, Inc.,
15266 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
15268 + The full GNU General Public License is included in this distribution in
15269 + the file called "COPYING".
15271 + Contact Information:
15272 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
15273 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
15275 +*******************************************************************************/
15277 +#ifndef _IXGBE_FCOE_H
15278 +#define _IXGBE_FCOE_H
15282 +#include <scsi/fc/fc_fs.h>
15283 +#include <scsi/fc/fc_fcoe.h>
15285 +/* shift bits within STAT for FCSTAT */
15286 +#define IXGBE_RXDADV_FCSTAT_SHIFT 4
15288 +/* ddp user buffer */
15289 +#define IXGBE_BUFFCNT_MAX 256 /* 8 bits bufcnt */
15290 +#define IXGBE_FCPTR_ALIGN 16
15291 +#define IXGBE_FCPTR_MAX (IXGBE_BUFFCNT_MAX * sizeof(dma_addr_t))
15292 +#define IXGBE_FCBUFF_4KB 0x0
15293 +#define IXGBE_FCBUFF_8KB 0x1
15294 +#define IXGBE_FCBUFF_16KB 0x2
15295 +#define IXGBE_FCBUFF_64KB 0x3
15296 +#define IXGBE_FCBUFF_MAX 65536 /* 64KB max */
15297 +#define IXGBE_FCBUFF_MIN 4096 /* 4KB min */
15298 +#define IXGBE_FCOE_DDP_MAX 512 /* 9 bits xid */
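
The four IXGBE_FCBUFF_* values above are a 2-bit encoding of the per-buffer
size used by a DDP context, bounded by IXGBE_FCBUFF_MIN and IXGBE_FCBUFF_MAX.
A minimal sketch of the mapping, purely to illustrate the encoding (the
driver derives the actual size from the scatterlist when a context is set
up):

	static inline u8 ixgbe_fcbuff_enc(unsigned int bufflen)
	{
		switch (bufflen) {
		case 4096:	return IXGBE_FCBUFF_4KB;
		case 8192:	return IXGBE_FCBUFF_8KB;
		case 16384:	return IXGBE_FCBUFF_16KB;
		case 65536:	return IXGBE_FCBUFF_64KB;
		default:	return IXGBE_FCBUFF_4KB; /* smallest legal size */
		}
	}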
15300 +/* Default traffic class to use for FCoE */
15301 +#define IXGBE_FCOE_DEFTC 3
15304 +#define IXGBE_FCERR_BADCRC 0x00100000
15305 +#define IXGBE_FCERR_EOFSOF 0x00200000
15306 +#define IXGBE_FCERR_NOFIRST 0x00300000
15307 +#define IXGBE_FCERR_OOOSEQ 0x00400000
15308 +#define IXGBE_FCERR_NODMA 0x00500000
15309 +#define IXGBE_FCERR_PKTLOST 0x00600000
15311 +struct ixgbe_fcoe_ddp {
15314 + unsigned int sgc;
15315 + struct scatterlist *sgl;
15320 +struct ixgbe_fcoe {
15325 + struct pci_pool *pool;
15326 + struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX];
15328 +#endif /* IXGBE_FCOE */
15330 +#endif /* _IXGBE_FCOE_H */
15331 diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe.h linux-2.6.22-50/drivers/net/ixgbe/ixgbe.h
15332 --- linux-2.6.22-40/drivers/net/ixgbe/ixgbe.h 1969-12-31 19:00:00.000000000 -0500
15333 +++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe.h 2010-08-25 17:56:26.000000000 -0400
15335 +/*******************************************************************************
15337 + Intel 10 Gigabit PCI Express Linux driver
15338 + Copyright(c) 1999 - 2010 Intel Corporation.
15340 + This program is free software; you can redistribute it and/or modify it
15341 + under the terms and conditions of the GNU General Public License,
15342 + version 2, as published by the Free Software Foundation.
15344 + This program is distributed in the hope it will be useful, but WITHOUT
15345 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15346 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15349 + You should have received a copy of the GNU General Public License along with
15350 + this program; if not, write to the Free Software Foundation, Inc.,
15351 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
15353 + The full GNU General Public License is included in this distribution in
15354 + the file called "COPYING".
15356 + Contact Information:
15357 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
15358 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
15360 +*******************************************************************************/
15365 +#ifndef IXGBE_NO_LRO
15366 +#include <net/tcp.h>
15369 +#include <linux/pci.h>
15370 +#include <linux/netdevice.h>
15371 +#include <linux/vmalloc.h>
15373 +#ifdef SIOCETHTOOL
15374 +#include <linux/ethtool.h>
15376 +#ifdef NETIF_F_HW_VLAN_TX
15377 +#include <linux/if_vlan.h>
15379 +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
15381 +#include <linux/dca.h>
15383 +#include "ixgbe_dcb.h"
15385 +#include "kcompat.h"
15387 +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
15388 +#define IXGBE_FCOE
15389 +#include "ixgbe_fcoe.h"
15390 +#endif /* CONFIG_FCOE or CONFIG_FCOE_MODULE */
15392 +#include "ixgbe_api.h"
15394 +#define PFX "ixgbe: "
15395 +#define DPRINTK(nlevel, klevel, fmt, args...) \
15396 + ((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
15397 + printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
15398 + __FUNCTION__ , ## args)))
15400 +/* TX/RX descriptor defines */
15401 +#define IXGBE_DEFAULT_TXD 512
15402 +#define IXGBE_MAX_TXD 4096
15403 +#define IXGBE_MIN_TXD 64
15405 +#define IXGBE_DEFAULT_RXD 512
15406 +#define IXGBE_MAX_RXD 4096
15407 +#define IXGBE_MIN_RXD 64
15410 +/* flow control */
15411 +#define IXGBE_DEFAULT_FCRTL 0x10000
15412 +#define IXGBE_MIN_FCRTL 0x40
15413 +#define IXGBE_MAX_FCRTL 0x7FF80
15414 +#define IXGBE_DEFAULT_FCRTH 0x20000
15415 +#define IXGBE_MIN_FCRTH 0x600
15416 +#define IXGBE_MAX_FCRTH 0x7FFF0
15417 +#define IXGBE_DEFAULT_FCPAUSE 0xFFFF
15418 +#define IXGBE_MIN_FCPAUSE 0
15419 +#define IXGBE_MAX_FCPAUSE 0xFFFF
15421 +/* Supported Rx Buffer Sizes */
15422 +#define IXGBE_RXBUFFER_512 512 /* Used for packet split */
15423 +#define IXGBE_RXBUFFER_2048 2048
15424 +#define IXGBE_RXBUFFER_4096 4096
15425 +#define IXGBE_RXBUFFER_8192 8192
15426 +#define IXGBE_MAX_RXBUFFER 16384 /* largest size for single descriptor */
15429 + * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
15430 + * reserve 2 more, and skb_shared_info adds an additional 384 bytes;
15431 + * this adds up to 512 bytes of extra data, meaning the smallest allocation
15432 + * we could have is 1K.
15433 + * i.e. RXBUFFER_512 --> size-1024 slab
15435 +#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_512
15437 +#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
15439 +/* Maximum string size for the PBA string from the eeprom */
15440 +#define IXGBE_PBA_LEN 20
15442 +/* How many Rx Buffers do we bundle into one write to the hardware? */
15443 +#define IXGBE_RX_BUFFER_WRITE 16 /* Must be power of 2 */
15445 +#define IXGBE_TX_FLAGS_CSUM (u32)(1)
15446 +#define IXGBE_TX_FLAGS_VLAN (u32)(1 << 1)
15447 +#define IXGBE_TX_FLAGS_TSO (u32)(1 << 2)
15448 +#define IXGBE_TX_FLAGS_IPV4 (u32)(1 << 3)
15449 +#define IXGBE_TX_FLAGS_FCOE (u32)(1 << 4)
15450 +#define IXGBE_TX_FLAGS_FSO (u32)(1 << 5)
15451 +#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
15452 +#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
15453 +#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29
15454 +#define IXGBE_TX_FLAGS_VLAN_SHIFT 16
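
The VLAN-related tx_flags fields pack the full 16-bit VLAN TCI into the upper
half of the word, which leaves the 802.1p priority reachable through the PRIO
mask. A worked example with a hypothetical tag of VID 100, priority 5:

	u32 tx_flags = 0;
	u16 tci = (5 << 13) | 100;	/* 0xa064: PCP 5, VID 100 */

	tx_flags |= tci << IXGBE_TX_FLAGS_VLAN_SHIFT;	/* 0xa0640000 */
	/* (tx_flags & IXGBE_TX_FLAGS_VLAN_PRIO_MASK) >>
	 *  IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT == 5 */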
15456 +#define IXGBE_MAX_RX_DESC_POLL 10
15458 +#define IXGBE_MAX_RSC_INT_RATE 162760
15460 +#define IXGBE_MAX_VF_MC_ENTRIES 30
15461 +#define IXGBE_MAX_VF_FUNCTIONS 64
15462 +#define IXGBE_MAX_VFTA_ENTRIES 128
15463 +#define MAX_EMULATION_MAC_ADDRS 16
15465 +#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
15467 + u32 current_counter = IXGBE_READ_REG(hw, reg); \
15468 + if (current_counter < last_counter) \
15469 + counter += 0x100000000LL; \
15470 + last_counter = current_counter; \
15471 + counter &= 0xFFFFFFFF00000000LL; \
15472 + counter |= current_counter; \
15475 +#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
15477 + u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \
15478 + u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \
15479 + u64 current_counter = (current_counter_msb << 32) | \
15480 + current_counter_lsb; \
15481 + if (current_counter < last_counter) \
15482 + counter += 0x1000000000LL; \
15483 + last_counter = current_counter; \
15484 + counter &= 0xFFFFFFF000000000LL; \
15485 + counter |= current_counter; \
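
Both UPDATE_VF_COUNTER macros implement the same wrap-around accounting: if
the hardware register has wrapped since the last read, credit one full
register period to the 64-bit software counter, then splice the new low bits
back in. Worked through for the 32-bit case:

	/* before: counter = 0x00000000fffffff0, last_counter = 0xfffffff0 */
	/* the register wraps; IXGBE_READ_REG() now returns 0x00000010    */
	/* current < last -> counter += 0x100000000  (= 0x1fffffff0)      */
	/* mask high bits -> counter  = 0x100000000                       */
	/* or in current  -> counter  = 0x100000010                       */

so the 64-bit total advances by 0x20 even though the register wrapped.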
15496 +struct vf_data_storage {
15497 + unsigned char vf_mac_addresses[ETH_ALEN];
15498 + u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
15499 + u16 num_vf_mc_hashes;
15500 + u16 default_vf_vlan_id;
15501 + u16 vlans_enabled;
15502 + bool clear_to_send;
15503 + struct vf_stats vfstats;
15504 + struct vf_stats last_vfstats;
15505 + struct vf_stats saved_rst_vfstats;
15507 + u16 pf_vlan; /* When set, guest VLAN config not allowed. */
15511 +#ifndef IXGBE_NO_LRO
15512 +#define IXGBE_LRO_MAX 32 /* Maximum number of LRO descriptors */
15513 +#define IXGBE_LRO_GLOBAL 10
15515 +struct ixgbe_lro_stats {
15521 +struct ixgbe_lro_desc {
15522 + struct hlist_node lro_node;
15523 + struct sk_buff *skb;
15541 +struct ixgbe_lro_list {
15542 + struct hlist_head active;
15543 + struct hlist_head free;
15545 + struct ixgbe_lro_stats stats;
15548 +#endif /* IXGBE_NO_LRO */
15549 +/* wrapper around a pointer to a socket buffer,
15550 + * so a DMA handle can be stored along with the buffer */
15551 +struct ixgbe_tx_buffer {
15552 + struct sk_buff *skb;
15554 + unsigned long time_stamp;
15556 + u16 next_to_watch;
15557 + unsigned int bytecount;
15559 + u8 mapped_as_page;
15562 +struct ixgbe_rx_buffer {
15563 + struct sk_buff *skb;
15565 + struct page *page;
15566 + dma_addr_t page_dma;
15567 + unsigned int page_offset;
15570 +struct ixgbe_queue_stats {
15575 +struct ixgbe_tx_queue_stats {
15576 + u64 restart_queue;
15580 +struct ixgbe_rx_queue_stats {
15583 + u64 non_eop_descs;
15584 + u64 alloc_rx_page_failed;
15585 + u64 alloc_rx_buff_failed;
15588 +enum ixbge_ring_state_t {
15589 + __IXGBE_TX_FDIR_INIT_DONE,
15590 + __IXGBE_TX_DETECT_HANG,
15591 + __IXGBE_RX_PS_ENABLED,
15592 + __IXGBE_RX_RSC_ENABLED,
15593 +#ifndef IXGBE_NO_LRO
15594 + __IXGBE_RX_LRO_ENABLED,
15598 +#define ring_is_ps_enabled(ring) \
15599 + test_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
15600 +#define set_ring_ps_enabled(ring) \
15601 + set_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
15602 +#define clear_ring_ps_enabled(ring) \
15603 + clear_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
15604 +#define check_for_tx_hang(ring) \
15605 + test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
15606 +#define set_check_for_tx_hang(ring) \
15607 + set_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
15608 +#define clear_check_for_tx_hang(ring) \
15609 + clear_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
15610 +#define ring_is_rsc_enabled(ring) \
15611 + test_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
15612 +#define set_ring_rsc_enabled(ring) \
15613 + set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
15614 +#define clear_ring_rsc_enabled(ring) \
15615 + clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
15616 +#ifndef IXGBE_NO_LRO
15617 +#define ring_is_lro_enabled(ring) \
15618 + test_bit(__IXGBE_RX_LRO_ENABLED, &(ring)->state)
15619 +#define set_ring_lro_enabled(ring) \
15620 + set_bit(__IXGBE_RX_LRO_ENABLED, &(ring)->state)
15621 +#define clear_ring_lro_enabled(ring) \
15622 + clear_bit(__IXGBE_RX_LRO_ENABLED, &(ring)->state)
15623 +#endif /* IXGBE_NO_LRO */
15624 +struct ixgbe_ring {
15625 + void *desc; /* descriptor ring memory */
15626 + struct device *dev; /* device for dma mapping */
15627 + struct net_device *netdev; /* netdev ring belongs to */
15629 + struct ixgbe_tx_buffer *tx_buffer_info;
15630 + struct ixgbe_rx_buffer *rx_buffer_info;
15632 + unsigned long state;
15633 + u8 atr_sample_rate;
15635 + u16 count; /* number of descriptors */
15638 + u16 next_to_clean;
15640 + u8 queue_index; /* needed for multiqueue queue management */
15641 + u8 reg_idx; /* holds the special value that gets the
15642 + * hardware register offset associated
15643 + * with this ring, which is different
15644 + * for DCB and RSS modes */
15646 + u16 work_limit; /* max work per interrupt */
15648 + u8 __iomem *tail;
15650 + unsigned int total_bytes;
15651 + unsigned int total_packets;
15653 + struct ixgbe_queue_stats stats;
15655 + struct ixgbe_tx_queue_stats tx_stats;
15656 + struct ixgbe_rx_queue_stats rx_stats;
15659 + unsigned int size; /* length in bytes */
15660 + dma_addr_t dma; /* phys. address of descriptor ring */
15661 + struct ixgbe_q_vector *q_vector; /* backpointer to host q_vector */
15662 +} ____cacheline_internodealigned_in_smp;
15664 +enum ixgbe_ring_f_enum {
15672 +#endif /* IXGBE_FCOE */
15673 + RING_F_ARRAY_SIZE /* must be last in enum set */
15676 +#define IXGBE_MAX_DCB_INDICES 8
15677 +#define IXGBE_MAX_RSS_INDICES 16
15678 +#define IXGBE_MAX_VMDQ_INDICES 64
15679 +#define IXGBE_MAX_FDIR_INDICES 64
15681 +#define IXGBE_MAX_FCOE_INDICES 8
15682 +#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
15683 +#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
15685 +#define MAX_RX_QUEUES IXGBE_MAX_FDIR_INDICES
15686 +#define MAX_TX_QUEUES IXGBE_MAX_FDIR_INDICES
15687 +#endif /* IXGBE_FCOE */
15688 +struct ixgbe_ring_feature {
15694 +#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
15696 +#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS
15698 +/* MAX_MSIX_Q_VECTORS of these are allocated,
15699 + * but we only use one per queue-specific vector.
15701 +struct ixgbe_q_vector {
15702 + struct ixgbe_adapter *adapter;
15703 + unsigned int v_idx; /* index of q_vector within array, also used for
15704 + * finding the bit in EICR and friends that
15705 + * represents the vector for this ring */
15706 + int cpu; /* cpu for DCA */
15707 +#ifdef CONFIG_IXGBE_NAPI
15708 + struct napi_struct napi;
15710 + DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */
15711 + DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */
15712 + u8 rxr_count; /* Rx ring count assigned to this vector */
15713 + u8 txr_count; /* Tx ring count assigned to this vector */
15717 + struct ixgbe_lro_list *lrolist; /* LRO list for queue vector*/
15718 + char name[IFNAMSIZ + 9];
15719 +#ifndef HAVE_NETDEV_NAPI_LIST
15720 + struct net_device poll_dev;
15722 +} ____cacheline_internodealigned_in_smp;
15725 +/* Helper macros to switch between ints/sec and what the register uses.
15726 + * And yes, it's the same math going both ways. The lowest value
15727 + * supported by all of the ixgbe hardware is 8.
15729 +#define EITR_INTS_PER_SEC_TO_REG(_eitr) \
15730 + ((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8)
15731 +#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG
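
The conversion really is its own inverse (up to truncation), since
reg = 10^9 / (rate * 256) and rate = 10^9 / (reg * 256). For example:

	EITR_INTS_PER_SEC_TO_REG(8000);	/* 1000000000 / (8000 * 256) = 488 */
	EITR_REG_TO_INTS_PER_SEC(488);	/* 1000000000 / (488 * 256) = 8004 */

The round trip lands near, not exactly on, 8000 because both divisions
truncate.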
15733 +#define IXGBE_DESC_UNUSED(R) \
15734 + ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
15735 + (R)->next_to_clean - (R)->next_to_use - 1)
15737 +#define IXGBE_RX_DESC_ADV(R, i) \
15738 + (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
15739 +#define IXGBE_TX_DESC_ADV(R, i) \
15740 + (&(((union ixgbe_adv_tx_desc *)((R)->desc))[i]))
15741 +#define IXGBE_TX_CTXTDESC_ADV(R, i) \
15742 + (&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))
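
IXGBE_DESC_UNUSED is the usual circular-ring free-slot count, keeping one
slot in reserve so next_to_use can never catch next_to_clean. Two worked
cases on a 512-entry ring:

	/* next_to_clean = 10, next_to_use = 4:
	 *	0 + 10 - 4 - 1 = 5 unused			*/
	/* next_to_clean = 4, next_to_use = 10 (clean wrapped behind use):
	 *	512 + 4 - 10 - 1 = 505 unused			*/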
15744 +#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128
15746 +/* use 3K as the baby jumbo frame size for FCoE */
15747 +#define IXGBE_FCOE_JUMBO_FRAME_SIZE 3072
15748 +#endif /* IXGBE_FCOE */
15750 +#ifdef IXGBE_TCP_TIMER
15751 +#define TCP_TIMER_VECTOR 1
15753 +#define TCP_TIMER_VECTOR 0
15755 +#define OTHER_VECTOR 1
15756 +#define NON_Q_VECTORS (OTHER_VECTOR + TCP_TIMER_VECTOR)
15758 +#define IXGBE_MAX_MSIX_VECTORS_82599 64
15759 +#define IXGBE_MAX_MSIX_Q_VECTORS_82599 64
15760 +#define IXGBE_MAX_MSIX_Q_VECTORS_82598 16
15761 +#define IXGBE_MAX_MSIX_VECTORS_82598 18
15764 + * Only for array allocations in our adapter struct. On 82598, there will be
15765 + * unused entries in the array, but that's not a big deal. Also, in 82599,
15766 + * we can actually assign 64 queue vectors based on our extended-extended
15767 + * interrupt registers. This is different than 82598, which is limited to 16.
15769 +#define MAX_MSIX_Q_VECTORS IXGBE_MAX_MSIX_Q_VECTORS_82599
15770 +#define MAX_MSIX_COUNT IXGBE_MAX_MSIX_VECTORS_82599
15772 +#define MIN_MSIX_Q_VECTORS 2
15773 +#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
15775 +/* board specific private data structure */
15776 +struct ixgbe_adapter {
15777 + struct timer_list watchdog_timer;
15778 +#ifdef NETIF_F_HW_VLAN_TX
15779 + struct vlan_group *vlgrp;
15782 + struct work_struct reset_task;
15783 + struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
15784 + struct ixgbe_dcb_config dcb_cfg;
15785 + struct ixgbe_dcb_config temp_dcb_cfg;
15786 + u8 dcb_set_bitmap;
15787 + enum ixgbe_fc_mode last_lfc_mode;
15789 + /* Interrupt Throttle Rate */
15790 + u32 rx_itr_setting;
15791 + u32 tx_itr_setting;
15796 + struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
15797 + int num_tx_queues;
15798 + u32 tx_timeout_count;
15800 + u64 restart_queue;
15804 + struct ixgbe_ring *rx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
15805 + int num_rx_queues;
15806 + int num_rx_pools; /* == num_rx_queues in 82598 */
15807 + int num_rx_queues_per_pool; /* 1 if 82598, can be many if 82599 */
15808 + u64 hw_csum_rx_error;
15809 + u64 hw_rx_no_dma_resources;
15810 + u64 non_eop_descs;
15811 +#ifndef CONFIG_IXGBE_NAPI
15812 + u64 rx_dropped_backlog; /* count drops from rx intr handler */
15814 + int num_msix_vectors;
15815 + int max_msix_q_vectors; /* true count of q_vectors for device */
15816 + struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE];
15817 + struct msix_entry *msix_entries;
15818 +#ifdef IXGBE_TCP_TIMER
15819 + irqreturn_t (*msix_handlers[MAX_MSIX_COUNT])(int irq, void *data,
15820 + struct pt_regs *regs);
15823 + u32 alloc_rx_page_failed;
15824 + u32 alloc_rx_buff_failed;
15826 + /* Some features need tri-state capability,
15827 + * thus the additional *_CAPABLE flags.
15830 +#define IXGBE_FLAG_RX_CSUM_ENABLED (u32)(1)
15831 +#define IXGBE_FLAG_MSI_CAPABLE (u32)(1 << 1)
15832 +#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 2)
15833 +#define IXGBE_FLAG_MSIX_CAPABLE (u32)(1 << 3)
15834 +#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 4)
15835 +#ifndef IXGBE_NO_LLI
15836 +#define IXGBE_FLAG_LLI_PUSH (u32)(1 << 5)
15838 +#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 6)
15839 +#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 7)
15840 +#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 8)
15841 +#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 9)
15842 +#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 10)
15843 +#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 11)
15844 +#define IXGBE_FLAG_DCA_ENABLED_DATA (u32)(1 << 12)
15845 +#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 13)
15846 +#define IXGBE_FLAG_DCB_ENABLED (u32)(1 << 14)
15847 +#define IXGBE_FLAG_DCB_CAPABLE (u32)(1 << 15)
15848 +#define IXGBE_FLAG_RSS_ENABLED (u32)(1 << 16)
15849 +#define IXGBE_FLAG_RSS_CAPABLE (u32)(1 << 17)
15850 +#define IXGBE_FLAG_VMDQ_CAPABLE (u32)(1 << 18)
15851 +#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 19)
15852 +#define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 20)
15853 +#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 22)
15854 +#define IXGBE_FLAG_IN_SFP_LINK_TASK (u32)(1 << 24)
15855 +#define IXGBE_FLAG_IN_SFP_MOD_TASK (u32)(1 << 25)
15856 +#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 26)
15857 +#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 27)
15859 +#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 28)
15860 +#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 29)
15861 +#endif /* IXGBE_FCOE */
15862 +#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 30)
15863 +#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 31)
15866 +#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1)
15867 +#define IXGBE_FLAG2_RSC_ENABLED (u32)(1 << 1)
15868 +#define IXGBE_FLAG2_SWLRO_ENABLED (u32)(1 << 2)
15869 +#define IXGBE_FLAG2_VMDQ_DEFAULT_OVERRIDE (u32)(1 << 3)
15870 +#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE (u32)(1 << 5)
15872 +/* default to trying for four seconds */
15873 +#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
15875 + /* OS defined structs */
15876 + struct net_device *netdev;
15877 + struct pci_dev *pdev;
15878 + struct net_device_stats net_stats;
15879 +#ifndef IXGBE_NO_LRO
15880 + struct ixgbe_lro_stats lro_stats;
15883 +#ifdef ETHTOOL_TEST
15885 + struct ixgbe_ring test_tx_ring;
15886 + struct ixgbe_ring test_rx_ring;
15889 + /* structs defined in ixgbe_hw.h */
15890 + struct ixgbe_hw hw;
15892 + struct ixgbe_hw_stats stats;
15893 +#ifndef IXGBE_NO_LLI
15898 + u32 lli_vlan_pri;
15899 +#endif /* IXGBE_NO_LLI */
15900 + /* Interrupt Throttle Rate */
15901 + u32 rx_eitr_param;
15902 + u32 tx_eitr_param;
15904 + unsigned long state;
15905 + u32 *config_space;
15907 + unsigned int tx_ring_count;
15908 + unsigned int rx_ring_count;
15912 + unsigned long link_check_timeout;
15914 + struct work_struct watchdog_task;
15915 + struct work_struct sfp_task;
15916 + struct timer_list sfp_timer;
15917 + struct work_struct multispeed_fiber_task;
15918 + struct work_struct sfp_config_module_task;
15920 + u32 fdir_pballoc;
15921 + u32 atr_sample_rate;
15922 + spinlock_t fdir_perfect_lock;
15923 + struct work_struct fdir_reinit_task;
15925 + struct ixgbe_fcoe fcoe;
15926 +#endif /* IXGBE_FCOE */
15927 + u64 rsc_total_count;
15928 + u64 rsc_total_flush;
15930 + u16 eeprom_version;
15931 + bool netdev_registered;
15932 + char lsc_int_name[IFNAMSIZ + 9];
15933 +#ifdef IXGBE_TCP_TIMER
15934 + char tcp_timer_name[IFNAMSIZ + 9];
15936 + struct work_struct check_overtemp_task;
15937 + u32 interrupt_event;
15939 + DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
15940 + unsigned int num_vfs;
15941 + bool repl_enable;
15942 + bool l2switch_enable;
15943 + struct vf_data_storage *vfinfo;
15948 +enum ixbge_state_t {
15950 + __IXGBE_RESETTING,
15952 + __IXGBE_SFP_MODULE_NOT_FOUND
15955 +struct ixgbe_rsc_cb {
15958 + bool delay_unmap;
15960 +#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
15962 +extern struct dcbnl_rtnl_ops dcbnl_ops;
15963 +extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
15964 + struct ixgbe_dcb_config *dst_dcb_cfg, int tc_max);
15966 +/* needed by ixgbe_main.c */
15967 +extern int ixgbe_validate_mac_addr(u8 *mc_addr);
15968 +extern void ixgbe_check_options(struct ixgbe_adapter *adapter);
15969 +extern void ixgbe_assign_netdev_ops(struct net_device *netdev);
15971 +/* needed by ixgbe_ethtool.c */
15972 +extern char ixgbe_driver_name[];
15973 +extern const char ixgbe_driver_version[];
15975 +extern int ixgbe_up(struct ixgbe_adapter *adapter);
15976 +extern void ixgbe_down(struct ixgbe_adapter *adapter);
15977 +extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
15978 +extern void ixgbe_reset(struct ixgbe_adapter *adapter);
15979 +extern void ixgbe_set_ethtool_ops(struct net_device *netdev);
15980 +extern int ixgbe_setup_rx_resources(struct ixgbe_ring *);
15981 +extern int ixgbe_setup_tx_resources(struct ixgbe_ring *);
15982 +extern void ixgbe_free_rx_resources(struct ixgbe_ring *);
15983 +extern void ixgbe_free_tx_resources(struct ixgbe_ring *);
15984 +extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
15985 +extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
15986 +extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
15987 +extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
15988 +extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
15989 +extern bool ixgbe_is_ixgbe(struct pci_dev *pcidev);
15990 +extern netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *,
15991 + struct ixgbe_adapter *,
15992 + struct ixgbe_ring *);
15993 +extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
15994 + struct ixgbe_tx_buffer *);
15995 +extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
15996 +extern void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, struct ixgbe_ring *);
15997 +extern void clear_rscctl(struct ixgbe_adapter *adapter, struct ixgbe_ring *);
15998 +extern void ixgbe_set_rx_mode(struct net_device *netdev);
15999 +extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
16000 +extern void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector);
16001 +#ifdef ETHTOOL_OPS_COMPAT
16002 +extern int ethtool_ioctl(struct ifreq *ifr);
16004 +extern int ixgbe_dcb_netlink_register(void);
16005 +extern int ixgbe_dcb_netlink_unregister(void);
16009 +extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
16010 +extern int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
16011 + u32 tx_flags, u8 *hdr_len);
16012 +extern void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter);
16013 +extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
16014 + union ixgbe_adv_rx_desc *rx_desc,
16015 + struct sk_buff *skb);
16016 +extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
16017 + struct scatterlist *sgl, unsigned int sgc);
16018 +extern int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
16019 +#ifdef HAVE_NETDEV_OPS_FCOE_ENABLE
16020 +extern int ixgbe_fcoe_enable(struct net_device *netdev);
16021 +extern int ixgbe_fcoe_disable(struct net_device *netdev);
16022 +#endif /* HAVE_NETDEV_OPS_FCOE_ENABLE */
16024 +#ifdef HAVE_DCBNL_OPS_GETAPP
16025 +extern u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter);
16026 +extern u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
16027 +#endif /* HAVE_DCBNL_OPS_GETAPP */
16028 +#endif /* CONFIG_DCB */
16029 +#ifdef HAVE_NETDEV_OPS_FCOE_GETWWN
16030 +extern int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
16032 +#endif /* IXGBE_FCOE */
16035 +#endif /* _IXGBE_H_ */
16036 diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_main.c linux-2.6.22-50/drivers/net/ixgbe/ixgbe_main.c
16037 --- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_main.c 1969-12-31 19:00:00.000000000 -0500
16038 +++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_main.c 2010-08-25 17:56:26.000000000 -0400
16040 +/*******************************************************************************
16042 + Intel 10 Gigabit PCI Express Linux driver
16043 + Copyright(c) 1999 - 2010 Intel Corporation.
16045 + This program is free software; you can redistribute it and/or modify it
16046 + under the terms and conditions of the GNU General Public License,
16047 + version 2, as published by the Free Software Foundation.
16049 + This program is distributed in the hope it will be useful, but WITHOUT
16050 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16051 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16054 + You should have received a copy of the GNU General Public License along with
16055 + this program; if not, write to the Free Software Foundation, Inc.,
16056 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16058 + The full GNU General Public License is included in this distribution in
16059 + the file called "COPYING".
16061 + Contact Information:
16062 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
16063 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
16065 +*******************************************************************************/
16068 +/******************************************************************************
16069 + Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
16070 +******************************************************************************/
16071 +#include <linux/types.h>
16072 +#include <linux/module.h>
16073 +#include <linux/pci.h>
16074 +#include <linux/netdevice.h>
16075 +#include <linux/vmalloc.h>
16076 +#include <linux/string.h>
16077 +#include <linux/in.h>
16078 +#include <linux/ip.h>
16079 +#include <linux/tcp.h>
16080 +#include <linux/pkt_sched.h>
16081 +#include <linux/ipv6.h>
16082 +#ifdef NETIF_F_TSO
16083 +#include <net/checksum.h>
16084 +#ifdef NETIF_F_TSO6
16085 +#include <net/ip6_checksum.h>
16088 +#ifdef SIOCETHTOOL
16089 +#include <linux/ethtool.h>
16091 +#ifdef NETIF_F_HW_VLAN_TX
16092 +#include <linux/if_vlan.h>
16096 +#include "ixgbe.h"
16098 +#include "ixgbe_sriov.h"
16100 +char ixgbe_driver_name[] = "ixgbe";
16101 +static const char ixgbe_driver_string[] =
16102 + "Intel(R) 10 Gigabit PCI Express Network Driver";
16103 +#define DRV_HW_PERF
16105 +#ifndef CONFIG_IXGBE_NAPI
16106 +#define DRIVERNAPI
16108 +#define DRIVERNAPI "-NAPI"
16113 +#define DRV_VERSION "2.1.4" DRIVERNAPI DRV_HW_PERF FPGA
16114 +const char ixgbe_driver_version[] = DRV_VERSION;
16115 +static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation.";
16116 +/* ixgbe_pci_tbl - PCI Device ID Table
16118 + * Wildcard entries (PCI_ANY_ID) should come last
16119 + * Last entry must be all 0s
16121 + * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
16122 + * Class, Class Mask, private data (not used) }
16124 +static struct pci_device_id ixgbe_pci_tbl[] = {
16125 + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598)},
16126 + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598_BX)},
16127 + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT)},
16128 + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT)},
16129 + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598AT)},
16130 + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598AT2)},
16131 + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598EB_CX4)},
16132 + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT)},
16133 + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT)},
16134 + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM)},
16135 + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598EB_XF_LR)},
16136 + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM)},
16137 + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_KX4)},
16138 + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_XAUI_LOM)},
16139 + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_KR)},
16140 + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_SFP)},
16141 + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_SFP_EM)},
16142 + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ)},
16143 + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_T3_LOM)},
16144 + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_CX4)},
16145 + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE)},
16146 + /* required last entry */
16149 +MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);
16151 +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
16152 +static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
16154 +static struct notifier_block dca_notifier = {
16155 + .notifier_call = ixgbe_notify_dca,
16161 +MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
16162 +MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
16163 +MODULE_LICENSE("GPL");
16164 +MODULE_VERSION(DRV_VERSION);
16166 +#define DEFAULT_DEBUG_LEVEL_SHIFT 3
16168 +static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
16170 + struct ixgbe_hw *hw = &adapter->hw;
16175 +#ifdef CONFIG_PCI_IOV
16176 + /* disable iov and allow time for transactions to clear */
16177 + pci_disable_sriov(adapter->pdev);
16180 + /* turn off device IOV mode */
16181 + gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
16182 + gcr &= ~(IXGBE_GCR_EXT_SRIOV);
16183 + IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr);
16184 + gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
16185 + gpie &= ~IXGBE_GPIE_VTMODE_MASK;
16186 + IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
16188 + /* set default pool back to 0 */
16189 + vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
16190 + vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
16191 + IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
16193 + /* take a breather then clean up driver data */
16195 + if (adapter->vfinfo)
16196 + kfree(adapter->vfinfo);
16197 + adapter->vfinfo = NULL;
16199 + adapter->num_vfs = 0;
16200 + adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
16203 +static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
16207 + /* Let firmware take over control of h/w */
16208 + ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
16209 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
16210 + ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
16213 +static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
16217 + /* Let firmware know the driver has taken over */
16218 + ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
16219 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
16220 + ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
16224 + * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
16225 + * @adapter: pointer to adapter struct
16226 + * @direction: 0 for Rx, 1 for Tx, -1 for other causes
16227 + * @queue: queue to map the corresponding interrupt to
16228 + * @msix_vector: the vector to map to the corresponding queue
16231 +static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
16232 + u8 queue, u8 msix_vector)
16235 + struct ixgbe_hw *hw = &adapter->hw;
16236 + switch (hw->mac.type) {
16237 + case ixgbe_mac_82598EB:
16238 + msix_vector |= IXGBE_IVAR_ALLOC_VAL;
16239 + if (direction == -1)
16241 + index = (((direction * 64) + queue) >> 2) & 0x1F;
16242 + ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
16243 + ivar &= ~(0xFF << (8 * (queue & 0x3)));
16244 + ivar |= (msix_vector << (8 * (queue & 0x3)));
16245 + IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
16247 + case ixgbe_mac_82599EB:
16248 + if (direction == -1) {
16249 + /* other causes */
16250 + msix_vector |= IXGBE_IVAR_ALLOC_VAL;
16251 + index = ((queue & 1) * 8);
16252 + ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
16253 + ivar &= ~(0xFF << index);
16254 + ivar |= (msix_vector << index);
16255 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
16258 + /* tx or rx causes */
16259 + msix_vector |= IXGBE_IVAR_ALLOC_VAL;
16260 + index = ((16 * (queue & 1)) + (8 * direction));
16261 + ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
16262 + ivar &= ~(0xFF << index);
16263 + ivar |= (msix_vector << index);
16264 + IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
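
The IVAR indexing above packs one 8-bit vector entry per interrupt cause. On
82598, Rx queue 5 mapped to MSI-X vector 3 works out as follows (assuming
IXGBE_IVAR_ALLOC_VAL is the "entry valid" bit; its value is not shown in this
hunk):

	/* direction = 0 (Rx), queue = 5, msix_vector = 3            */
	/* index     = (((0 * 64) + 5) >> 2) & 0x1f = 1  ->  IVAR(1) */
	/* byte lane = queue & 0x3 = 1                               */
	/* IVAR(1) byte 1 <- 3 | IXGBE_IVAR_ALLOC_VAL                */

On 82599 the Rx and Tx causes for a queue pair share IVAR(queue >> 1), with
bit offset (16 * (queue & 1)) + (8 * direction) selecting the byte.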
16272 +static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
16277 + switch (adapter->hw.mac.type) {
16278 + case ixgbe_mac_82598EB:
16279 + mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
16280 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
16282 + case ixgbe_mac_82599EB:
16283 + mask = (qmask & 0xFFFFFFFF);
16284 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
16285 + mask = (qmask >> 32);
16286 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
16293 +void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
16294 + struct ixgbe_tx_buffer *tx_buffer_info)
16296 + if (tx_buffer_info->dma) {
16297 + if (tx_buffer_info->mapped_as_page)
16298 + dma_unmap_page(tx_ring->dev,
16299 + tx_buffer_info->dma,
16300 + tx_buffer_info->length,
16303 + dma_unmap_single(tx_ring->dev,
16304 + tx_buffer_info->dma,
16305 + tx_buffer_info->length,
16307 + tx_buffer_info->dma = 0;
16309 + if (tx_buffer_info->skb) {
16310 + dev_kfree_skb_any(tx_buffer_info->skb);
16311 + tx_buffer_info->skb = NULL;
16313 + tx_buffer_info->time_stamp = 0;
16314 + /* tx_buffer_info must be completely set up in the transmit path */
16318 + * ixgbe_tx_xon_state - check the tx ring xon state
16319 + * @adapter: the ixgbe adapter
16320 + * @tx_ring: the corresponding tx_ring
16322 + * If not in DCB mode, checks TFCS.TXOFF, otherwise, find out the
16323 + * corresponding TC of this tx_ring when checking TFCS.
16325 + * Returns : true if in xon state (currently not paused)
16327 +static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter,
16328 + struct ixgbe_ring *tx_ring)
16330 + u32 txoff = IXGBE_TFCS_TXOFF;
16332 + if ((adapter->flags & IXGBE_FLAG_DCB_CAPABLE) &&
16333 + adapter->dcb_cfg.pfc_mode_enable) {
16335 + int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
16336 + u8 reg_idx = tx_ring->reg_idx;
16338 + if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
16339 + tc = reg_idx >> 2;
16340 + txoff = IXGBE_TFCS_TXOFF0;
16341 + } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
16343 + txoff = IXGBE_TFCS_TXOFF;
16344 + if (dcb_i == 8) {
16346 + tc = reg_idx >> 5;
16347 + if (tc == 2) /* TC2, TC3 */
16348 + tc += (reg_idx - 64) >> 4;
16349 + else if (tc == 3) /* TC4, TC5, TC6, TC7 */
16350 + tc += 1 + ((reg_idx - 96) >> 3);
16351 + } else if (dcb_i == 4) {
16353 + tc = reg_idx >> 6;
16355 + tc += (reg_idx - 64) >> 5;
16356 + if (tc == 2) /* TC2, TC3 */
16357 + tc += (reg_idx - 96) >> 4;
16363 + return IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & txoff;
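
The reg_idx-to-TC arithmetic above undoes the 82599 DCB queue layout, in
which the lower-numbered TCs own wider queue ranges. For the 8-TC case a few
sample indices work out as:

	/* reg_idx  64:  64 >> 5 = 2, tc == 2 -> tc += ( 64 - 64) >> 4 = 0 -> TC2 */
	/* reg_idx  80:  80 >> 5 = 2, tc == 2 -> tc += ( 80 - 64) >> 4 = 1 -> TC3 */
	/* reg_idx 104: 104 >> 5 = 3 -> tc = 3 + 1 + ((104 - 96) >> 3) = 5 -> TC5 */

i.e. TC0 and TC1 own 32 queues each, TC2 and TC3 own 16 each, and TC4-TC7
own 8 each.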
16366 +static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
16367 + struct ixgbe_ring *tx_ring,
16368 + unsigned int eop)
16370 + struct ixgbe_hw *hw = &adapter->hw;
16373 + /* Detect a transmit hang in hardware; this serializes the
16374 + * check with the clearing of time_stamp and movement of eop */
16375 + head = IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx));
16376 + tail = IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx));
16377 + clear_check_for_tx_hang(tx_ring);
16378 + if ((head != tail) &&
16379 + tx_ring->tx_buffer_info[eop].time_stamp &&
16380 + time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
16381 + ixgbe_tx_xon_state(adapter, tx_ring)) {
16382 + /* detected Tx unit hang */
16383 + union ixgbe_adv_tx_desc *tx_desc;
16384 + tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
16385 + DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
16386 + " Tx Queue <%d>\n"
16387 + " TDH, TDT <%x>, <%x>\n"
16388 + " next_to_use <%x>\n"
16389 + " next_to_clean <%x>\n",
16390 + tx_ring->queue_index, head, tail,
16391 + tx_ring->next_to_use, eop);
16392 + DPRINTK(DRV, ERR, "tx_buffer_info[next_to_clean]\n"
16393 + " time_stamp <%lx>\n"
16394 + " jiffies <%lx>\n",
16395 + tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
16397 + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
16405 +#define IXGBE_MAX_TXD_PWR 14
16406 +#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
16408 +/* Tx Descriptors needed, worst case */
16409 +#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
16410 + (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
16411 +#ifdef MAX_SKB_FRAGS
16412 +#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
16414 +#define DESC_NEEDED 4
16417 +static void ixgbe_tx_timeout(struct net_device *netdev);
16420 + * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
16421 + * @q_vector: structure containing interrupt and ring information
16422 + * @tx_ring: tx ring to clean
16424 +static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
16425 + struct ixgbe_ring *tx_ring)
16427 + struct ixgbe_adapter *adapter = q_vector->adapter;
16428 + union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
16429 + struct ixgbe_tx_buffer *tx_buffer_info;
16430 + unsigned int total_bytes = 0, total_packets = 0;
16431 + u16 i, eop, count = 0;
16433 + i = tx_ring->next_to_clean;
16434 + eop = tx_ring->tx_buffer_info[i].next_to_watch;
16435 + eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
16437 + while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
16438 + (count < tx_ring->work_limit)) {
16439 + bool cleaned = false;
16440 + rmb(); /* read buffer_info after eop_desc */
16441 + for ( ; !cleaned; count++) {
16442 + tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
16443 + tx_buffer_info = &tx_ring->tx_buffer_info[i];
16445 + tx_desc->wb.status = 0;
16446 + cleaned = (i == eop);
16449 + if (i == tx_ring->count)
16452 + if (cleaned && tx_buffer_info->skb) {
16453 + total_bytes += tx_buffer_info->bytecount;
16454 + total_packets += tx_buffer_info->gso_segs;
16457 + ixgbe_unmap_and_free_tx_resource(tx_ring,
16461 + eop = tx_ring->tx_buffer_info[i].next_to_watch;
16462 + eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
16465 + tx_ring->next_to_clean = i;
16466 + tx_ring->total_bytes += total_bytes;
16467 + tx_ring->total_packets += total_packets;
16468 + tx_ring->stats.packets += total_packets;
16469 + tx_ring->stats.bytes += total_bytes;
16472 + if (check_for_tx_hang(tx_ring) &&
16473 + ixgbe_check_tx_hang(adapter, tx_ring, i)) {
16474 + /* schedule immediate reset if we believe we hung */
16475 + DPRINTK(PROBE, INFO,
16476 + "tx hang %d detected, resetting adapter\n",
16477 + adapter->tx_timeout_count + 1);
16478 + ixgbe_tx_timeout(tx_ring->netdev);
16480 + /* the adapter is about to reset, no point in enabling stuff */
16484 +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
16485 + if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
16486 + (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
16487 + /* Make sure that anybody stopping the queue after this
16488 + * sees the new next_to_clean.
16492 + if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index)
16493 + && !test_bit(__IXGBE_DOWN, &adapter->state)) {
16494 + netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index);
16495 + ++tx_ring->tx_stats.restart_queue;
16498 + if (netif_queue_stopped(tx_ring->netdev) &&
16499 + !test_bit(__IXGBE_DOWN, &adapter->state)) {
16500 + netif_wake_queue(tx_ring->netdev);
16501 + ++tx_ring->tx_stats.restart_queue;
16506 +#ifndef CONFIG_IXGBE_NAPI
16507 + /* re-arm the interrupt */
16508 + if ((count >= tx_ring->work_limit) &&
16509 + (!test_bit(__IXGBE_DOWN, &adapter->state)))
16510 + ixgbe_irq_rearm_queues(adapter, ((u64)1 << q_vector->v_idx));
16513 + return (count < tx_ring->work_limit);
16516 +static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
16517 + struct ixgbe_ring *rx_ring,
16520 + struct ixgbe_hw *hw = &adapter->hw;
16522 + u8 reg_idx = rx_ring->reg_idx;
16524 + rxctrl = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(reg_idx));
16525 + switch (hw->mac.type) {
16526 + case ixgbe_mac_82598EB:
16527 + rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
16528 + rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
16530 + case ixgbe_mac_82599EB:
16531 + rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
16532 + rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
16533 + IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
16538 + rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
16539 + rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
16540 + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED_DATA) {
16541 + /* just do the header data when in Packet Split mode */
16542 + if (ring_is_ps_enabled(rx_ring))
16543 + rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
16545 + rxctrl |= IXGBE_DCA_RXCTRL_DATA_DCA_EN;
16547 + rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
16548 + rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
16549 + IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
16550 + IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
16553 +static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
16554 + struct ixgbe_ring *tx_ring,
16557 + struct ixgbe_hw *hw = &adapter->hw;
16559 + u8 reg_idx = tx_ring->reg_idx;
16561 + switch (hw->mac.type) {
16562 + case ixgbe_mac_82598EB:
16563 + txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(reg_idx));
16564 + txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
16565 + txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
16566 + txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
16567 + txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
16568 + IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(reg_idx), txctrl);
16570 + case ixgbe_mac_82599EB:
16571 + txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx));
16572 + txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
16573 + txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
16574 + IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
16575 + txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
16576 + txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
16577 + IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx), txctrl);
16584 +static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
16586 + struct ixgbe_adapter *adapter = q_vector->adapter;
16587 + int cpu = get_cpu();
16591 + if (q_vector->cpu == cpu)
16592 + goto out_no_update;
16594 + r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
16595 + for (i = 0; i < q_vector->txr_count; i++) {
16596 + ixgbe_update_tx_dca(adapter, adapter->tx_ring[r_idx], cpu);
16597 + r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
16601 + r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
16602 + for (i = 0; i < q_vector->rxr_count; i++) {
16603 + ixgbe_update_rx_dca(adapter, adapter->rx_ring[r_idx], cpu);
16604 + r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
16608 + q_vector->cpu = cpu;
16613 +static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
16615 + int num_q_vectors;
16618 + if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
16621 + /* always use CB2 mode, difference is masked in the CB driver */
16622 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
16624 + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
16625 + num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
16627 + num_q_vectors = 1;
16629 + for (i = 0; i < num_q_vectors; i++) {
16630 + adapter->q_vector[i]->cpu = -1;
16631 + ixgbe_update_dca(adapter->q_vector[i]);
16635 +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
16636 +static int __ixgbe_notify_dca(struct device *dev, void *data)
16638 + struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
16639 + unsigned long event = *(unsigned long *)data;
16641 + if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
16645 + case DCA_PROVIDER_ADD:
16646 + /* if we're already enabled, don't do it again */
16647 + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
16649 + if (dca_add_requester(dev) == 0) {
16650 + adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
16651 + ixgbe_setup_dca(adapter);
16654 + /* Fall Through since DCA is disabled. */
16655 + case DCA_PROVIDER_REMOVE:
16656 + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
16657 + dca_remove_requester(dev);
16658 + adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
16659 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
16667 +#endif /* CONFIG_DCA or CONFIG_DCA_MODULE */
16669 + * ixgbe_receive_skb - Send a completed packet up the stack
16670 + * @q_vector: structure containing interrupt and ring information
16671 + * @skb: packet to send up
16672 + * @vlan_tag: vlan tag for packet
16674 +static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
16675 + struct sk_buff *skb, u16 vlan_tag)
16677 + struct ixgbe_adapter *adapter = q_vector->adapter;
16678 + int ret = NET_RX_SUCCESS;
16680 +#ifdef CONFIG_IXGBE_NAPI
16681 + if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
16682 +#ifdef NETIF_F_HW_VLAN_TX
16683 + if (vlan_tag & VLAN_VID_MASK) {
16684 + if (adapter->vlgrp)
16685 + vlan_gro_receive(&q_vector->napi,
16689 + dev_kfree_skb_any(skb);
16691 + napi_gro_receive(&q_vector->napi, skb);
16694 + napi_gro_receive(&q_vector->napi, skb);
16697 +#endif /* CONFIG_IXGBE_NAPI */
16699 +#ifdef NETIF_F_HW_VLAN_TX
16700 + if (vlan_tag & VLAN_VID_MASK) {
16701 + if (adapter->vlgrp)
16702 + ret = vlan_hwaccel_rx(skb,
16706 + dev_kfree_skb_any(skb);
16708 + ret = netif_rx(skb);
16711 + ret = netif_rx(skb);
16713 +#ifndef CONFIG_IXGBE_NAPI
16714 + if (ret == NET_RX_DROP)
16715 + adapter->rx_dropped_backlog++;
16717 +#ifdef CONFIG_IXGBE_NAPI
16719 +#endif /* CONFIG_IXGBE_NAPI */
16723 + * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
16724 + * @adapter: address of board private structure
16725 + * @rx_desc: current Rx descriptor being processed
16726 + * @skb: skb currently being received and modified
16728 +static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
16729 + union ixgbe_adv_rx_desc *rx_desc,
16730 + struct sk_buff *skb)
16732 + u32 status_err = le32_to_cpu(rx_desc->wb.upper.status_error);
16733 + skb->ip_summed = CHECKSUM_NONE;
16735 + /* Rx csum disabled */
16736 + if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
16739 + /* if IP and error */
16740 + if ((status_err & IXGBE_RXD_STAT_IPCS) &&
16741 + (status_err & IXGBE_RXDADV_ERR_IPE)) {
16742 + adapter->hw_csum_rx_error++;
16746 + if (!(status_err & IXGBE_RXD_STAT_L4CS))
16749 + if (status_err & IXGBE_RXDADV_ERR_TCPE) {
16750 + u16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
16753 + * 82599 errata, UDP frames with a 0 checksum can be marked as
16754 + * checksum errors.
16756 + if ((pkt_info & IXGBE_RXDADV_PKTTYPE_UDP) &&
16757 + (adapter->hw.mac.type == ixgbe_mac_82599EB))
16760 + adapter->hw_csum_rx_error++;
16764 + /* It must be a TCP or UDP packet with a valid checksum */
16765 + skb->ip_summed = CHECKSUM_UNNECESSARY;
16768 +static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
16771 + * Force memory writes to complete before letting h/w
16772 + * know there are new descriptors to fetch. (Only
16773 + * applicable for weak-ordered memory model archs,
16774 + * such as IA-64).
16777 + writel(val, rx_ring->tail);
16781 + * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
16782 + * @rx_ring: ring to place buffers on
16783 + * @cleaned_count: number of buffers to replace
16785 +void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
16787 + union ixgbe_adv_rx_desc *rx_desc;
16788 + struct ixgbe_rx_buffer *bi;
16789 + struct sk_buff *skb;
16790 + u16 i = rx_ring->next_to_use;
16792 + /* do nothing if no valid netdev defined */
16793 + if (!rx_ring->netdev)
16796 + while (cleaned_count--) {
16797 + rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
16798 + bi = &rx_ring->rx_buffer_info[i];
16802 + skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
16803 + rx_ring->rx_buf_len);
16805 + rx_ring->rx_stats.alloc_rx_buff_failed++;
16808 + /* initialize queue mapping */
16809 + skb_record_rx_queue(skb, rx_ring->queue_index);
16814 + bi->dma = dma_map_single(rx_ring->dev,
16816 + rx_ring->rx_buf_len,
16817 + DMA_FROM_DEVICE);
16818 + if (dma_mapping_error(rx_ring->dev, bi->dma)) {
16819 + rx_ring->rx_stats.alloc_rx_buff_failed++;
16825 + if (ring_is_ps_enabled(rx_ring)) {
16827 + bi->page = netdev_alloc_page(rx_ring->netdev);
16829 + rx_ring->rx_stats.alloc_rx_page_failed++;
16834 + if (!bi->page_dma) {
16835 + /* use a half page if we're re-using */
16836 + bi->page_offset ^= PAGE_SIZE / 2;
16837 + bi->page_dma = dma_map_page(rx_ring->dev,
16841 + DMA_FROM_DEVICE);
16842 + if (dma_mapping_error(rx_ring->dev,
16844 + rx_ring->rx_stats.alloc_rx_page_failed++;
16845 + bi->page_dma = 0;
16850 + /* Refresh the desc even if buffer_addrs didn't change
16851 + * because each write-back erases this info. */
16852 + rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
16853 + rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
16855 + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
16856 + rx_desc->read.hdr_addr = 0;
16860 + if (i == rx_ring->count)
16865 + if (rx_ring->next_to_use != i) {
16866 + rx_ring->next_to_use = i;
16867 + ixgbe_release_rx_desc(rx_ring, i);
16871 +static inline u16 ixgbe_get_hlen(union ixgbe_adv_rx_desc *rx_desc)
16873 + /* HW will not DMA in data larger than the given buffer, even if it
16874 + * parses the (NFS, of course) header to be larger. In that case, it
16875 + * fills the header buffer and spills the rest into the page.
16877 + u16 hdr_info = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info);
16878 + u16 hlen = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
16879 + IXGBE_RXDADV_HDRBUFLEN_SHIFT;
16880 + if (hlen > IXGBE_RX_HDR_SIZE)
16881 + hlen = IXGBE_RX_HDR_SIZE;
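
ixgbe_get_hlen simply pulls the header-buffer length bit-field out of
hdr_info and clamps it. Assuming the length field sits at bit position
IXGBE_RXDADV_HDRBUFLEN_SHIFT with a shift of 5 (the define's value is not
shown in this hunk), a 64-byte header would look like:

	/* hdr_info = 64 << 5 = 0x800                                 */
	/* hlen     = (0x800 & IXGBE_RXDADV_HDRBUFLEN_MASK) >> 5 = 64 */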
16885 + * ixgbe_transform_rsc_queue - change rsc queue into a full packet
16886 + * @skb: pointer to the last skb in the rsc queue
16888 + * This function changes a queue full of hw rsc buffers into a completed
16889 + * packet. It uses the ->prev pointers to find the first packet and then
16890 + * turns it into the frag list owner.
16892 +static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb)
16894 + unsigned int frag_list_size = 0;
16895 + unsigned int skb_cnt = 1;
16897 + while (skb->prev) {
16898 + struct sk_buff *prev = skb->prev;
16899 + frag_list_size += skb->len;
16900 + skb->prev = NULL;
16905 + skb_shinfo(skb)->frag_list = skb->next;
16906 + skb->next = NULL;
16907 + skb->len += frag_list_size;
16908 + skb->data_len += frag_list_size;
16909 + skb->truesize += frag_list_size;
16910 + IXGBE_RSC_CB(skb)->skb_cnt = skb_cnt;
16915 +#ifndef IXGBE_NO_LRO
16917 + * ixgbe_can_lro - returns true if packet is TCP/IPV4 and LRO is enabled
16918 + * @rx_ring: structure containing ring specific data
16919 + * @rx_desc: pointer to the rx descriptor
16922 +static inline bool ixgbe_can_lro(struct ixgbe_ring *rx_ring,
16923 + union ixgbe_adv_rx_desc *rx_desc)
16925 + u16 pkt_info = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info);
16927 + return (ring_is_lro_enabled(rx_ring) &&
16928 + !(rx_ring->netdev->flags & IFF_PROMISC) &&
16929 + (pkt_info & IXGBE_RXDADV_PKTTYPE_IPV4) &&
16930 + (pkt_info & IXGBE_RXDADV_PKTTYPE_TCP));
16934 + * ixgbe_lro_flush - Indicate packets to upper layer.
16936 + * Updates the IP and TCP headers of the head skb if more than one
16937 + * skb is chained, then indicates the packets to the upper layer.
16939 +static void ixgbe_lro_flush(struct ixgbe_q_vector *q_vector,
16940 + struct ixgbe_lro_desc *lrod)
16942 + struct ixgbe_lro_list *lrolist = q_vector->lrolist;
16943 + struct iphdr *iph;
16944 + struct tcphdr *th;
16945 + struct sk_buff *skb;
16948 + hlist_del(&lrod->lro_node);
16949 + lrolist->active_cnt--;
16952 + lrod->skb = NULL;
16954 + if (lrod->append_cnt) {
16955 + /* take the lro queue and convert to skb format */
16956 + skb = ixgbe_transform_rsc_queue(skb);
16958 + /* incorporate ip header and re-calculate checksum */
16959 + iph = (struct iphdr *)skb->data;
16960 + iph->tot_len = htons(skb->len);
16962 + iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
16964 + /* incorporate the latest ack into the tcp header */
16965 + th = (struct tcphdr *) ((char *)skb->data + sizeof(*iph));
16966 + th->ack_seq = lrod->ack_seq;
16967 + th->psh = lrod->psh;
16968 + th->window = lrod->window;
16971 + /* incorporate latest timestamp into the tcp header */
16972 + if (lrod->opt_bytes) {
16973 + ts_ptr = (u32 *)(th + 1);
16974 + ts_ptr[1] = htonl(lrod->tsval);
16975 + ts_ptr[2] = lrod->tsecr;
16979 +#ifdef NETIF_F_TSO
16980 + skb_shinfo(skb)->gso_size = lrod->mss;
16982 + ixgbe_receive_skb(q_vector, skb, lrod->vlan_tag);
16983 + lrolist->stats.flushed++;
16986 + hlist_add_head(&lrod->lro_node, &lrolist->free);
16989 +static void ixgbe_lro_flush_all(struct ixgbe_q_vector *q_vector)
16991 + struct ixgbe_lro_desc *lrod;
16992 + struct hlist_node *node, *node2;
16993 + struct ixgbe_lro_list *lrolist = q_vector->lrolist;
16995 + hlist_for_each_entry_safe(lrod, node, node2, &lrolist->active, lro_node)
16996 + ixgbe_lro_flush(q_vector, lrod);
17000 + * ixgbe_lro_header_ok - check packet headers for LRO eligibility.
17002 +static u16 ixgbe_lro_header_ok(struct sk_buff *new_skb, struct iphdr *iph,
17003 + struct tcphdr *th)
17005 + int opt_bytes, tcp_data_len;
17006 + u32 *ts_ptr = NULL;
17008 + /* If we see CE codepoint in IP header, packet is not mergeable */
17009 + if (INET_ECN_is_ce(ipv4_get_dsfield(iph)))
17012 + /* ensure there are no options */
17013 + if ((iph->ihl << 2) != sizeof(*iph))
17016 + /* .. and the packet is not fragmented */
17017 + if (iph->frag_off & htons(IP_MF|IP_OFFSET))
17020 + /* ensure no bits set besides ack or psh */
17021 + if (th->fin || th->syn || th->rst ||
17022 + th->urg || th->ece || th->cwr || !th->ack)
17025 + /* ensure that the checksum is valid */
17026 + if (new_skb->ip_summed != CHECKSUM_UNNECESSARY)
17030 + * check for timestamps. Since the only option we handle is timestamps,
17031 + * we only have to handle the simple case of aligned timestamps
17034 + opt_bytes = (th->doff << 2) - sizeof(*th);
17035 + if (opt_bytes != 0) {
17036 + ts_ptr = (u32 *)(th + 1);
17037 + if ((opt_bytes != TCPOLEN_TSTAMP_ALIGNED) ||
17038 + (*ts_ptr != htonl((TCPOPT_NOP << 24) |
17039 + (TCPOPT_NOP << 16) | (TCPOPT_TIMESTAMP << 8) |
17040 + TCPOLEN_TIMESTAMP))) {
17045 + tcp_data_len = ntohs(iph->tot_len) - (th->doff << 2) - sizeof(*iph);
17047 + return tcp_data_len;
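The option check above accepts exactly one layout: two NOPs followed by a timestamp, the aligned form Linux emits. With the standard values TCPOPT_NOP = 1, TCPOPT_TIMESTAMP = 8 and TCPOLEN_TIMESTAMP = 10, the word being compared is 0x0101080a, so eligibility is a single load and compare:

    #include <stdint.h>
    #include <stdio.h>

    #define TCPOPT_NOP        1
    #define TCPOPT_TIMESTAMP  8
    #define TCPOLEN_TIMESTAMP 10

    int main(void)
    {
        uint32_t word = (TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
                        (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP;
        printf("0x%08x\n", word); /* prints 0x0101080a */
        return 0;
    }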
17051 + * ixgbe_lro_queue - if able, queue skb into lro chain
17052 + * @q_vector: structure containing interrupt and ring information
17053 + * @new_skb: pointer to current skb being checked
17054 + * @tag: vlan tag for skb
17056 + * Checks whether the given skb is eligible for LRO and, if so,
17057 + * chains it to the existing lro_skb matching the flow id. If no LRO
17058 + * descriptor exists for the flow, create one.
17060 +static struct sk_buff *ixgbe_lro_queue(struct ixgbe_q_vector *q_vector,
17061 + struct sk_buff *new_skb,
17064 + struct sk_buff *lro_skb;
17065 + struct ixgbe_lro_desc *lrod;
17066 + struct hlist_node *node;
17067 + struct skb_shared_info *new_skb_info = skb_shinfo(new_skb);
17068 + struct ixgbe_lro_list *lrolist = q_vector->lrolist;
17069 + struct iphdr *iph = (struct iphdr *)new_skb->data;
17070 + struct tcphdr *th = (struct tcphdr *)(iph + 1);
17071 + int tcp_data_len = ixgbe_lro_header_ok(new_skb, iph, th);
17072 + u16 opt_bytes = (th->doff << 2) - sizeof(*th);
17073 + u32 *ts_ptr = (opt_bytes ? (u32 *)(th + 1) : NULL);
17074 + u32 seq = ntohl(th->seq);
17077 + * we have a packet that might be eligible for LRO,
17078 + * so see if it matches anything we might expect
17080 + hlist_for_each_entry(lrod, node, &lrolist->active, lro_node) {
17081 + if (lrod->source_port != th->source ||
17082 + lrod->dest_port != th->dest ||
17083 + lrod->source_ip != iph->saddr ||
17084 + lrod->dest_ip != iph->daddr ||
17085 + lrod->vlan_tag != tag)
17088 + /* malformed header, no TCP data, or resulting packet would be too large */
17089 + if (tcp_data_len <= 0 || (tcp_data_len + lrod->len) > 65535) {
17090 + ixgbe_lro_flush(q_vector, lrod);
17094 + /* out of order packet */
17095 + if (seq != lrod->next_seq) {
17096 + ixgbe_lro_flush(q_vector, lrod);
17097 + tcp_data_len = -1;
17101 + /* packet without timestamp, or timestamp suddenly added to flow */
17102 + if (lrod->opt_bytes != opt_bytes) {
17103 + ixgbe_lro_flush(q_vector, lrod);
17108 + u32 tsval = ntohl(*(ts_ptr + 1));
17109 + /* make sure timestamp values are increasing */
17110 + if (opt_bytes != lrod->opt_bytes ||
17111 + lrod->tsval > tsval || *(ts_ptr + 2) == 0) {
17112 + ixgbe_lro_flush(q_vector, lrod);
17113 + tcp_data_len = -1;
17117 + lrod->tsval = tsval;
17118 + lrod->tsecr = *(ts_ptr + 2);
17121 + /* remove any padding from the end of the skb */
17122 + __pskb_trim(new_skb, ntohs(iph->tot_len));
17123 + /* Remove IP and TCP header*/
17124 + skb_pull(new_skb, ntohs(iph->tot_len) - tcp_data_len);
17126 + lrod->next_seq += tcp_data_len;
17127 + lrod->ack_seq = th->ack_seq;
17128 + lrod->window = th->window;
17129 + lrod->len += tcp_data_len;
17130 + lrod->psh |= th->psh;
17131 + lrod->append_cnt++;
17132 + lrolist->stats.coal++;
17134 + if (tcp_data_len > lrod->mss)
17135 + lrod->mss = tcp_data_len;
17137 + lro_skb = lrod->skb;
17139 + /* if header is empty pull pages into current skb */
17140 + if (!skb_headlen(new_skb) &&
17141 + ((skb_shinfo(lro_skb)->nr_frags +
17142 + skb_shinfo(new_skb)->nr_frags) <= MAX_SKB_FRAGS)) {
17143 + struct skb_shared_info *lro_skb_info = skb_shinfo(lro_skb);
17145 + /* copy frags into the last skb */
17146 + memcpy(lro_skb_info->frags + lro_skb_info->nr_frags,
17147 + new_skb_info->frags,
17148 + new_skb_info->nr_frags * sizeof(skb_frag_t));
17150 + lro_skb_info->nr_frags += new_skb_info->nr_frags;
17151 + lro_skb->len += tcp_data_len;
17152 + lro_skb->data_len += tcp_data_len;
17153 + lro_skb->truesize += tcp_data_len;
17155 + new_skb_info->nr_frags = 0;
17156 + new_skb->truesize -= tcp_data_len;
17157 + new_skb->len = new_skb->data_len = 0;
17158 + new_skb->data = skb_mac_header(new_skb);
17159 + __pskb_trim(new_skb, 0);
17160 + new_skb->protocol = 0;
17161 + lrolist->stats.recycled++;
17163 + /* Chain this new skb in frag_list */
17164 + new_skb->prev = lro_skb;
17165 + lro_skb->next = new_skb;
17166 + lrod->skb = new_skb;
17171 + ixgbe_lro_flush(q_vector, lrod);
17176 + /* start a new packet */
17177 + if (tcp_data_len > 0 && !hlist_empty(&lrolist->free) && !th->psh) {
17178 + lrod = hlist_entry(lrolist->free.first, struct ixgbe_lro_desc,
17181 + lrod->skb = new_skb;
17182 + lrod->source_ip = iph->saddr;
17183 + lrod->dest_ip = iph->daddr;
17184 + lrod->source_port = th->source;
17185 + lrod->dest_port = th->dest;
17186 + lrod->vlan_tag = tag;
17187 + lrod->len = new_skb->len;
17188 + lrod->next_seq = seq + tcp_data_len;
17189 + lrod->ack_seq = th->ack_seq;
17190 + lrod->window = th->window;
17191 + lrod->mss = tcp_data_len;
17192 + lrod->opt_bytes = opt_bytes;
17194 + lrod->append_cnt = 0;
17196 + /* record timestamp if it is present */
17198 + lrod->tsval = ntohl(*(ts_ptr + 1));
17199 + lrod->tsecr = *(ts_ptr + 2);
17201 + /* remove first packet from freelist.. */
17202 + hlist_del(&lrod->lro_node);
17203 + /* .. and insert at the front of the active list */
17204 + hlist_add_head(&lrod->lro_node, &lrolist->active);
17205 + lrolist->active_cnt++;
17206 + lrolist->stats.coal++;
17210 + /* packet not handled by any of the above, pass it to the stack */
17211 + ixgbe_receive_skb(q_vector, new_skb, tag);
17215 +static void ixgbe_lro_ring_exit(struct ixgbe_lro_list *lrolist)
17217 + struct hlist_node *node, *node2;
17218 + struct ixgbe_lro_desc *lrod;
17220 + hlist_for_each_entry_safe(lrod, node, node2, &lrolist->active,
17222 + hlist_del(&lrod->lro_node);
17226 + hlist_for_each_entry_safe(lrod, node, node2, &lrolist->free,
17228 + hlist_del(&lrod->lro_node);
17233 +static void ixgbe_lro_ring_init(struct ixgbe_lro_list *lrolist)
17236 + struct ixgbe_lro_desc *lrod;
17238 + bytes = sizeof(struct ixgbe_lro_desc);
17240 + INIT_HLIST_HEAD(&lrolist->free);
17241 + INIT_HLIST_HEAD(&lrolist->active);
17243 + for (j = 0; j < IXGBE_LRO_MAX; j++) {
17244 + lrod = kzalloc(bytes, GFP_KERNEL);
17245 + if (lrod != NULL) {
17246 + INIT_HLIST_NODE(&lrod->lro_node);
17247 + hlist_add_head(&lrod->lro_node, &lrolist->free);
17252 +#endif /* IXGBE_NO_LRO */
17254 +static inline bool ixgbe_get_rsc_state(union ixgbe_adv_rx_desc *rx_desc)
17256 + return !!(le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
17257 + IXGBE_RXDADV_RSCCNT_MASK);
17260 +static void ixgbe_rx_status_indication(u32 staterr,
17261 + struct ixgbe_adapter *adapter)
17263 + switch (adapter->hw.mac.type) {
17264 + case ixgbe_mac_82599EB:
17265 + if (staterr & IXGBE_RXD_STAT_FLM)
17267 +#ifndef IXGBE_NO_LLI
17268 + if (staterr & IXGBE_RXD_STAT_DYNINT)
17269 + adapter->lli_int++;
17270 +#endif /* IXGBE_NO_LLI */
17272 + case ixgbe_mac_82598EB:
17273 +#ifndef IXGBE_NO_LLI
17274 + if (staterr & IXGBE_RXD_STAT_DYNINT)
17275 + adapter->lli_int++;
17276 +#endif /* IXGBE_NO_LLI */
17283 +#ifdef CONFIG_IXGBE_NAPI
17284 +static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
17285 + struct ixgbe_ring *rx_ring,
17286 + int *work_done, int work_to_do)
17288 +static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
17289 + struct ixgbe_ring *rx_ring)
17292 + struct ixgbe_adapter *adapter = q_vector->adapter;
17293 + union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
17294 + struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
17295 + struct sk_buff *skb;
17296 + unsigned int total_rx_bytes = 0, total_rx_packets = 0;
17297 + const int current_node = numa_node_id();
17299 + int ddp_bytes = 0;
17300 +#endif /* IXGBE_FCOE */
17303 + u16 cleaned_count = 0;
17304 +#ifndef CONFIG_IXGBE_NAPI
17305 + u16 work_to_do = rx_ring->work_limit, local_work_done = 0;
17306 + u16 *work_done = &local_work_done;
17308 + bool pkt_is_rsc = false;
17310 + i = rx_ring->next_to_clean;
17311 + rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
17312 + staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
17314 + while (staterr & IXGBE_RXD_STAT_DD) {
17315 + u32 upper_len = 0;
17317 + rx_buffer_info = &rx_ring->rx_buffer_info[i];
17319 + skb = rx_buffer_info->skb;
17320 + rx_buffer_info->skb = NULL;
17321 + prefetch(skb->data - NET_IP_ALIGN);
17323 + if (ring_is_rsc_enabled(rx_ring))
17324 + pkt_is_rsc = ixgbe_get_rsc_state(rx_desc);
17326 + /* if this is an skb from a previous receive, dma will be 0 */
17327 + if (rx_buffer_info->dma) {
17329 + if (pkt_is_rsc && !(staterr & IXGBE_RXD_STAT_EOP) &&
17332 + * When HWRSC is enabled, delay unmapping
17333 + * of the first packet. It carries the
17334 + * header information, and HW may still
17335 + * access the header after the writeback.
17336 + * Only unmap it when EOP is reached.
17338 + IXGBE_RSC_CB(skb)->delay_unmap = true;
17339 + IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
17341 + dma_unmap_single(rx_ring->dev,
17342 + rx_buffer_info->dma,
17343 + rx_ring->rx_buf_len,
17344 + DMA_FROM_DEVICE);
17346 + rx_buffer_info->dma = 0;
17348 + if (ring_is_ps_enabled(rx_ring)) {
17349 + hlen = ixgbe_get_hlen(rx_desc);
17350 + upper_len = le16_to_cpu(rx_desc->wb.upper.length);
17352 + hlen = le16_to_cpu(rx_desc->wb.upper.length);
17355 + /* small packet padding for queue-to-queue loopback */
17356 + if ((staterr & IXGBE_RXD_STAT_LB)
17357 + && hlen < 60 && upper_len == 0) {
17358 + memset(skb->data + hlen, 0, 60 - hlen);
17362 + skb_put(skb, hlen);
17364 + /* assume packet split since header is unmapped */
17365 + upper_len = le16_to_cpu(rx_desc->wb.upper.length);
17369 + dma_unmap_page(rx_ring->dev,
17370 + rx_buffer_info->page_dma,
17372 + DMA_FROM_DEVICE);
17373 + rx_buffer_info->page_dma = 0;
17374 + skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
17375 + rx_buffer_info->page,
17376 + rx_buffer_info->page_offset,
17379 + if ((page_count(rx_buffer_info->page) == 1) &&
17380 + (page_to_nid(rx_buffer_info->page) == current_node))
17381 + get_page(rx_buffer_info->page);
17383 + rx_buffer_info->page = NULL;
17385 + skb->len += upper_len;
17386 + skb->data_len += upper_len;
17387 + skb->truesize += upper_len;
17391 + if (i == rx_ring->count)
17394 + next_rxd = IXGBE_RX_DESC_ADV(rx_ring, i);
17395 + prefetch(next_rxd);
17398 + if (pkt_is_rsc) {
17399 + u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
17400 + IXGBE_RXDADV_NEXTP_SHIFT;
17401 + next_buffer = &rx_ring->rx_buffer_info[nextp];
17403 + next_buffer = &rx_ring->rx_buffer_info[i];
17406 + if (!(staterr & IXGBE_RXD_STAT_EOP)) {
17407 + if (ring_is_ps_enabled(rx_ring)) {
17408 + rx_buffer_info->skb = next_buffer->skb;
17409 + rx_buffer_info->dma = next_buffer->dma;
17410 + next_buffer->skb = skb;
17411 + next_buffer->dma = 0;
17413 + skb->next = next_buffer->skb;
17414 + skb->next->prev = skb;
17416 + rx_ring->rx_stats.non_eop_descs++;
17420 + ixgbe_rx_status_indication(staterr, adapter);
17422 + skb = ixgbe_transform_rsc_queue(skb);
17423 + /* if we got here without RSC the packet is invalid */
17424 + if (!pkt_is_rsc) {
17425 + __pskb_trim(skb, 0);
17426 + rx_buffer_info->skb = skb;
17431 + if (ring_is_rsc_enabled(rx_ring)) {
17432 + if (IXGBE_RSC_CB(skb)->delay_unmap) {
17433 + dma_unmap_single(rx_ring->dev,
17434 + IXGBE_RSC_CB(skb)->dma,
17435 + rx_ring->rx_buf_len,
17436 + DMA_FROM_DEVICE);
17437 + IXGBE_RSC_CB(skb)->dma = 0;
17438 + IXGBE_RSC_CB(skb)->delay_unmap = false;
17441 + if (pkt_is_rsc) {
17442 + if (ring_is_ps_enabled(rx_ring))
17443 + rx_ring->rx_stats.rsc_count += skb_shinfo(skb)->nr_frags;
17445 + rx_ring->rx_stats.rsc_count += IXGBE_RSC_CB(skb)->skb_cnt;
17446 + rx_ring->rx_stats.rsc_flush++;
17449 + /* ERR_MASK will only have valid bits if EOP set */
17450 + if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
17451 + /* trim packet back to size 0 and recycle it */
17452 + __pskb_trim(skb, 0);
17453 + rx_buffer_info->skb = skb;
17457 + ixgbe_rx_checksum(adapter, rx_desc, skb);
17459 + /* probably a little skewed due to removing CRC */
17460 + total_rx_bytes += skb->len;
17461 + total_rx_packets++;
17463 + skb->protocol = eth_type_trans(skb, rx_ring->netdev);
17466 + /* if ddp, not passing to ULD unless for FCP_RSP or error */
17467 + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
17468 + ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
17469 + if (!ddp_bytes) {
17470 + rx_ring->netdev->last_rx = jiffies;
17475 +#endif /* IXGBE_FCOE */
17476 + vlan_tag = ((staterr & IXGBE_RXD_STAT_VP) ?
17477 + le16_to_cpu(rx_desc->wb.upper.vlan) : 0);
17479 +#ifndef IXGBE_NO_LRO
17480 + if (ixgbe_can_lro(rx_ring, rx_desc))
17481 + rx_buffer_info->skb = ixgbe_lro_queue(q_vector, skb, vlan_tag);
17484 + ixgbe_receive_skb(q_vector, skb, vlan_tag);
17486 + rx_ring->netdev->last_rx = jiffies;
17489 + rx_desc->wb.upper.status_error = 0;
17492 + if (*work_done >= work_to_do)
17495 + /* return some buffers to hardware, one at a time is too slow */
17496 + if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
17497 + ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
17498 + cleaned_count = 0;
17501 + /* use prefetched values */
17502 + rx_desc = next_rxd;
17503 + staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
17506 +#ifndef IXGBE_NO_LRO
17507 + if (ring_is_lro_enabled(rx_ring))
17508 + ixgbe_lro_flush_all(q_vector);
17510 +#endif /* IXGBE_NO_LRO */
17511 + rx_ring->next_to_clean = i;
17512 + cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
17514 + if (cleaned_count)
17515 + ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
17518 + /* include DDPed FCoE data */
17519 + if (ddp_bytes > 0) {
17520 + unsigned int mss;
17522 + mss = rx_ring->netdev->mtu - sizeof(struct fcoe_hdr) -
17523 + sizeof(struct fc_frame_header) -
17524 + sizeof(struct fcoe_crc_eof);
17527 + total_rx_bytes += ddp_bytes;
17528 + total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss);
17530 +#endif /* IXGBE_FCOE */
17532 + rx_ring->total_packets += total_rx_packets;
17533 + rx_ring->total_bytes += total_rx_bytes;
17534 + rx_ring->stats.packets += total_rx_packets;
17535 + rx_ring->stats.bytes += total_rx_bytes;
17536 +#ifndef CONFIG_IXGBE_NAPI
17538 + /* re-arm the interrupt if we had to bail early and have more work */
17539 + if ((*work_done >= work_to_do) &&
17540 + (!test_bit(__IXGBE_DOWN, &adapter->state)))
17541 + ixgbe_irq_rearm_queues(adapter, ((u64)1 << q_vector->v_idx));
17543 + return local_work_done;
17547 +#ifdef CONFIG_IXGBE_NAPI
17548 +static int ixgbe_clean_rxonly(struct napi_struct *, int);
17551 + * ixgbe_configure_msix - Configure MSI-X hardware
17552 + * @adapter: board private structure
17554 + * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
17557 +static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
17559 + struct ixgbe_q_vector *q_vector;
17560 + int i, q_vectors, v_idx, r_idx;
17563 + q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
17566 + * Populate the IVAR table and set the ITR values to the
17567 + * corresponding register.
17569 + for (v_idx = 0; v_idx < q_vectors; v_idx++) {
17570 + q_vector = adapter->q_vector[v_idx];
17571 + /* XXX for_each_bit(...) */
17572 + r_idx = find_first_bit(q_vector->rxr_idx,
17573 + adapter->num_rx_queues);
17575 + for (i = 0; i < q_vector->rxr_count; i++) {
17576 + u8 reg_idx = adapter->rx_ring[r_idx]->reg_idx;
17577 + ixgbe_set_ivar(adapter, 0, reg_idx, v_idx);
17578 + r_idx = find_next_bit(q_vector->rxr_idx,
17579 + adapter->num_rx_queues,
17582 + r_idx = find_first_bit(q_vector->txr_idx,
17583 + adapter->num_tx_queues);
17585 + for (i = 0; i < q_vector->txr_count; i++) {
17586 + u8 reg_idx = adapter->tx_ring[r_idx]->reg_idx;
17587 + ixgbe_set_ivar(adapter, 1, reg_idx, v_idx);
17588 + r_idx = find_next_bit(q_vector->txr_idx,
17589 + adapter->num_tx_queues,
17593 + if (q_vector->txr_count && !q_vector->rxr_count)
17594 + /* tx only vector */
17595 + q_vector->eitr = adapter->tx_eitr_param;
17596 + else if (q_vector->rxr_count)
17597 + /* rx or rx/tx vector */
17598 + q_vector->eitr = adapter->rx_eitr_param;
17600 + ixgbe_write_eitr(q_vector);
17603 + switch (adapter->hw.mac.type) {
17604 + case ixgbe_mac_82598EB:
17605 + ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
17608 + case ixgbe_mac_82599EB:
17609 + ixgbe_set_ivar(adapter, -1, 1, v_idx);
17615 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
17616 +#ifdef IXGBE_TCP_TIMER
17617 + ixgbe_set_ivar(adapter, -1, 0, ++v_idx);
17618 +#endif /* IXGBE_TCP_TIMER */
17620 + /* set up to autoclear timer, and the vectors */
17621 + mask = IXGBE_EIMS_ENABLE_MASK;
17622 + mask &= ~(IXGBE_EIMS_OTHER |
17623 + IXGBE_EIMS_MAILBOX |
17626 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
17629 +enum latency_range {
17630 + lowest_latency = 0,
17632 + bulk_latency = 2,
17633 + latency_invalid = 255
17637 + * ixgbe_update_itr - update the dynamic ITR value based on statistics
17638 + * @adapter: pointer to adapter
17639 + * @eitr: eitr setting (ints per sec) to give last timeslice
17640 + * @itr_setting: current throttle rate in ints/second
17641 + * @packets: the number of packets during this measurement interval
17642 + * @bytes: the number of bytes during this measurement interval
17644 + * Stores a new ITR value based on packets and byte
17645 + * counts during the last interrupt. The advantage of per interrupt
17646 + * computation is faster updates and more accurate ITR for the current
17647 + * traffic pattern. Constants in this function were computed
17648 + * based on theoretical maximum wire speed and thresholds were set based
17649 + * on testing data as well as attempting to minimize response time
17650 + * while increasing bulk throughput.
17651 + * This functionality is controlled by the InterruptThrottleRate module
17652 + * parameter (see ixgbe_param.c).
17654 +static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
17655 + u32 eitr, u8 itr_setting,
17656 + int packets, int bytes)
17658 + unsigned int retval = itr_setting;
17659 + u32 timepassed_us;
17660 + u64 bytes_perint;
17662 + if (packets == 0)
17663 + goto update_itr_done;
17666 + /* simple throttlerate management
17667 + * 0-20MB/s lowest (100000 ints/s)
17668 + * 20-100MB/s low (20000 ints/s)
17669 + * 100-1249MB/s bulk (8000 ints/s)
17671 + /* what was last interrupt timeslice? */
17672 + timepassed_us = 1000000/eitr;
17673 + bytes_perint = bytes / timepassed_us; /* bytes/usec */
17675 + switch (itr_setting) {
17676 + case lowest_latency:
17677 + if (bytes_perint > adapter->eitr_low) {
17678 + retval = low_latency;
17681 + case low_latency:
17682 + if (bytes_perint > adapter->eitr_high) {
17683 + retval = bulk_latency;
17685 + else if (bytes_perint <= adapter->eitr_low) {
17686 + retval = lowest_latency;
17689 + case bulk_latency:
17690 + if (bytes_perint <= adapter->eitr_high) {
17691 + retval = low_latency;
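A worked example of the arithmetic: at eitr = 20000 ints/s the last timeslice was 1000000/20000 = 50 us, so 3000 bytes in that slice gives bytes_perint = 60 (bytes/usec, roughly MB/s). Assuming thresholds of eitr_low = 10 and eitr_high = 20 (set elsewhere in the driver; an assumption for this example), a vector sitting in low_latency would be promoted to bulk_latency. A standalone sketch of the state machine:

    #include <stdio.h>

    enum { LOWEST, LOW, BULK };

    #define EITR_LOW  10 /* assumed stand-ins for adapter->eitr_low/high */
    #define EITR_HIGH 20

    static int update_itr(unsigned int eitr, int state,
                          unsigned long packets, unsigned long bytes)
    {
        unsigned long bytes_perint;

        if (!packets)
            return state;
        bytes_perint = bytes / (1000000 / eitr); /* bytes per usec */

        switch (state) {
        case LOWEST:
            return bytes_perint > EITR_LOW ? LOW : LOWEST;
        case LOW:
            if (bytes_perint > EITR_HIGH)
                return BULK;
            return bytes_perint <= EITR_LOW ? LOWEST : LOW;
        case BULK:
            return bytes_perint <= EITR_HIGH ? LOW : BULK;
        }
        return state;
    }

    int main(void)
    {
        printf("%d\n", update_itr(20000, LOW, 10, 3000)); /* prints 2 == BULK */
        return 0;
    }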
17701 + * ixgbe_write_eitr - write EITR register in hardware specific way
17702 + * @q_vector: structure containing interrupt and ring information
17704 + * This function is made to be called by ethtool and by the driver
17705 + * when it needs to update EITR registers at runtime. Hardware
17706 + * specific quirks/differences are taken care of here.
17708 +void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
17710 + struct ixgbe_adapter *adapter = q_vector->adapter;
17711 + struct ixgbe_hw *hw = &adapter->hw;
17712 + int v_idx = q_vector->v_idx;
17713 + u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr);
17715 + switch (adapter->hw.mac.type) {
17716 + case ixgbe_mac_82598EB:
17717 + /* must write high and low 16 bits to reset counter */
17718 + itr_reg |= (itr_reg << 16);
17720 + case ixgbe_mac_82599EB:
17722 + * 82599 can support a value of zero, so allow it for
17723 + * max interrupt rate, but there is an errata where it can
17724 + * not be zero with RSC
17726 + if (itr_reg == 8 &&
17727 + !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
17731 + * set the WDIS bit to not clear the timer bits and cause an
17732 + * immediate assertion of the interrupt
17734 + itr_reg |= IXGBE_EITR_CNT_WDIS;
17739 + IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
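On 82598 the interval must land in both halves of the register to reset the counter, hence the itr_reg |= (itr_reg << 16) above; on 82599 only the low half plus the WDIS bit is written. A quick check of the 82598 mirroring:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t itr_reg = 0x00C8;   /* interval from EITR_INTS_PER_SEC_TO_REG() */
        itr_reg |= (itr_reg << 16);  /* 82598: mirror into the high half */
        printf("0x%08x\n", itr_reg); /* prints 0x00c800c8 */
        return 0;
    }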
17742 +static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
17744 + struct ixgbe_adapter *adapter = q_vector->adapter;
17746 + u8 current_itr, ret_itr;
17748 + struct ixgbe_ring *rx_ring = NULL, *tx_ring = NULL;
17750 + r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
17751 + for (i = 0; i < q_vector->txr_count; i++) {
17752 + tx_ring = adapter->tx_ring[r_idx];
17753 + ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
17754 + q_vector->tx_itr,
17755 + tx_ring->total_packets,
17756 + tx_ring->total_bytes);
17757 + /* if the result for this queue would decrease interrupt
17758 + * rate for this vector then use that result */
17759 + q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
17760 + q_vector->tx_itr - 1 : ret_itr);
17761 + r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
17765 + r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
17766 + for (i = 0; i < q_vector->rxr_count; i++) {
17767 + rx_ring = adapter->rx_ring[r_idx];
17768 + ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
17769 + q_vector->rx_itr,
17770 + rx_ring->total_packets,
17771 + rx_ring->total_bytes);
17772 + /* if the result for this queue would decrease interrupt
17773 + * rate for this vector then use that result */
17774 + q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
17775 + q_vector->rx_itr - 1 : ret_itr);
17776 + r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
17780 + current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
17782 + switch (current_itr) {
17783 + /* counts and packets in update_itr are dependent on these numbers */
17784 + case lowest_latency:
17785 + new_itr = 100000;
17787 + case low_latency:
17788 + new_itr = 20000; /* aka hwitr = ~200 */
17790 + case bulk_latency:
17796 + if (new_itr != q_vector->eitr) {
17797 + /* do an exponential smoothing */
17798 + new_itr = ((q_vector->eitr * 9) + new_itr)/10;
17800 + /* save the algorithm value here, not the smoothed one */
17801 + q_vector->eitr = new_itr;
17803 + ixgbe_write_eitr(q_vector);
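The smoothing above weights the old rate 9:1 over the newly computed one, so large jumps are applied gradually: moving from eitr = 8000 toward a computed 100000 gives (8000*9 + 100000)/10 = 17200 on the first pass. A one-line check:

    #include <stdio.h>

    int main(void)
    {
        unsigned int eitr = 8000, target = 100000;
        /* same 9:1 exponential smoothing as ixgbe_set_itr_msix() */
        printf("%u\n", (eitr * 9 + target) / 10); /* prints 17200 */
        return 0;
    }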
17808 + * ixgbe_check_overtemp_task - worker thread to check for over temperature
17809 + * @work: pointer to work_struct containing our data
17811 +static void ixgbe_check_overtemp_task(struct work_struct *work)
17813 + struct ixgbe_adapter *adapter = container_of(work,
17814 + struct ixgbe_adapter,
17815 + check_overtemp_task);
17816 + struct ixgbe_hw *hw = &adapter->hw;
17817 + u32 eicr = adapter->interrupt_event;
17819 + if (test_bit(__IXGBE_DOWN, &adapter->state))
17822 + if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE))
17825 + switch (hw->device_id) {
17826 + case IXGBE_DEV_ID_82599_T3_LOM: {
17828 + bool link_up = false;
17830 + if (hw->mac.ops.check_link)
17831 + hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
17834 + * Since the warning interrupt is for both ports
17835 + * we don't have to check if:
17836 + * - This interrupt wasn't for our port.
17837 + * - We may have missed the interrupt so always have to
17838 + * check if we got an LSC
17840 + if (((eicr & IXGBE_EICR_GPI_SDP0) && link_up) &&
17841 + (!(eicr & IXGBE_EICR_LSC)))
17844 + /* Check if this is not due to overtemp */
17845 + if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP)
17850 + if (!(eicr & IXGBE_EICR_GPI_SDP0))
17854 + DPRINTK(PROBE, CRIT, "Network adapter has been stopped because it has "
17855 + "overheated. Restart the computer. If the problem persists, "
17856 + "power off the system and replace the adapter\n");
17857 + /* write to clear the interrupt */
17858 + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0);
17861 +static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
17863 + struct ixgbe_hw *hw = &adapter->hw;
17865 + if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
17866 + (eicr & IXGBE_EICR_GPI_SDP1)) {
17867 + DPRINTK(PROBE, CRIT, "Fan has stopped, replace the adapter\n");
17868 + /* write to clear the interrupt */
17869 + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
17873 +static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
17875 + struct ixgbe_hw *hw = &adapter->hw;
17877 + if (eicr & IXGBE_EICR_GPI_SDP2) {
17878 + /* Clear the interrupt */
17879 + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
17880 + if (!test_bit(__IXGBE_DOWN, &adapter->state))
17881 + schedule_work(&adapter->sfp_config_module_task);
17884 + if (eicr & IXGBE_EICR_GPI_SDP1) {
17885 + /* Clear the interrupt */
17886 + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
17887 + if (!test_bit(__IXGBE_DOWN, &adapter->state))
17888 + schedule_work(&adapter->multispeed_fiber_task);
17892 +static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
17894 + struct ixgbe_hw *hw = &adapter->hw;
17896 + adapter->lsc_int++;
17897 + adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
17898 + adapter->link_check_timeout = jiffies;
17899 + if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
17900 + IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
17901 + IXGBE_WRITE_FLUSH(hw);
17902 + schedule_work(&adapter->watchdog_task);
17906 +static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
17908 + struct net_device *netdev = data;
17909 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
17910 + struct ixgbe_hw *hw = &adapter->hw;
17914 + * Workaround for Silicon errata. Use clear-by-write instead
17915 + * of clear-by-read. Reading with EICS will return the
17916 + * interrupt causes without clearing, which is later done
17917 + * with the write to EICR.
17919 + eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
17920 + IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
17922 + if (eicr & IXGBE_EICR_LSC)
17923 + ixgbe_check_lsc(adapter);
17925 + if (eicr & IXGBE_EICR_MAILBOX)
17926 + ixgbe_msg_task(adapter);
17928 + switch (hw->mac.type) {
17929 + case ixgbe_mac_82599EB:
17930 + if (eicr & IXGBE_EICR_ECC) {
17931 + DPRINTK(LINK, INFO, "Received unrecoverable ECC Err, "
17932 + "please reboot\n");
17933 + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
17936 + /* Handle Flow Director Full threshold interrupt */
17937 + if (eicr & IXGBE_EICR_FLOW_DIR) {
17939 + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_FLOW_DIR);
17940 + /* Disable transmits before FDIR Re-initialization */
17941 + netif_tx_stop_all_queues(netdev);
17942 + for (i = 0; i < adapter->num_tx_queues; i++) {
17943 + struct ixgbe_ring *tx_ring =
17944 + adapter->tx_ring[i];
17945 + if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
17946 + &tx_ring->state))
17947 + schedule_work(&adapter->fdir_reinit_task);
17951 + ixgbe_check_sfp_event(adapter, eicr);
17952 + adapter->interrupt_event = eicr;
17953 + if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
17954 + ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
17955 + adapter->interrupt_event = eicr;
17956 + schedule_work(&adapter->check_overtemp_task);
17963 + ixgbe_check_fan_failure(adapter, eicr);
17965 + /* re-enable the original interrupt state, no lsc, no queues */
17966 + if (!test_bit(__IXGBE_DOWN, &adapter->state))
17967 + IXGBE_WRITE_REG(hw, IXGBE_EIMS, eicr &
17968 + ~(IXGBE_EIMS_LSC | IXGBE_EIMS_RTX_QUEUE));
17970 + return IRQ_HANDLED;
17973 +#ifdef IXGBE_TCP_TIMER
17974 +static irqreturn_t ixgbe_msix_pba(int irq, void *data)
17976 + struct net_device *netdev = data;
17977 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
17980 + u32 pba = readl(adapter->msix_addr + IXGBE_MSIXPBA);
17981 + for (i = 0; i < MAX_MSIX_COUNT; i++) {
17982 + if (pba & (1 << i))
17983 + adapter->msix_handlers[i](irq, data, regs);
17985 + adapter->pba_zero[i]++;
17988 + adapter->msix_pba++;
17989 + return IRQ_HANDLED;
17992 +static irqreturn_t ixgbe_msix_tcp_timer(int irq, void *data)
17994 + struct net_device *netdev = data;
17995 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
17997 + adapter->msix_tcp_timer++;
17999 + return IRQ_HANDLED;
18002 +#endif /* IXGBE_TCP_TIMER */
18003 +void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter, u64 qmask)
18006 + struct ixgbe_hw *hw = &adapter->hw;
18008 + switch (hw->mac.type) {
18009 + case ixgbe_mac_82598EB:
18010 + mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
18011 + IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
18013 + case ixgbe_mac_82599EB:
18014 + mask = (qmask & 0xFFFFFFFF);
18016 + IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
18017 + mask = (qmask >> 32);
18019 + IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
18024 + /* skip the flush */
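On 82599 the 64-bit qmask is split across the two EIMS_EX registers: bits 31:0 go to EIMS_EX(0) and bits 63:32 to EIMS_EX(1), so enabling queue vector 40 sets bit 8 of EIMS_EX(1). A quick check of the split:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t qmask = (uint64_t)1 << 40;           /* queue vector 40 */
        uint32_t lo = (uint32_t)(qmask & 0xFFFFFFFF); /* -> EIMS_EX(0) */
        uint32_t hi = (uint32_t)(qmask >> 32);        /* -> EIMS_EX(1) */
        printf("lo=0x%08x hi=0x%08x\n", lo, hi);      /* hi = 0x00000100 */
        return 0;
    }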
18027 +void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter, u64 qmask)
18030 + struct ixgbe_hw *hw = &adapter->hw;
18032 + switch (hw->mac.type) {
18033 + case ixgbe_mac_82598EB:
18034 + mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
18035 + IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
18037 + case ixgbe_mac_82599EB:
18038 + mask = (qmask & 0xFFFFFFFF);
18040 + IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
18041 + mask = (qmask >> 32);
18043 + IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
18048 + /* skip the flush */
18051 +static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
18053 + struct ixgbe_q_vector *q_vector = data;
18054 + struct ixgbe_adapter *adapter = q_vector->adapter;
18055 + struct ixgbe_ring *tx_ring;
18057 +#ifndef CONFIG_IXGBE_NAPI
18058 + bool tx_clean_complete = false;
18061 + if (!q_vector->txr_count)
18062 + return IRQ_HANDLED;
18064 +#ifndef CONFIG_IXGBE_NAPI
18065 + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
18066 + ixgbe_update_dca(q_vector);
18068 + r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
18069 + for (i = 0; i < q_vector->txr_count; i++) {
18070 + tx_ring = adapter->tx_ring[r_idx];
18071 + tx_ring->total_bytes = 0;
18072 + tx_ring->total_packets = 0;
18073 +#ifndef CONFIG_IXGBE_NAPI
18074 + tx_clean_complete = ixgbe_clean_tx_irq(q_vector, tx_ring);
18076 + r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
18080 +#ifdef CONFIG_IXGBE_NAPI
18081 + /* EIAM disabled interrupts (on this vector) for us */
18082 + napi_schedule(&q_vector->napi);
18084 + if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
18085 + u64 eics = ((u64)1 << q_vector->v_idx);
18086 + ixgbe_irq_enable_queues(adapter, eics);
18087 + if (!tx_clean_complete)
18088 + ixgbe_irq_rearm_queues(adapter, eics);
18092 + * possibly later we can enable tx auto-adjustment if necessary
18094 + if (adapter->itr_setting & 1)
18095 + ixgbe_set_itr_msix(q_vector);
18098 + return IRQ_HANDLED;
18102 + * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues)
18104 + * @data: pointer to our q_vector struct for this interrupt vector
18106 +static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
18108 + struct ixgbe_q_vector *q_vector = data;
18109 + struct ixgbe_adapter *adapter = q_vector->adapter;
18110 + struct ixgbe_ring *rx_ring;
18113 +#ifndef CONFIG_IXGBE_NAPI
18114 + bool rx_clean_complete = false;
18117 + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
18118 + ixgbe_update_dca(q_vector);
18120 + r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
18121 + for (i = 0; i < q_vector->rxr_count; i++) {
18122 + rx_ring = adapter->rx_ring[r_idx];
18123 + rx_ring->total_bytes = 0;
18124 + rx_ring->total_packets = 0;
18125 +#ifndef CONFIG_IXGBE_NAPI
18126 + rx_clean_complete = ixgbe_clean_rx_irq(q_vector, rx_ring);
18128 + r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
18132 + if (adapter->rx_itr_setting & 1)
18133 + ixgbe_set_itr_msix(q_vector);
18134 + if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
18135 + u64 eics = ((u64)1 << q_vector->v_idx);
18136 + ixgbe_irq_enable_queues(adapter, eics);
18137 + if (!rx_clean_complete)
18138 + ixgbe_irq_rearm_queues(adapter, eics);
18141 + r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
18145 + if (!q_vector->rxr_count)
18146 + return IRQ_HANDLED;
18148 + /* EIAM disabled interrupts (on this vector) for us */
18149 + napi_schedule(&q_vector->napi);
18152 + return IRQ_HANDLED;
18155 +static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
18157 + struct ixgbe_q_vector *q_vector = data;
18158 + struct ixgbe_adapter *adapter = q_vector->adapter;
18159 + struct ixgbe_ring *ring;
18162 +#ifndef CONFIG_IXGBE_NAPI
18163 + bool clean_complete = true;
18166 + if (!q_vector->txr_count && !q_vector->rxr_count)
18167 + return IRQ_HANDLED;
18169 +#ifndef CONFIG_IXGBE_NAPI
18170 + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
18171 + ixgbe_update_dca(q_vector);
18173 + r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
18174 + for (i = 0; i < q_vector->txr_count; i++) {
18175 + ring = adapter->tx_ring[r_idx];
18176 + ring->total_bytes = 0;
18177 + ring->total_packets = 0;
18178 +#ifndef CONFIG_IXGBE_NAPI
18179 + clean_complete = ixgbe_clean_tx_irq(q_vector, ring);
18181 + r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
18185 + r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
18186 + for (i = 0; i < q_vector->rxr_count; i++) {
18187 + ring = adapter->rx_ring[r_idx];
18188 + ring->total_bytes = 0;
18189 + ring->total_packets = 0;
18190 +#ifndef CONFIG_IXGBE_NAPI
18191 + clean_complete &= ixgbe_clean_rx_irq(q_vector, ring);
18193 + r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
18197 + if (adapter->rx_itr_setting & 1)
18198 + ixgbe_set_itr_msix(q_vector);
18199 + if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
18200 + u64 eics = ((u64)1 << q_vector->v_idx);
18201 + ixgbe_irq_enable_queues(adapter, eics);
18202 + if (!clean_complete)
18203 + ixgbe_irq_rearm_queues(adapter, eics);
18206 + r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
18210 + /* EIAM disabled interrupts (on this vector) for us */
18211 + napi_schedule(&q_vector->napi);
18214 + return IRQ_HANDLED;
18217 +#ifdef CONFIG_IXGBE_NAPI
18219 + * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine
18220 + * @napi: napi struct with our devices info in it
18221 + * @budget: amount of work driver is allowed to do this pass, in packets
18223 + * This function is optimized for cleaning one queue only on a single
18226 +static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
18228 + struct ixgbe_q_vector *q_vector =
18229 + container_of(napi, struct ixgbe_q_vector, napi);
18230 + struct ixgbe_adapter *adapter = q_vector->adapter;
18231 + struct ixgbe_ring *rx_ring = NULL;
18232 + int work_done = 0;
18235 + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
18236 + ixgbe_update_dca(q_vector);
18238 + r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
18239 + rx_ring = adapter->rx_ring[r_idx];
18241 + ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
18243 +#ifndef HAVE_NETDEV_NAPI_LIST
18244 + if (!netif_running(adapter->netdev))
18248 + /* If all Rx work done, exit the polling mode */
18249 + if (work_done < budget) {
18250 + napi_complete(napi);
18251 + if (adapter->rx_itr_setting & 1)
18252 + ixgbe_set_itr_msix(q_vector);
18253 + if (!test_bit(__IXGBE_DOWN, &adapter->state))
18254 + ixgbe_irq_enable_queues(adapter,
18255 + ((u64)1 << q_vector->v_idx));
18258 + return work_done;
18262 + * ixgbe_clean_rxtx_many - msix (aka one shot) rx/tx clean routine
18263 + * @napi: napi struct with our devices info in it
18264 + * @budget: amount of work driver is allowed to do this pass, in packets
18266 + * This function will clean more than one rx queue associated with a
18269 +static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
18271 + struct ixgbe_q_vector *q_vector =
18272 + container_of(napi, struct ixgbe_q_vector, napi);
18273 + struct ixgbe_adapter *adapter = q_vector->adapter;
18274 + struct ixgbe_ring *ring = NULL;
18276 + int work_done = 0, total_work = 0, i;
18277 + bool rx_clean_complete = true, tx_clean_complete = true;
18279 + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
18280 + ixgbe_update_dca(q_vector);
18282 + r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
18283 + for (i = 0; i < q_vector->txr_count; i++) {
18284 + ring = adapter->tx_ring[r_idx];
18285 + tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
18286 + r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
18290 + /* attempt to distribute budget to each queue fairly, but don't allow
18291 + * the budget to go below 1 because we'll exit polling */
18292 + budget /= (q_vector->rxr_count ?: 1);
18293 + budget = max(budget, 1);
18294 + r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
18295 + for (i = 0; i < q_vector->rxr_count; i++) {
18297 + ring = adapter->rx_ring[r_idx];
18298 + ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
18299 + total_work += work_done;
18300 + rx_clean_complete &= (work_done < budget);
18301 + r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
18305 + if (!tx_clean_complete || !rx_clean_complete)
18306 + work_done = budget;
18308 +#ifndef HAVE_NETDEV_NAPI_LIST
18309 + if (!netif_running(adapter->netdev))
18313 + /* If all Rx work done, exit the polling mode */
18314 + if (work_done < budget) {
18315 + napi_complete(napi);
18316 + if (adapter->rx_itr_setting & 1)
18317 + ixgbe_set_itr_msix(q_vector);
18318 + if (!test_bit(__IXGBE_DOWN, &adapter->state))
18319 + ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
18322 + return work_done;
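The budget split above gives every rx ring an equal share, clamped to at least 1 so polling cannot exit on a zero budget; e.g. budget = 64 across 3 rings polls each ring with 21, and any ring that uses its full share clears rx_clean_complete and forces another poll round. A sketch of the split:

    #include <stdio.h>

    int main(void)
    {
        int budget = 64, rxr_count = 3;
        /* same split as ixgbe_clean_rxtx_many(): equal share, floor of 1 */
        int per_ring = budget / (rxr_count ? rxr_count : 1);
        if (per_ring < 1)
            per_ring = 1;
        printf("%d\n", per_ring); /* prints 21 */
        return 0;
    }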
18326 + * ixgbe_clean_txonly - msix (aka one shot) tx clean routine
18327 + * @napi: napi struct with our devices info in it
18328 + * @budget: amount of work driver is allowed to do this pass, in packets
18330 + * This function is optimized for cleaning one queue only on a single
18333 +static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
18335 + struct ixgbe_q_vector *q_vector =
18336 + container_of(napi, struct ixgbe_q_vector, napi);
18337 + struct ixgbe_adapter *adapter = q_vector->adapter;
18338 + struct ixgbe_ring *tx_ring = NULL;
18340 + int work_done = 0;
18342 + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
18343 + ixgbe_update_dca(q_vector);
18345 + r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
18346 + tx_ring = adapter->tx_ring[r_idx];
18348 + if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
18349 + work_done = budget;
18351 +#ifndef HAVE_NETDEV_NAPI_LIST
18352 + if (!netif_running(adapter->netdev))
18356 + /* If all Tx work done, exit the polling mode */
18357 + if (work_done < budget) {
18358 + napi_complete(napi);
18359 + if (adapter->tx_itr_setting & 1)
18360 + ixgbe_set_itr_msix(q_vector);
18361 + if (!test_bit(__IXGBE_DOWN, &adapter->state))
18362 + ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
18365 + return work_done;
18368 +#endif /* CONFIG_IXGBE_NAPI */
18369 +static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
18372 + struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
18373 + struct ixgbe_ring *rx_ring = a->rx_ring[r_idx];
18375 + set_bit(r_idx, q_vector->rxr_idx);
18376 + q_vector->rxr_count++;
18377 + rx_ring->q_vector = q_vector;
18380 +static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
18383 + struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
18384 + struct ixgbe_ring *tx_ring = a->tx_ring[t_idx];
18386 + set_bit(t_idx, q_vector->txr_idx);
18387 + q_vector->txr_count++;
18388 + tx_ring->q_vector = q_vector;
18392 + * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
18393 + * @adapter: board private structure to initialize
18395 + * This function maps descriptor rings to the queue-specific vectors
18396 + * we were allotted through the MSI-X enabling code. Ideally, we'd have
18397 + * one vector per ring/queue, but on a constrained vector budget, we
18398 + * group the rings as "efficiently" as possible. You would add new
18399 + * mapping configurations in here.
18401 +static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter)
18406 + int rxr_idx = 0, txr_idx = 0;
18407 + int rxr_remaining = adapter->num_rx_queues;
18408 + int txr_remaining = adapter->num_tx_queues;
18413 + /* No mapping required if MSI-X is disabled. */
18414 + if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
18417 + q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
18420 + * The ideal configuration...
18421 + * We have enough vectors to map one per queue.
18423 + if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
18424 + for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
18425 + map_vector_to_rxq(adapter, v_start, rxr_idx);
18427 + for (; txr_idx < txr_remaining; v_start++, txr_idx++)
18428 + map_vector_to_txq(adapter, v_start, txr_idx);
18433 + * If we don't have enough vectors for a 1-to-1
18434 + * mapping, we'll have to group them so there are
18435 + * multiple queues per vector.
18437 + /* Re-adjusting *qpv takes care of the remainder. */
18439 + q_split = q_vectors;
18441 + for (i = v_start; i < q_split; i++) {
18442 + rqpv = DIV_ROUND_UP(rxr_remaining, q_split - i);
18443 + for (j = 0; j < rqpv; j++) {
18444 + map_vector_to_rxq(adapter, i, rxr_idx);
18451 + for (i = v_start; i < q_vectors; i++) {
18452 + tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
18453 + for (j = 0; j < tqpv; j++) {
18454 + map_vector_to_txq(adapter, i, txr_idx);
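When queues outnumber vectors, the DIV_ROUND_UP recurrence above front-loads the remainder; assuming the elided loop bodies advance rxr_idx/txr_idx and decrement the remaining counts per mapping, 10 rx queues over 4 vectors come out as 3, 3, 2, 2. A standalone check of the recurrence:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        int remaining = 10, vectors = 4;

        for (int i = 0; i < vectors; i++) {
            int share = DIV_ROUND_UP(remaining, vectors - i);
            printf("vector %d: %d queues\n", i, share);
            remaining -= share;
        }
        /* prints 3, 3, 2, 2 */
        return 0;
    }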
18465 + * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
18466 + * @adapter: board private structure
18468 + * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
18469 + * interrupts from the kernel.
18471 +static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
18473 + struct net_device *netdev = adapter->netdev;
18474 + irqreturn_t (*handler)(int, void *);
18475 + int i, vector, q_vectors, err;
18476 + int ri = 0, ti = 0;
18478 + /* Decrement for Other and TCP Timer vectors */
18479 + q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
18481 +#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count) \
18482 + ? &ixgbe_msix_clean_many : \
18483 + (_v)->rxr_count ? &ixgbe_msix_clean_rx : \
18484 + (_v)->txr_count ? &ixgbe_msix_clean_tx : \
18486 + for (vector = 0; vector < q_vectors; vector++) {
18487 + struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
18488 + handler = SET_HANDLER(q_vector);
18490 + if (handler == &ixgbe_msix_clean_rx) {
18491 + sprintf(q_vector->name, "%s-%s-%d",
18492 + netdev->name, "rx", ri++);
18493 + } else if (handler == &ixgbe_msix_clean_tx) {
18494 + sprintf(q_vector->name, "%s-%s-%d",
18495 + netdev->name, "tx", ti++);
18496 + } else if (handler == &ixgbe_msix_clean_many) {
18497 + sprintf(q_vector->name, "%s-%s-%d",
18498 + netdev->name, "TxRx", ri++);
18501 + /* skip this unused q_vector */
18504 + err = request_irq(adapter->msix_entries[vector].vector,
18505 + handler, 0, q_vector->name,
18508 + DPRINTK(PROBE, ERR,
18509 + "request_irq failed for MSIX interrupt. "
18510 + "Error: %d\n", err);
18511 + goto free_queue_irqs;
18515 + sprintf(adapter->lsc_int_name, "%s:lsc", netdev->name);
18516 + err = request_irq(adapter->msix_entries[vector].vector,
18517 + &ixgbe_msix_lsc, 0, adapter->lsc_int_name, netdev);
18519 + DPRINTK(PROBE, ERR,
18520 + "request_irq for msix_lsc failed: %d\n", err);
18521 + goto free_queue_irqs;
18524 +#ifdef IXGBE_TCP_TIMER
18526 + sprintf(adapter->tcp_timer_name, "%s:timer", netdev->name);
18527 + err = request_irq(adapter->msix_entries[vector].vector,
18528 + &ixgbe_msix_tcp_timer, 0, adapter->tcp_timer_name,
18531 + DPRINTK(PROBE, ERR,
18532 + "request_irq for msix_tcp_timer failed: %d\n", err);
18533 + /* Free "Other" interrupt */
18534 + free_irq(adapter->msix_entries[--vector].vector, netdev);
18535 + goto free_queue_irqs;
18542 + for (i = vector - 1; i >= 0; i--)
18543 + free_irq(adapter->msix_entries[--vector].vector,
18544 + adapter->q_vector[i]);
18545 + adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
18546 + pci_disable_msix(adapter->pdev);
18547 + kfree(adapter->msix_entries);
18548 + adapter->msix_entries = NULL;
18552 +static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
18554 + struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
18556 + u32 new_itr = q_vector->eitr;
18557 + struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
18558 + struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
18560 + q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
18561 + q_vector->tx_itr,
18562 + tx_ring->total_packets,
18563 + tx_ring->total_bytes);
18564 + q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr,
18565 + q_vector->rx_itr,
18566 + rx_ring->total_packets,
18567 + rx_ring->total_bytes);
18569 + current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
18571 + switch (current_itr) {
18572 + /* counts and packets in update_itr are dependent on these numbers */
18573 + case lowest_latency:
18574 + new_itr = 100000;
18576 + case low_latency:
18577 + new_itr = 20000; /* aka hwitr = ~200 */
18579 + case bulk_latency:
18586 + if (new_itr != q_vector->eitr) {
18588 + /* do an exponential smoothing */
18589 + new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
18591 + /* save the algorithm value here */
18592 + q_vector->eitr = new_itr;
18594 + ixgbe_write_eitr(q_vector);
18601 + * ixgbe_irq_enable - Enable default interrupt generation settings
18602 + * @adapter: board private structure
18604 +static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, bool flush)
18609 + mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
18612 + /* don't reenable LSC while waiting for link */
18613 + if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
18614 + mask &= ~IXGBE_EIMS_LSC;
18615 + if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
18616 + mask |= IXGBE_EIMS_GPI_SDP0;
18617 + if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
18618 + mask |= IXGBE_EIMS_GPI_SDP1;
18619 + switch (adapter->hw.mac.type) {
18620 + case ixgbe_mac_82599EB:
18621 + mask |= IXGBE_EIMS_ECC;
18622 + mask |= IXGBE_EIMS_GPI_SDP1;
18623 + mask |= IXGBE_EIMS_GPI_SDP2;
18624 + mask |= IXGBE_EIMS_MAILBOX;
18629 + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
18630 + adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
18631 + mask |= IXGBE_EIMS_FLOW_DIR;
18633 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
18635 + ixgbe_irq_enable_queues(adapter, qmask);
18637 + IXGBE_WRITE_FLUSH(&adapter->hw);
18639 + if (adapter->num_vfs > 32) {
18640 + u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
18641 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
18646 + * ixgbe_intr - legacy mode Interrupt Handler
18647 + * @irq: interrupt number
18648 + * @data: pointer to a network interface device structure
18650 +static irqreturn_t ixgbe_intr(int irq, void *data)
18652 + struct net_device *netdev = data;
18653 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
18654 + struct ixgbe_hw *hw = &adapter->hw;
18655 + struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
18659 + * Workaround for silicon errata on 82598. Mask the interrupt
18660 + * before the read of EICR.
18662 + IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
18664 + /* for NAPI, EIAM auto-masks the tx/rx interrupt bits on read,
18665 + * therefore no explicit interrupt disable is necessary */
18666 + eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
18669 + * shared interrupt alert!
18670 + * make sure interrupts are enabled because the read will
18671 + * have disabled interrupts due to EIAM.
18672 + * Finish the workaround for the silicon errata on 82598: unmask
18673 + * the interrupt that we masked before the EICR read.
18675 + if (!test_bit(__IXGBE_DOWN, &adapter->state))
18676 + ixgbe_irq_enable(adapter, true, true);
18677 + return IRQ_NONE; /* Not our interrupt */
18680 + if (eicr & IXGBE_EICR_LSC)
18681 + ixgbe_check_lsc(adapter);
18683 + switch (hw->mac.type) {
18684 + case ixgbe_mac_82599EB:
18685 + if (eicr & IXGBE_EICR_ECC)
18686 + DPRINTK(LINK, INFO, "Received unrecoverable ECC Err, "
18687 + "please reboot\n");
18688 + ixgbe_check_sfp_event(adapter, eicr);
18689 + if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
18690 + ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
18691 + adapter->interrupt_event = eicr;
18692 + schedule_work(&adapter->check_overtemp_task);
18699 + ixgbe_check_fan_failure(adapter, eicr);
18701 +#ifdef CONFIG_IXGBE_NAPI
18702 + if (napi_schedule_prep(&(q_vector->napi))) {
18703 + adapter->tx_ring[0]->total_packets = 0;
18704 + adapter->tx_ring[0]->total_bytes = 0;
18705 + adapter->rx_ring[0]->total_packets = 0;
18706 + adapter->rx_ring[0]->total_bytes = 0;
18707 + /* would disable interrupts here but EIAM disabled it */
18708 + __napi_schedule(&(q_vector->napi));
18712 + * re-enable link (maybe) and non-queue interrupts, no flush.
18713 + * ixgbe_poll will re-enable the queue interrupts
18715 + if (!test_bit(__IXGBE_DOWN, &adapter->state))
18716 + ixgbe_irq_enable(adapter, false, false);
18718 + adapter->tx_ring[0]->total_packets = 0;
18719 + adapter->tx_ring[0]->total_bytes = 0;
18720 + adapter->rx_ring[0]->total_packets = 0;
18721 + adapter->rx_ring[0]->total_bytes = 0;
18722 + ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]);
18723 + ixgbe_clean_rx_irq(q_vector, adapter->rx_ring[0]);
18725 + /* dynamically adjust throttle */
18726 + if (adapter->rx_itr_setting & 1)
18727 + ixgbe_set_itr(adapter);
18730 + * Workaround for silicon errata on 82598. Unmask
18731 + * the interrupt that we masked before the EICR read;
18732 + * no flush of the re-enable is necessary here.
18734 + if (!test_bit(__IXGBE_DOWN, &adapter->state))
18735 + ixgbe_irq_enable(adapter, true, false);
18737 + return IRQ_HANDLED;
18740 +static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
18742 + int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
18744 + for (i = 0; i < adapter->num_rx_queues; i++)
18745 + adapter->rx_ring[i]->q_vector = NULL;
18746 + for (i = 0; i < adapter->num_tx_queues; i++)
18747 + adapter->tx_ring[i]->q_vector = NULL;
18749 + for (i = 0; i < q_vectors; i++) {
18750 + struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
18751 + bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
18752 + bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
18753 + q_vector->rxr_count = 0;
18754 + q_vector->txr_count = 0;
18755 + q_vector->eitr = adapter->rx_eitr_param;
18760 + * ixgbe_request_irq - initialize interrupts
18761 + * @adapter: board private structure
18763 + * Attempts to configure interrupts using the best available
18764 + * capabilities of the hardware and kernel.
18766 +static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
18768 + struct net_device *netdev = adapter->netdev;
18771 +#ifdef HAVE_DEVICE_NUMA_NODE
18772 + DPRINTK(TX_ERR, INFO, "numa_node before request_irq %d\n",
18773 + dev_to_node(&adapter->pdev->dev));
18774 +#endif /* HAVE_DEVICE_NUMA_NODE */
18775 + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
18776 + err = ixgbe_request_msix_irqs(adapter);
18777 + } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
18778 + err = request_irq(adapter->pdev->irq, &ixgbe_intr, 0,
18779 + netdev->name, netdev);
18781 + err = request_irq(adapter->pdev->irq, &ixgbe_intr, IRQF_SHARED,
18782 + netdev->name, netdev);
18786 + DPRINTK(PROBE, ERR, "request_irq failed, Error %d\n", err);
18791 +static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
18793 + struct net_device *netdev = adapter->netdev;
18795 + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
18796 + int i, q_vectors;
18798 + q_vectors = adapter->num_msix_vectors;
18799 + i = q_vectors - 1;
18800 +#ifdef IXGBE_TCP_TIMER
18801 + free_irq(adapter->msix_entries[i].vector, netdev);
18804 + free_irq(adapter->msix_entries[i].vector, netdev);
18807 + /* free only the irqs that were actually requested */
18808 + for (; i >= 0; i--) {
18809 + if (adapter->q_vector[i]->rxr_count ||
18810 + adapter->q_vector[i]->txr_count)
18811 + free_irq(adapter->msix_entries[i].vector,
18812 + adapter->q_vector[i]);
18815 + ixgbe_reset_q_vectors(adapter);
18817 + free_irq(adapter->pdev->irq, netdev);
18822 + * ixgbe_irq_disable - Mask off interrupt generation on the NIC
18823 + * @adapter: board private structure
18825 +static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
18827 + switch (adapter->hw.mac.type) {
18828 + case ixgbe_mac_82598EB:
18829 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
18831 + case ixgbe_mac_82599EB:
18832 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
18833 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
18834 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
18835 + if (adapter->num_vfs > 32)
18836 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
18841 + IXGBE_WRITE_FLUSH(&adapter->hw);
18842 + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
18844 + for (i = 0; i < adapter->num_msix_vectors; i++)
18845 + synchronize_irq(adapter->msix_entries[i].vector);
18847 + synchronize_irq(adapter->pdev->irq);
18852 + * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
18855 +static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
18857 + struct ixgbe_hw *hw = &adapter->hw;
18859 + IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
18860 + EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr_param));
18862 + ixgbe_set_ivar(adapter, 0, 0, 0);
18863 + ixgbe_set_ivar(adapter, 1, 0, 0);
18865 + map_vector_to_rxq(adapter, 0, 0);
18866 + map_vector_to_txq(adapter, 0, 0);
18868 + DPRINTK(HW, INFO, "Legacy interrupt IVAR setup done\n");
18872 + * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset
18873 + * @adapter: board private structure
18874 + * @ring: structure containing ring specific data
18876 + * Configure the Tx descriptor ring after a reset.
18878 +void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
18879 + struct ixgbe_ring *ring)
18881 + struct ixgbe_hw *hw = &adapter->hw;
18882 + u64 tdba = ring->dma;
18883 + int wait_loop = 10;
18885 + u8 reg_idx = ring->reg_idx;
18887 + /* disable queue to avoid issues while updating state */
18888 + txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
18889 + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx),
18890 + txdctl & ~IXGBE_TXDCTL_ENABLE);
18891 + IXGBE_WRITE_FLUSH(hw);
18893 + IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
18894 + (tdba & DMA_BIT_MASK(32)));
18895 + IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32));
18896 + IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx),
18897 + ring->count * sizeof(union ixgbe_adv_tx_desc));
18898 + IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
18899 + IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
18900 + ring->tail = hw->hw_addr + IXGBE_TDT(reg_idx);
18902 + /* configure fetching thresholds */
18903 + if (adapter->rx_itr_setting == 0) {
18904 + /* cannot set wthresh when itr==0 */
18905 + txdctl &= ~0x007F0000;
18907 + /* enable WTHRESH=8 descriptors, to encourage burst writeback */
18908 + txdctl |= (8 << 16);
18910 + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
18911 + /* PThresh workaround for Tx hang with DFP enabled. */
18915 + /* reinitialize flowdirector state */
18916 + set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
18918 + /* enable queue */
18919 + txdctl |= IXGBE_TXDCTL_ENABLE;
18920 + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
18922 + /* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */
18923 + if (hw->mac.type == ixgbe_mac_82598EB &&
18924 + !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
18927 + /* poll to verify queue is enabled */
18930 + txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
18931 + } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
18933 + DPRINTK(DRV, ERR, "Could not enable Tx Queue %d\n", reg_idx);
18936 +static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
18938 + struct ixgbe_hw *hw = &adapter->hw;
18942 + if (hw->mac.type == ixgbe_mac_82598EB)
18945 + /* disable the arbiter while setting MTQC */
18946 + rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
18947 + rttdcs |= IXGBE_RTTDCS_ARBDIS;
18948 + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
18950 + /* set transmit pool layout */
18951 + mask = IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_VMDQ_ENABLED;
18952 + mask |= IXGBE_FLAG_DCB_ENABLED;
18953 + switch (adapter->flags & mask) {
18955 + case (IXGBE_FLAG_VMDQ_ENABLED):
18956 + case (IXGBE_FLAG_SRIOV_ENABLED):
18957 + case (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_VMDQ_ENABLED):
18958 + IXGBE_WRITE_REG(hw, IXGBE_MTQC,
18959 + (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF));
18961 + case (IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_DCB_ENABLED):
18962 + case (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_DCB_ENABLED):
18963 + case (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_VMDQ_ENABLED
18964 + | IXGBE_FLAG_DCB_ENABLED):
18965 + IXGBE_WRITE_REG(hw, IXGBE_MTQC,
18966 + (IXGBE_MTQC_RT_ENA
18967 + | IXGBE_MTQC_VT_ENA
18968 + | IXGBE_MTQC_4TC_4TQ));
18971 + case (IXGBE_FLAG_DCB_ENABLED):
18972 + IXGBE_WRITE_REG(hw, IXGBE_MTQC,
18973 + IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ);
18977 + IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
18981 + /* re-enable the arbiter */
18982 + rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
18983 + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
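+/*
+ * Summary sketch (illustrative, not part of this patch): 64VF selects the
+ * virtualized layout with 64 Tx pools, 4TC_4TQ combines VT pools with four
+ * traffic classes, 8TC_8TQ is the pure-DCB eight-class layout, and
+ * 64Q_1PB is the default 64-queue, single packet-buffer mode.
+ */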
18987 + * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
18988 + * @adapter: board private structure
18990 + * Configure the Tx unit of the MAC after a reset.
18992 +static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
18994 + struct ixgbe_hw *hw = &adapter->hw;
18998 +#ifdef CONFIG_NETDEVICES_MULTIQUEUE
18999 + if (adapter->num_tx_queues > 1)
19000 + adapter->netdev->features |= NETIF_F_MULTI_QUEUE;
19002 + adapter->netdev->features &= ~NETIF_F_MULTI_QUEUE;
19005 + ixgbe_setup_mtqc(adapter);
19007 + if (hw->mac.type != ixgbe_mac_82598EB) {
19008 + /* DMATXCTL.EN must be before Tx queues are enabled */
19009 + dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
19010 + dmatxctl |= IXGBE_DMATXCTL_TE;
19011 + IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
19014 + /* Setup the HW Tx Head and Tail descriptor pointers */
19015 + for (i = 0; i < adapter->num_tx_queues; i++)
19016 + ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
19019 +#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
19021 +static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
19022 + struct ixgbe_ring *rx_ring)
19025 + u8 reg_idx = rx_ring->reg_idx;
19027 + switch (adapter->hw.mac.type) {
19028 + case ixgbe_mac_82598EB: {
19029 + struct ixgbe_ring_feature *feature = adapter->ring_feature;
19030 + /* program one srrctl register per VMDq index */
19031 + if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
19032 + unsigned long mask;
19034 + mask = (unsigned long) feature[RING_F_VMDQ].mask;
19035 + len = sizeof(feature[RING_F_VMDQ].mask) * 8;
19036 + shift = find_first_bit(&mask, len);
19037 + reg_idx = (reg_idx & mask) >> shift;
19040 + * if VMDq is not active we must program one srrctl
19041 +		 * register per RSS queue since we have enabled
19042 +		 * RDRXCTL.MVMEN
19043 +		 */
19044 + const int mask = feature[RING_F_RSS].mask;
19045 + reg_idx = reg_idx & mask;
19049 + case ixgbe_mac_82599EB:
19054 + srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx));
19056 + srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
19057 + srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
19058 + if (adapter->num_vfs)
19059 + srrctl |= IXGBE_SRRCTL_DROP_EN;
19061 + srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
19062 + IXGBE_SRRCTL_BSIZEHDR_MASK;
19064 + if (ring_is_ps_enabled(rx_ring)) {
19065 +#if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
19066 + srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
19068 + srrctl |= (PAGE_SIZE / 2) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
19070 + srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
19072 + srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
19073 + IXGBE_SRRCTL_BSIZEPKT_SHIFT;
19074 + srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
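+	/*
+	 * Worked example (illustrative): BSIZEPKT is programmed in 1KB units
+	 * (IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10), so a 2048-byte rx_buf_len
+	 * stores 2 in the field; the header size above is scaled into its
+	 * field by IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT in the same way.
+	 */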
19077 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx), srrctl);
19080 +static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
19082 + struct ixgbe_hw *hw = &adapter->hw;
19083 + static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
19084 + 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
19085 + 0x6A3E67EA, 0x14364D17, 0x3BED200D};
19086 + u32 mrqc = 0, reta = 0;
19091 + /* Fill out hash function seeds */
19092 + for (i = 0; i < 10; i++)
19093 + IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);
19095 + /* Fill out redirection table */
19096 + for (i = 0, j = 0; i < 128; i++, j++) {
19097 + if (j == adapter->ring_feature[RING_F_RSS].indices)
19099 + /* reta = 4-byte sliding window of
19100 + * 0x00..(indices-1)(indices-1)00..etc. */
19101 + reta = (reta << 8) | (j * 0x11);
19102 + if ((i & 3) == 3)
19103 + IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
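+	/*
+	 * Worked example (illustrative): with 4 RSS indices, j * 0x11 yields
+	 * the bytes 0x00, 0x11, 0x22, 0x33, so every 4-entry RETA register
+	 * is written as 0x00112233 and the 128 table entries cycle through
+	 * queues 0-3.
+	 */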
19106 + /* Disable indicating checksum in descriptor, enables RSS hash */
19107 + rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
19108 + rxcsum |= IXGBE_RXCSUM_PCSD;
19109 + IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
19111 + if (adapter->hw.mac.type == ixgbe_mac_82598EB)
19112 + mask = adapter->flags & IXGBE_FLAG_RSS_ENABLED;
19114 + mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED
19115 + | IXGBE_FLAG_DCB_ENABLED
19116 + | IXGBE_FLAG_VMDQ_ENABLED
19117 + | IXGBE_FLAG_SRIOV_ENABLED
19121 + case (IXGBE_FLAG_RSS_ENABLED):
19122 + mrqc = IXGBE_MRQC_RSSEN;
19124 + case (IXGBE_FLAG_SRIOV_ENABLED):
19125 + mrqc = IXGBE_MRQC_VMDQEN;
19127 + case (IXGBE_FLAG_VMDQ_ENABLED):
19128 + case (IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED):
19129 + mrqc = IXGBE_MRQC_VMDQEN;
19131 + case (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_VMDQ_ENABLED):
19132 + if (adapter->ring_feature[RING_F_RSS].indices == 4)
19133 + mrqc = IXGBE_MRQC_VMDQRSS32EN;
19134 + else if (adapter->ring_feature[RING_F_RSS].indices == 2)
19135 + mrqc = IXGBE_MRQC_VMDQRSS64EN;
19137 + mrqc = IXGBE_MRQC_VMDQEN;
19139 + case (IXGBE_FLAG_DCB_ENABLED | IXGBE_FLAG_VMDQ_ENABLED):
19140 + case (IXGBE_FLAG_DCB_ENABLED | IXGBE_FLAG_VMDQ_ENABLED
19141 + | IXGBE_FLAG_SRIOV_ENABLED):
19142 + mrqc = IXGBE_MRQC_VMDQRT4TCEN; /* 4 TCs */
19144 + case (IXGBE_FLAG_DCB_ENABLED):
19145 + mrqc = IXGBE_MRQC_RT8TCEN;
19151 + /* Perform hash on these packet types */
19152 + mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
19153 + | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
19154 + | IXGBE_MRQC_RSS_FIELD_IPV6
19155 + | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
19157 + IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
19161 + * ixgbe_configure_rscctl - enable RSC for the indicated ring
19162 + * @adapter: address of board private structure
19163 + * @ring: structure containing ring specific data
19165 +void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
19166 + struct ixgbe_ring *ring)
19168 + struct ixgbe_hw *hw = &adapter->hw;
19171 + u8 reg_idx = ring->reg_idx;
19173 + if (!ring_is_rsc_enabled(ring))
19176 + rx_buf_len = ring->rx_buf_len;
19177 + rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
19178 + rscctrl |= IXGBE_RSCCTL_RSCEN;
19180 + * we must limit the number of descriptors so that
19181 +	 * the total size of max desc * buf_len is not greater
19182 +	 * than 65536
19183 +	 */
19184 + if (ring_is_ps_enabled(ring)) {
19185 +#if (MAX_SKB_FRAGS > 16)
19186 + rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
19187 +#elif (MAX_SKB_FRAGS > 8)
19188 + rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
19189 +#elif (MAX_SKB_FRAGS > 4)
19190 + rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
19192 + rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
19195 + if (rx_buf_len < IXGBE_RXBUFFER_4096)
19196 + rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
19197 + else if (rx_buf_len < IXGBE_RXBUFFER_8192)
19198 + rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
19200 + rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
19203 + IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
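+	/*
+	 * Worked example (illustrative): the MAXDESC choice above bounds
+	 * descriptors * rx_buf_len at 64KB -- sixteen buffers just under
+	 * 4KB stay within 65536 bytes, while 4KB-8KB buffers drop to
+	 * MAXDESC_8 and anything larger to MAXDESC_4.
+	 */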
19207 + * ixgbe_clear_rscctl - disable RSC for the indicated ring
19208 + * @adapter: address of board private structure
19209 + * @ring: structure containing ring specific data
19211 +void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
19212 + struct ixgbe_ring *ring)
19214 + struct ixgbe_hw *hw = &adapter->hw;
19216 + u8 reg_idx = ring->reg_idx;
19218 + rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
19219 + rscctrl &= ~IXGBE_RSCCTL_RSCEN;
19220 + IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
19222 + clear_ring_rsc_enabled(ring);
19226 + * ixgbe_set_uta - Set unicast filter table address
19227 + * @adapter: board private structure
19229 + * The unicast table address is a register array of 32-bit registers.
19230 + * The table is meant to be used in a way similar to how the MTA is used
19231 + * however due to certain limitations in the hardware it is necessary to
19232 + * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
19233 + * enable bit to allow vlan tag stripping when promiscuous mode is enabled
19235 +static void ixgbe_set_uta(struct ixgbe_adapter *adapter)
19237 + struct ixgbe_hw *hw = &adapter->hw;
19240 + /* The UTA table only exists on 82599 hardware and newer */
19241 + if (hw->mac.type < ixgbe_mac_82599EB)
19244 + /* we only need to do this if VMDq is enabled */
19245 + if (!(adapter->flags &
19246 + (IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED)))
19249 + for (i = 0; i < 128; i++)
19250 + IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
19253 +static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
19254 + struct ixgbe_ring *ring)
19256 + struct ixgbe_hw *hw = &adapter->hw;
19257 + int wait_loop = IXGBE_MAX_RX_DESC_POLL;
19259 + u8 reg_idx = ring->reg_idx;
19261 + /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */
19262 + if (hw->mac.type == ixgbe_mac_82598EB &&
19263 + !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
19268 + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
19269 + } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
19271 + if (!wait_loop) {
19272 + DPRINTK(DRV, ERR, "RXDCTL.ENABLE on Rx queue %d "
19273 + "not set within the polling period\n", reg_idx);
19277 +void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
19278 + struct ixgbe_ring *ring)
19280 + struct ixgbe_hw *hw = &adapter->hw;
19281 + u64 rdba = ring->dma;
19283 + u8 reg_idx = ring->reg_idx;
19285 + /* disable queue to avoid issues while updating state */
19286 + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
19287 + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx),
19288 + rxdctl & ~IXGBE_RXDCTL_ENABLE);
19289 + IXGBE_WRITE_FLUSH(hw);
19291 + IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
19292 + IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
19293 + IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx),
19294 + ring->count * sizeof(union ixgbe_adv_rx_desc));
19295 + IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
19296 + IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
19297 + ring->tail = hw->hw_addr + IXGBE_RDT(reg_idx);
19299 + ixgbe_configure_srrctl(adapter, ring);
19300 + ixgbe_configure_rscctl(adapter, ring);
19302 + if (hw->mac.type == ixgbe_mac_82598EB) {
19304 + * enable cache line friendly hardware writes:
19305 + * PTHRESH=32 descriptors (half the internal cache),
19306 + * this also removes ugly rx_no_buffer_count increment
19307 + * HTHRESH=4 descriptors (to minimize latency on fetch)
19308 + * WTHRESH=8 burst writeback up to two cache lines
19310 + rxdctl &= ~0x3FFFFF;
19311 + rxdctl |= 0x080420;
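+		/*
+		 * Decode sketch (an assumption matching the thresholds named
+		 * above): 0x080420 == (8 << 16) | (4 << 8) | 0x20, i.e.
+		 * WTHRESH=8, HTHRESH=4 and PTHRESH=32.
+		 */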
19314 + /* enable receive descriptor ring */
19315 + rxdctl |= IXGBE_RXDCTL_ENABLE;
19316 + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
19318 + ixgbe_rx_desc_queue_enable(adapter, ring);
19319 + ixgbe_alloc_rx_buffers(ring, IXGBE_DESC_UNUSED(ring));
19322 +static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
19324 + struct ixgbe_hw *hw = &adapter->hw;
19327 + /* PSRTYPE must be initialized in non 82598 adapters */
19328 + u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
19329 + IXGBE_PSRTYPE_UDPHDR |
19330 + IXGBE_PSRTYPE_IPV4HDR |
19331 + IXGBE_PSRTYPE_L2HDR |
19332 + IXGBE_PSRTYPE_IPV6HDR;
19334 + if (hw->mac.type == ixgbe_mac_82598EB)
19337 + if (adapter->flags & IXGBE_FLAG_RSS_ENABLED)
19338 + psrtype |= (adapter->num_rx_queues_per_pool << 29);
19340 + for (p = 0; p < adapter->num_rx_pools; p++)
19341 + IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(p)), psrtype);
19344 +static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
19346 + struct ixgbe_hw *hw = &adapter->hw;
19353 + if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED ||
19354 + adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
19357 + switch (hw->mac.type) {
19358 + case ixgbe_mac_82598EB:
19359 + vt_reg = IXGBE_VMD_CTL;
19360 + vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN;
19361 + vmdctl = IXGBE_READ_REG(hw, vt_reg);
19362 + IXGBE_WRITE_REG(hw, vt_reg, vmdctl | vt_reg_bits);
19364 + case ixgbe_mac_82599EB:
19365 + vt_reg = IXGBE_VT_CTL;
19366 + vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN
19367 + | IXGBE_VT_CTL_REPLEN;
19368 + if (adapter->num_vfs) {
19369 + vt_reg_bits &= ~IXGBE_VT_CTL_POOL_MASK;
19370 + vt_reg_bits |= (adapter->num_vfs <<
19371 + IXGBE_VT_CTL_POOL_SHIFT);
19373 + vmdctl = IXGBE_READ_REG(hw, vt_reg);
19374 + IXGBE_WRITE_REG(hw, vt_reg, vmdctl | vt_reg_bits);
19375 + for (pool = 1; pool < adapter->num_rx_pools; pool++) {
19377 + int vmdq_pool = VMDQ_P(pool);
19380 + * accept untagged packets until a vlan tag
19381 + * is specifically set for the VMDQ queue/pool
19383 + vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vmdq_pool));
19384 + vmolr |= IXGBE_VMOLR_AUPE;
19385 + vmolr |= IXGBE_VMOLR_BAM;
19386 + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vmdq_pool), vmolr);
19388 + IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0xFFFFFFFF);
19389 + IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0xFFFFFFFF);
19390 + IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0xFFFFFFFF);
19391 + IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0xFFFFFFFF);
19397 + if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
19400 + /* Map PF MAC address in RAR Entry 0 to first pool following VFs */
19401 + hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs);
19404 + * Set up VF register offsets for selected VT Mode,
19405 + * i.e. 32 or 64 VFs for SR-IOV
19407 + gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
19408 + gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
19409 + gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
19410 + IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
19412 + /* enable Tx loopback for VF/PF communication */
19413 + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
19416 +static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
19418 + struct ixgbe_hw *hw = &adapter->hw;
19419 + struct net_device *netdev = adapter->netdev;
19420 + int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
19422 + struct ixgbe_ring *rx_ring;
19424 + u32 mhadd, hlreg0;
19426 + /* Decide whether to use packet split mode or not */
19427 + if (netdev->mtu > ETH_DATA_LEN) {
19428 + if (adapter->flags & IXGBE_FLAG_RX_PS_CAPABLE)
19429 + adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
19431 + adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
19433 + if (adapter->flags & IXGBE_FLAG_RX_1BUF_CAPABLE)
19434 + adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
19436 + adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
19439 + /* Set the RX buffer length according to the mode */
19440 + if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
19441 + rx_buf_len = IXGBE_RX_HDR_SIZE;
19443 + if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
19444 + (netdev->mtu <= ETH_DATA_LEN))
19445 + rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
19447 + rx_buf_len = ALIGN(max_frame + VLAN_HLEN, 1024);
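+	/*
+	 * Worked example (illustrative): the default 1500-byte MTU gives
+	 * max_frame = 1500 + ETH_HLEN + ETH_FCS_LEN = 1518, so a non-RSC
+	 * ring uses MAXIMUM_ETHERNET_VLAN_SIZE; a 9000-byte MTU instead
+	 * yields ALIGN(9018 + VLAN_HLEN, 1024) = 9216 bytes per buffer.
+	 */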
19451 + /* adjust max frame to be able to do baby jumbo for FCoE */
19452 + if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
19453 + (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
19454 + max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
19456 +#endif /* IXGBE_FCOE */
19457 + mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
19458 + if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
19459 + mhadd &= ~IXGBE_MHADD_MFS_MASK;
19460 + mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
19462 + IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
19465 + hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
19466 + /* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */
19467 + hlreg0 |= IXGBE_HLREG0_JUMBOEN;
19468 + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
19471 + * Setup the HW Rx Head and Tail Descriptor Pointers and
19472 + * the Base and Length of the Rx Descriptor Ring
19474 + for (i = 0; i < adapter->num_rx_queues; i++) {
19475 + rx_ring = adapter->rx_ring[i];
19476 + rx_ring->rx_buf_len = rx_buf_len;
19478 + if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
19479 + set_ring_ps_enabled(rx_ring);
19481 + clear_ring_ps_enabled(rx_ring);
19483 + if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
19484 + set_ring_rsc_enabled(rx_ring);
19485 +#ifndef IXGBE_NO_LRO
19486 + clear_ring_lro_enabled(rx_ring);
19487 + } else if (adapter->flags2 & IXGBE_FLAG2_SWLRO_ENABLED) {
19488 + set_ring_lro_enabled(rx_ring);
19489 + clear_ring_rsc_enabled(rx_ring);
19491 + clear_ring_lro_enabled(rx_ring);
19493 + clear_ring_rsc_enabled(rx_ring);
19497 + if (netdev->features & NETIF_F_FCOE_MTU)
19499 + struct ixgbe_ring_feature *f;
19500 + f = &adapter->ring_feature[RING_F_FCOE];
19501 + if ((i >= f->mask) && (i < f->mask + f->indices)) {
19502 + clear_ring_ps_enabled(rx_ring);
19503 + if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)
19504 + rx_ring->rx_buf_len =
19505 + IXGBE_FCOE_JUMBO_FRAME_SIZE;
19506 + } else if (!ring_is_rsc_enabled(rx_ring) &&
19507 + !ring_is_ps_enabled(rx_ring)) {
19508 + rx_ring->rx_buf_len =
19509 + IXGBE_FCOE_JUMBO_FRAME_SIZE;
19512 +#endif /* IXGBE_FCOE */
19516 +static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
19518 + struct ixgbe_hw *hw = &adapter->hw;
19519 + u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
19521 + switch (hw->mac.type) {
19522 + case ixgbe_mac_82598EB:
19524 + * For VMDq support of different descriptor types or
19525 + * buffer sizes through the use of multiple SRRCTL
19526 + * registers, RDRXCTL.MVMEN must be set to 1
19528 + * also, the manual doesn't mention it clearly but DCA hints
19529 + * will only use queue 0's tags unless this bit is set. Side
19530 + * effects of setting this bit are only that SRRCTL must be
19531 + * fully programmed [0..15]
19533 + rdrxctl |= IXGBE_RDRXCTL_MVMEN;
19535 + case ixgbe_mac_82599EB:
19536 + /* Disable RSC for ACK packets */
19537 + IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
19538 + (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
19539 + rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
19540 + /* hardware requires some bits to be set by default */
19541 + rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX);
19542 + rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
19545 + /* We should do nothing since we don't know this hardware */
19549 + IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
19553 + * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
19554 + * @adapter: board private structure
19556 + * Configure the Rx unit of the MAC after a reset.
19558 +static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
19560 + struct ixgbe_hw *hw = &adapter->hw;
19564 + /* disable receives while setting up the descriptors */
19565 + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
19566 + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
19568 + ixgbe_setup_psrtype(adapter);
19569 + ixgbe_setup_rdrxctl(adapter);
19571 + /* Program registers for the distribution of queues */
19572 + ixgbe_setup_mrqc(adapter);
19574 + ixgbe_set_uta(adapter);
19576 + /* set_rx_buffer_len must be called before ring initialization */
19577 + ixgbe_set_rx_buffer_len(adapter);
19580 + * Setup the HW Rx Head and Tail Descriptor Pointers and
19581 + * the Base and Length of the Rx Descriptor Ring
19583 + for (i = 0; i < adapter->num_rx_queues; i++)
19584 + ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);
19586 + /* disable drop enable for 82598 parts */
19587 + if (hw->mac.type == ixgbe_mac_82598EB)
19588 + rxctrl |= IXGBE_RXCTRL_DMBYPS;
19590 + /* enable all receives */
19591 + rxctrl |= IXGBE_RXCTRL_RXEN;
19592 + ixgbe_enable_rx_dma(hw, rxctrl);
19596 +#ifdef NETIF_F_HW_VLAN_TX
19597 +static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
19599 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
19600 + struct ixgbe_hw *hw = &adapter->hw;
19601 + int pool_ndx = adapter->num_vfs;
19602 +#ifndef HAVE_NETDEV_VLAN_FEATURES
19603 + struct net_device *v_netdev;
19604 +#endif /* HAVE_NETDEV_VLAN_FEATURES */
19607 + /* add VID to filter table */
19608 + if (hw->mac.ops.set_vfta) {
19609 + hw->mac.ops.set_vfta(hw, vid, pool_ndx, true);
19610 + if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
19611 + switch (adapter->hw.mac.type) {
19612 + case ixgbe_mac_82599EB:
19613 + /* enable vlan id for all pools */
19614 + for (i = 1; i < adapter->num_rx_pools; i++) {
19615 + hw->mac.ops.set_vfta(hw, vid, VMDQ_P(i), true);
19624 +#ifndef HAVE_NETDEV_VLAN_FEATURES
19626 + * Copy feature flags from netdev to the vlan netdev for this vid.
19627 + * This allows things like TSO to bubble down to our vlan device.
19628 + * Some vlans, such as VLAN 0 for DCB will not have a v_netdev so
19629 + * we will not have a netdev that needs updating.
19631 + if (adapter->vlgrp) {
19632 + v_netdev = vlan_group_get_device(adapter->vlgrp, vid);
19634 + v_netdev->features |= adapter->netdev->features;
19635 + vlan_group_set_device(adapter->vlgrp, vid, v_netdev);
19638 +#endif /* HAVE_NETDEV_VLAN_FEATURES */
19641 +static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
19643 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
19644 + struct ixgbe_hw *hw = &adapter->hw;
19645 + int pool_ndx = adapter->num_vfs;
19648 + /* User is not allowed to remove vlan ID 0 */
19652 + if (!test_bit(__IXGBE_DOWN, &adapter->state))
19653 + ixgbe_irq_disable(adapter);
19655 + vlan_group_set_device(adapter->vlgrp, vid, NULL);
19657 + if (!test_bit(__IXGBE_DOWN, &adapter->state))
19658 + ixgbe_irq_enable(adapter, true, true);
19660 + /* remove VID from filter table */
19662 + if (hw->mac.ops.set_vfta) {
19663 + hw->mac.ops.set_vfta(hw, vid, pool_ndx, false);
19664 + if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
19665 + switch (adapter->hw.mac.type) {
19666 + case ixgbe_mac_82599EB:
19667 + /* remove vlan id from all pools */
19668 + for (i = 1; i < adapter->num_rx_pools; i++) {
19669 + hw->mac.ops.set_vfta(hw, vid, VMDQ_P(i), false);
19681 + * ixgbe_vlan_stripping_disable - helper to disable vlan tag stripping
19682 + * @adapter: driver data
19684 +static void ixgbe_vlan_stripping_disable(struct ixgbe_adapter *adapter)
19686 + struct ixgbe_hw *hw = &adapter->hw;
19690 + /* leave vlan tag stripping enabled for DCB */
19691 + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
19694 + switch (hw->mac.type) {
19695 + case ixgbe_mac_82598EB:
19696 + vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
19697 + vlnctrl &= ~IXGBE_VLNCTRL_VME;
19698 + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
19700 + case ixgbe_mac_82599EB:
19701 + for (i = 0; i < adapter->num_rx_queues; i++) {
19702 + u8 reg_idx = adapter->rx_ring[i]->reg_idx;
19703 + vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
19704 + vlnctrl &= ~IXGBE_RXDCTL_VME;
19705 + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), vlnctrl);
19714 + * ixgbe_vlan_stripping_enable - helper to enable vlan tag stripping
19715 + * @adapter: driver data
19717 +static void ixgbe_vlan_stripping_enable(struct ixgbe_adapter *adapter)
19719 + struct ixgbe_hw *hw = &adapter->hw;
19723 + switch (hw->mac.type) {
19724 + case ixgbe_mac_82598EB:
19725 + vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
19726 + vlnctrl |= IXGBE_VLNCTRL_VME;
19727 + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
19729 + case ixgbe_mac_82599EB:
19730 + for (i = 0; i < adapter->num_rx_queues; i++) {
19731 + u8 reg_idx = adapter->rx_ring[i]->reg_idx;
19732 + vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
19733 + vlnctrl |= IXGBE_RXDCTL_VME;
19734 + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), vlnctrl);
19742 +#ifdef NETIF_F_HW_VLAN_TX
19743 +static void ixgbe_vlan_rx_register(struct net_device *netdev,
19744 + struct vlan_group *grp)
19746 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
19748 + if (!test_bit(__IXGBE_DOWN, &adapter->state))
19749 + ixgbe_irq_disable(adapter);
19750 + adapter->vlgrp = grp;
19752 + if (grp || (adapter->flags & IXGBE_FLAG_DCB_ENABLED))
19753 + /* enable VLAN tag insert/strip */
19754 + ixgbe_vlan_stripping_enable(adapter);
19756 + /* disable VLAN tag insert/strip */
19757 + ixgbe_vlan_stripping_disable(adapter);
19759 + if (!test_bit(__IXGBE_DOWN, &adapter->state))
19760 + ixgbe_irq_enable(adapter, true, true);
19763 +static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
19765 + ixgbe_vlan_rx_register(adapter->netdev, adapter->vlgrp);
19767 + /* add vlan ID 0 so we always accept priority-tagged traffic */
19768 + ixgbe_vlan_rx_add_vid(adapter->netdev, 0);
19770 + if (adapter->vlgrp) {
19772 + for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
19773 + if (!vlan_group_get_device(adapter->vlgrp, vid))
19775 + ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
19781 +static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
19783 +#ifdef NETDEV_HW_ADDR_T_MULTICAST
19784 + struct netdev_hw_addr *mc_ptr;
19786 + struct dev_mc_list *mc_ptr;
19788 + struct ixgbe_adapter *adapter = hw->back;
19789 + u8 *addr = *mc_addr_ptr;
19791 + *vmdq = adapter->num_vfs;
19793 +#ifdef NETDEV_HW_ADDR_T_MULTICAST
19794 + mc_ptr = container_of(addr, struct netdev_hw_addr, addr[0]);
19795 + if (mc_ptr->list.next) {
19796 + struct netdev_hw_addr *ha;
19798 + ha = list_entry(mc_ptr->list.next, struct netdev_hw_addr, list);
19799 + *mc_addr_ptr = ha->addr;
19802 + mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
19803 + if (mc_ptr->next)
19804 + *mc_addr_ptr = mc_ptr->next->dmi_addr;
19807 + *mc_addr_ptr = NULL;
19813 + * ixgbe_write_mc_addr_list - write multicast addresses to MTA
19814 + * @netdev: network interface device structure
19816 + * Writes multicast address list to the MTA hash table.
19817 + * Returns: -ENOMEM on failure
19818 + * 0 on no addresses written
19819 + * X on writing X addresses to MTA
19821 +static int ixgbe_write_mc_addr_list(struct net_device *netdev)
19823 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
19824 + struct ixgbe_hw *hw = &adapter->hw;
19825 +#ifdef NETDEV_HW_ADDR_T_MULTICAST
19826 + struct netdev_hw_addr *ha;
19828 + u8 *addr_list = NULL;
19831 + if (netdev_mc_empty(netdev)) {
19832 + /* nothing to program, so clear mc list */
19833 + hw->mac.ops.update_mc_addr_list(hw, NULL, 0, ixgbe_addr_list_itr);
19837 + if (!hw->mac.ops.update_mc_addr_list)
19840 +#ifdef NETDEV_HW_ADDR_T_MULTICAST
19841 + ha = list_first_entry(&netdev->mc.list, struct netdev_hw_addr, list);
19842 + addr_list = ha->addr;
19844 + addr_list = netdev->mc_list->dmi_addr;
19846 + addr_count = netdev_mc_count(netdev);
19848 + hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count, ixgbe_addr_list_itr);
19850 + return addr_count;
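+/*
+ * Usage sketch (illustrative, assuming the usual iterator contract): the
+ * MAC layer walks the list by repeatedly calling ixgbe_addr_list_itr(),
+ * which returns the current address and advances *mc_addr_ptr, e.g.:
+ *
+ *	u32 vmdq;
+ *	u8 *next = addr_list;
+ *	while (next) {
+ *		u8 *cur = ixgbe_addr_list_itr(hw, &next, &vmdq);
+ *		...hash 'cur' into the MTA for pool 'vmdq'...
+ *	}
+ */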
19853 +#ifdef HAVE_SET_RX_MODE
19855 + * ixgbe_write_uc_addr_list - write unicast addresses to RAR table
19856 + * @netdev: network interface device structure
19858 + * Writes unicast address list to the RAR table.
19859 + * Returns: -ENOMEM on failure/insufficient address space
19860 + * 0 on no addresses written
19861 + * X on writing X addresses to the RAR table
19863 +static int ixgbe_write_uc_addr_list(struct net_device *netdev)
19865 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
19866 + struct ixgbe_hw *hw = &adapter->hw;
19867 + unsigned int vfn = adapter->num_vfs;
19868 + unsigned int rar_entries = hw->mac.num_rar_entries - (vfn + 1);
19871 + /* return ENOMEM indicating insufficient memory for addresses */
19872 + if (netdev_uc_count(netdev) > rar_entries)
19875 + if (!netdev_uc_empty(netdev) && rar_entries) {
19876 +#ifdef NETDEV_HW_ADDR_T_UNICAST
19877 + struct netdev_hw_addr *ha;
19879 + struct dev_mc_list *ha;
19881 + /* return error if we do not support writing to RAR table */
19882 + if (!hw->mac.ops.set_rar)
19885 + netdev_for_each_uc_addr(ha, netdev) {
19886 + if (!rar_entries)
19888 +#ifdef NETDEV_HW_ADDR_T_UNICAST
19889 + hw->mac.ops.set_rar(hw, rar_entries--, ha->addr,
19890 + vfn, IXGBE_RAH_AV);
19892 + hw->mac.ops.set_rar(hw, rar_entries--, ha->da_addr,
19893 + vfn, IXGBE_RAH_AV);
19898 + /* write the addresses in reverse order to avoid write combining */
19899 + for (; rar_entries > 0 ; rar_entries--)
19900 + hw->mac.ops.clear_rar(hw, rar_entries);
19907 + * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
19908 + * @netdev: network interface device structure
19910 + * The set_rx_mode entry point is called whenever the unicast/multicast
19911 + * address list or the network interface flags are updated. This routine is
19912 + * responsible for configuring the hardware for proper unicast, multicast and
19913 + * promiscuous mode.
19915 +void ixgbe_set_rx_mode(struct net_device *netdev)
19917 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
19918 + struct ixgbe_hw *hw = &adapter->hw;
19919 + u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
19923 + /* Check for Promiscuous and All Multicast modes */
19924 + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
19925 + vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
19927 + /* set all bits that we expect to always be set */
19928 + fctrl |= IXGBE_FCTRL_BAM;
19929 + fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
19930 + fctrl |= IXGBE_FCTRL_PMCF;
19932 + /* clear the bits we are changing the status of */
19933 + fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
19934 + vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
19936 + if (netdev->flags & IFF_PROMISC) {
19937 + hw->addr_ctrl.user_set_promisc = true;
19938 + fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
19939 + vmolr |= IXGBE_VMOLR_MPE;
19941 + if (netdev->flags & IFF_ALLMULTI) {
19942 + fctrl |= IXGBE_FCTRL_MPE;
19943 + vmolr |= IXGBE_VMOLR_MPE;
19946 +	 * Write addresses to the MTA; if the attempt fails
19947 +	 * then we should just turn on promiscuous mode so
19948 +	 * that we can at least receive multicast traffic
19950 + count = ixgbe_write_mc_addr_list(netdev);
19951 +		if (count < 0) {
19952 +			fctrl |= IXGBE_FCTRL_MPE;
19953 + vmolr |= IXGBE_VMOLR_MPE;
19954 + } else if (count) {
19955 + vmolr |= IXGBE_VMOLR_ROMPE;
19958 +#ifdef NETIF_F_HW_VLAN_TX
19959 + /* enable hardware vlan filtering */
19960 + vlnctrl |= IXGBE_VLNCTRL_VFE;
19962 + hw->addr_ctrl.user_set_promisc = false;
19963 +#ifdef HAVE_SET_RX_MODE
19965 +		 * Write addresses to available RAR registers; if there is not
19966 +		 * sufficient space to store all the addresses then enable
19967 +		 * unicast promiscuous mode
19969 + count = ixgbe_write_uc_addr_list(netdev);
19970 +		if (count < 0) {
19971 +			fctrl |= IXGBE_FCTRL_UPE;
19972 + vmolr |= IXGBE_VMOLR_ROPE;
19977 +#ifdef CONFIG_PCI_IOV
19978 + if (adapter->num_vfs)
19979 + ixgbe_restore_vf_multicasts(adapter);
19982 + if (hw->mac.type != ixgbe_mac_82598EB) {
19983 + vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(adapter->num_vfs)) &
19984 + ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
19985 + IXGBE_VMOLR_ROPE);
19986 + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(adapter->num_vfs), vmolr);
19989 + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
19990 + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
19993 +static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
19995 +#ifdef CONFIG_IXGBE_NAPI
19997 + struct ixgbe_q_vector *q_vector;
19998 + int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
20000 + /* legacy and MSI only use one vector */
20001 + if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
20004 + for (q_idx = 0; q_idx < q_vectors; q_idx++) {
20005 + struct napi_struct *napi;
20006 + q_vector = adapter->q_vector[q_idx];
20007 + napi = &q_vector->napi;
20008 + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
20009 + if (!q_vector->rxr_count || !q_vector->txr_count) {
20010 + if (q_vector->txr_count == 1)
20011 + napi->poll = &ixgbe_clean_txonly;
20012 + else if (q_vector->rxr_count == 1)
20013 + napi->poll = &ixgbe_clean_rxonly;
20017 + napi_enable(napi);
20019 +#endif /* CONFIG_IXGBE_NAPI */
20022 +static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
20024 +#ifdef CONFIG_IXGBE_NAPI
20026 + struct ixgbe_q_vector *q_vector;
20027 + int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
20029 + /* legacy and MSI only use one vector */
20030 + if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
20033 + for (q_idx = 0; q_idx < q_vectors; q_idx++) {
20034 + q_vector = adapter->q_vector[q_idx];
20035 + napi_disable(&q_vector->napi);
20041 + * ixgbe_configure_dcb - Configure DCB hardware
20042 + * @adapter: ixgbe adapter struct
20044 + * This is called by the driver on open to configure the DCB hardware.
20045 + * This is also called by the gennetlink interface when reconfiguring
20046 + * the DCB state.
20047 + */
20048 +static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
20050 + struct ixgbe_hw *hw = &adapter->hw;
20052 + u32 mtu = adapter->netdev->mtu;
20054 + if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
20055 + if (hw->mac.type == ixgbe_mac_82598EB)
20056 + netif_set_gso_max_size(adapter->netdev, 65536);
20060 + if (hw->mac.type == ixgbe_mac_82598EB)
20061 + netif_set_gso_max_size(adapter->netdev, 32768);
20064 + if (adapter->netdev->features & NETIF_F_FCOE_MTU)
20065 + mtu = max(mtu, (unsigned) IXGBE_FCOE_JUMBO_FRAME_SIZE);
20068 + adapter->dcb_cfg.num_tcs.pg_tcs = adapter->ring_feature[RING_F_DCB].indices;
20069 + err = ixgbe_dcb_check_config(&adapter->dcb_cfg);
20071 + DPRINTK(DRV, ERR, "err in dcb_check_config\n");
20072 + err = ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, mtu,
20075 + DPRINTK(DRV, ERR, "err in dcb_calculate_tc_credits (TX)\n");
20076 + err = ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, mtu,
20079 + DPRINTK(DRV, ERR, "err in dcb_calculate_tc_credits (RX)\n");
20081 + /* reconfigure the hardware */
20082 + ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
20085 +#ifndef IXGBE_NO_LLI
20086 +static void ixgbe_configure_lli_82599(struct ixgbe_adapter *adapter)
20090 + if (adapter->lli_etype) {
20091 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_L34T_IMIR(0),
20092 + (IXGBE_IMIR_LLI_EN_82599 | IXGBE_IMIR_SIZE_BP_82599 |
20093 + IXGBE_IMIR_CTRL_BP_82599));
20094 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_ETQS(0), IXGBE_ETQS_LLI);
20095 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_ETQF(0),
20096 + (adapter->lli_etype | IXGBE_ETQF_FILTER_EN));
20099 + if (adapter->lli_port) {
20100 + port = ntohs((u16)adapter->lli_port);
20101 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_L34T_IMIR(0),
20102 + (IXGBE_IMIR_LLI_EN_82599 | IXGBE_IMIR_SIZE_BP_82599 |
20103 + IXGBE_IMIR_CTRL_BP_82599));
20104 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_FTQF(0),
20105 + (IXGBE_FTQF_POOL_MASK_EN |
20106 + (IXGBE_FTQF_PRIORITY_MASK <<
20107 + IXGBE_FTQF_PRIORITY_SHIFT) |
20108 + (IXGBE_FTQF_DEST_PORT_MASK <<
20109 + IXGBE_FTQF_5TUPLE_MASK_SHIFT)));
20110 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_SDPQF(0), (port << 16));
20113 + if (adapter->flags & IXGBE_FLAG_LLI_PUSH) {
20114 + switch (adapter->hw.mac.type) {
20115 + case ixgbe_mac_82599EB:
20116 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_L34T_IMIR(0),
20117 + (IXGBE_IMIR_LLI_EN_82599 | IXGBE_IMIR_SIZE_BP_82599 |
20118 + IXGBE_IMIR_CTRL_PSH_82599 | IXGBE_IMIR_CTRL_SYN_82599 |
20119 + IXGBE_IMIR_CTRL_URG_82599 | IXGBE_IMIR_CTRL_ACK_82599 |
20120 + IXGBE_IMIR_CTRL_RST_82599 | IXGBE_IMIR_CTRL_FIN_82599));
20121 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_LLITHRESH, 0xfc000000);
20126 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_FTQF(0),
20127 + (IXGBE_FTQF_POOL_MASK_EN |
20128 + (IXGBE_FTQF_PRIORITY_MASK <<
20129 + IXGBE_FTQF_PRIORITY_SHIFT) |
20130 + (IXGBE_FTQF_5TUPLE_MASK_MASK <<
20131 + IXGBE_FTQF_5TUPLE_MASK_SHIFT)));
20133 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_SYNQF, 0x80000100);
20136 + if (adapter->lli_size) {
20137 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_L34T_IMIR(0),
20138 + (IXGBE_IMIR_LLI_EN_82599 | IXGBE_IMIR_CTRL_BP_82599));
20139 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_LLITHRESH, adapter->lli_size);
20140 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_FTQF(0),
20141 + (IXGBE_FTQF_POOL_MASK_EN |
20142 + (IXGBE_FTQF_PRIORITY_MASK <<
20143 + IXGBE_FTQF_PRIORITY_SHIFT) |
20144 + (IXGBE_FTQF_5TUPLE_MASK_MASK <<
20145 + IXGBE_FTQF_5TUPLE_MASK_SHIFT)));
20148 + if (adapter->lli_vlan_pri) {
20149 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIRVP,
20150 + (IXGBE_IMIRVP_PRIORITY_EN | adapter->lli_vlan_pri));
20154 +static void ixgbe_configure_lli(struct ixgbe_adapter *adapter)
20158 +	/* LLI should only be enabled when using MSI-X or MSI interrupts */
20159 + if (!(adapter->flags & IXGBE_FLAG_MSI_ENABLED) &&
20160 + !(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
20163 + if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
20164 + ixgbe_configure_lli_82599(adapter);
20168 + if (adapter->lli_port) {
20169 + /* use filter 0 for port */
20170 + port = ntohs((u16)adapter->lli_port);
20171 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIR(0),
20172 + (port | IXGBE_IMIR_PORT_IM_EN));
20173 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIREXT(0),
20174 + (IXGBE_IMIREXT_SIZE_BP |
20175 + IXGBE_IMIREXT_CTRL_BP));
20178 + if (adapter->flags & IXGBE_FLAG_LLI_PUSH) {
20179 + /* use filter 1 for push flag */
20180 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIR(1),
20181 + (IXGBE_IMIR_PORT_BP | IXGBE_IMIR_PORT_IM_EN));
20182 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIREXT(1),
20183 + (IXGBE_IMIREXT_SIZE_BP |
20184 + IXGBE_IMIREXT_CTRL_PSH));
20187 + if (adapter->lli_size) {
20188 + /* use filter 2 for size */
20189 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIR(2),
20190 + (IXGBE_IMIR_PORT_BP | IXGBE_IMIR_PORT_IM_EN));
20191 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIREXT(2),
20192 + (adapter->lli_size | IXGBE_IMIREXT_CTRL_BP));
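+	/*
+	 * Usage sketch (illustrative): lli_port, lli_size and LLI_PUSH are
+	 * driven by the out-of-tree module parameters, e.g.
+	 * "modprobe ixgbe LLIPort=5001", after which filter 0 above raises
+	 * an immediate interrupt for packets to destination port 5001
+	 * instead of waiting out the ITR moderation interval.
+	 */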
20196 +#endif /* IXGBE_NO_LLI */
20197 +static void ixgbe_configure(struct ixgbe_adapter *adapter)
20199 + ixgbe_set_rx_mode(adapter->netdev);
20201 +#ifdef NETIF_F_HW_VLAN_TX
20202 + ixgbe_restore_vlan(adapter);
20204 + ixgbe_configure_dcb(adapter);
20207 + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
20208 + ixgbe_configure_fcoe(adapter);
20210 +#endif /* IXGBE_FCOE */
20211 + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
20212 + ixgbe_init_fdir_signature_82599(&adapter->hw,
20213 + adapter->fdir_pballoc);
20214 + else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
20215 + ixgbe_init_fdir_perfect_82599(&adapter->hw,
20216 + adapter->fdir_pballoc);
20217 + ixgbe_configure_virtualization(adapter);
20219 + ixgbe_configure_tx(adapter);
20220 + ixgbe_configure_rx(adapter);
20223 +static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
20225 + switch (hw->phy.type) {
20226 + case ixgbe_phy_sfp_avago:
20227 + case ixgbe_phy_sfp_ftl:
20228 + case ixgbe_phy_sfp_intel:
20229 + case ixgbe_phy_sfp_unknown:
20230 + case ixgbe_phy_sfp_passive_tyco:
20231 + case ixgbe_phy_sfp_passive_unknown:
20232 + case ixgbe_phy_sfp_active_unknown:
20233 + case ixgbe_phy_sfp_ftl_active:
20241 + * ixgbe_sfp_link_config - set up SFP+ link
20242 + * @adapter: pointer to private adapter struct
20244 +static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
20246 + struct ixgbe_hw *hw = &adapter->hw;
20248 + if (hw->phy.multispeed_fiber) {
20250 + * In multispeed fiber setups, the device may not have
20251 + * had a physical connection when the driver loaded.
20252 + * If that's the case, the initial link configuration
20253 + * couldn't get the MAC into 10G or 1G mode, so we'll
20254 + * never have a link status change interrupt fire.
20255 + * We need to try and force an autonegotiation
20256 + * session, then bring up link.
20258 + hw->mac.ops.setup_sfp(hw);
20259 + if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
20260 + schedule_work(&adapter->multispeed_fiber_task);
20263 + * Direct Attach Cu and non-multispeed fiber modules
20264 + * still need to be configured properly prior to
20265 + * attempting link.
20267 + if (!(adapter->flags & IXGBE_FLAG_IN_SFP_MOD_TASK))
20268 + schedule_work(&adapter->sfp_config_module_task);
20273 + * ixgbe_non_sfp_link_config - set up non-SFP+ link
20274 + * @hw: pointer to private hardware struct
20276 + * Returns 0 on success, negative on failure
20278 +static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
20281 + bool negotiation, link_up = false;
20282 + u32 ret = IXGBE_ERR_LINK_SETUP;
20284 + if (hw->mac.ops.check_link)
20285 + ret = hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
20287 +	if (ret)
20288 +		goto link_cfg_out;
20290 + autoneg = hw->phy.autoneg_advertised;
20291 + if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
20292 +		ret = hw->mac.ops.get_link_capabilities(hw, &autoneg,
20293 +							&negotiation);
20294 +	if (ret)
20295 +		goto link_cfg_out;
20297 + if (hw->mac.ops.setup_link)
20298 + ret = hw->mac.ops.setup_link(hw, autoneg, negotiation, link_up);
20304 + * ixgbe_clear_vf_stats_counters - Clear out VF stats after reset
20305 + * @adapter: board private structure
20307 + * On a reset we need to clear out the VF stats or accounting gets
20308 + * messed up because the hardware counters are not clear-on-read.
20310 +void ixgbe_clear_vf_stats_counters(struct ixgbe_adapter *adapter)
20312 + struct ixgbe_hw *hw = &adapter->hw;
20315 +	for (i = 0; i < adapter->num_vfs; i++) {
20316 + adapter->vfinfo[i].last_vfstats.gprc =
20317 + IXGBE_READ_REG(hw, IXGBE_PVFGPRC(i));
20318 + adapter->vfinfo[i].saved_rst_vfstats.gprc +=
20319 + adapter->vfinfo[i].vfstats.gprc;
20320 + adapter->vfinfo[i].vfstats.gprc = 0;
20321 + adapter->vfinfo[i].last_vfstats.gptc =
20322 + IXGBE_READ_REG(hw, IXGBE_PVFGPTC(i));
20323 + adapter->vfinfo[i].saved_rst_vfstats.gptc +=
20324 + adapter->vfinfo[i].vfstats.gptc;
20325 + adapter->vfinfo[i].vfstats.gptc = 0;
20326 + adapter->vfinfo[i].last_vfstats.gorc =
20327 + IXGBE_READ_REG(hw, IXGBE_PVFGORC_LSB(i));
20328 + adapter->vfinfo[i].saved_rst_vfstats.gorc +=
20329 + adapter->vfinfo[i].vfstats.gorc;
20330 + adapter->vfinfo[i].vfstats.gorc = 0;
20331 + adapter->vfinfo[i].last_vfstats.gotc =
20332 + IXGBE_READ_REG(hw, IXGBE_PVFGOTC_LSB(i));
20333 + adapter->vfinfo[i].saved_rst_vfstats.gotc +=
20334 + adapter->vfinfo[i].vfstats.gotc;
20335 + adapter->vfinfo[i].vfstats.gotc = 0;
20336 + adapter->vfinfo[i].last_vfstats.mprc =
20337 + IXGBE_READ_REG(hw, IXGBE_PVFMPRC(i));
20338 + adapter->vfinfo[i].saved_rst_vfstats.mprc +=
20339 + adapter->vfinfo[i].vfstats.mprc;
20340 + adapter->vfinfo[i].vfstats.mprc = 0;
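+	/*
+	 * Pattern sketch (illustrative): the watchdog later accumulates
+	 * wrap-safe deltas against these snapshots, roughly:
+	 *
+	 *	u32 now = IXGBE_READ_REG(hw, IXGBE_PVFGPRC(i));
+	 *	vfstats.gprc += now - last_vfstats.gprc; (u32 wraps safely)
+	 *	last_vfstats.gprc = now;
+	 */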
20344 +static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
20346 + struct ixgbe_hw *hw = &adapter->hw;
20349 + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
20350 + gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
20352 +#ifdef CONFIG_IXGBE_NAPI
20353 + gpie |= IXGBE_GPIE_EIAME;
20355 + * use EIAM to auto-mask when MSI-X interrupt is asserted
20356 + * this saves a register write for every interrupt
20358 + switch (hw->mac.type) {
20359 + case ixgbe_mac_82598EB:
20360 + IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
20363 + case ixgbe_mac_82599EB:
20364 + IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
20365 + IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
20369 + /* legacy interrupts, use EIAM to auto-mask when reading EICR,
20370 + * specifically only auto mask tx and rx interrupts */
20371 + IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
20375 + /* XXX: to interrupt immediately for EICS writes, enable this */
20376 + /* gpie |= IXGBE_GPIE_EIMEN; */
20378 + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
20379 + gpie &= ~IXGBE_GPIE_VTMODE_MASK;
20380 + gpie |= IXGBE_GPIE_VTMODE_64;
20383 + /* Enable Thermal over heat sensor interrupt */
20384 + if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
20385 + gpie |= IXGBE_SDP0_GPIEN;
20387 + /* Enable fan failure interrupt */
20388 + if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
20389 + gpie |= IXGBE_SDP1_GPIEN;
20391 + if (hw->mac.type == ixgbe_mac_82599EB)
20392 + gpie |= IXGBE_SDP1_GPIEN;
20393 + gpie |= IXGBE_SDP2_GPIEN;
20395 + IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
20396 +#ifdef IXGBE_TCP_TIMER
20398 + if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) ||
20399 + (adapter->flags & IXGBE_FLAG_MSI_ENABLED)) {
20400 + u32 tcp_timer = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);
20401 + tcp_timer |= IXGBE_TCPTIMER_DURATION_MASK;
20402 + tcp_timer |= (IXGBE_TCPTIMER_KS |
20403 + IXGBE_TCPTIMER_COUNT_ENABLE |
20404 + IXGBE_TCPTIMER_LOOP);
20405 + IXGBE_WRITE_REG(hw, IXGBE_TCPTIMER, tcp_timer);
20406 + tcp_timer = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);
20411 +static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
20413 + struct ixgbe_hw *hw = &adapter->hw;
20417 + ixgbe_get_hw_control(adapter);
20418 + ixgbe_setup_gpie(adapter);
20420 + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
20421 + ixgbe_configure_msix(adapter);
20423 + ixgbe_configure_msi_and_legacy(adapter);
20425 +#ifndef IXGBE_NO_LLI
20426 + ixgbe_configure_lli(adapter);
20429 + /* enable the optics */
20430 + if (hw->phy.multispeed_fiber)
20431 + ixgbe_enable_tx_laser(hw);
20433 + clear_bit(__IXGBE_DOWN, &adapter->state);
20434 + ixgbe_napi_enable_all(adapter);
20436 + if (ixgbe_is_sfp(hw)) {
20437 + ixgbe_sfp_link_config(adapter);
20439 + err = ixgbe_non_sfp_link_config(hw);
20441 + DPRINTK(PROBE, ERR, "link_config FAILED %d\n", err);
20444 + /* clear any pending interrupts, may auto mask */
20445 + IXGBE_READ_REG(hw, IXGBE_EICR);
20446 + ixgbe_irq_enable(adapter, true, true);
20449 +	 * For hot-pluggable SFP+ devices, an SFP+ module may have arrived
20450 +	 * before interrupts were enabled but after probe. Such devices
20451 +	 * wouldn't have their type identified yet. We need to kick off
20452 + * the SFP+ module setup first, then try to bring up link. If we're
20453 + * not hot-pluggable SFP+, we just need to configure link and bring
20456 + if (hw->phy.type == ixgbe_phy_none)
20457 + schedule_work(&adapter->sfp_config_module_task);
20459 + /* enable transmits */
20460 + netif_tx_start_all_queues(adapter->netdev);
20462 +	/* bring the link up in the watchdog; this could race with our first
20463 +	 * link up interrupt but shouldn't be a problem */
20464 + adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
20465 + adapter->link_check_timeout = jiffies;
20466 + mod_timer(&adapter->watchdog_timer, jiffies);
20468 + ixgbe_clear_vf_stats_counters(adapter);
20469 + /* Set PF Reset Done bit so PF/VF Mail Ops can work */
20470 + ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
20471 + ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
20472 + IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
20477 +void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
20479 + WARN_ON(in_interrupt());
20480 + /* put off any impending NetWatchDogTimeout */
20481 + adapter->netdev->trans_start = jiffies;
20483 + while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
20485 + ixgbe_down(adapter);
20487 + * If SR-IOV enabled then wait a bit before bringing the adapter
20488 + * back up to give the VFs time to respond to the reset. The
20489 +	 * two second wait is based upon the watchdog timer cycle in
20490 +	 * the VF driver.
20491 +	 */
20492 +	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
20493 +		msleep(2000);
20494 +	ixgbe_up(adapter);
20495 + clear_bit(__IXGBE_RESETTING, &adapter->state);
20498 +int ixgbe_up(struct ixgbe_adapter *adapter)
20502 + ixgbe_configure(adapter);
20504 + err = ixgbe_up_complete(adapter);
20509 +void ixgbe_reset(struct ixgbe_adapter *adapter)
20511 + struct ixgbe_hw *hw = &adapter->hw;
20514 + err = hw->mac.ops.init_hw(hw);
20517 + case IXGBE_ERR_SFP_NOT_PRESENT:
20519 + case IXGBE_ERR_MASTER_REQUESTS_PENDING:
20520 + DPRINTK(HW, INFO, "master disable timed out\n");
20522 + case IXGBE_ERR_EEPROM_VERSION:
20523 + /* We are running on a pre-production device, log a warning */
20524 + DPRINTK(PROBE, INFO, "This device is a pre-production adapter/"
20525 + "LOM. Please be aware there may be issues associated "
20526 + "with your hardware. If you are experiencing problems "
20527 + "please contact your Intel or hardware representative "
20528 + "who provided you with this hardware.\n");
20531 + DPRINTK(PROBE, ERR, "Hardware Error: %d\n", err);
20534 + /* reprogram the RAR[0] in case user changed it. */
20535 + if (hw->mac.ops.set_rar)
20536 + hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
20540 + * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
20541 + * @rx_ring: ring to free buffers from
20543 +void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
20545 + struct device *dev = rx_ring->dev;
20546 + unsigned long size;
20549 + /* ring already cleared, nothing to do */
20550 + if (!rx_ring->rx_buffer_info)
20553 + /* Free all the Rx ring sk_buffs */
20554 + for (i = 0; i < rx_ring->count; i++) {
20555 + struct ixgbe_rx_buffer *rx_buffer_info;
20557 + rx_buffer_info = &rx_ring->rx_buffer_info[i];
20558 + if (rx_buffer_info->dma) {
20559 + dma_unmap_single(dev,
20560 + rx_buffer_info->dma,
20561 + rx_ring->rx_buf_len,
20562 + DMA_FROM_DEVICE);
20563 + rx_buffer_info->dma = 0;
20565 + if (rx_buffer_info->skb) {
20566 + struct sk_buff *skb = rx_buffer_info->skb;
20567 + rx_buffer_info->skb = NULL;
20569 + struct sk_buff *this = skb;
20570 + if (IXGBE_RSC_CB(this)->delay_unmap) {
20571 + dma_unmap_single(dev,
20572 + IXGBE_RSC_CB(this)->dma,
20573 + rx_ring->rx_buf_len,
20574 + DMA_FROM_DEVICE);
20575 + IXGBE_RSC_CB(skb)->dma = 0;
20576 + IXGBE_RSC_CB(skb)->delay_unmap = false;
20579 + dev_kfree_skb(this);
20582 + if (!rx_buffer_info->page)
20584 + if (rx_buffer_info->page_dma) {
20585 + dma_unmap_page(dev,
20586 + rx_buffer_info->page_dma,
20588 + DMA_FROM_DEVICE);
20591 + rx_buffer_info->page_dma = 0;
20592 + put_page(rx_buffer_info->page);
20593 + rx_buffer_info->page = NULL;
20594 + rx_buffer_info->page_offset = 0;
20597 + size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
20598 + memset(rx_ring->rx_buffer_info, 0, size);
20600 + /* Zero out the descriptor ring */
20601 + memset(rx_ring->desc, 0, rx_ring->size);
20603 + rx_ring->next_to_clean = 0;
20604 + rx_ring->next_to_use = 0;
20608 + * ixgbe_clean_tx_ring - Free Tx Buffers
20609 + * @tx_ring: ring to be cleaned
20611 +static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
20613 + struct ixgbe_tx_buffer *tx_buffer_info;
20614 + unsigned long size;
20617 + /* ring already cleared, nothing to do */
20618 + if (!tx_ring->tx_buffer_info)
20621 + /* Free all the Tx ring sk_buffs */
20622 + for (i = 0; i < tx_ring->count; i++) {
20623 + tx_buffer_info = &tx_ring->tx_buffer_info[i];
20624 + ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
20627 + size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
20628 + memset(tx_ring->tx_buffer_info, 0, size);
20630 + /* Zero out the descriptor ring */
20631 + memset(tx_ring->desc, 0, tx_ring->size);
20633 + tx_ring->next_to_use = 0;
20634 + tx_ring->next_to_clean = 0;
20638 + * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
20639 + * @adapter: board private structure
20641 +static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
20645 + for (i = 0; i < adapter->num_rx_queues; i++)
20646 + ixgbe_clean_rx_ring(adapter->rx_ring[i]);
20650 + * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
20651 + * @adapter: board private structure
20653 +static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
20657 + for (i = 0; i < adapter->num_tx_queues; i++)
20658 + ixgbe_clean_tx_ring(adapter->tx_ring[i]);
20661 +void ixgbe_down(struct ixgbe_adapter *adapter)
20663 + struct net_device *netdev = adapter->netdev;
20664 + struct ixgbe_hw *hw = &adapter->hw;
20669 + /* signal that we are down to the interrupt handler */
20670 + set_bit(__IXGBE_DOWN, &adapter->state);
20672 + /* disable receives */
20673 + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
20674 + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
20676 + IXGBE_WRITE_FLUSH(hw);
20679 + netif_tx_stop_all_queues(netdev);
20681 + /* call carrier off first to avoid false dev_watchdog timeouts */
20682 + netif_carrier_off(netdev);
20683 + netif_tx_disable(netdev);
20685 + ixgbe_irq_disable(adapter);
20687 + ixgbe_napi_disable_all(adapter);
20689 + clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
20690 + del_timer_sync(&adapter->sfp_timer);
20691 + del_timer_sync(&adapter->watchdog_timer);
20693 + /* disable receive for all VFs and wait one second */
20694 + if (adapter->num_vfs) {
20695 + /* Mark all the VFs as inactive */
20696 + for (i = 0 ; i < adapter->num_vfs; i++)
20697 + adapter->vfinfo[i].clear_to_send = 0;
20699 + /* ping all the active vfs to let them know we are going down */
20700 + ixgbe_ping_all_vfs(adapter);
20702 + /* Disable all VFTE/VFRE TX/RX */
20703 + ixgbe_disable_tx_rx(adapter);
20706 + /* disable transmits in the hardware now that interrupts are off */
20707 + for (i = 0; i < adapter->num_tx_queues; i++) {
20708 + u8 reg_idx = adapter->tx_ring[i]->reg_idx;
20709 + txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
20710 + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx),
20711 + (txdctl & ~IXGBE_TXDCTL_ENABLE));
20713 + /* Disable the Tx DMA engine on 82599 */
20714 + switch (hw->mac.type) {
20715 + case ixgbe_mac_82599EB:
20716 + IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
20717 + (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
20718 + ~IXGBE_DMATXCTL_TE));
20724 + /* power down the optics */
20725 + if (hw->phy.multispeed_fiber)
20726 + ixgbe_disable_tx_laser(hw);
20728 +#ifdef NETIF_F_NTUPLE
20729 + ethtool_ntuple_flush(netdev);
20730 +#endif /* NETIF_F_NTUPLE */
20732 +#ifdef HAVE_PCI_ERS
20733 + if (!pci_channel_offline(adapter->pdev))
20735 + ixgbe_reset(adapter);
20736 + ixgbe_clean_all_tx_rings(adapter);
20737 + ixgbe_clean_all_rx_rings(adapter);
20739 + /* since we reset the hardware DCA settings were cleared */
20740 + ixgbe_setup_dca(adapter);
20743 +#ifdef CONFIG_IXGBE_NAPI
20745 + * ixgbe_poll - NAPI Rx polling callback
20746 + * @napi: structure for representing this polling device
20747 + * @budget: how many packets driver is allowed to clean
20749 + * This function is used for legacy and MSI, NAPI mode
20751 +static int ixgbe_poll(struct napi_struct *napi, int budget)
20753 + struct ixgbe_q_vector *q_vector =
20754 + container_of(napi, struct ixgbe_q_vector, napi);
20755 + struct ixgbe_adapter *adapter = q_vector->adapter;
20756 + int tx_clean_complete, work_done = 0;
20758 + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
20759 + ixgbe_update_dca(q_vector);
20761 + tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]);
20762 + ixgbe_clean_rx_irq(q_vector, adapter->rx_ring[0], &work_done, budget);
20764 + if (!tx_clean_complete)
20765 + work_done = budget;
20767 +#ifndef HAVE_NETDEV_NAPI_LIST
20768 + if (!netif_running(adapter->netdev))
20772 + /* If no Tx and not enough Rx work done, exit the polling mode */
20773 + if (work_done < budget) {
20774 + napi_complete(napi);
20775 + if (adapter->rx_itr_setting & 1)
20776 + ixgbe_set_itr(adapter);
20777 + if (!test_bit(__IXGBE_DOWN, &adapter->state))
20778 + ixgbe_irq_enable_queues(adapter, IXGBE_EIMS_RTX_QUEUE);
20780 + return work_done;
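+/*
+ * Note on the NAPI contract (illustrative): returning the full budget
+ * keeps this poll routine scheduled, while work_done < budget is the only
+ * path that calls napi_complete() and re-arms the queue interrupts.
+ */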
20783 +#endif /* CONFIG_IXGBE_NAPI */
20785 + * ixgbe_tx_timeout - Respond to a Tx Hang
20786 + * @netdev: network interface device structure
20788 +static void ixgbe_tx_timeout(struct net_device *netdev)
20790 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
20792 + /* Do the reset outside of interrupt context */
20793 + schedule_work(&adapter->reset_task);
20796 +static void ixgbe_reset_task(struct work_struct *work)
20798 + struct ixgbe_adapter *adapter;
20799 + adapter = container_of(work, struct ixgbe_adapter, reset_task);
20801 + /* If we're already down or resetting, just bail */
20802 + if (test_bit(__IXGBE_DOWN, &adapter->state) ||
20803 + test_bit(__IXGBE_RESETTING, &adapter->state))
20806 + adapter->tx_timeout_count++;
20808 + ixgbe_reinit_locked(adapter);
20813 + * ixgbe_set_dcb_queues: Allocate queues for a DCB-enabled device
20814 + * @adapter: board private structure to initialize
20816 + * When DCB (Data Center Bridging) is enabled, allocate queues for
20817 + * each traffic class. If multiqueue isn't available, then abort DCB
20818 + * initialization.
20821 +static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
20823 + bool ret = false;
20824 + struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_DCB];
20826 + if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
20830 + f->mask = 0x7 << 3;
20831 + adapter->num_rx_queues = f->indices;
20832 + adapter->num_tx_queues = f->indices;
20835 + DPRINTK(DRV, INFO, "Kernel has no multiqueue support, disabling DCB\n");
20844 + * ixgbe_set_vmdq_queues: Allocate queues for VMDq devices
20845 + * @adapter: board private structure to initialize
20847 + * When VMDq (Virtual Machine Device Queues) is enabled, allocate queues
20848 + * and VM pools where appropriate. If RSS is available, then also try to
20849 + * enable RSS and map accordingly.
20852 +static inline bool ixgbe_set_vmdq_queues(struct ixgbe_adapter *adapter)
20854 + int vmdq_i = adapter->ring_feature[RING_F_VMDQ].indices;
20856 + int rss_i = adapter->ring_feature[RING_F_RSS].indices;
20857 + int rss_m = adapter->ring_feature[RING_F_RSS].mask;
20860 + bool ret = false;
20862 + switch (adapter->flags & (IXGBE_FLAG_RSS_ENABLED
20863 + | IXGBE_FLAG_DCB_ENABLED
20864 + | IXGBE_FLAG_VMDQ_ENABLED)) {
20866 + case (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_VMDQ_ENABLED):
20867 + switch (adapter->hw.mac.type) {
20868 + case ixgbe_mac_82599EB:
20869 + vmdq_i = min(IXGBE_MAX_VMDQ_INDICES, vmdq_i);
20875 + rss_shift = find_first_bit(&i, sizeof(i) * 8);
20876 + rss_m = (rss_i - 1);
20877 + vmdq_m = ((IXGBE_MAX_VMDQ_INDICES - 1) <<
20878 + rss_shift) & (MAX_RX_QUEUES - 1);
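+ /* for illustration: with rss_i = 4, rss_shift is 2 and rss_m is 3,
+  * so the low two bits of a ring index select the RSS queue within a
+  * pool and the bits above them select the VMDq pool */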
20883 + adapter->num_rx_queues = vmdq_i * rss_i;
20884 + adapter->num_tx_queues = min(MAX_TX_QUEUES, vmdq_i * rss_i);
20888 + case (IXGBE_FLAG_VMDQ_ENABLED):
20889 + switch (adapter->hw.mac.type) {
20890 + case ixgbe_mac_82598EB:
20891 + vmdq_m = (IXGBE_MAX_VMDQ_INDICES - 1);
20893 + case ixgbe_mac_82599EB:
20894 + vmdq_m = (IXGBE_MAX_VMDQ_INDICES - 1) << 1;
20899 + adapter->num_rx_queues = vmdq_i;
20900 + adapter->num_tx_queues = vmdq_i;
20906 + goto vmdq_queues_out;
20909 + if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
20910 + adapter->num_rx_pools = vmdq_i;
20911 + adapter->num_rx_queues_per_pool = adapter->num_rx_queues /
20914 + adapter->num_rx_pools = adapter->num_rx_queues;
20915 + adapter->num_rx_queues_per_pool = 1;
20917 + /* save the mask for later use */
20918 + adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;
20924 + * ixgbe_set_rss_queues - Allocate queues for RSS
20925 + * @adapter: board private structure to initialize
20927 + * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try
20928 + * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
20931 +static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
20933 + bool ret = false;
20934 + struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS];
20936 + if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
20938 + adapter->num_rx_queues = f->indices;
20940 + adapter->num_tx_queues = f->indices;
20949 + * ixgbe_set_fdir_queues - Allocate queues for Flow Director
20950 + * @adapter: board private structure to initialize
20952 + * Flow Director is an advanced Rx filter, attempting to get Rx flows back
20953 + * to the original CPU that initiated the Tx session. This runs in addition
20954 + * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the
20955 + * Rx load across CPUs using RSS.
20958 +static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
20960 + bool ret = false;
20961 + struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];
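+ /* cap the Flow Director queue count at the number of online CPUs */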
20963 + f_fdir->indices = min((int)num_online_cpus(), f_fdir->indices);
20964 + f_fdir->mask = 0;
20967 + * Use RSS in addition to Flow Director to ensure the best
20968 + * distribution of flows across cores, even when an FDIR flow
20971 + if (adapter->flags & IXGBE_FLAG_RSS_ENABLED &&
20972 + ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
20973 + (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)))) {
20974 + adapter->num_rx_queues = f_fdir->indices;
20976 + adapter->num_tx_queues = f_fdir->indices;
20980 + adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
20981 + adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
20988 + * ixgbe_set_fcoe_queues - Allocate queues for Fibre Channel over Ethernet (FCoE)
20989 + * @adapter: board private structure to initialize
20991 + * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges.
20992 + * The ring feature mask is not used as a mask for FCoE, as it can take any 8
20993 + * rx queues out of the max number of rx queues; instead, it is used as the
20994 + * index of the first rx queue.
20997 +static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
20999 + bool ret = false;
21000 + struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
21002 + f->indices = min((int)num_online_cpus(), f->indices);
21003 + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
21004 + adapter->num_rx_queues = 1;
21005 + adapter->num_tx_queues = 1;
21006 + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
21007 + DPRINTK(PROBE, INFO, "FCoE enabled with DCB\n");
21008 + ixgbe_set_dcb_queues(adapter);
21010 + if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
21011 + DPRINTK(PROBE, INFO, "FCoE enabled with RSS\n");
21012 + if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
21013 + (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
21014 + ixgbe_set_fdir_queues(adapter);
21016 + ixgbe_set_rss_queues(adapter);
21018 + /* adding FCoE queues */
21019 + f->mask = adapter->num_rx_queues;
21020 + adapter->num_rx_queues += f->indices;
21021 + adapter->num_tx_queues += f->indices;
21029 +#endif /* IXGBE_FCOE */
21032 + * ixgbe_set_sriov_queues - Allocate queues for IOV use
21033 + * @adapter: board private structure to initialize
21035 + * IOV doesn't actually use anything, so just NAK the
21036 + * request for now and let the other queue routines
21037 + * figure out what to do.
21039 +static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
21045 + * ixgbe_set_num_queues - Allocate queues for device, feature dependent
21046 + * @adapter: board private structure to initialize
21048 + * This is the top level queue allocation routine. The order here is very
21049 + * important, starting with the largest set of features turned on at once,
21050 + * and ending with the smallest set of features. This way large combinations
21051 + * can be allocated if they're turned on, and smaller combinations are the
21052 + * fallthrough conditions.
21055 +static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
21057 + /* Start with base case */
21058 + adapter->num_rx_queues = 1;
21059 + adapter->num_tx_queues = 1;
21060 + adapter->num_rx_pools = adapter->num_rx_queues;
21061 + adapter->num_rx_queues_per_pool = 1;
21063 + if (ixgbe_set_sriov_queues(adapter))
21066 + if (ixgbe_set_vmdq_queues(adapter))
21070 + if (ixgbe_set_fcoe_queues(adapter))
21073 +#endif /* IXGBE_FCOE */
21074 + if (ixgbe_set_dcb_queues(adapter))
21077 + if (ixgbe_set_fdir_queues(adapter))
21081 + if (ixgbe_set_rss_queues(adapter))
21085 +static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
21088 + int err, vector_threshold;
21090 + /* We'll want at least 3 (vector_threshold):
21091 + * 1) TxQ[0] Cleanup
21092 + * 2) RxQ[0] Cleanup
21093 + * 3) Other (Link Status Change, etc.)
21094 + * 4) TCP Timer (optional)
21096 + vector_threshold = MIN_MSIX_COUNT;
21098 + /* The more we get, the more we will assign to Tx/Rx Cleanup
21099 + * for the separate queues...where Rx Cleanup >= Tx Cleanup.
21100 + * Right now, we simply care about how many we'll get; we'll
21101 + * set them up later while requesting irq's.
21103 + while (vectors >= vector_threshold) {
21104 + err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
21106 + if (!err) /* Success in acquiring all requested vectors. */
21108 + else if (err < 0)
21109 + vectors = 0; /* Nasty failure, quit now */
21110 + else /* err == number of vectors we should try again with */
21114 + if (vectors < vector_threshold) {
21115 + /* Can't allocate enough MSI-X interrupts? Oh well.
21116 + * This just means we'll go with either a single MSI
21117 + * vector or fall back to legacy interrupts.
21119 + DPRINTK(HW, DEBUG, "Unable to allocate MSI-X interrupts\n");
21120 + adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
21121 + kfree(adapter->msix_entries);
21122 + adapter->msix_entries = NULL;
21124 + adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
21126 + * Adjust for only the vectors we'll use, which is minimum
21127 + * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
21128 + * vectors we were allocated.
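+ * (e.g. a grant of 24 vectors with max_msix_q_vectors = 16 is trimmed
+ * to 16 + NON_Q_VECTORS, while a smaller grant is used as-is)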
21130 + adapter->num_msix_vectors = min(vectors,
21131 + adapter->max_msix_q_vectors + NON_Q_VECTORS);
21136 + * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
21137 + * @adapter: board private structure to initialize
21139 + * Cache the descriptor ring offsets for RSS to the assigned rings.
21142 +static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
21146 + if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
21149 + for (i = 0; i < adapter->num_rx_queues; i++)
21150 + adapter->rx_ring[i]->reg_idx = i;
21151 + for (i = 0; i < adapter->num_tx_queues; i++)
21152 + adapter->tx_ring[i]->reg_idx = i;
21158 + * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
21159 + * @adapter: board private structure to initialize
21161 + * Cache the descriptor ring offsets for DCB to the assigned rings.
21164 +static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
21167 + bool ret = false;
21168 + int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
21170 + if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
21173 + /* the number of queues is assumed to be symmetric */
21174 + switch (adapter->hw.mac.type) {
21175 + case ixgbe_mac_82598EB:
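+ /* 82598: each TC owns a block of 8 Rx queues (i * 8)
+  * and 4 Tx queues (i * 4) */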
21176 + for (i = 0; i < dcb_i; i++) {
21177 + adapter->rx_ring[i]->reg_idx = i << 3;
21178 + adapter->tx_ring[i]->reg_idx = i << 2;
21182 + case ixgbe_mac_82599EB:
21183 + if (dcb_i == 8) {
21185 + * Tx TC0 starts at: descriptor queue 0
21186 + * Tx TC1 starts at: descriptor queue 32
21187 + * Tx TC2 starts at: descriptor queue 64
21188 + * Tx TC3 starts at: descriptor queue 80
21189 + * Tx TC4 starts at: descriptor queue 96
21190 + * Tx TC5 starts at: descriptor queue 104
21191 + * Tx TC6 starts at: descriptor queue 112
21192 + * Tx TC7 starts at: descriptor queue 120
21194 + * Rx TC0-TC7 are offset by 16 queues each
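+ * (check: i = 0..2 gives i << 5 = 0/32/64, i = 3..4 gives
+ * (i + 2) << 4 = 80/96, i = 5..7 gives (i + 8) << 3 = 104/112/120,
+ * matching the Tx map above)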
21196 + for (i = 0; i < 3; i++) {
21197 + adapter->tx_ring[i]->reg_idx = i << 5;
21198 + adapter->rx_ring[i]->reg_idx = i << 4;
21200 + for ( ; i < 5; i++) {
21201 + adapter->tx_ring[i]->reg_idx = ((i + 2) << 4);
21202 + adapter->rx_ring[i]->reg_idx = i << 4;
21204 + for ( ; i < dcb_i; i++) {
21205 + adapter->tx_ring[i]->reg_idx = ((i + 8) << 3);
21206 + adapter->rx_ring[i]->reg_idx = i << 4;
21209 + } else if (dcb_i == 4) {
21211 + * Tx TC0 starts at: descriptor queue 0
21212 + * Tx TC1 starts at: descriptor queue 64
21213 + * Tx TC2 starts at: descriptor queue 96
21214 + * Tx TC3 starts at: descriptor queue 112
21216 + * Rx TC0-TC3 are offset by 32 queues each
21218 + adapter->tx_ring[0]->reg_idx = 0;
21219 + adapter->tx_ring[1]->reg_idx = 64;
21220 + adapter->tx_ring[2]->reg_idx = 96;
21221 + adapter->tx_ring[3]->reg_idx = 112;
21222 + for (i = 0 ; i < dcb_i; i++)
21223 + adapter->rx_ring[i]->reg_idx = i << 5;
21234 + * ixgbe_cache_ring_vmdq - Descriptor ring to register mapping for VMDq
21235 + * @adapter: board private structure to initialize
21237 + * Cache the descriptor ring offsets for VMDq to the assigned rings. It
21238 + * will also try to cache the proper offsets if RSS is enabled along with
21242 +static inline bool ixgbe_cache_ring_vmdq(struct ixgbe_adapter *adapter)
21245 + bool ret = false;
21247 + struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
21248 +#endif /* IXGBE_FCOE */
21249 + switch (adapter->flags & (IXGBE_FLAG_RSS_ENABLED
21250 + | IXGBE_FLAG_DCB_ENABLED
21251 + | IXGBE_FLAG_VMDQ_ENABLED)) {
21253 + case (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_VMDQ_ENABLED):
21254 + switch (adapter->hw.mac.type) {
21255 + case ixgbe_mac_82599EB:
21256 + /* since the # of rss queues per vmdq pool is
21257 + * limited to either 2 or 4, there is no index
21258 + * skipping and we can set them up with no
21261 + for (i = 0; i < adapter->num_rx_queues; i++)
21262 + adapter->rx_ring[i]->reg_idx = i;
21263 + for (i = 0; i < adapter->num_tx_queues; i++)
21264 + adapter->tx_ring[i]->reg_idx = i;
21272 + case (IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_DCB_ENABLED):
21273 + if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
21274 + for (i = 0; i < adapter->num_rx_queues; i++) {
21275 + adapter->rx_ring[i]->reg_idx =
21276 + (adapter->num_vfs + i) *
21277 + adapter->ring_feature[RING_F_DCB].indices;
21279 + adapter->rx_ring[i]->reg_idx +=
21280 + (i >= f->mask ? adapter->fcoe.tc : 0);
21281 +#endif /* IXGBE_FCOE */
21284 + for (i = 0; i < adapter->num_tx_queues; i++) {
21285 + adapter->tx_ring[i]->reg_idx =
21286 + (adapter->num_vfs + i) *
21287 + adapter->ring_feature[RING_F_DCB].indices;
21289 + adapter->tx_ring[i]->reg_idx +=
21290 + (i >= f->mask ? adapter->fcoe.tc : 0);
21291 +#endif /* IXGBE_FCOE */
21297 + case (IXGBE_FLAG_VMDQ_ENABLED):
21298 + switch (adapter->hw.mac.type) {
21299 + case ixgbe_mac_82598EB:
21300 + for (i = 0; i < adapter->num_rx_queues; i++)
21301 + adapter->rx_ring[i]->reg_idx = i;
21302 + for (i = 0; i < adapter->num_tx_queues; i++)
21303 + adapter->tx_ring[i]->reg_idx = i;
21306 + case ixgbe_mac_82599EB:
21307 + /* even without rss, there are 2 queues per
21308 + * pool; the odd-numbered ones are unused.
21310 + for (i = 0; i < adapter->num_rx_queues; i++)
21311 + adapter->rx_ring[i]->reg_idx = VMDQ_P(i) * 2;
21312 + for (i = 0; i < adapter->num_tx_queues; i++)
21313 + adapter->tx_ring[i]->reg_idx = VMDQ_P(i) * 2;
21326 + * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director
21327 + * @adapter: board private structure to initialize
21329 + * Cache the descriptor ring offsets for Flow Director to the assigned rings.
21332 +static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
21335 + bool ret = false;
21337 + if (adapter->flags & IXGBE_FLAG_RSS_ENABLED &&
21338 + ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
21339 + (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) {
21340 + for (i = 0; i < adapter->num_rx_queues; i++)
21341 + adapter->rx_ring[i]->reg_idx = i;
21342 + for (i = 0; i < adapter->num_tx_queues; i++)
21343 + adapter->tx_ring[i]->reg_idx = i;
21352 + * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for FCoE
21353 + * @adapter: board private structure to initialize
21355 + * Cache the descriptor ring offsets for FCoE mode to the assigned rings.
21358 +static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
21360 + struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
21362 + u8 fcoe_rx_i = 0, fcoe_tx_i = 0;
21364 + if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
21367 + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
21368 + struct ixgbe_fcoe *fcoe = &adapter->fcoe;
21370 + ixgbe_cache_ring_dcb(adapter);
21371 + /* find out queues in TC for FCoE */
21372 + fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1;
21373 + fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1;
21375 + * In 82599, the number of Tx queues for each traffic
21376 + * class in both 8-TC and 4-TC modes is:
21377 + * TCs : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7
21378 + * 8 TCs: 32 32 16 16 8 8 8 8
21379 + * 4 TCs: 64 64 32 32
21380 + * We have max 8 queues for FCoE, where 8 is the
21381 + * FCoE redirection table size. If TC for FCoE is
21382 + * less than or equal to TC3, we have enough queues
21383 + * to add max of 8 queues for FCoE, so we start FCoE
21384 + * tx queue from the next one, i.e., reg_idx + 1.
21385 + * If TC for FCoE is above TC3, implying 8 TC mode,
21386 + * and we need 8 for FCoE, we have to take all queues
21387 + * in that traffic class for FCoE.
21389 + if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3))
21392 + if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
21393 + if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
21394 + (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
21395 + ixgbe_cache_ring_fdir(adapter);
21397 + ixgbe_cache_ring_rss(adapter);
21399 + fcoe_rx_i = f->mask;
21400 + fcoe_tx_i = f->mask;
21402 + for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
21403 + adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
21404 + adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
21409 +#endif /* IXGBE_FCOE */
21411 + * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
21412 + * @adapter: board private structure to initialize
21414 + * SR-IOV doesn't use any descriptor rings but changes the default if
21415 + * no other mapping is used.
21418 +static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
21420 + adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2;
21421 + adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2;
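+ /* (two queues per pool, so the PF's rings start right after the
+  * VF range at index num_vfs * 2) */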
21426 + * ixgbe_cache_ring_register - Descriptor ring to register mapping
21427 + * @adapter: board private structure to initialize
21429 + * Once we know the feature-set enabled for the device, we'll cache
21430 + * the register offset the descriptor ring is assigned to.
21432 + * Note, the order of the various feature calls is important. It must start with
21433 + * the "most" features enabled at the same time, then trickle down to the
21434 + * least amount of features turned on at once.
21436 +static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
21438 + /* start with default case */
21439 + adapter->rx_ring[0]->reg_idx = 0;
21440 + adapter->tx_ring[0]->reg_idx = 0;
21442 + if (ixgbe_cache_ring_sriov(adapter))
21445 + if (ixgbe_cache_ring_vmdq(adapter))
21449 + if (ixgbe_cache_ring_fcoe(adapter))
21452 +#endif /* IXGBE_FCOE */
21453 + if (ixgbe_cache_ring_dcb(adapter))
21456 + if (ixgbe_cache_ring_fdir(adapter))
21459 + if (ixgbe_cache_ring_rss(adapter))
21465 + * ixgbe_alloc_queues - Allocate memory for all rings
21466 + * @adapter: board private structure to initialize
21468 + * We allocate one ring per queue at run-time since we don't know the
21469 + * number of queues at compile-time. The polling_netdev array is
21470 + * intended for Multiqueue, but should work fine with a single queue.
21472 +static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
21476 +#ifdef HAVE_DEVICE_NUMA_NODE
21477 + int orig_node = adapter->node;
21479 + WARN_ON(orig_node != -1 && !node_online(orig_node));
21480 +#endif /* HAVE_DEVICE_NUMA_NODE */
21482 + for (i = 0; i < adapter->num_tx_queues; i++) {
21483 + struct ixgbe_ring *ring = adapter->tx_ring[i];
21484 +#ifdef HAVE_DEVICE_NUMA_NODE
21485 + if (orig_node == -1) {
21486 + int cur_node = next_online_node(adapter->node);
21487 + if (cur_node == MAX_NUMNODES)
21488 + cur_node = first_online_node;
21489 + adapter->node = cur_node;
21491 +#endif /* HAVE_DEVICE_NUMA_NODE */
21492 + ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
21495 + ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
21497 + goto err_tx_ring_allocation;
21498 + ring->count = adapter->tx_ring_count;
21499 + ring->queue_index = i;
21500 + ring->dev = pci_dev_to_dev(adapter->pdev);
21501 + ring->netdev = adapter->netdev;
21502 + ring->numa_node = adapter->node;
21503 + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
21504 + ring->atr_sample_rate = adapter->atr_sample_rate;
21505 + ring->atr_count = 0;
21507 + adapter->tx_ring[i] = ring;
21510 +#ifdef HAVE_DEVICE_NUMA_NODE
21511 + /* Restore the adapter's original node */
21512 + adapter->node = orig_node;
21514 +#endif /* HAVE_DEVICE_NUMA_NODE */
21515 + rx_count = adapter->rx_ring_count;
21516 + for (i = 0; i < adapter->num_rx_queues; i++) {
21517 + struct ixgbe_ring *ring = adapter->rx_ring[i];
21518 +#ifdef HAVE_DEVICE_NUMA_NODE
21519 + if (orig_node == -1) {
21520 + int cur_node = next_online_node(adapter->node);
21521 + if (cur_node == MAX_NUMNODES)
21522 + cur_node = first_online_node;
21523 + adapter->node = cur_node;
21525 +#endif /* HAVE_DEVICE_NUMA_NODE */
21526 + ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
21529 + ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
21531 + goto err_rx_ring_allocation;
21532 + ring->count = rx_count;
21533 + ring->queue_index = i;
21534 + ring->dev = pci_dev_to_dev(adapter->pdev);
21535 + ring->netdev = adapter->netdev;
21536 + ring->numa_node = adapter->node;
21538 + adapter->rx_ring[i] = ring;
21541 +#ifdef HAVE_DEVICE_NUMA_NODE
21542 + /* Restore the adapter's original node */
21543 + adapter->node = orig_node;
21545 +#endif /* HAVE_DEVICE_NUMA_NODE */
21546 + ixgbe_cache_ring_register(adapter);
21550 +err_rx_ring_allocation:
21551 + for (i = 0; i < adapter->num_tx_queues; i++)
21552 + kfree(adapter->tx_ring[i]);
21553 +err_tx_ring_allocation:
21558 + * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
21559 + * @adapter: board private structure to initialize
21561 + * Attempt to configure the interrupts using the best available
21562 + * capabilities of the hardware and the kernel.
21564 +static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
21566 + struct ixgbe_hw *hw = &adapter->hw;
21568 + int vector, v_budget;
21570 + if (!(adapter->flags & IXGBE_FLAG_MSIX_CAPABLE))
21574 + * It's easy to be greedy for MSI-X vectors, but it really
21575 + * doesn't do us much good if we have a lot more vectors
21576 + * than CPUs. So let's be conservative and only ask for
21577 + * (roughly) the same number of vectors as there are CPUs.
21579 + v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
21580 + (int)num_online_cpus()) + NON_Q_VECTORS;
21583 + * At the same time, hardware can only support a maximum of
21584 + * hw.mac->max_msix_vectors vectors. With features
21585 + * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
21586 + * descriptor queues supported by our device. Thus, we cap it off in
21587 + * those rare cases where the cpu count also exceeds our vector limit.
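+ * (the 82599, for instance, supports at most 64 MSI-X vectors)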
21589 + v_budget = min(v_budget, (int)hw->mac.max_msix_vectors);
21591 + /* A failure in MSI-X entry allocation isn't fatal, but it does
21592 + * mean we disable MSI-X capabilities of the adapter. */
21593 + adapter->msix_entries = kcalloc(v_budget,
21594 + sizeof(struct msix_entry), GFP_KERNEL);
21595 + if (adapter->msix_entries) {
21596 + for (vector = 0; vector < v_budget; vector++)
21597 + adapter->msix_entries[vector].entry = vector;
21599 + ixgbe_acquire_msix_vectors(adapter, v_budget);
21601 + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
21605 + adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
21606 + adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE;
21607 + adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
21608 + adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
21609 + adapter->atr_sample_rate = 0;
21610 + adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
21611 + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
21612 + DPRINTK(PROBE, ERR, "MSIX interrupt not available - "
21613 + "disabling SR-IOV\n");
21614 + ixgbe_disable_sriov(adapter);
21617 + adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
21618 + ixgbe_set_num_queues(adapter);
21621 + if (!(adapter->flags & IXGBE_FLAG_MSI_CAPABLE))
21624 + err = pci_enable_msi(adapter->pdev);
21626 + adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
21628 + DPRINTK(HW, DEBUG, "Unable to allocate MSI interrupt, "
21629 + "falling back to legacy. Error: %d\n", err);
21636 + /* Notify the stack of the (possibly) reduced Tx Queue count. */
21637 +#ifdef CONFIG_NETDEVICES_MULTIQUEUE
21638 + adapter->netdev->egress_subqueue_count = adapter->num_tx_queues;
21640 + adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
21642 +#endif /* HAVE_TX_MQ */
21647 + * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
21648 + * @adapter: board private structure to initialize
21650 + * We allocate one q_vector per queue interrupt. If allocation fails we
21651 + * return -ENOMEM.
21653 +static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
21655 + int v_idx, num_q_vectors;
21656 + struct ixgbe_q_vector *q_vector;
21658 +#ifdef CONFIG_IXGBE_NAPI
21659 + int (*poll)(struct napi_struct *, int);
21662 + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
21663 + num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
21664 + rx_vectors = adapter->num_rx_queues;
21665 +#ifdef CONFIG_IXGBE_NAPI
21666 + poll = &ixgbe_clean_rxtx_many;
21669 + num_q_vectors = 1;
21671 +#ifdef CONFIG_IXGBE_NAPI
21672 + poll = &ixgbe_poll;
21676 + for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
21677 + q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector),
21678 + GFP_KERNEL, adapter->node);
21680 + q_vector = kzalloc(sizeof(struct ixgbe_q_vector),
21684 + q_vector->adapter = adapter;
21685 + if (q_vector->txr_count && !q_vector->rxr_count)
21686 + q_vector->eitr = adapter->tx_eitr_param;
21688 + q_vector->eitr = adapter->rx_eitr_param;
21689 + q_vector->v_idx = v_idx;
21690 +#ifndef IXGBE_NO_LRO
21691 + if (v_idx < rx_vectors) {
21692 + int size = sizeof(struct ixgbe_lro_list);
21693 + q_vector->lrolist = vmalloc_node(size, adapter->node);
21694 + if (!q_vector->lrolist)
21695 + q_vector->lrolist = vmalloc(size);
21696 + if (!q_vector->lrolist) {
21700 + memset(q_vector->lrolist, 0, size);
21701 + ixgbe_lro_ring_init(q_vector->lrolist);
21704 +#ifdef CONFIG_IXGBE_NAPI
21705 + netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64);
21707 + adapter->q_vector[v_idx] = q_vector;
21715 + q_vector = adapter->q_vector[v_idx];
21716 +#ifdef CONFIG_IXGBE_NAPI
21717 + netif_napi_del(&q_vector->napi);
21719 +#ifndef IXGBE_NO_LRO
21720 + if (q_vector->lrolist) {
21721 + ixgbe_lro_ring_exit(q_vector->lrolist);
21722 + vfree(q_vector->lrolist);
21723 + q_vector->lrolist = NULL;
21727 + adapter->q_vector[v_idx] = NULL;
21733 + * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
21734 + * @adapter: board private structure to initialize
21736 + * This function frees the memory allocated to the q_vectors. In addition if
21737 + * NAPI is enabled it will delete any references to the NAPI struct prior
21738 + * to freeing the q_vector.
21740 +static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
21742 + int v_idx, num_q_vectors;
21744 + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
21745 + num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
21747 + num_q_vectors = 1;
21750 + for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
21751 + struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
21753 + adapter->q_vector[v_idx] = NULL;
21754 +#ifdef CONFIG_IXGBE_NAPI
21755 + netif_napi_del(&q_vector->napi);
21757 +#ifndef IXGBE_NO_LRO
21758 + if (q_vector->lrolist) {
21759 + ixgbe_lro_ring_exit(q_vector->lrolist);
21760 + vfree(q_vector->lrolist);
21761 + q_vector->lrolist = NULL;
21768 +static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
21770 + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
21771 + adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
21772 + pci_disable_msix(adapter->pdev);
21773 + kfree(adapter->msix_entries);
21774 + adapter->msix_entries = NULL;
21775 + } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
21776 + adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
21777 + pci_disable_msi(adapter->pdev);
21783 + * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
21784 + * @adapter: board private structure to initialize
21786 + * We determine which interrupt scheme to use based on...
21787 + * - Kernel support (MSI, MSI-X)
21788 + * - which can be user-defined (via MODULE_PARAM)
21789 + * - Hardware queue count (num_*_queues)
21790 + * - defined by miscellaneous hardware support/features (RSS, etc.)
21792 +int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
21796 + /* Number of supported queues */
21797 + ixgbe_set_num_queues(adapter);
21799 + err = ixgbe_set_interrupt_capability(adapter);
21801 + DPRINTK(PROBE, ERR, "Unable to setup interrupt capabilities\n");
21802 + goto err_set_interrupt;
21805 + err = ixgbe_alloc_q_vectors(adapter);
21807 + DPRINTK(PROBE, ERR, "Unable to allocate memory for queue "
21809 + goto err_alloc_q_vectors;
21812 + err = ixgbe_alloc_queues(adapter);
21814 + DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
21815 + goto err_alloc_queues;
21818 + DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, "
21819 + "Tx Queue count = %u\n",
21820 + (adapter->num_rx_queues > 1) ? "Enabled" :
21821 + "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
21823 + set_bit(__IXGBE_DOWN, &adapter->state);
21827 + ixgbe_free_q_vectors(adapter);
21828 +err_alloc_q_vectors:
21829 + ixgbe_reset_interrupt_capability(adapter);
21830 +err_set_interrupt:
21835 + * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
21836 + * @adapter: board private structure to clear interrupt scheme on
21838 + * We go through and clear interrupt specific resources and reset the structure
21839 + * to pre-load conditions
21841 +void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
21845 + for (i = 0; i < adapter->num_tx_queues; i++) {
21846 + kfree(adapter->tx_ring[i]);
21847 + adapter->tx_ring[i] = NULL;
21849 + for (i = 0; i < adapter->num_rx_queues; i++) {
21850 + kfree(adapter->rx_ring[i]);
21851 + adapter->rx_ring[i] = NULL;
21854 + ixgbe_free_q_vectors(adapter);
21855 + ixgbe_reset_interrupt_capability(adapter);
21859 + * ixgbe_sfp_timer - timer callback to schedule the search for a missing module
21860 + * @data: pointer to our adapter struct
21862 +static void ixgbe_sfp_timer(unsigned long data)
21864 + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
21866 + /* Do the sfp_timer outside of interrupt context due to the
21867 + * delays that sfp+ detection requires */
21868 + schedule_work(&adapter->sfp_task);
21872 + * ixgbe_sfp_task - worker thread to find a missing module
21873 + * @work: pointer to work_struct containing our data
21875 +static void ixgbe_sfp_task(struct work_struct *work)
21877 + struct ixgbe_adapter *adapter = container_of(work,
21878 + struct ixgbe_adapter,
21880 + struct ixgbe_hw *hw = &adapter->hw;
21882 + if ((hw->phy.type == ixgbe_phy_nl) &&
21883 + (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
21884 + s32 ret = hw->phy.ops.identify_sfp(hw);
21885 + if (ret && ret != IXGBE_ERR_SFP_NOT_SUPPORTED)
21887 + ret = hw->phy.ops.reset(hw);
21888 + if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
21889 + DPRINTK(PROBE, ERR, "failed to initialize because an "
21890 + "unsupported SFP+ module type was detected.\n"
21891 + "Reload the driver after installing a "
21892 + "supported module.\n");
21893 + unregister_netdev(adapter->netdev);
21894 + adapter->netdev_registered = false;
21896 + DPRINTK(PROBE, INFO, "detected SFP+: %d\n",
21897 + hw->phy.sfp_type);
21899 + /* don't need this routine any more */
21900 + clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
21904 + if (test_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state))
21905 + mod_timer(&adapter->sfp_timer,
21906 + round_jiffies(jiffies + (2 * HZ)));
21910 + * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
21911 + * @adapter: board private structure to initialize
21913 + * ixgbe_sw_init initializes the Adapter private data structure.
21914 + * Fields are initialized based on PCI device information and
21915 + * OS network device settings (MTU size).
21917 +static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
21919 + struct ixgbe_hw *hw = &adapter->hw;
21920 + struct pci_dev *pdev = adapter->pdev;
21923 + /* PCI config space info */
21925 + hw->vendor_id = pdev->vendor;
21926 + hw->device_id = pdev->device;
21927 + pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
21928 + hw->subsystem_vendor_id = pdev->subsystem_vendor;
21929 + hw->subsystem_device_id = pdev->subsystem_device;
21931 + err = ixgbe_init_shared_code(hw);
21933 + DPRINTK(PROBE, ERR, "init_shared_code failed: %d\n", err);
21937 + /* Set capability flags */
21938 + switch (hw->mac.type) {
21939 + case ixgbe_mac_82598EB:
21940 + if (hw->device_id == IXGBE_DEV_ID_82598AT)
21941 + adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
21942 +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
21943 + adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
21945 + adapter->flags |= IXGBE_FLAG_MSI_CAPABLE;
21946 + adapter->flags |= IXGBE_FLAG_MSIX_CAPABLE;
21947 + if (adapter->flags & IXGBE_FLAG_MSIX_CAPABLE)
21948 + adapter->flags |= IXGBE_FLAG_MQ_CAPABLE;
21949 + if (adapter->flags & IXGBE_FLAG_MQ_CAPABLE)
21950 + adapter->flags |= IXGBE_FLAG_DCB_CAPABLE;
21951 + if (adapter->flags & IXGBE_FLAG_MQ_CAPABLE)
21952 + adapter->flags |= IXGBE_FLAG_RSS_CAPABLE;
21953 + if (adapter->flags & IXGBE_FLAG_MQ_CAPABLE)
21954 + adapter->flags |= IXGBE_FLAG_VMDQ_CAPABLE;
21955 + adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE;
21956 + adapter->flags &= ~IXGBE_FLAG_SRIOV_CAPABLE;
21957 + adapter->max_msix_q_vectors = IXGBE_MAX_MSIX_Q_VECTORS_82598;
21959 + case ixgbe_mac_82599EB:
21960 +#ifndef IXGBE_NO_SMART_SPEED
21961 + hw->phy.smart_speed = ixgbe_smart_speed_on;
21963 + hw->phy.smart_speed = ixgbe_smart_speed_off;
21965 + adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
21966 +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
21967 + adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
21969 + adapter->flags |= IXGBE_FLAG_MSI_CAPABLE;
21970 + adapter->flags |= IXGBE_FLAG_MSIX_CAPABLE;
21971 + if (adapter->flags & IXGBE_FLAG_MSIX_CAPABLE)
21972 + adapter->flags |= IXGBE_FLAG_MQ_CAPABLE;
21973 + if (adapter->flags & IXGBE_FLAG_MQ_CAPABLE)
21974 + adapter->flags |= IXGBE_FLAG_DCB_CAPABLE;
21975 + if (adapter->flags & IXGBE_FLAG_MQ_CAPABLE)
21976 + adapter->flags |= IXGBE_FLAG_RSS_CAPABLE;
21977 + if (adapter->flags & IXGBE_FLAG_MQ_CAPABLE)
21978 + adapter->flags |= IXGBE_FLAG_VMDQ_CAPABLE;
21980 + if (adapter->flags & IXGBE_FLAG_MQ_CAPABLE) {
21981 + adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
21982 + adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
21983 + adapter->ring_feature[RING_F_FCOE].indices = 0;
21985 + /* Default traffic class to use for FCoE */
21986 + adapter->fcoe.tc = IXGBE_FCOE_DEFTC;
21987 + adapter->fcoe.up = IXGBE_FCOE_DEFTC;
21991 + if (adapter->flags & IXGBE_FLAG_MQ_CAPABLE)
21992 + adapter->flags |= IXGBE_FLAG_SRIOV_CAPABLE;
21993 + if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
21994 + adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
21995 +#ifdef NETIF_F_NTUPLE
21996 + /* n-tuple support exists, always init our spinlock */
21997 + spin_lock_init(&adapter->fdir_perfect_lock);
21998 +#endif /* NETIF_F_NTUPLE */
21999 + adapter->max_msix_q_vectors = IXGBE_MAX_MSIX_Q_VECTORS_82599;
22005 + /* Default DCB settings, if applicable */
22006 + adapter->ring_feature[RING_F_DCB].indices = 8;
22008 + if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE) {
22010 + struct tc_configuration *tc;
22011 + dcb_i = adapter->ring_feature[RING_F_DCB].indices;
22012 + adapter->dcb_cfg.num_tcs.pg_tcs = dcb_i;
22013 + adapter->dcb_cfg.num_tcs.pfc_tcs = dcb_i;
22014 + for (j = 0; j < dcb_i; j++) {
22015 + tc = &adapter->dcb_cfg.tc_config[j];
22016 + tc->path[DCB_TX_CONFIG].bwg_id = 0;
22017 + tc->path[DCB_TX_CONFIG].bwg_percent = 100 / dcb_i;
22018 + tc->path[DCB_RX_CONFIG].bwg_id = 0;
22019 + tc->path[DCB_RX_CONFIG].bwg_percent = 100 / dcb_i;
22020 + tc->dcb_pfc = pfc_disabled;
22022 + /* total bandwidth of all TCs needs to add up to 100 */
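+ /* (e.g. dcb_i = 8: each TC gets 12% and the last TC takes the
+  * remaining 4%, totalling exactly 100) */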
22023 + tc->path[DCB_TX_CONFIG].bwg_percent += 100 % dcb_i;
22024 + tc->path[DCB_RX_CONFIG].bwg_percent += 100 % dcb_i;
22027 + adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
22028 + adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
22029 + adapter->dcb_cfg.rx_pba_cfg = pba_equal;
22030 + adapter->dcb_cfg.pfc_mode_enable = false;
22032 + adapter->dcb_cfg.round_robin_enable = false;
22033 + adapter->dcb_set_bitmap = 0x00;
22036 + /* XXX does this need to be initialized even w/o DCB? */
22037 + ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
22038 + adapter->ring_feature[RING_F_DCB].indices);
22040 + if (hw->mac.type == ixgbe_mac_82599EB)
22041 + hw->mbx.ops.init_params(hw);
22043 + /* default flow control settings */
22044 + hw->fc.requested_mode = ixgbe_fc_full;
22045 + hw->fc.current_mode = ixgbe_fc_full; /* init for ethtool output */
22047 + adapter->last_lfc_mode = hw->fc.current_mode;
22048 + hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
22049 + hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
22050 + hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
22051 + hw->fc.send_xon = true;
22052 + hw->fc.disable_fc_autoneg = false;
22054 + /* set defaults for the eitr throughput thresholds, in megabytes */
22055 + adapter->eitr_low = 10;
22056 + adapter->eitr_high = 20;
22058 + /* set default ring sizes */
22059 + adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
22060 + adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
22062 + /* enable rx csum by default */
22063 + adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
22065 + set_bit(__IXGBE_DOWN, &adapter->state);
22071 + * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
22072 + * @tx_ring: tx descriptor ring (for a specific queue) to setup
22074 + * Return 0 on success, negative on failure
22076 +int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
22078 + struct device *dev = tx_ring->dev;
22081 + size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
22082 + tx_ring->tx_buffer_info = vmalloc_node(size, tx_ring->numa_node);
22083 + if (!tx_ring->tx_buffer_info)
22084 + tx_ring->tx_buffer_info = vmalloc(size);
22085 + if (!tx_ring->tx_buffer_info)
22087 + memset(tx_ring->tx_buffer_info, 0, size);
22089 + /* round up to nearest 4K */
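+ /* (e.g. 512 descriptors at 16 bytes each is 8 KB, already a 4K
+  * multiple; other counts get padded up to the next 4K boundary) */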
22090 + tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
22091 + tx_ring->size = ALIGN(tx_ring->size, 4096);
22093 + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
22094 + &tx_ring->dma, GFP_KERNEL);
22095 + if (!tx_ring->desc)
22098 + tx_ring->next_to_use = 0;
22099 + tx_ring->next_to_clean = 0;
22100 + tx_ring->work_limit = tx_ring->count;
22104 + vfree(tx_ring->tx_buffer_info);
22105 + tx_ring->tx_buffer_info = NULL;
22106 + dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
22111 + * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
22112 + * @adapter: board private structure
22114 + * If this function returns with an error, then it's possible one or
22115 + * more of the rings is populated (while the rest are not). It is the
22116 + * caller's duty to clean those orphaned rings.
22118 + * Return 0 on success, negative on failure
22120 +static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
22124 + for (i = 0; i < adapter->num_tx_queues; i++) {
22125 +#ifdef HAVE_DEVICE_NUMA_NODE
22126 + DPRINTK(TX_ERR, INFO, "tx[%02d] bd: %d - assigning node %d\n",
22127 + i, adapter->bd_number, adapter->tx_ring[i]->numa_node);
22128 +#endif /* HAVE_DEVICE_NUMA_NODE */
22129 + err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
22132 + DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i);
22140 + * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
22141 + * @rx_ring: rx descriptor ring (for a specific queue) to setup
22143 + * Returns 0 on success, negative on failure
22145 +int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
22147 + struct device *dev = rx_ring->dev;
22150 + size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
22151 + rx_ring->rx_buffer_info = vmalloc_node(size, rx_ring->numa_node);
22152 + if (!rx_ring->rx_buffer_info)
22153 + rx_ring->rx_buffer_info = vmalloc(size);
22154 + if (!rx_ring->rx_buffer_info)
22156 + memset(rx_ring->rx_buffer_info, 0, size);
22158 + /* Round up to nearest 4K */
22159 + rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
22160 + rx_ring->size = ALIGN(rx_ring->size, 4096);
22162 + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
22163 + &rx_ring->dma, GFP_KERNEL);
22165 + if (!rx_ring->desc)
22168 + rx_ring->next_to_clean = 0;
22169 + rx_ring->next_to_use = 0;
22170 +#ifndef CONFIG_IXGBE_NAPI
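+ /* non-NAPI path: bound the Rx work done per interrupt to half the ring */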
22171 + rx_ring->work_limit = rx_ring->count / 2;
22176 + vfree(rx_ring->rx_buffer_info);
22177 + rx_ring->rx_buffer_info = NULL;
22178 + dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
22183 + * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
22184 + * @adapter: board private structure
22186 + * If this function returns with an error, then it's possible one or
22187 + * more of the rings is populated (while the rest are not). It is the
22188 + * caller's duty to clean those orphaned rings.
22190 + * Return 0 on success, negative on failure
22192 +static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
22196 + for (i = 0; i < adapter->num_rx_queues; i++) {
22197 +#ifdef HAVE_DEVICE_NUMA_NODE
22198 + DPRINTK(RX_ERR, INFO, "rx[%02d] bd: %d - assigning node %d\n",
22199 + i, adapter->bd_number, adapter->rx_ring[i]->numa_node);
22200 +#endif /* HAVE_DEVICE_NUMA_NODE */
22201 + err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);
22204 + DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i);
22212 + * ixgbe_free_tx_resources - Free Tx Resources per Queue
22213 + * @tx_ring: Tx descriptor ring for a specific queue
22215 + * Free all transmit software resources
22217 +void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
22219 + ixgbe_clean_tx_ring(tx_ring);
22221 + vfree(tx_ring->tx_buffer_info);
22222 + tx_ring->tx_buffer_info = NULL;
22224 + /* if not set, then don't free */
22225 + if (!tx_ring->desc)
22228 + dma_free_coherent(tx_ring->dev, tx_ring->size,
22229 + tx_ring->desc, tx_ring->dma);
22231 + tx_ring->desc = NULL;
22235 + * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
22236 + * @adapter: board private structure
22238 + * Free all transmit software resources
22240 +static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
22244 + for (i = 0; i < adapter->num_tx_queues; i++)
22245 + if (adapter->tx_ring[i]->desc)
22246 + ixgbe_free_tx_resources(adapter->tx_ring[i]);
22250 + * ixgbe_free_rx_resources - Free Rx Resources
22251 + * @rx_ring: ring to clean the resources from
22253 + * Free all receive software resources
22255 +void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
22257 + ixgbe_clean_rx_ring(rx_ring);
22259 + vfree(rx_ring->rx_buffer_info);
22260 + rx_ring->rx_buffer_info = NULL;
22262 + /* if not set, then don't free */
22263 + if (!rx_ring->desc)
22266 + dma_free_coherent(rx_ring->dev, rx_ring->size,
22267 + rx_ring->desc, rx_ring->dma);
22269 + rx_ring->desc = NULL;
22273 + * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
22274 + * @adapter: board private structure
22276 + * Free all receive software resources
22278 +static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
22282 + for (i = 0; i < adapter->num_rx_queues; i++)
22283 + if (adapter->rx_ring[i]->desc)
22284 + ixgbe_free_rx_resources(adapter->rx_ring[i]);
22288 + * ixgbe_change_mtu - Change the Maximum Transfer Unit
22289 + * @netdev: network interface device structure
22290 + * @new_mtu: new value for maximum frame size
22292 + * Returns 0 on success, negative on failure
22294 +static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
22296 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
22297 + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
22299 + /* MTU < 68 is an error and causes problems on some kernels */
22300 + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
22301 + if ((new_mtu < 68) || (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
22304 + if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
22308 + DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n",
22309 + netdev->mtu, new_mtu);
22310 + /* must set new MTU before calling down or up */
22311 + netdev->mtu = new_mtu;
22313 + if (netif_running(netdev))
22314 + ixgbe_reinit_locked(adapter);
22320 + * ixgbe_open - Called when a network interface is made active
22321 + * @netdev: network interface device structure
22323 + * Returns 0 on success, negative value on failure
22325 + * The open entry point is called when a network interface is made
22326 + * active by the system (IFF_UP). At this point all resources needed
22327 + * for transmit and receive operations are allocated, the interrupt
22328 + * handler is registered with the OS, the watchdog timer is started,
22329 + * and the stack is notified that the interface is ready.
22331 +static int ixgbe_open(struct net_device *netdev)
22333 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
22334 + struct ixgbe_hw *hw = &adapter->hw;
22337 + /* disallow open during test */
22338 + if (test_bit(__IXGBE_TESTING, &adapter->state))
22341 + netif_carrier_off(netdev);
22343 + /* allocate transmit descriptors */
22344 + err = ixgbe_setup_all_tx_resources(adapter);
22346 + goto err_setup_tx;
22348 + /* allocate receive descriptors */
22349 + err = ixgbe_setup_all_rx_resources(adapter);
22351 + goto err_setup_rx;
22353 + ixgbe_configure(adapter);
22356 + * Map the Tx/Rx rings to the vectors we were allotted.
22357 + * If request_irq is going to be called in this function, the ring
22358 + * mapping must be done *before* up_complete
22360 + ixgbe_map_rings_to_vectors(adapter);
22362 + err = ixgbe_up_complete(adapter);
22364 + goto err_setup_rx;
22366 + /* clear any pending interrupts, may auto mask */
22367 + IXGBE_READ_REG(hw, IXGBE_EICR);
22369 + err = ixgbe_request_irq(adapter);
22371 + goto err_req_irq;
22373 + ixgbe_irq_enable(adapter, true, true);
22376 + * If this adapter has a fan, check to see if we had a failure
22377 + * before we enabled the interrupt.
22379 + if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
22380 + u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
22381 + if (esdp & IXGBE_ESDP_SDP1)
22382 + DPRINTK(DRV, CRIT,
22383 + "Fan has stopped, replace the adapter\n");
22389 + ixgbe_down(adapter);
22390 + ixgbe_release_hw_control(adapter);
22391 + ixgbe_free_irq(adapter);
22393 + ixgbe_free_all_rx_resources(adapter);
22395 + ixgbe_free_all_tx_resources(adapter);
22396 + ixgbe_reset(adapter);
22402 + * ixgbe_close - Disables a network interface
22403 + * @netdev: network interface device structure
22405 + * Returns 0, this is not allowed to fail
22407 + * The close entry point is called when an interface is de-activated
22408 + * by the OS. The hardware is still under the drivers control, but
22409 + * needs to be disabled. A global MAC reset is issued to stop the
22410 + * hardware, and all transmit and receive resources are freed.
22412 +static int ixgbe_close(struct net_device *netdev)
22414 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
22416 + ixgbe_down(adapter);
22417 + ixgbe_free_irq(adapter);
22419 + ixgbe_free_all_tx_resources(adapter);
22420 + ixgbe_free_all_rx_resources(adapter);
22422 + ixgbe_release_hw_control(adapter);
22428 +static int ixgbe_resume(struct pci_dev *pdev)
22430 + struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
22431 + struct net_device *netdev = adapter->netdev;
22434 + pci_set_power_state(pdev, PCI_D0);
22435 + pci_restore_state(pdev);
22437 + * pci_restore_state clears dev->state_saved so call
22438 + * pci_save_state to restore it.
22440 + pci_save_state(pdev);
22442 + err = pci_enable_device(pdev);
22444 + printk(KERN_ERR "ixgbe: Cannot enable PCI device from "
22448 + pci_set_master(pdev);
22450 + pci_wake_from_d3(pdev, false);
22452 + err = ixgbe_init_interrupt_scheme(adapter);
22454 + printk(KERN_ERR "ixgbe: Cannot initialize interrupts for "
22459 + ixgbe_reset(adapter);
22461 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
22463 + if (netif_running(netdev)) {
22464 + err = ixgbe_open(netdev);
22469 + netif_device_attach(netdev);
22473 +#endif /* CONFIG_PM */
22476 + * __ixgbe_shutdown is not used when power management
22477 + * is disabled on older kernels (<2.6.12), which causes a compile
22478 + * warning/error because it is defined and not used.
22480 +#if defined(CONFIG_PM) || !defined(USE_REBOOT_NOTIFIER)
22481 +static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
22483 + struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
22484 + struct net_device *netdev = adapter->netdev;
22485 + struct ixgbe_hw *hw = &adapter->hw;
22487 + u32 wufc = adapter->wol;
22492 + netif_device_detach(netdev);
22494 + if (netif_running(netdev)) {
22495 + ixgbe_down(adapter);
22496 + ixgbe_free_irq(adapter);
22497 + ixgbe_free_all_tx_resources(adapter);
22498 + ixgbe_free_all_rx_resources(adapter);
22501 + ixgbe_clear_interrupt_scheme(adapter);
22504 + retval = pci_save_state(pdev);
22510 + ixgbe_set_rx_mode(netdev);
22512 + /* turn on all-multi mode if wake on multicast is enabled */
22513 + if (wufc & IXGBE_WUFC_MC) {
22514 + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
22515 + fctrl |= IXGBE_FCTRL_MPE;
22516 + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
22519 + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
22520 + ctrl |= IXGBE_CTRL_GIO_DIS;
22521 + IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
22523 + IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
22525 + IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
22526 + IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
22529 + switch (hw->mac.type) {
22530 + case ixgbe_mac_82598EB:
22531 + pci_wake_from_d3(pdev, false);
22533 + case ixgbe_mac_82599EB:
22535 + pci_wake_from_d3(pdev, true);
22537 + pci_wake_from_d3(pdev, false);
22543 + *enable_wake = !!wufc;
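+ /* (!!wufc: wake is requested whenever any wake-up filter bit is set) */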
22545 + ixgbe_release_hw_control(adapter);
22547 + pci_disable_device(pdev);
22551 +#endif /* defined(CONFIG_PM) || !defined(USE_REBOOT_NOTIFIER) */
22554 +static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
22559 + retval = __ixgbe_shutdown(pdev, &wake);
22564 + pci_prepare_to_sleep(pdev);
22566 + pci_wake_from_d3(pdev, false);
22567 + pci_set_power_state(pdev, PCI_D3hot);
22572 +#endif /* CONFIG_PM */
22574 +#ifndef USE_REBOOT_NOTIFIER
22575 +static void ixgbe_shutdown(struct pci_dev *pdev)
22579 + __ixgbe_shutdown(pdev, &wake);
22581 + if (system_state == SYSTEM_POWER_OFF) {
22582 + pci_wake_from_d3(pdev, wake);
22583 + pci_set_power_state(pdev, PCI_D3hot);
22589 + * ixgbe_update_stats - Update the board statistics counters.
22590 + * @adapter: board private structure
22592 +void ixgbe_update_stats(struct ixgbe_adapter *adapter)
22594 + struct ixgbe_hw *hw = &adapter->hw;
22595 + u64 total_mpc = 0;
22596 + u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
22597 + u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
22598 + u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
22599 + u64 bytes = 0, packets = 0;
22600 +#ifndef IXGBE_NO_LRO
22601 + u32 flushed = 0, coal = 0, recycled = 0;
22602 + int num_q_vectors = 1;
22605 + if (test_bit(__IXGBE_DOWN, &adapter->state) ||
22606 + test_bit(__IXGBE_RESETTING, &adapter->state))
22609 +#ifndef IXGBE_NO_LRO
22610 + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
22611 + num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
22614 + if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
22615 + u64 rsc_count = 0;
22616 + u64 rsc_flush = 0;
22617 + for (i = 0; i < 16; i++)
22618 + adapter->hw_rx_no_dma_resources += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
22619 + for (i = 0; i < adapter->num_rx_queues; i++) {
22620 + rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
22621 + rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
22623 + adapter->rsc_total_count = rsc_count;
22624 + adapter->rsc_total_flush = rsc_flush;
22627 +#ifndef IXGBE_NO_LRO
22628 + for (i = 0; i < num_q_vectors; i++) {
22629 + struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
22630 + if (!q_vector || !q_vector->lrolist)
22632 + flushed += q_vector->lrolist->stats.flushed;
22633 + coal += q_vector->lrolist->stats.coal;
22634 + recycled += q_vector->lrolist->stats.recycled;
22636 + adapter->lro_stats.flushed = flushed;
22637 + adapter->lro_stats.coal = coal;
22638 + adapter->lro_stats.recycled = recycled;
22641 + for (i = 0; i < adapter->num_rx_queues; i++) {
22642 + struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
22643 + non_eop_descs += rx_ring->rx_stats.non_eop_descs;
22644 + alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
22645 + alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
22646 + bytes += rx_ring->stats.bytes;
22647 + packets += rx_ring->stats.packets;
22650 + adapter->non_eop_descs = non_eop_descs;
22651 + adapter->alloc_rx_page_failed = alloc_rx_page_failed;
22652 + adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
22653 + adapter->net_stats.rx_bytes = bytes;
22654 + adapter->net_stats.rx_packets = packets;
22658 + /* gather some per-queue stats into the adapter struct */
22659 + for (i = 0; i < adapter->num_tx_queues; i++) {
22660 + struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
22661 + restart_queue += tx_ring->tx_stats.restart_queue;
22662 + tx_busy += tx_ring->tx_stats.tx_busy;
22663 + bytes += tx_ring->stats.bytes;
22664 + packets += tx_ring->stats.packets;
22666 + adapter->restart_queue = restart_queue;
22667 + adapter->tx_busy = tx_busy;
22668 + adapter->net_stats.tx_bytes = bytes;
22669 + adapter->net_stats.tx_packets = packets;
22671 + adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
22672 + for (i = 0; i < 8; i++) {
22673 + /* for packet buffers not used, the register should read 0 */
22674 + mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
22675 + missed_rx += mpc;
22676 + adapter->stats.mpc[i] += mpc;
22677 + total_mpc += adapter->stats.mpc[i];
22678 + if (hw->mac.type == ixgbe_mac_82598EB)
22679 + adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
22680 + adapter->stats.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
22681 + adapter->stats.qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
22682 + adapter->stats.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
22683 + adapter->stats.qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
22684 + switch (hw->mac.type) {
22685 + case ixgbe_mac_82598EB:
22686 + adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw,
22687 + IXGBE_PXONRXC(i));
22688 + adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw,
22689 + IXGBE_PXOFFRXC(i));
22691 + case ixgbe_mac_82599EB:
22692 + adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw,
22693 + IXGBE_PXONRXCNT(i));
22694 + adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw,
22695 + IXGBE_PXOFFRXCNT(i));
22700 + adapter->stats.pxontxc[i] += IXGBE_READ_REG(hw,
22701 + IXGBE_PXONTXC(i));
22702 + adapter->stats.pxofftxc[i] += IXGBE_READ_REG(hw,
22703 + IXGBE_PXOFFTXC(i));
22705 + adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
22706 + /* work around hardware counting issue */
22707 + adapter->stats.gprc -= missed_rx;
22709 + /* 82598 hardware only has a 32-bit counter in the high register */
22710 + switch (hw->mac.type) {
22711 + case ixgbe_mac_82598EB:
22712 + adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
22713 + adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
22714 + adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
22715 + adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
22716 + adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
22718 + case ixgbe_mac_82599EB:
22719 + adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
22720 + IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
22721 + adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
22722 + IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */
22723 + adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL);
22724 + IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
22725 + adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
22726 + adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
22728 + adapter->stats.fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
22729 + adapter->stats.fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
22730 +#endif /* HAVE_TX_MQ */
22732 + adapter->stats.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
22733 + adapter->stats.fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
22734 + adapter->stats.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
22735 + adapter->stats.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
22736 + adapter->stats.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
22737 + adapter->stats.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
22738 + adapter->stats.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
22739 +#endif /* IXGBE_FCOE */
22744 + bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
22745 + adapter->stats.bprc += bprc;
22746 + adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
22747 + if (hw->mac.type == ixgbe_mac_82598EB)
22748 + adapter->stats.mprc -= bprc;
22749 + adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
22750 + adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
22751 + adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
22752 + adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
22753 + adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
22754 + adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
22755 + adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
22756 + adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
22757 + lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
22758 + adapter->stats.lxontxc += lxon;
22759 + lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
22760 + adapter->stats.lxofftxc += lxoff;
22761 + adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
22762 + adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
22763 + adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
22765 + * 82598 errata - tx of flow control packets is included in tx counters
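+ * (a PAUSE frame is counted as a minimum-sized frame:
+ * ETH_ZLEN + ETH_FCS_LEN = 60 + 4 = 64 bytes, which is what the
+ * gotc adjustment below backs out per flow control frame)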
22767 + xon_off_tot = lxon + lxoff;
22768 + adapter->stats.gptc -= xon_off_tot;
22769 + adapter->stats.mptc -= xon_off_tot;
22770 + adapter->stats.gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
22771 + adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
22772 + adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
22773 + adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
22774 + adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
22775 + adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
22776 + adapter->stats.ptc64 -= xon_off_tot;
22777 + adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
22778 + adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
22779 + adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
22780 + adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
22781 + adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
22782 + adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
22784 + /* Fill out the OS statistics structure */
22785 + adapter->net_stats.multicast = adapter->stats.mprc;
22788 + adapter->net_stats.rx_errors = adapter->stats.crcerrs +
22789 + adapter->stats.rlec;
22790 + adapter->net_stats.rx_dropped = 0;
22791 + adapter->net_stats.rx_length_errors = adapter->stats.rlec;
22792 + adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
22793 + adapter->net_stats.rx_missed_errors = total_mpc;
22796 +	 * VF Stats Collection - skip while resetting because these
22797 +	 * are not clear on read and otherwise you'll sometimes get bogus values.
22800 + if (!test_bit(__IXGBE_RESETTING, &adapter->state)) {
22801 +		for (i = 0; i < adapter->num_vfs; i++) {
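+			/*
+			 * Note: the UPDATE_VF_COUNTER_* helpers (defined
+			 * elsewhere in the driver) accumulate the delta since
+			 * the last read; the 36-bit variant combines the
+			 * LSB/MSB register pair before computing the delta.
+			 */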
22802 + UPDATE_VF_COUNTER_32bit(IXGBE_PVFGPRC(i), \
22803 + adapter->vfinfo[i].last_vfstats.gprc, \
22804 + adapter->vfinfo[i].vfstats.gprc);
22805 + UPDATE_VF_COUNTER_32bit(IXGBE_PVFGPTC(i), \
22806 + adapter->vfinfo[i].last_vfstats.gptc, \
22807 + adapter->vfinfo[i].vfstats.gptc);
22808 + UPDATE_VF_COUNTER_36bit(IXGBE_PVFGORC_LSB(i), \
22809 + IXGBE_PVFGORC_MSB(i), \
22810 + adapter->vfinfo[i].last_vfstats.gorc, \
22811 + adapter->vfinfo[i].vfstats.gorc);
22812 + UPDATE_VF_COUNTER_36bit(IXGBE_PVFGOTC_LSB(i), \
22813 + IXGBE_PVFGOTC_MSB(i), \
22814 + adapter->vfinfo[i].last_vfstats.gotc, \
22815 + adapter->vfinfo[i].vfstats.gotc);
22816 + UPDATE_VF_COUNTER_32bit(IXGBE_PVFMPRC(i), \
22817 + adapter->vfinfo[i].last_vfstats.mprc, \
22818 + adapter->vfinfo[i].vfstats.mprc);
22824 + * ixgbe_watchdog - Timer Call-back
22825 + * @data: pointer to adapter cast into an unsigned long
22827 +static void ixgbe_watchdog(unsigned long data)
22829 + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
22830 + struct ixgbe_hw *hw = &adapter->hw;
22834 + /* if interface is down do nothing */
22835 + if (test_bit(__IXGBE_DOWN, &adapter->state))
22838 + if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
22840 + * for legacy and MSI interrupts don't set any bits
22841 + * that are enabled for EIAM, because this operation
22842 + * would set *both* EIMS and EICS for any bit in EIAM
22844 + IXGBE_WRITE_REG(hw, IXGBE_EICS,
22845 + (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
22847 + /* get one bit for every active tx/rx interrupt vector */
22848 + for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
22849 + struct ixgbe_q_vector *qv = adapter->q_vector[i];
22850 + if (qv->rxr_count || qv->txr_count)
22851 + eics |= ((u64)1 << i);
22855 + /* Cause software interrupt to ensure rings are cleaned */
22856 + ixgbe_irq_rearm_queues(adapter, eics);
22858 + /* Reset the timer */
22859 + mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ));
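+	/*
+	 * Note: this timer callback runs in softirq context, so the
+	 * heavier link-state work is deferred to the watchdog_task
+	 * worker scheduled below.
+	 */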
22861 + schedule_work(&adapter->watchdog_task);
22865 + * ixgbe_multispeed_fiber_task - worker thread to configure multispeed fiber
22866 + * @work: pointer to work_struct containing our data
22868 +static void ixgbe_multispeed_fiber_task(struct work_struct *work)
22870 + struct ixgbe_adapter *adapter = container_of(work,
22871 + struct ixgbe_adapter,
22872 + multispeed_fiber_task);
22873 + struct ixgbe_hw *hw = &adapter->hw;
22875 + bool negotiation;
22877 + adapter->flags |= IXGBE_FLAG_IN_SFP_LINK_TASK;
22878 + autoneg = hw->phy.autoneg_advertised;
22879 + if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
22880 + hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation);
22881 + hw->mac.autotry_restart = false;
22882 + if (hw->mac.ops.setup_link)
22883 + hw->mac.ops.setup_link(hw, autoneg, negotiation, true);
22884 + adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
22885 + adapter->link_check_timeout = jiffies;
22886 + adapter->flags &= ~IXGBE_FLAG_IN_SFP_LINK_TASK;
22890 + * ixgbe_sfp_config_module_task - worker thread to configure a new SFP+ module
22891 + * @work: pointer to work_struct containing our data
22893 +static void ixgbe_sfp_config_module_task(struct work_struct *work)
22895 + struct ixgbe_adapter *adapter = container_of(work,
22896 + struct ixgbe_adapter,
22897 + sfp_config_module_task);
22898 + struct ixgbe_hw *hw = &adapter->hw;
22901 + adapter->flags |= IXGBE_FLAG_IN_SFP_MOD_TASK;
22902 + err = hw->phy.ops.identify_sfp(hw);
22903 + if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
22904 + DPRINTK(PROBE, ERR, "failed to load because an "
22905 + "unsupported SFP+ module type was detected.\n");
22906 + unregister_netdev(adapter->netdev);
22907 + adapter->netdev_registered = false;
22911 + * A module may be identified correctly, but the EEPROM may not have
22912 + * support for that module. setup_sfp() will fail in that case, so
22913 + * we should not allow that module to load.
22915 + err = hw->mac.ops.setup_sfp(hw);
22916 + if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
22917 + DPRINTK(PROBE, ERR, "failed to load because an "
22918 + "unsupported SFP+ module type was detected.\n");
22919 + unregister_netdev(adapter->netdev);
22920 + adapter->netdev_registered = false;
22924 + if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
22925 + /* This will also work for DA Twinax connections */
22926 + schedule_work(&adapter->multispeed_fiber_task);
22927 + adapter->flags &= ~IXGBE_FLAG_IN_SFP_MOD_TASK;
22932 + * ixgbe_fdir_reinit_task - worker thread to reinit FDIR filter table
22933 + * @work: pointer to work_struct containing our data
22935 +static void ixgbe_fdir_reinit_task(struct work_struct *work)
22937 + struct ixgbe_adapter *adapter = container_of(work,
22938 + struct ixgbe_adapter,
22939 + fdir_reinit_task);
22940 + struct ixgbe_hw *hw = &adapter->hw;
22943 + /* if interface is down do nothing */
22944 + if (test_bit(__IXGBE_DOWN, &adapter->state))
22947 + if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
22948 + for (i = 0; i < adapter->num_tx_queues; i++)
22949 + set_bit(__IXGBE_TX_FDIR_INIT_DONE,
22950 + &(adapter->tx_ring[i]->state));
22952 + DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, "
22953 + "ignored adding FDIR ATR filters \n");
22955 + /* Done FDIR Re-initialization, enable transmits */
22956 + netif_tx_start_all_queues(adapter->netdev);
22959 +#endif /* HAVE_TX_MQ */
22961 + * ixgbe_watchdog_task - worker thread to bring link up
22962 + * @work: pointer to work_struct containing our data
22964 +static void ixgbe_watchdog_task(struct work_struct *work)
22966 + struct ixgbe_adapter *adapter = container_of(work,
22967 + struct ixgbe_adapter,
22969 + struct net_device *netdev = adapter->netdev;
22970 + struct ixgbe_hw *hw = &adapter->hw;
22971 + u32 link_speed = adapter->link_speed;
22972 + bool link_up = adapter->link_up;
22974 + struct ixgbe_ring *tx_ring;
22975 + int some_tx_pending = 0;
22977 + /* if interface is down do nothing */
22978 + if (test_bit(__IXGBE_DOWN, &adapter->state))
22981 + if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
22982 + if (hw->mac.ops.check_link) {
22983 + hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
22985 +			/* always assume link is up if there is no check_link function */
22986 + link_speed = IXGBE_LINK_SPEED_10GB_FULL;
22990 + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
22991 + for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
22992 + hw->mac.ops.fc_enable(hw, i);
22994 + hw->mac.ops.fc_enable(hw, 0);
22999 + time_after(jiffies, (adapter->link_check_timeout +
23000 + IXGBE_TRY_LINK_TIMEOUT))) {
23001 + adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
23002 + IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
23003 + IXGBE_WRITE_FLUSH(hw);
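+			/*
+			 * Note: writing the LSC bit to EIMS above re-arms the
+			 * link-status-change interrupt once the polling window
+			 * has expired, so later link changes arrive as
+			 * interrupts rather than via the fast-poll timer.
+			 */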
23005 + adapter->link_up = link_up;
23006 + adapter->link_speed = link_speed;
23010 + if (!netif_carrier_ok(netdev)) {
23011 + bool flow_rx, flow_tx;
23013 + switch (hw->mac.type) {
23014 + case ixgbe_mac_82598EB: {
23015 + u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
23016 + u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
23017 + flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
23018 + flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
23021 + case ixgbe_mac_82599EB: {
23022 + u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
23023 + u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
23024 + flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
23025 + flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
23033 + DPRINTK(LINK, INFO, "NIC Link is Up %s, "
23034 + "Flow Control: %s\n",
23035 + (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
23037 + (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
23038 + "1 Gbps" : "unknown speed")),
23039 + ((flow_rx && flow_tx) ? "RX/TX" :
23040 + (flow_rx ? "RX" :
23041 + (flow_tx ? "TX" : "None"))));
23043 + netif_carrier_on(netdev);
23044 + netif_tx_wake_all_queues(netdev);
23046 + /* Force detection of hung controller */
23047 + for (i = 0; i < adapter->num_tx_queues; i++) {
23048 + tx_ring = adapter->tx_ring[i];
23049 + set_check_for_tx_hang(tx_ring);
23053 + adapter->link_up = false;
23054 + adapter->link_speed = 0;
23055 + if (netif_carrier_ok(netdev)) {
23056 + DPRINTK(LINK, INFO, "NIC Link is Down\n");
23057 + netif_carrier_off(netdev);
23058 + netif_tx_stop_all_queues(netdev);
23062 + if (!netif_carrier_ok(netdev)) {
23063 + for (i = 0; i < adapter->num_tx_queues; i++) {
23064 + tx_ring = adapter->tx_ring[i];
23065 + if (tx_ring->next_to_use != tx_ring->next_to_clean) {
23066 + some_tx_pending = 1;
23071 + if (some_tx_pending) {
23072 + /* We've lost link, so the controller stops DMA,
23073 + * but we've got queued Tx work that's never going
23074 + * to get done, so reset controller to flush Tx.
23075 + * (Do the reset outside of interrupt context).
23077 + schedule_work(&adapter->reset_task);
23081 + ixgbe_update_stats(adapter);
23083 + if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
23084 + /* poll faster when waiting for link */
23085 + mod_timer(&adapter->watchdog_timer, jiffies + (HZ/10));
23090 +void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
23091 + u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx)
23093 + struct ixgbe_adv_tx_context_desc *context_desc;
23094 + struct ixgbe_tx_buffer *tx_buffer_info;
23095 + u16 i = tx_ring->next_to_use;
23097 + tx_buffer_info = &tx_ring->tx_buffer_info[i];
23098 + tx_buffer_info->time_stamp = jiffies;
23099 + tx_buffer_info->next_to_watch = i;
23101 + context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
23104 + if (i == tx_ring->count)
23105 + tx_ring->next_to_use = 0;
23107 + tx_ring->next_to_use = i;
23109 + /* set bits to identify this as an advanced context descriptor */
23110 + type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
23112 + context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
23113 + context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof);
23114 + context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
23115 + context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
23118 +static int ixgbe_tso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
23119 + u32 tx_flags, u8 *hdr_len)
23121 +#ifdef NETIF_F_TSO
23123 + u32 vlan_macip_lens, type_tucmd;
23124 + u32 mss_l4len_idx, l4len;
23126 + if (!skb_is_gso(skb))
23127 +#endif /* NETIF_F_TSO */
23129 +#ifdef NETIF_F_TSO
23131 + if (skb_header_cloned(skb)) {
23132 + err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
23137 + /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
23138 + type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
23140 + if (skb->protocol == __constant_htons(ETH_P_IP)) {
23141 + struct iphdr *iph = ip_hdr(skb);
23142 + iph->tot_len = 0;
23144 + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
23148 + type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
23149 +#ifdef NETIF_F_TSO6
23150 + } else if (skb_is_gso_v6(skb)) {
23151 + ipv6_hdr(skb)->payload_len = 0;
23152 + tcp_hdr(skb)->check =
23153 + ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
23154 + &ipv6_hdr(skb)->daddr,
23155 + 0, IPPROTO_TCP, 0);
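+	/*
+	 * Note: for TSO the L4 checksum field is pre-seeded with the
+	 * pseudo-header sum (computed with a zero length above) so the
+	 * hardware can finalize the checksum of each segment it emits.
+	 */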
23159 + l4len = tcp_hdrlen(skb);
23160 + *hdr_len = skb_transport_offset(skb) + l4len;
23162 +	/* mss_l4len_idx: use 1 as index for TSO */
23163 + mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
23164 + mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
23165 + mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
23167 + /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
23168 + vlan_macip_lens = skb_network_header_len(skb);
23169 + vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
23170 + vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
23172 + ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd,
23179 +static bool ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
23180 + struct sk_buff *skb, u32 tx_flags)
23182 + u32 vlan_macip_lens = 0, type_tucmd = 0;
23184 + if (skb->ip_summed != CHECKSUM_PARTIAL) {
23185 + if (!(tx_flags & IXGBE_TX_FLAGS_VLAN))
23188 + __be16 protocol = skb->protocol;
23190 +#ifdef NETIF_F_HW_VLAN_TX
23191 + if (skb->protocol == __constant_htons(ETH_P_8021Q)) {
23192 + const struct vlan_ethhdr *vhdr =
23193 + (const struct vlan_ethhdr *)skb->data;
23195 + protocol = vhdr->h_vlan_encapsulated_proto;
23197 + protocol = skb->protocol;
23201 + switch (protocol) {
23202 + case __constant_htons(ETH_P_IP):
23203 + if (ip_hdr(skb)->protocol == IPPROTO_TCP)
23204 + type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP |
23205 + IXGBE_ADVTXD_TUCMD_IPV4;
23207 + type_tucmd = IXGBE_ADVTXD_TUCMD_IPV4;
23209 +#ifdef NETIF_F_IPV6_CSUM
23210 + case __constant_htons(ETH_P_IPV6):
23211 + /* XXX what about other V6 headers?? */
23212 + if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
23213 + type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
23217 + if (unlikely(net_ratelimit())) {
23218 + dev_warn(tx_ring->dev,
23219 + "partial checksum but proto=%x!\n",
23224 + vlan_macip_lens = skb_network_header_len(skb);
23227 + vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
23228 + vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
23230 + ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, 0);
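+	/*
+	 * Note: returning true here tells the caller to set
+	 * IXGBE_TX_FLAGS_CSUM; a VLAN-only context descriptor (no
+	 * CHECKSUM_PARTIAL) is still written but returns false.
+	 */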
23232 + return (skb->ip_summed == CHECKSUM_PARTIAL);
23235 +static u16 ixgbe_tx_map(struct ixgbe_ring *tx_ring,
23236 + struct sk_buff *skb, u32 tx_flags,
23237 + unsigned int first, const u8 hdr_len)
23239 + struct device *dev = tx_ring->dev;
23240 + struct ixgbe_tx_buffer *tx_buffer_info;
23241 +#ifdef MAX_SKB_FRAGS
23242 + unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
23243 + unsigned int f = 0;
23244 + unsigned int data_len = skb->data_len;
23246 + unsigned int len = skb_headlen(skb), bytecount = skb->len;
23247 + u32 offset = 0, size;
23248 + u16 gso_segs = 1;
23249 + u16 i = tx_ring->next_to_use;
23253 + if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
23254 + if (data_len >= sizeof(struct fcoe_crc_eof)) {
23255 + data_len -= sizeof(struct fcoe_crc_eof);
23257 + len -= sizeof(struct fcoe_crc_eof) - data_len;
23264 + tx_buffer_info = &tx_ring->tx_buffer_info[i];
23265 + size = min_t(unsigned int, len, IXGBE_MAX_DATA_PER_TXD);
23267 + tx_buffer_info->length = size;
23268 + tx_buffer_info->mapped_as_page = false;
23269 + tx_buffer_info->dma = dma_map_single(dev,
23270 + skb->data + offset,
23271 + size, DMA_TO_DEVICE);
23272 + if (dma_mapping_error(dev, tx_buffer_info->dma))
23274 + tx_buffer_info->time_stamp = jiffies;
23275 + tx_buffer_info->next_to_watch = i;
23281 + if (i == tx_ring->count)
23285 +#ifdef MAX_SKB_FRAGS
23286 + while (data_len && (f < nr_frags)) {
23287 + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[f];
23288 + len = min_t(unsigned int, data_len, frag->size);
23290 + offset = frag->page_offset;
23293 + tx_buffer_info = &tx_ring->tx_buffer_info[i];
23294 + size = min_t(unsigned int, len, IXGBE_MAX_DATA_PER_TXD);
23296 + tx_buffer_info->length = size;
23297 + tx_buffer_info->mapped_as_page = true;
23298 + tx_buffer_info->dma = dma_map_page(dev,
23302 + if (dma_mapping_error(dev, tx_buffer_info->dma))
23304 + tx_buffer_info->time_stamp = jiffies;
23305 + tx_buffer_info->next_to_watch = i;
23308 + data_len -= size;
23312 + if (i == tx_ring->count)
23320 + i = tx_ring->count;
23323 +#ifdef NETIF_F_TSO
23324 + if (tx_flags & IXGBE_TX_FLAGS_TSO)
23325 + gso_segs = skb_shinfo(skb)->gso_segs;
23327 + /* adjust for FCoE Sequence Offload */
23328 + else if (tx_flags & IXGBE_TX_FLAGS_FSO)
23329 + gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
23330 + skb_shinfo(skb)->gso_size);
23331 +#endif /* IXGBE_FCOE */
23333 + bytecount += ((gso_segs - 1) * hdr_len);
23335 + /* multiply data chunks by size of headers */
23336 + tx_ring->tx_buffer_info[i].bytecount = bytecount;
23337 + tx_ring->tx_buffer_info[i].gso_segs = gso_segs;
23338 + tx_ring->tx_buffer_info[i].skb = skb;
23339 + tx_ring->tx_buffer_info[first].next_to_watch = i;
23344 + dev_err(dev, "TX DMA map failed\n");
23346 + /* clear timestamp and dma mappings for failed tx_buffer_info map */
23347 + tx_buffer_info->dma = 0;
23348 + tx_buffer_info->time_stamp = 0;
23349 + tx_buffer_info->next_to_watch = 0;
23351 + /* clear timestamp and dma mappings for remaining portion of packet */
23352 + for (; count > 0; count--) {
23354 + i = tx_ring->count;
23356 + tx_buffer_info = &tx_ring->tx_buffer_info[i];
23357 + ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
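+	/*
+	 * Note: the loop above walks backwards over everything mapped so
+	 * far, so a failed frame leaves the ring exactly as it was found.
+	 */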
23363 +#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
23364 + IXGBE_TXD_CMD_RS | \
23365 + IXGBE_TXD_CMD_IFCS)
23367 +static void ixgbe_tx_queue(struct ixgbe_ring *tx_ring, int tx_flags,
23368 + int count, u32 paylen, const u8 hdr_len)
23370 + union ixgbe_adv_tx_desc *tx_desc = NULL;
23371 + struct ixgbe_tx_buffer *tx_buffer_info;
23372 + u32 olinfo_status = 0, cmd_type_len;
23375 + cmd_type_len = IXGBE_ADVTXD_DTYP_DATA |
23376 + IXGBE_ADVTXD_DCMD_IFCS |
23377 + IXGBE_ADVTXD_DCMD_DEXT;
23379 + if (tx_flags & IXGBE_TX_FLAGS_VLAN)
23380 + cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
23382 + if (tx_flags & IXGBE_TX_FLAGS_TSO) {
23383 + cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
23385 + olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
23386 + IXGBE_ADVTXD_POPTS_SHIFT;
23388 + /* use index 1 context for tso */
23389 + olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
23390 + if (tx_flags & IXGBE_TX_FLAGS_IPV4)
23391 + olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
23392 + IXGBE_ADVTXD_POPTS_SHIFT;
23394 + } else if (tx_flags & IXGBE_TX_FLAGS_CSUM) {
23395 + olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
23396 + IXGBE_ADVTXD_POPTS_SHIFT;
23399 + } else if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
23400 + olinfo_status |= IXGBE_ADVTXD_CC;
23401 + olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
23402 + if (tx_flags & IXGBE_TX_FLAGS_FSO)
23403 + cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
23404 +#endif /* IXGBE_FCOE */
23407 + olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
23409 + i = tx_ring->next_to_use;
23410 + while (count--) {
23411 + tx_buffer_info = &tx_ring->tx_buffer_info[i];
23412 + tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
23413 + tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
23414 + tx_desc->read.cmd_type_len =
23415 + cpu_to_le32(cmd_type_len | tx_buffer_info->length);
23416 + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
23418 + if (i == tx_ring->count)
23421 + tx_ring->next_to_use = i;
23423 + tx_desc->read.cmd_type_len |= cpu_to_le32(IXGBE_TXD_CMD);
23426 + * Force memory writes to complete before letting h/w
23427 + * know there are new descriptors to fetch. (Only
23428 + * applicable for weak-ordered memory model archs,
23429 + * such as IA-64).
23433 + writel(i, tx_ring->tail);
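+	/*
+	 * Note: the tail write is the doorbell that hands the newly
+	 * queued descriptors to the hardware DMA engine.
+	 */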
23436 +static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
23437 + u8 queue, u32 tx_flags)
23439 + struct ethhdr *eth = (struct ethhdr *)skb->data;
23440 + struct iphdr *iph = ip_hdr(skb);
23441 + struct ixgbe_atr_input atr_input;
23442 + u16 vlan_id, src_port, dst_port;
23445 + /* Right now, we support IPv4 only */
23446 + if (skb->protocol != htons(ETH_P_IP))
23449 + /* check if we're UDP or TCP */
23450 + if (iph->protocol == IPPROTO_TCP) {
23451 + struct tcphdr *th = tcp_hdr(skb);
23452 + src_port = th->source;
23453 + dst_port = th->dest;
23454 + l4type |= IXGBE_ATR_L4TYPE_TCP;
23455 + /* l4type IPv4 type is 0, no need to assign */
23457 + /* Unsupported L4 header, just bail here */
23461 + memset(&atr_input, 0, sizeof(struct ixgbe_atr_input));
23463 + vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >>
23464 + IXGBE_TX_FLAGS_VLAN_SHIFT;
23466 + ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id);
23467 + ixgbe_atr_set_src_port_82599(&atr_input, dst_port);
23468 + ixgbe_atr_set_dst_port_82599(&atr_input, src_port);
23469 + ixgbe_atr_set_flex_byte_82599(&atr_input, eth->h_proto);
23470 + ixgbe_atr_set_l4type_82599(&atr_input, l4type);
23471 +	/* src and dst are inverted; think of how the receiver sees them */
23472 + ixgbe_atr_set_src_ipv4_82599(&atr_input, iph->daddr);
23473 + ixgbe_atr_set_dst_ipv4_82599(&atr_input, iph->saddr);
23475 + /* This assumes the Rx queue and Tx queue are bound to the same CPU */
23476 + ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue);
23479 +static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
23481 + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
23482 + /* Herbert's original patch had:
23483 + * smp_mb__after_netif_stop_queue();
23484 + * but since that doesn't exist yet, just open code it. */
23487 +	/* We need to check again in case another CPU has just
23488 +	 * made room available. */
23489 + if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
23492 + /* A reprieve! - use start_queue because it doesn't call schedule */
23493 + netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
23494 + ++tx_ring->tx_stats.restart_queue;
23498 +static int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
23500 + if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
23502 + return __ixgbe_maybe_stop_tx(tx_ring, size);
23505 +netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
23506 + struct ixgbe_adapter *adapter,
23507 + struct ixgbe_ring *tx_ring)
23509 + struct net_device *netdev = tx_ring->netdev;
23511 + int count = 0, tx_map_count = 0;
23512 +#ifdef MAX_SKB_FRAGS
23513 +#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
23517 + u32 tx_flags = 0;
23522 + * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
23523 + * + 1 desc for skb_head_len/IXGBE_MAX_DATA_PER_TXD,
23524 + * + 2 desc gap to keep tail from touching head,
23525 + * + 1 desc for context descriptor,
23526 + * otherwise try next time
23528 +#ifdef MAX_SKB_FRAGS
23529 +#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
23530 + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
23531 + count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
23533 + count += skb_shinfo(skb)->nr_frags;
23536 + count += TXD_USE_COUNT(skb_headlen(skb));
23537 + if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
23538 + tx_ring->tx_stats.tx_busy++;
23539 + return NETDEV_TX_BUSY;
23542 +#ifdef NETIF_F_HW_VLAN_TX
23543 + if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
23544 + tx_flags |= vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
23545 + tx_flags |= IXGBE_TX_FLAGS_VLAN;
23550 + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
23551 + tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
23553 + /* for FCoE with DCB, we force the priority to what
23554 + * was specified by the switch */
23555 + if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
23556 + ((skb->protocol == htons(ETH_P_FCOE)) ||
23557 + (skb->protocol == htons(ETH_P_FIP))))
23558 + tx_flags |= adapter->fcoe.up <<
23559 + IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT;
23561 +#endif /* IXGBE_FCOE */
23562 + tx_flags |= skb->queue_mapping <<
23563 + IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT;
23564 + tx_flags |= IXGBE_TX_FLAGS_VLAN;
23567 +#endif /* HAVE_TX_MQ */
23568 + first = tx_ring->next_to_use;
23570 + if (skb->protocol == htons(ETH_P_IP))
23571 + tx_flags |= IXGBE_TX_FLAGS_IPV4;
23574 + /* setup tx offload for FCoE */
23575 + else if (skb->protocol == htons(ETH_P_FCOE)) {
23576 + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
23577 + tso = ixgbe_fso(tx_ring, skb, tx_flags, &hdr_len);
23579 + dev_kfree_skb_any(skb);
23580 + return NETDEV_TX_OK;
23583 + tx_flags |= IXGBE_TX_FLAGS_FSO |
23584 + IXGBE_TX_FLAGS_FCOE;
23586 + tx_flags |= IXGBE_TX_FLAGS_FCOE;
23592 +#endif /* IXGBE_FCOE */
23593 + if ((tso = ixgbe_tso(tx_ring, skb, tx_flags, &hdr_len)))
23594 + tx_flags |= IXGBE_TX_FLAGS_TSO;
23595 + else if (ixgbe_tx_csum(tx_ring, skb, tx_flags))
23596 + tx_flags |= IXGBE_TX_FLAGS_CSUM;
23599 + dev_kfree_skb_any(skb);
23600 + return NETDEV_TX_OK;
23603 + /* add the ATR filter if ATR is on */
23604 + if (tx_ring->atr_sample_rate) {
23605 + ++tx_ring->atr_count;
23606 + if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
23607 + test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state)) {
23608 + ixgbe_atr(adapter, skb, tx_ring->queue_index, tx_flags);
23609 + tx_ring->atr_count = 0;
23615 +#endif /* IXGBE_FCOE */
23616 + tx_map_count = ixgbe_tx_map(tx_ring, skb, tx_flags, first, hdr_len);
23617 + if (!tx_map_count) {
23618 + /* handle dma mapping errors in ixgbe_tx_map */
23619 + dev_kfree_skb_any(skb);
23620 + tx_ring->next_to_use = first;
23621 + return NETDEV_TX_OK;
23624 + ixgbe_tx_queue(tx_ring, tx_flags, tx_map_count, skb->len, hdr_len);
23626 + netdev->trans_start = jiffies;
23628 + ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
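+	/*
+	 * Note: the queue is stopped ahead of time when fewer than
+	 * DESC_NEEDED slots remain, so the next frame never has to be
+	 * abandoned halfway through mapping.
+	 */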
23630 + return NETDEV_TX_OK;
23633 +static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
23635 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
23636 + struct ixgbe_ring *tx_ring;
23639 + tx_ring = adapter->tx_ring[skb->queue_mapping];
23641 + tx_ring = adapter->tx_ring[0];
23643 + return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
23647 + * ixgbe_get_stats - Get System Network Statistics
23648 + * @netdev: network interface device structure
23650 + * Returns the address of the device statistics structure.
23651 + * The statistics are actually updated from the timer callback.
23653 +static struct net_device_stats *ixgbe_get_stats(struct net_device *netdev)
23655 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
23657 + /* only return the current stats */
23658 + return &adapter->net_stats;
23662 + * ixgbe_set_mac - Change the Ethernet Address of the NIC
23663 + * @netdev: network interface device structure
23664 + * @p: pointer to an address structure
23666 + * Returns 0 on success, negative on failure
23668 +static int ixgbe_set_mac(struct net_device *netdev, void *p)
23670 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
23671 + struct ixgbe_hw *hw = &adapter->hw;
23672 + struct sockaddr *addr = p;
23674 + if (!is_valid_ether_addr(addr->sa_data))
23675 + return -EADDRNOTAVAIL;
23677 + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
23678 + memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
23680 + if (hw->mac.ops.set_rar)
23681 + hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
23686 +#if defined(HAVE_NETDEV_STORAGE_ADDRESS) && defined(NETDEV_HW_ADDR_T_SAN)
23688 + * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
23689 + * netdev->dev_addr_list
23690 + * @netdev: network interface device structure
23692 + * Returns non-zero on failure
23694 +static int ixgbe_add_sanmac_netdev(struct net_device *dev)
23697 + struct ixgbe_adapter *adapter = netdev_priv(dev);
23698 + struct ixgbe_mac_info *mac = &adapter->hw.mac;
23700 + if (is_valid_ether_addr(mac->san_addr)) {
23702 + err = dev_addr_add(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
23709 + * ixgbe_del_sanmac_netdev - Removes the SAN MAC address from the corresponding
23710 + * netdev->dev_addr_list
23711 + * @netdev: network interface device structure
23713 + * Returns non-zero on failure
23715 +static int ixgbe_del_sanmac_netdev(struct net_device *dev)
23718 + struct ixgbe_adapter *adapter = netdev_priv(dev);
23719 + struct ixgbe_mac_info *mac = &adapter->hw.mac;
23721 + if (is_valid_ether_addr(mac->san_addr)) {
23723 + err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
23729 +#endif /* (HAVE_NETDEV_STORAGE_ADDRESS) && defined(NETDEV_HW_ADDR_T_SAN) */
23730 +#ifdef ETHTOOL_OPS_COMPAT
23737 +static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
23740 + case SIOCETHTOOL:
23741 + return ethtool_ioctl(ifr);
23743 + return -EOPNOTSUPP;
23748 +#ifdef CONFIG_NET_POLL_CONTROLLER
23750 + * Polling 'interrupt' - used by things like netconsole to send skbs
23751 + * without having to re-enable interrupts. It's not called while
23752 + * the interrupt routine is executing.
23754 +static void ixgbe_netpoll(struct net_device *netdev)
23756 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
23759 + /* if interface is down do nothing */
23760 + if (test_bit(__IXGBE_DOWN, &adapter->state))
23763 +#ifndef CONFIG_IXGBE_NAPI
23764 + ixgbe_irq_disable(adapter);
23766 + adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
23767 + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
23768 + int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
23769 + for (i = 0; i < num_q_vectors; i++) {
23770 + struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
23771 + ixgbe_msix_clean_many(0, q_vector);
23774 + ixgbe_intr(adapter->pdev->irq, netdev);
23776 + adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
23777 +#ifndef CONFIG_IXGBE_NAPI
23778 + ixgbe_irq_enable(adapter, true, true);
23783 +#ifdef HAVE_NETDEV_SELECT_QUEUE
23784 +static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
23786 + struct ixgbe_adapter *adapter = netdev_priv(dev);
23787 + int txq = smp_processor_id();
23790 + if ((skb->protocol == htons(ETH_P_FCOE)) ||
23791 + (skb->protocol == htons(ETH_P_FIP))) {
23792 + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
23793 + txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
23794 + txq += adapter->ring_feature[RING_F_FCOE].mask;
23796 + } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
23797 + txq = adapter->fcoe.up;
23802 +#endif /* IXGBE_FCOE */
23803 + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
23804 + while (unlikely(txq >= dev->real_num_tx_queues))
23805 + txq -= dev->real_num_tx_queues;
23809 + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
23810 + if (skb->priority == TC_PRIO_CONTROL)
23811 + txq = adapter->ring_feature[RING_F_DCB].indices - 1;
23813 + txq = (skb->vlan_tci & IXGBE_TX_FLAGS_VLAN_PRIO_MASK)
23817 + return skb_tx_hash(dev, skb);
23820 +#endif /* HAVE_NETDEV_SELECT_QUEUE */
23821 +#ifdef HAVE_NET_DEVICE_OPS
23822 +static const struct net_device_ops ixgbe_netdev_ops = {
23823 + .ndo_open = &ixgbe_open,
23824 + .ndo_stop = &ixgbe_close,
23825 + .ndo_start_xmit = &ixgbe_xmit_frame,
23826 + .ndo_get_stats = &ixgbe_get_stats,
23827 + .ndo_set_rx_mode = &ixgbe_set_rx_mode,
23828 + .ndo_set_multicast_list = &ixgbe_set_rx_mode,
23829 + .ndo_validate_addr = eth_validate_addr,
23830 + .ndo_set_mac_address = &ixgbe_set_mac,
23831 + .ndo_change_mtu = &ixgbe_change_mtu,
23832 +#ifdef ETHTOOL_OPS_COMPAT
23833 + .ndo_do_ioctl = &ixgbe_ioctl,
23835 + .ndo_tx_timeout = &ixgbe_tx_timeout,
23836 + .ndo_vlan_rx_register = &ixgbe_vlan_rx_register,
23837 + .ndo_vlan_rx_add_vid = &ixgbe_vlan_rx_add_vid,
23838 + .ndo_vlan_rx_kill_vid = &ixgbe_vlan_rx_kill_vid,
23839 +#ifdef HAVE_IPLINK_VF_CONFIG
23840 + .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac,
23841 + .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
23842 + .ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw,
23843 + .ndo_get_vf_config = ixgbe_ndo_get_vf_config,
23845 +#ifdef CONFIG_NET_POLL_CONTROLLER
23846 + .ndo_poll_controller = &ixgbe_netpoll,
23848 + .ndo_select_queue = &ixgbe_select_queue,
23850 + .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
23851 + .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
23852 +#ifdef HAVE_NETDEV_OPS_FCOE_ENABLE
23853 + .ndo_fcoe_enable = ixgbe_fcoe_enable,
23854 + .ndo_fcoe_disable = ixgbe_fcoe_disable,
23856 +#ifdef HAVE_NETDEV_OPS_FCOE_GETWWN
23857 + .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn,
23859 +#endif /* IXGBE_FCOE */
23862 +#endif /* HAVE_NET_DEVICE_OPS */
23866 +void ixgbe_assign_netdev_ops(struct net_device *dev)
23868 + struct ixgbe_adapter *adapter;
23869 + adapter = netdev_priv(dev);
23870 +#ifdef HAVE_NET_DEVICE_OPS
23871 + dev->netdev_ops = &ixgbe_netdev_ops;
23872 +#else /* HAVE_NET_DEVICE_OPS */
23873 + dev->open = &ixgbe_open;
23874 + dev->stop = &ixgbe_close;
23875 + dev->hard_start_xmit = &ixgbe_xmit_frame;
23876 + dev->get_stats = &ixgbe_get_stats;
23877 +#ifdef HAVE_SET_RX_MODE
23878 + dev->set_rx_mode = &ixgbe_set_rx_mode;
23880 + dev->set_multicast_list = &ixgbe_set_rx_mode;
23881 + dev->set_mac_address = &ixgbe_set_mac;
23882 + dev->change_mtu = &ixgbe_change_mtu;
23883 +#ifdef ETHTOOL_OPS_COMPAT
23884 + dev->do_ioctl = &ixgbe_ioctl;
23886 +#ifdef HAVE_TX_TIMEOUT
23887 + dev->tx_timeout = &ixgbe_tx_timeout;
23889 +#ifdef NETIF_F_HW_VLAN_TX
23890 + dev->vlan_rx_register = &ixgbe_vlan_rx_register;
23891 + dev->vlan_rx_add_vid = &ixgbe_vlan_rx_add_vid;
23892 + dev->vlan_rx_kill_vid = &ixgbe_vlan_rx_kill_vid;
23894 +#ifdef CONFIG_NET_POLL_CONTROLLER
23895 + dev->poll_controller = &ixgbe_netpoll;
23897 +#ifdef HAVE_NETDEV_SELECT_QUEUE
23898 + dev->select_queue = &ixgbe_select_queue;
23899 +#endif /* HAVE_NETDEV_SELECT_QUEUE */
23900 +#endif /* HAVE_NET_DEVICE_OPS */
23901 + ixgbe_set_ethtool_ops(dev);
23902 + dev->watchdog_timeo = 5 * HZ;
23905 +static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter)
23907 +#ifdef CONFIG_PCI_IOV
23910 + err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
23912 + DPRINTK(PROBE, ERR,
23913 + "Failed to enable PCI sriov: %d\n", err);
23916 +	/* If the call to enable VFs succeeded, then allocate memory
23917 +	 * for per-VF control structures.
23919 + adapter->vfinfo =
23920 + kcalloc(adapter->num_vfs,
23921 + sizeof(struct vf_data_storage), GFP_KERNEL);
23922 + if (adapter->vfinfo) {
23923 + adapter->l2switch_enable = true;
23924 + adapter->repl_enable = true;
23926 + /* RSS not compatible with SR-IOV operation */
23927 + adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
23929 + /* Disable RSC when in SR-IOV mode */
23930 + adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
23931 + IXGBE_FLAG2_RSC_ENABLED);
23933 + adapter->flags &= ~(IXGBE_FLAG_RX_PS_ENABLED |
23934 + IXGBE_FLAG_RX_PS_CAPABLE);
23940 + DPRINTK(PROBE, ERR,
23941 + "Unable to allocate memory for VF "
23942 + "Data Storage - SRIOV disabled\n");
23943 + pci_disable_sriov(adapter->pdev);
23946 + adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
23947 + adapter->num_vfs = 0;
23948 +#endif /* CONFIG_PCI_IOV */
23952 + * ixgbe_probe - Device Initialization Routine
23953 + * @pdev: PCI device information struct
23954 + * @ent: entry in ixgbe_pci_tbl
23956 + * Returns 0 on success, negative on failure
23958 + * ixgbe_probe initializes an adapter identified by a pci_dev structure.
23959 + * The OS initialization, configuring of the adapter private structure,
23960 + * and a hardware reset occur.
23962 +static int __devinit ixgbe_probe(struct pci_dev *pdev,
23963 + const struct pci_device_id *ent)
23965 + struct net_device *netdev;
23966 + struct ixgbe_adapter *adapter = NULL;
23967 + struct ixgbe_hw *hw = NULL;
23968 + static int cards_found;
23969 + int i, err, pci_using_dac;
23971 + unsigned int indices;
23974 + u8 part_str[IXGBE_PBA_LEN];
23975 + u32 part_str_size = IXGBE_PBA_LEN;
23976 + enum ixgbe_mac_type mac_type = ixgbe_mac_unknown;
23981 + err = pci_enable_device(pdev);
23985 + if (!dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(64)) &&
23986 + !dma_set_coherent_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(64))) {
23987 + pci_using_dac = 1;
23989 + err = dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(32));
23991 + err = dma_set_coherent_mask(pci_dev_to_dev(pdev),
23992 + DMA_BIT_MASK(32));
23994 + dev_err(pci_dev_to_dev(pdev), "No usable DMA "
23995 + "configuration, aborting\n");
23999 + pci_using_dac = 0;
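+	/*
+	 * Note: a 64-bit DMA mask is tried first with 32-bit as the
+	 * fallback; pci_using_dac later gates NETIF_F_HIGHDMA.
+	 */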
24002 + err = pci_request_regions(pdev, ixgbe_driver_name);
24004 + dev_err(pci_dev_to_dev(pdev),
24005 + "pci_request_regions failed 0x%x\n", err);
24006 + goto err_pci_reg;
24010 +	 * The mac_type is needed before the adapter is set up,
24011 +	 * so rather than maintain two devID -> MAC tables we dummy up
24012 +	 * an ixgbe_hw struct and use ixgbe_set_mac_type.
24014 + hw = vmalloc(sizeof(struct ixgbe_hw));
24016 + printk(KERN_INFO "Unable to allocate memory for early mac "
24019 + hw->vendor_id = pdev->vendor;
24020 + hw->device_id = pdev->device;
24021 + ixgbe_set_mac_type(hw);
24022 + mac_type = hw->mac.type;
24027 +	 * Workaround of Silicon errata on 82598. Disable L0s in the PCI switch
24028 +	 * port to which the 82598 is connected to prevent duplicate
24029 +	 * completions caused by L0s. We need the mac type so that we only
24030 +	 * do this on 82598 devices; ixgbe_set_mac_type does this for us if
24031 +	 * we set its device ID.
24033 + if (mac_type == ixgbe_mac_82598EB)
24034 + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);
24036 + pci_enable_pcie_error_reporting(pdev);
24038 + pci_set_master(pdev);
24041 + indices = num_possible_cpus();
24042 + if (mac_type == ixgbe_mac_unknown)
24043 + indices = max_t(unsigned int, IXGBE_MAX_RSS_INDICES,
24044 + IXGBE_MAX_FDIR_INDICES);
24045 + else if (mac_type == ixgbe_mac_82598EB)
24046 + indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES);
24048 + indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES);
24049 + indices = max_t(unsigned int, indices, IXGBE_MAX_DCB_INDICES);
24051 + indices += min_t(unsigned int, num_possible_cpus(),
24052 + IXGBE_MAX_FCOE_INDICES);
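+	/*
+	 * Note: the queue count starts at the number of possible CPUs,
+	 * is clamped to what the MAC supports, and FCoE indices are
+	 * added on top before sizing the multiqueue netdev.
+	 */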
24054 + netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
24056 + netdev = alloc_etherdev(sizeof(struct ixgbe_adapter));
24060 + goto err_alloc_etherdev;
24063 + SET_NETDEV_DEV(netdev, &pdev->dev);
24065 + adapter = netdev_priv(netdev);
24066 + pci_set_drvdata(pdev, adapter);
24068 + adapter->netdev = netdev;
24069 + adapter->pdev = pdev;
24070 + hw = &adapter->hw;
24071 + hw->back = adapter;
24072 + adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
24074 +#ifdef HAVE_DEVICE_NUMA_NODE
24075 + DPRINTK(TX_ERR, INFO, "my (original) node was: %d\n",
24076 + dev_to_node(&pdev->dev));
24077 +#endif /* HAVE_DEVICE_NUMA_NODE */
24079 +#ifdef HAVE_PCI_ERS
24081 + * call save state here in standalone driver because it relies on
24082 + * adapter struct to exist, and needs to call netdev_priv
24084 + pci_save_state(pdev);
24087 + hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
24088 + pci_resource_len(pdev, 0));
24089 + if (!hw->hw_addr) {
24091 + goto err_ioremap;
24094 + ixgbe_assign_netdev_ops(netdev);
24096 + strcpy(netdev->name, pci_name(pdev));
24098 + adapter->bd_number = cards_found;
24100 +#ifdef IXGBE_TCP_TIMER
24101 + adapter->msix_addr = ioremap(pci_resource_start(pdev, 3),
24102 + pci_resource_len(pdev, 3));
24103 + if (!adapter->msix_addr) {
24105 + printk("Error in ioremap of BAR3\n");
24106 + goto err_map_msix;
24110 + /* set up this timer and work struct before calling get_invariants
24111 + * which might start the timer
24113 + setup_timer(&adapter->sfp_timer, &ixgbe_sfp_timer,
24114 + (unsigned long) adapter);
24115 + INIT_WORK(&adapter->sfp_task, ixgbe_sfp_task);
24117 + /* multispeed fiber has its own tasklet, called from GPI SDP1 context */
24118 + INIT_WORK(&adapter->multispeed_fiber_task, ixgbe_multispeed_fiber_task);
24120 + /* a new SFP+ module arrival, called from GPI SDP2 context */
24121 + INIT_WORK(&adapter->sfp_config_module_task,
24122 + ixgbe_sfp_config_module_task);
24124 + /* setup the private structure */
24125 + err = ixgbe_sw_init(adapter);
24127 + goto err_sw_init;
24129 +	/* Make it possible for the adapter to be woken up via WOL */
24130 + if (adapter->hw.mac.type == ixgbe_mac_82599EB)
24131 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
24134 +	 * If we have a fan, this is as early as we can know, so warn if
24135 +	 * we have had a failure.
24137 + if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
24138 + u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
24139 + if (esdp & IXGBE_ESDP_SDP1)
24140 + DPRINTK(PROBE, CRIT,
24141 + "Fan has stopped, replace the adapter\n");
24144 + /* reset_hw fills in the perm_addr as well */
24145 + hw->phy.reset_if_overtemp = true;
24146 + err = hw->mac.ops.reset_hw(hw);
24147 + hw->phy.reset_if_overtemp = false;
24148 + if (err == IXGBE_ERR_SFP_NOT_PRESENT &&
24149 + hw->mac.type == ixgbe_mac_82598EB) {
24151 + * Start a kernel thread to watch for a module to arrive.
24152 + * Only do this for 82598, since 82599 will generate interrupts
24153 + * on module arrival.
24155 + set_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
24156 + mod_timer(&adapter->sfp_timer,
24157 + round_jiffies(jiffies + (2 * HZ)));
24159 + } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
24160 + DPRINTK(PROBE, ERR, "failed to load because an "
24161 + "unsupported SFP+ module type was detected.\n");
24162 + goto err_sw_init;
24163 + } else if (err) {
24164 + DPRINTK(PROBE, ERR, "HW Init failed: %d\n", err);
24165 + goto err_sw_init;
24169 + * check_options must be called before setup_link to set up
24170 + * hw->fc completely
24172 + ixgbe_check_options(adapter);
24174 + DPRINTK(TX_ERR, INFO, "my (preferred) node is: %d\n", adapter->node);
24176 + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
24177 + ixgbe_probe_vf(adapter);
24179 +#ifdef MAX_SKB_FRAGS
24180 +#ifdef NETIF_F_HW_VLAN_TX
24181 + netdev->features = NETIF_F_SG |
24182 + NETIF_F_IP_CSUM |
24183 + NETIF_F_HW_VLAN_TX |
24184 + NETIF_F_HW_VLAN_RX |
24185 + NETIF_F_HW_VLAN_FILTER;
24188 + netdev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
24191 +#ifdef NETIF_F_IPV6_CSUM
24192 + netdev->features |= NETIF_F_IPV6_CSUM;
24194 +#ifdef NETIF_F_TSO
24195 + netdev->features |= NETIF_F_TSO;
24196 +#ifdef NETIF_F_TSO6
24197 + netdev->features |= NETIF_F_TSO6;
24198 +#endif /* NETIF_F_TSO6 */
24199 +#endif /* NETIF_F_TSO */
24200 +#ifdef NETIF_F_GRO
24201 + netdev->features |= NETIF_F_GRO;
24202 +#endif /* NETIF_F_GRO */
24203 +#ifdef NETIF_F_NTUPLE
24205 + * If perfect filters were enabled in check_options(), enable them
24206 + * on the netdevice too.
24208 + if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
24209 + netdev->features |= NETIF_F_NTUPLE;
24210 +#endif /* NETIF_F_NTUPLE */
24211 + if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)
24212 + adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
24213 + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
24214 + adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
24215 + if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
24216 + adapter->flags &= ~(IXGBE_FLAG_FDIR_HASH_CAPABLE
24217 + | IXGBE_FLAG_FDIR_PERFECT_CAPABLE);
24218 +#ifdef NETIF_F_NTUPLE
24219 + /* clear n-tuple support in the netdev unconditionally */
24220 + netdev->features &= ~NETIF_F_NTUPLE;
24221 +#endif /* NETIF_F_NTUPLE */
24223 + if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) {
24224 + netdev->features |= NETIF_F_LRO;
24225 + adapter->flags2 &= ~IXGBE_FLAG2_SWLRO_ENABLED;
24226 +#ifndef IXGBE_NO_HW_RSC
24227 + adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
24229 + netdev->features |= NETIF_F_LRO;
24230 + adapter->flags2 |= IXGBE_FLAG2_SWLRO_ENABLED;
24233 +#ifndef IXGBE_NO_LRO
24234 + netdev->features |= NETIF_F_LRO;
24235 + adapter->flags2 |= IXGBE_FLAG2_SWLRO_ENABLED;
24237 + adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
24239 +#ifdef HAVE_NETDEV_VLAN_FEATURES
24240 +#ifdef NETIF_F_TSO
24241 + netdev->vlan_features |= NETIF_F_TSO;
24242 +#ifdef NETIF_F_TSO6
24243 + netdev->vlan_features |= NETIF_F_TSO6;
24244 +#endif /* NETIF_F_TSO6 */
24245 +#endif /* NETIF_F_TSO */
24246 + netdev->vlan_features |= NETIF_F_IP_CSUM;
24247 +#ifdef NETIF_F_IPV6_CSUM
24248 + netdev->vlan_features |= NETIF_F_IPV6_CSUM;
24250 + netdev->vlan_features |= NETIF_F_SG;
24252 +#endif /* HAVE_NETDEV_VLAN_FEATURES */
24254 + netdev->dcbnl_ops = &dcbnl_ops;
24258 + switch (adapter->hw.mac.type) {
24259 + case ixgbe_mac_82599EB:
24260 +#ifdef NETIF_F_FSO
24261 + if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
24262 + ixgbe_get_device_caps(hw, &device_caps);
24263 + if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS) {
24264 + adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
24265 + adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
24266 + DPRINTK(PROBE, INFO, "FCoE offload feature "
24267 + "is not available. Disabling FCoE "
24268 + "offload feature\n");
24270 +#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE
24272 + adapter->flags |= IXGBE_FLAG_FCOE_ENABLED;
24273 + adapter->ring_feature[RING_F_FCOE].indices =
24274 + IXGBE_FCRETA_SIZE;
24275 + netdev->features |= NETIF_F_FSO;
24276 + netdev->features |= NETIF_F_FCOE_CRC;
24277 + netdev->features |= NETIF_F_FCOE_MTU;
24278 + netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
24279 + DPRINTK(PROBE, INFO, "Enabling FCoE offload "
24282 +#endif /* HAVE_NETDEV_OPS_FCOE_ENABLE */
24284 +#ifdef HAVE_NETDEV_VLAN_FEATURES
24285 + if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
24286 + netdev->vlan_features |= NETIF_F_FSO;
24287 + netdev->vlan_features |= NETIF_F_FCOE_CRC;
24288 + netdev->vlan_features |= NETIF_F_FCOE_MTU;
24290 +#endif /* HAVE_NETDEV_VLAN_FEATURES */
24291 +#endif /* NETIF_F_FSO */
24296 +#endif /* IXGBE_FCOE */
24297 + if (pci_using_dac)
24298 + netdev->features |= NETIF_F_HIGHDMA;
24300 +#endif /* MAX_SKB_FRAGS */
24301 + /* make sure the EEPROM is good */
24302 + if (hw->eeprom.ops.validate_checksum &&
24303 + (hw->eeprom.ops.validate_checksum(hw, NULL) < 0)) {
24304 + DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
24306 + goto err_sw_init;
24309 + memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
24310 +#ifdef ETHTOOL_GPERMADDR
24311 + memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);
24313 + if (ixgbe_validate_mac_addr(netdev->perm_addr)) {
24314 + DPRINTK(PROBE, INFO, "invalid MAC address\n");
24316 + goto err_sw_init;
24319 + if (ixgbe_validate_mac_addr(netdev->dev_addr)) {
24320 + DPRINTK(PROBE, INFO, "invalid MAC address\n");
24322 + goto err_sw_init;
24326 + /* power down the optics */
24327 + if (hw->phy.multispeed_fiber)
24328 + ixgbe_disable_tx_laser(hw);
24330 + setup_timer(&adapter->watchdog_timer, &ixgbe_watchdog,
24331 + (unsigned long) adapter);
24333 + INIT_WORK(&adapter->reset_task, ixgbe_reset_task);
24334 + INIT_WORK(&adapter->watchdog_task, ixgbe_watchdog_task);
24336 + err = ixgbe_init_interrupt_scheme(adapter);
24338 + goto err_sw_init;
24340 + switch (pdev->device) {
24341 + case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
24342 + /* All except this subdevice support WOL */
24343 + if (pdev->subsystem_device ==
24344 + IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) {
24345 + adapter->wol = 0;
24348 + case IXGBE_DEV_ID_82599_KX4:
24349 + adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
24350 + IXGBE_WUFC_MC | IXGBE_WUFC_BC);
24353 + adapter->wol = 0;
24356 + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
24358 + /* save off EEPROM version number */
24359 + ixgbe_read_eeprom(hw, 0x29, &adapter->eeprom_version);
24361 + /* reset the hardware with the new settings */
24362 + err = hw->mac.ops.start_hw(hw);
24363 + if (err == IXGBE_ERR_EEPROM_VERSION) {
24364 + /* We are running on a pre-production device, log a warning */
24365 + DPRINTK(PROBE, INFO, "This device is a pre-production adapter/"
24366 + "LOM. Please be aware there may be issues associated "
24367 + "with your hardware. If you are experiencing problems "
24368 + "please contact your Intel or hardware representative "
24369 + "who provided you with this hardware.\n");
24371 + /* pick up the PCI bus settings for reporting later */
24372 + if (hw->mac.ops.get_bus_info)
24373 + hw->mac.ops.get_bus_info(hw);
24376 + strcpy(netdev->name, "eth%d");
24377 + err = register_netdev(netdev);
24379 + goto err_register;
24381 + adapter->netdev_registered = true;
24382 + /* carrier off reporting is important to ethtool even BEFORE open */
24383 + netif_carrier_off(netdev);
24384 + /* keep stopping all the transmit queues for older kernels */
24385 + netif_tx_stop_all_queues(netdev);
24388 + INIT_WORK(&adapter->fdir_reinit_task, ixgbe_fdir_reinit_task);
24389 +#endif /* HAVE_TX_MQ */
24390 + INIT_WORK(&adapter->check_overtemp_task, ixgbe_check_overtemp_task);
24391 + if (adapter->flags & IXGBE_FLAG_DCA_CAPABLE) {
24392 + err = dca_add_requester(&pdev->dev);
24395 + adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
24396 + ixgbe_setup_dca(adapter);
24398 +			/* -19 (-ENODEV) is returned from the kernel when no provider is found */
24400 + DPRINTK(PROBE, INFO, "No DCA provider found. Please "
24401 + "start ioatdma for DCA functionality.\n");
24404 + DPRINTK(PROBE, INFO, "DCA registration failed: %d\n",
24410 + /* print all messages at the end so that we use our eth%d name */
24411 + /* print bus type/speed/width info */
24412 + DPRINTK(PROBE, INFO, "(PCI Express:%s:%s) ",
24413 + ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s":
24414 + (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s":"Unknown"),
24415 + (hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
24416 + (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
24417 + (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
24420 + /* print the MAC address */
24421 + for (i = 0; i < 6; i++)
24422 + printk("%2.2x%c", netdev->dev_addr[i], i == 5 ? '\n' : ':');
24424 +	/* First try to read the PBA as a string */
24425 + err = ixgbe_read_pba_string(hw, part_str, &part_str_size);
24429 + case IXGBE_NOT_IMPLEMENTED:
24430 + /* old style PBA number */
24431 + ixgbe_read_pba_num(hw, &part_num);
24432 + sprintf(part_str, "%06x-%03x\n", (part_num >> 8),
24433 + (part_num & 0xff));
24436 + strcpy(part_str, "Unknown");
24439 + if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
24440 + DPRINTK(PROBE, INFO, "MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
24441 + hw->mac.type, hw->phy.type, hw->phy.sfp_type,
24444 + DPRINTK(PROBE, INFO, "MAC: %d, PHY: %d, PBA No: %s\n",
24445 + hw->mac.type, hw->phy.type, part_str);
24447 + if (((hw->bus.speed == ixgbe_bus_speed_2500) &&
24448 + (hw->bus.width <= ixgbe_bus_width_pcie_x4)) ||
24449 + (hw->bus.width <= ixgbe_bus_width_pcie_x2)) {
24450 + DPRINTK(PROBE, WARNING, "PCI-Express bandwidth available for "
24451 + "this card is not sufficient for optimal "
24452 + "performance.\n");
24453 + DPRINTK(PROBE, WARNING, "For optimal performance a x8 "
24454 + "PCI-Express slot is required.\n");
24457 +#ifdef NETIF_F_GRO
24458 + if (adapter->netdev->features & NETIF_F_GRO)
24459 + DPRINTK(PROBE, INFO, "GRO is enabled\n");
24460 + else if (adapter->flags2 & IXGBE_FLAG2_SWLRO_ENABLED)
24462 + if (adapter->flags2 & IXGBE_FLAG2_SWLRO_ENABLED)
24464 + DPRINTK(PROBE, INFO, "Internal LRO is enabled \n");
24466 + DPRINTK(PROBE, INFO, "LRO is disabled \n");
24468 + if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
24469 +		DPRINTK(PROBE, INFO, "HW RSC is enabled\n");
24470 +#ifdef CONFIG_PCI_IOV
24471 + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
24472 + DPRINTK(PROBE, INFO, "IOV is enabled with %d VFs\n",
24473 + adapter->num_vfs);
24474 + for (i = 0; i < adapter->num_vfs; i++)
24475 + ixgbe_vf_configuration(pdev, (i | 0x10000000));
24479 +#if defined(HAVE_NETDEV_STORAGE_ADDRESS) && defined(NETDEV_HW_ADDR_T_SAN)
24480 + /* add san mac addr to netdev */
24481 + ixgbe_add_sanmac_netdev(netdev);
24483 +#endif /* (HAVE_NETDEV_STORAGE_ADDRESS) && (NETDEV_HW_ADDR_T_SAN) */
24484 + DPRINTK(PROBE, INFO, "Intel(R) 10 Gigabit Network Connection\n");
24489 + ixgbe_clear_interrupt_scheme(adapter);
24490 + ixgbe_release_hw_control(adapter);
24492 + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
24493 + ixgbe_disable_sriov(adapter);
24494 + clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
24495 + del_timer_sync(&adapter->sfp_timer);
24496 + cancel_work_sync(&adapter->sfp_task);
24497 + cancel_work_sync(&adapter->multispeed_fiber_task);
24498 + cancel_work_sync(&adapter->sfp_config_module_task);
24499 +#ifdef IXGBE_TCP_TIMER
24500 + iounmap(adapter->msix_addr);
24503 + iounmap(hw->hw_addr);
24505 + free_netdev(netdev);
24506 +err_alloc_etherdev:
24507 + pci_release_regions(pdev);
24511 + pci_disable_device(pdev);
24516 + * ixgbe_remove - Device Removal Routine
24517 + * @pdev: PCI device information struct
24519 + * ixgbe_remove is called by the PCI subsystem to alert the driver
24520 + * that it should release a PCI device. This could be caused by a
24521 + * Hot-Plug event, or because the driver is going to be removed from memory.
24524 +static void __devexit ixgbe_remove(struct pci_dev *pdev)
24526 + struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
24527 + struct net_device *netdev = adapter->netdev;
24529 + set_bit(__IXGBE_DOWN, &adapter->state);
24531 +	 * clear the module not found bit to make sure the worker won't reschedule
24534 + clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
24535 + del_timer_sync(&adapter->watchdog_timer);
24536 + del_timer_sync(&adapter->sfp_timer);
24537 + cancel_work_sync(&adapter->reset_task);
24538 + cancel_work_sync(&adapter->watchdog_task);
24539 + cancel_work_sync(&adapter->sfp_task);
24541 + cancel_work_sync(&adapter->fdir_reinit_task);
24543 + cancel_work_sync(&adapter->check_overtemp_task);
24544 + cancel_work_sync(&adapter->multispeed_fiber_task);
24545 + cancel_work_sync(&adapter->sfp_config_module_task);
24546 + flush_scheduled_work();
24548 + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
24549 + adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
24550 + dca_remove_requester(&pdev->dev);
24551 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
24555 + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
24556 + ixgbe_cleanup_fcoe(adapter);
24558 +#endif /* IXGBE_FCOE */
24559 +#if defined(HAVE_NETDEV_STORAGE_ADDRESS) && defined(NETDEV_HW_ADDR_T_SAN)
24560 + /* remove the added san mac */
24561 + ixgbe_del_sanmac_netdev(netdev);
24563 +#endif /* (HAVE_NETDEV_STORAGE_ADDRESS) && (NETDEV_HW_ADDR_T_SAN) */
24564 + if (adapter->netdev_registered) {
24565 + unregister_netdev(netdev);
24566 + adapter->netdev_registered = false;
24568 + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
24569 + ixgbe_disable_sriov(adapter);
24571 + ixgbe_clear_interrupt_scheme(adapter);
24572 + ixgbe_release_hw_control(adapter);
24574 +#ifdef IXGBE_TCP_TIMER
24575 + iounmap(adapter->msix_addr);
24577 + iounmap(adapter->hw.hw_addr);
24578 + pci_release_regions(pdev);
24580 + DPRINTK(PROBE, INFO, "complete\n");
24581 + free_netdev(netdev);
24583 + pci_disable_pcie_error_reporting(pdev);
24585 + pci_disable_device(pdev);
24588 +u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg)
24591 + struct ixgbe_adapter *adapter = hw->back;
24593 + pci_read_config_word(adapter->pdev, reg, &value);
24597 +void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value)
24599 + struct ixgbe_adapter *adapter = hw->back;
24601 + pci_write_config_word(adapter->pdev, reg, value);
24604 +#ifdef HAVE_PCI_ERS
24606 + * ixgbe_io_error_detected - called when PCI error is detected
24607 + * @pdev: Pointer to PCI device
24608 + * @state: The current pci connection state
24610 + * This function is called after a PCI bus error affecting
24611 + * this device has been detected.
24613 +static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
24614 + pci_channel_state_t state)
24616 + struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
24617 + struct net_device *netdev = adapter->netdev;
24619 + netif_device_detach(netdev);
24621 + if (state == pci_channel_io_perm_failure)
24622 + return PCI_ERS_RESULT_DISCONNECT;
24624 + if (netif_running(netdev))
24625 + ixgbe_down(adapter);
24626 + pci_disable_device(pdev);
24628 + /* Request a slot reset. */
24629 + return PCI_ERS_RESULT_NEED_RESET;
24633 + * ixgbe_io_slot_reset - called after the pci bus has been reset.
24634 + * @pdev: Pointer to PCI device
24636 + * Restart the card from scratch, as if from a cold-boot.
24638 +static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
24640 + struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
24641 + pci_ers_result_t result;
24643 + if (pci_enable_device(pdev)) {
24644 + DPRINTK(PROBE, ERR,
24645 + "Cannot re-enable PCI device after reset.\n");
24646 + result = PCI_ERS_RESULT_DISCONNECT;
24648 + pci_set_master(pdev);
24649 + pci_restore_state(pdev);
24651 +		 * After a second error, pci->state_saved is false; this
24652 +		 * resets it so EEH doesn't break.
24654 + pci_save_state(pdev);
24656 + pci_wake_from_d3(pdev, false);
24658 + ixgbe_reset(adapter);
24659 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
24660 + result = PCI_ERS_RESULT_RECOVERED;
24663 + pci_cleanup_aer_uncorrect_error_status(pdev);
24669 + * ixgbe_io_resume - called when traffic can start flowing again.
24670 + * @pdev: Pointer to PCI device
24672 + * This callback is called when the error recovery driver tells us that
24673 + * it's OK to resume normal operation.
24675 +static void ixgbe_io_resume(struct pci_dev *pdev)
24677 + struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
24678 + struct net_device *netdev = adapter->netdev;
24680 + if (netif_running(netdev)) {
24681 + if (ixgbe_up(adapter)) {
24682 + DPRINTK(PROBE, INFO, "ixgbe_up failed after reset\n");
24687 + netif_device_attach(netdev);
24690 +static struct pci_error_handlers ixgbe_err_handler = {
24691 + .error_detected = ixgbe_io_error_detected,
24692 + .slot_reset = ixgbe_io_slot_reset,
24693 + .resume = ixgbe_io_resume,
24697 +static struct pci_driver ixgbe_driver = {
24698 + .name = ixgbe_driver_name,
24699 + .id_table = ixgbe_pci_tbl,
24700 + .probe = ixgbe_probe,
24701 + .remove = __devexit_p(ixgbe_remove),
24703 + .suspend = ixgbe_suspend,
24704 + .resume = ixgbe_resume,
24706 +#ifndef USE_REBOOT_NOTIFIER
24707 + .shutdown = ixgbe_shutdown,
24709 +#ifdef HAVE_PCI_ERS
24710 + .err_handler = &ixgbe_err_handler
24714 +bool ixgbe_is_ixgbe(struct pci_dev *pcidev)
24716 + if (pci_dev_driver(pcidev) != &ixgbe_driver)
24723 + * ixgbe_init_module - Driver Registration Routine
24725 + * ixgbe_init_module is the first routine called when the driver is
24726 + * loaded. All it does is register with the PCI subsystem.
24728 +static int __init ixgbe_init_module(void)
24731 + printk(KERN_INFO "ixgbe: %s - version %s\n", ixgbe_driver_string,
24732 + ixgbe_driver_version);
24734 + printk(KERN_INFO "%s\n", ixgbe_copyright);
24736 +#ifndef CONFIG_DCB
24737 + ixgbe_dcb_netlink_register();
24739 +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
24740 + dca_register_notify(&dca_notifier);
24743 + ret = pci_register_driver(&ixgbe_driver);
24747 +module_init(ixgbe_init_module);
24750 + * ixgbe_exit_module - Driver Exit Cleanup Routine
24752 + * ixgbe_exit_module is called just before the driver is removed
24755 +static void __exit ixgbe_exit_module(void)
24757 +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
24758 + dca_unregister_notify(&dca_notifier);
24760 +#ifndef CONFIG_DCB
24761 + ixgbe_dcb_netlink_unregister();
24763 + pci_unregister_driver(&ixgbe_driver);
24766 +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
24767 +static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
24772 + ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
24773 + __ixgbe_notify_dca);
24775 + return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
24778 +module_exit(ixgbe_exit_module);
24780 +/* ixgbe_main.c */
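
The err_handler callbacks registered above implement the kernel's three-stage
PCI error-recovery contract: error_detected() quiesces the device,
slot_reset() re-initializes it once the link has been reset, and resume()
restarts traffic. A rough sketch of how the AER core drives a recovering
device through these hooks (illustrative only; the real sequencing lives in
the kernel's AER service driver, and aer_recover_one_device() is an invented
name):

    /* Hedged sketch; mirrors the order in which the ixgbe handlers run. */
    static void aer_recover_one_device(struct pci_dev *pdev)
    {
            const struct pci_error_handlers *eh = pdev->driver->err_handler;

            /* 1. ixgbe_io_error_detected(): detach the netdev, ixgbe_down() */
            if (eh->error_detected(pdev, pci_channel_io_frozen) ==
                PCI_ERS_RESULT_DISCONNECT)
                    return;         /* the pci_channel_io_perm_failure case */

            /* 2. after the slot reset, ixgbe_io_slot_reset() re-enables the
             * device, restores config space and resets the MAC */
            if (eh->slot_reset(pdev) != PCI_ERS_RESULT_RECOVERED)
                    return;

            /* 3. ixgbe_io_resume(): ixgbe_up(), re-attach the netdev */
            eh->resume(pdev);
    }
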
24782 diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_mbx.c linux-2.6.22-50/drivers/net/ixgbe/ixgbe_mbx.c
24783 --- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_mbx.c 1969-12-31 19:00:00.000000000 -0500
24784 +++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_mbx.c 2010-08-25 17:56:26.000000000 -0400
24786 +/*******************************************************************************
24788 + Intel 10 Gigabit PCI Express Linux driver
24789 + Copyright(c) 1999 - 2010 Intel Corporation.
24791 + This program is free software; you can redistribute it and/or modify it
24792 + under the terms and conditions of the GNU General Public License,
24793 + version 2, as published by the Free Software Foundation.
24795 + This program is distributed in the hope it will be useful, but WITHOUT
24796 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
24797 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
24800 + You should have received a copy of the GNU General Public License along with
24801 + this program; if not, write to the Free Software Foundation, Inc.,
24802 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
24804 + The full GNU General Public License is included in this distribution in
24805 + the file called "COPYING".
24807 + Contact Information:
24808 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24809 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24811 +*******************************************************************************/
24813 +#include "ixgbe_type.h"
24814 +#include "ixgbe_mbx.h"
24817 + * ixgbe_read_mbx - Reads a message from the mailbox
24818 + * @hw: pointer to the HW structure
24819 + * @msg: The message buffer
24820 + * @size: Length of buffer
24821 + * @mbx_id: id of mailbox to read
24823 + * returns SUCCESS if it successfully read a message from the buffer
24825 +s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
24827 + struct ixgbe_mbx_info *mbx = &hw->mbx;
24828 + s32 ret_val = IXGBE_ERR_MBX;
24830 + /* limit read to size of mailbox */
24831 + if (size > mbx->size)
24832 + size = mbx->size;
24834 + if (mbx->ops.read)
24835 + ret_val = mbx->ops.read(hw, msg, size, mbx_id);
24841 + * ixgbe_write_mbx - Write a message to the mailbox
24842 + * @hw: pointer to the HW structure
24843 + * @msg: The message buffer
24844 + * @size: Length of buffer
24845 + * @mbx_id: id of mailbox to write
24847 + * returns SUCCESS if it successfully copied the message into the buffer
24849 +s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
24851 + struct ixgbe_mbx_info *mbx = &hw->mbx;
24854 + if (size > mbx->size)
24855 + ret_val = IXGBE_ERR_MBX;
24857 + else if (mbx->ops.write)
24858 + ret_val = mbx->ops.write(hw, msg, size, mbx_id);
24864 + * ixgbe_check_for_msg - checks to see if someone sent us mail
24865 + * @hw: pointer to the HW structure
24866 + * @mbx_id: id of mailbox to check
24868 + * returns SUCCESS if the Status bit was found or else ERR_MBX
24870 +s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
24872 + struct ixgbe_mbx_info *mbx = &hw->mbx;
24873 + s32 ret_val = IXGBE_ERR_MBX;
24875 + if (mbx->ops.check_for_msg)
24876 + ret_val = mbx->ops.check_for_msg(hw, mbx_id);
24882 + * ixgbe_check_for_ack - checks to see if someone sent us ACK
24883 + * @hw: pointer to the HW structure
24884 + * @mbx_id: id of mailbox to check
24886 + * returns SUCCESS if the Status bit was found or else ERR_MBX
24888 +s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
24890 + struct ixgbe_mbx_info *mbx = &hw->mbx;
24891 + s32 ret_val = IXGBE_ERR_MBX;
24893 + if (mbx->ops.check_for_ack)
24894 + ret_val = mbx->ops.check_for_ack(hw, mbx_id);
24900 + * ixgbe_check_for_rst - checks to see if other side has reset
24901 + * @hw: pointer to the HW structure
24902 + * @mbx_id: id of mailbox to check
24904 + * returns SUCCESS if the Status bit was found or else ERR_MBX
24906 +s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
24908 + struct ixgbe_mbx_info *mbx = &hw->mbx;
24909 + s32 ret_val = IXGBE_ERR_MBX;
24911 + if (mbx->ops.check_for_rst)
24912 + ret_val = mbx->ops.check_for_rst(hw, mbx_id);
24918 + * ixgbe_poll_for_msg - Wait for message notification
24919 + * @hw: pointer to the HW structure
24920 + * @mbx_id: id of mailbox to poll
24922 + * returns SUCCESS if it successfully received a message notification
24924 +static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
24926 + struct ixgbe_mbx_info *mbx = &hw->mbx;
24927 + int countdown = mbx->timeout;
24929 + if (!countdown || !mbx->ops.check_for_msg)
24932 + while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
24936 + udelay(mbx->udelay);
24940 + return countdown ? 0 : IXGBE_ERR_MBX;
24944 + * ixgbe_poll_for_ack - Wait for message acknowledgement
24945 + * @hw: pointer to the HW structure
24946 + * @mbx_id: id of mailbox to poll
24948 + * returns SUCCESS if it successfully received a message acknowledgement
24950 +static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
24952 + struct ixgbe_mbx_info *mbx = &hw->mbx;
24953 + int countdown = mbx->timeout;
24955 + if (!countdown || !mbx->ops.check_for_ack)
24958 + while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
24962 + udelay(mbx->udelay);
24966 + return countdown ? 0 : IXGBE_ERR_MBX;
24970 + * ixgbe_read_posted_mbx - Wait for message notification and receive message
24971 + * @hw: pointer to the HW structure
24972 + * @msg: The message buffer
24973 + * @size: Length of buffer
24974 + * @mbx_id: id of mailbox to read
24976 + * returns SUCCESS if it successfully received a message notification and
24977 + * copied it into the receive buffer.
24979 +s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
24981 + struct ixgbe_mbx_info *mbx = &hw->mbx;
24982 + s32 ret_val = IXGBE_ERR_MBX;
24984 + if (!mbx->ops.read)
24987 + ret_val = ixgbe_poll_for_msg(hw, mbx_id);
24989 + /* if ack received read message, otherwise we timed out */
24991 + ret_val = mbx->ops.read(hw, msg, size, mbx_id);
24997 + * ixgbe_write_posted_mbx - Write a message to the mailbox, wait for ack
24998 + * @hw: pointer to the HW structure
24999 + * @msg: The message buffer
25000 + * @size: Length of buffer
25001 + * @mbx_id: id of mailbox to write
25003 + * returns SUCCESS if it successfully copied the message into the buffer and
25004 + * received an ack to that message within the delay * timeout period
25006 +s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
25009 + struct ixgbe_mbx_info *mbx = &hw->mbx;
25010 + s32 ret_val = IXGBE_ERR_MBX;
25012 + /* exit if either we can't write or there isn't a defined timeout */
25013 + if (!mbx->ops.write || !mbx->timeout)
25017 + ret_val = mbx->ops.write(hw, msg, size, mbx_id);
25019 + /* if msg sent wait until we receive an ack */
25021 + ret_val = ixgbe_poll_for_ack(hw, mbx_id);
25027 + * ixgbe_init_mbx_ops_generic - Initialize MB function pointers
25028 + * @hw: pointer to the HW structure
25030 + * Sets up the mailbox read and write message function pointers
25032 +void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw)
25034 + struct ixgbe_mbx_info *mbx = &hw->mbx;
25036 + mbx->ops.read_posted = ixgbe_read_posted_mbx;
25037 + mbx->ops.write_posted = ixgbe_write_posted_mbx;
25040 +static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
25042 + u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index));
25043 + s32 ret_val = IXGBE_ERR_MBX;
25045 + if (mbvficr & mask) {
25047 + IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask);
25054 + * ixgbe_check_for_msg_pf - checks to see if the VF has sent mail
25055 + * @hw: pointer to the HW structure
25056 + * @vf_number: the VF index
25058 + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
25060 +static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
25062 + s32 ret_val = IXGBE_ERR_MBX;
25063 + s32 index = IXGBE_MBVFICR_INDEX(vf_number);
25064 + u32 vf_bit = vf_number % 16;
25066 + if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
25069 + hw->mbx.stats.reqs++;
25076 + * ixgbe_check_for_ack_pf - checks to see if the VF has ACKed
25077 + * @hw: pointer to the HW structure
25078 + * @vf_number: the VF index
25080 + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
25082 +static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
25084 + s32 ret_val = IXGBE_ERR_MBX;
25085 + s32 index = IXGBE_MBVFICR_INDEX(vf_number);
25086 + u32 vf_bit = vf_number % 16;
25088 + if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
25091 + hw->mbx.stats.acks++;
25098 + * ixgbe_check_for_rst_pf - checks to see if the VF has reset
25099 + * @hw: pointer to the HW structure
25100 + * @vf_number: the VF index
25102 + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
25104 +static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
25106 + u32 reg_offset = (vf_number < 32) ? 0 : 1;
25107 + u32 vf_shift = vf_number % 32;
25109 + s32 ret_val = IXGBE_ERR_MBX;
25111 + if (hw->mac.type == ixgbe_mac_82599EB)
25112 + vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset));
25114 + if (vflre & (1 << vf_shift)) {
25116 + IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift));
25117 + hw->mbx.stats.rsts++;
25124 + * ixgbe_obtain_mbx_lock_pf - obtain mailbox lock
25125 + * @hw: pointer to the HW structure
25126 + * @vf_number: the VF index
25128 + * return SUCCESS if we obtained the mailbox lock
25130 +static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
25132 + s32 ret_val = IXGBE_ERR_MBX;
25135 + /* Take ownership of the buffer */
25136 + IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_PFU);
25138 + /* reserve mailbox for pf use */
25139 + p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number));
25140 + if (p2v_mailbox & IXGBE_PFMAILBOX_PFU)
25147 + * ixgbe_write_mbx_pf - Places a message in the mailbox
25148 + * @hw: pointer to the HW structure
25149 + * @msg: The message buffer
25150 + * @size: Length of buffer
25151 + * @vf_number: the VF index
25153 + * returns SUCCESS if it successfully copied the message into the buffer
25155 +static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
25161 + /* lock the mailbox to prevent pf/vf race condition */
25162 + ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
25164 + goto out_no_write;
25166 + /* flush msg and acks as we are overwriting the message buffer */
25167 + ixgbe_check_for_msg_pf(hw, vf_number);
25168 + ixgbe_check_for_ack_pf(hw, vf_number);
25170 + /* copy the caller specified message to the mailbox memory buffer */
25171 + for (i = 0; i < size; i++)
25172 + IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, msg[i]);
25174 + /* Interrupt VF to tell it a message has been sent and release buffer*/
25175 + IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS);
25177 + /* update stats */
25178 + hw->mbx.stats.msgs_tx++;
25186 + * ixgbe_read_mbx_pf - Read a message from the mailbox
25187 + * @hw: pointer to the HW structure
25188 + * @msg: The message buffer
25189 + * @size: Length of buffer
25190 + * @vf_number: the VF index
25192 + * This function copies a message from the mailbox buffer to the caller's
25193 + * memory buffer. The presumption is that the caller knows that there was
25194 + * a message due to a VF request so no polling for message is needed.
25196 +static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
25202 + /* lock the mailbox to prevent pf/vf race condition */
25203 + ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
25205 + goto out_no_read;
25207 + /* copy the message to the mailbox memory buffer */
25208 + for (i = 0; i < size; i++)
25209 + msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i);
25211 + /* Acknowledge the message and release buffer */
25212 + IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_ACK);
25214 + /* update stats */
25215 + hw->mbx.stats.msgs_rx++;
25222 + * ixgbe_init_mbx_params_pf - set initial values for pf mailbox
25223 + * @hw: pointer to the HW structure
25225 + * Initializes the hw->mbx struct to correct values for pf mailbox
25227 +void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
25229 + struct ixgbe_mbx_info *mbx = &hw->mbx;
25231 + if (hw->mac.type != ixgbe_mac_82599EB)
25234 + mbx->timeout = 0;
25237 + mbx->size = IXGBE_VFMAILBOX_SIZE;
25239 + mbx->ops.read = ixgbe_read_mbx_pf;
25240 + mbx->ops.write = ixgbe_write_mbx_pf;
25241 + mbx->ops.read_posted = ixgbe_read_posted_mbx;
25242 + mbx->ops.write_posted = ixgbe_write_posted_mbx;
25243 + mbx->ops.check_for_msg = ixgbe_check_for_msg_pf;
25244 + mbx->ops.check_for_ack = ixgbe_check_for_ack_pf;
25245 + mbx->ops.check_for_rst = ixgbe_check_for_rst_pf;
25247 + mbx->stats.msgs_tx = 0;
25248 + mbx->stats.msgs_rx = 0;
25249 + mbx->stats.reqs = 0;
25250 + mbx->stats.acks = 0;
25251 + mbx->stats.rsts = 0;
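
Together with ixgbe_obtain_mbx_lock_pf() and the check_for_* helpers, the ops
installed by ixgbe_init_mbx_params_pf() above form a lock/write/interrupt/ack
protocol: the PF takes PFU ownership, writes the words into PFMBMEM, sets
PFMAILBOX_STS to interrupt the VF, and the posted variants wrap this in the
poll-for-ack and poll-for-msg timeout loops. A minimal PF-side usage sketch
(illustrative only; the one-word payload and the helper name are invented,
and real callers live in the SR-IOV PF code):

    static s32 example_notify_vf(struct ixgbe_hw *hw, u16 vf)
    {
            u32 msg[IXGBE_VFMAILBOX_SIZE];
            s32 err;

            /* one-word control message; the contents are illustrative */
            msg[0] = IXGBE_PF_CONTROL_MSG;

            /* locks the mailbox, writes PFMBMEM, sets STS, then polls for
             * the VF's ack until mbx->timeout expires */
            err = ixgbe_write_posted_mbx(hw, msg, 1, vf);
            if (err)
                    return err;

            /* polls for a VF message, copies it out of PFMBMEM, acks it */
            return ixgbe_read_posted_mbx(hw, msg, IXGBE_VFMAILBOX_SIZE, vf);
    }
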
25254 diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_mbx.h linux-2.6.22-50/drivers/net/ixgbe/ixgbe_mbx.h
25255 --- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_mbx.h 1969-12-31 19:00:00.000000000 -0500
25256 +++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_mbx.h 2010-08-25 17:56:26.000000000 -0400
25258 +/*******************************************************************************
25260 + Intel 10 Gigabit PCI Express Linux driver
25261 + Copyright(c) 1999 - 2010 Intel Corporation.
25263 + This program is free software; you can redistribute it and/or modify it
25264 + under the terms and conditions of the GNU General Public License,
25265 + version 2, as published by the Free Software Foundation.
25267 + This program is distributed in the hope it will be useful, but WITHOUT
25268 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
25269 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
25272 + You should have received a copy of the GNU General Public License along with
25273 + this program; if not, write to the Free Software Foundation, Inc.,
25274 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
25276 + The full GNU General Public License is included in this distribution in
25277 + the file called "COPYING".
25279 + Contact Information:
25280 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25281 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25283 +*******************************************************************************/
25285 +#ifndef _IXGBE_MBX_H_
25286 +#define _IXGBE_MBX_H_
25288 +#include "ixgbe_type.h"
25290 +#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
25291 +#define IXGBE_ERR_MBX -100
25293 +#define IXGBE_VFMAILBOX 0x002FC
25294 +#define IXGBE_VFMBMEM 0x00200
25296 +#define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * x))
25297 +#define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * vfn))
25299 +#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */
25300 +#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
25301 +#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
25302 +#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
25303 +#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
25305 +#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */
25306 +#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
25307 +#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */
25308 +#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
25311 +/* If it's an IXGBE_VF_* msg then it originates in the VF and is sent to the
25312 + * PF. The reverse is true if it is IXGBE_PF_*.
25313 + * Message ACKs are the value or'd with 0xF0000000
25315 +#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
25316 + * this are the ACK */
25317 +#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
25318 + * this are the NACK */
25319 +#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
25320 + clear to send requests */
25321 +#define IXGBE_VT_MSGINFO_SHIFT 16
25322 +/* bits 23:16 are used for extra info for certain messages */
25323 +#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
25325 +#define IXGBE_VF_RESET 0x01 /* VF requests reset */
25326 +#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
25327 +#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
25328 +#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
25329 +#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
25331 +/* length of permanent address message returned from PF */
25332 +#define IXGBE_VF_PERMADDR_MSG_LEN 4
25333 +/* word in permanent address message with the current multicast type */
25334 +#define IXGBE_VF_MC_TYPE_WORD 3
25336 +#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */
25338 +#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
25339 +#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
25341 +s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16);
25342 +s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16);
25343 +s32 ixgbe_read_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16);
25344 +s32 ixgbe_write_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16);
25345 +s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16);
25346 +s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16);
25347 +s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
25348 +void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw);
25349 +void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
25351 +#endif /* _IXGBE_MBX_H_ */
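
Under the constants above, every 32-bit mailbox word is self-describing: the
low 16 bits carry an IXGBE_VF_* or IXGBE_PF_* command, bits 23:16 carry
optional extra info, and the top bits carry the ACK/NACK/CTS status that the
PF ORs into its reply. Two framing helpers as a hedged illustration (they are
not part of the driver):

    /* illustrative only: compose a VF request with an 8-bit info field */
    static inline u32 example_make_vf_msg(u32 cmd, u32 info)
    {
            return cmd | ((info << IXGBE_VT_MSGINFO_SHIFT) &
                          IXGBE_VT_MSGINFO_MASK);
    }

    /* illustrative only: the PF echoes the command back with a status bit
     * OR'd in, so a VF can test its reply roughly like this */
    static inline bool example_vf_msg_acked(u32 reply, u32 cmd)
    {
            return (reply & ~IXGBE_VT_MSGINFO_MASK) ==
                   (cmd | IXGBE_VT_MSGTYPE_ACK);
    }
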
25352 diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_osdep.h linux-2.6.22-50/drivers/net/ixgbe/ixgbe_osdep.h
25353 --- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_osdep.h 1969-12-31 19:00:00.000000000 -0500
25354 +++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_osdep.h 2010-08-25 17:56:26.000000000 -0400
25356 +/*******************************************************************************
25358 + Intel 10 Gigabit PCI Express Linux driver
25359 + Copyright(c) 1999 - 2010 Intel Corporation.
25361 + This program is free software; you can redistribute it and/or modify it
25362 + under the terms and conditions of the GNU General Public License,
25363 + version 2, as published by the Free Software Foundation.
25365 + This program is distributed in the hope it will be useful, but WITHOUT
25366 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
25367 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
25370 + You should have received a copy of the GNU General Public License along with
25371 + this program; if not, write to the Free Software Foundation, Inc.,
25372 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
25374 + The full GNU General Public License is included in this distribution in
25375 + the file called "COPYING".
25377 + Contact Information:
25378 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25379 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25381 +*******************************************************************************/
25384 +/* glue for the OS independent part of ixgbe
25385 + * includes register access macros
25388 +#ifndef _IXGBE_OSDEP_H_
25389 +#define _IXGBE_OSDEP_H_
25391 +#include <linux/pci.h>
25392 +#include <linux/delay.h>
25393 +#include <linux/interrupt.h>
25394 +#include <linux/if_ether.h>
25395 +#include <linux/sched.h>
25396 +#include "kcompat.h"
25400 +#define msleep(x) do { if(in_interrupt()) { \
25401 + /* Don't mdelay in interrupt context! */ \
25412 +#define hw_dbg(hw, S, A...) printk(KERN_DEBUG S, ## A)
25414 +#define hw_dbg(hw, S, A...) do {} while (0)
25418 +#define IXGBE_WRITE_REG(a, reg, value) do {\
25420 + case IXGBE_EIMS: \
25421 + case IXGBE_EIMC: \
25422 + case IXGBE_EIAM: \
25423 + case IXGBE_EIAC: \
25424 + case IXGBE_EICR: \
25425 + case IXGBE_EICS: \
25426 + printk("%s: Reg - 0x%05X, value - 0x%08X\n", __FUNCTION__, \
25427 + reg, (u32)(value)); \
25431 + writel((value), ((a)->hw_addr + (reg))); \
25434 +#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
25437 +#define IXGBE_READ_REG(a, reg) readl((a)->hw_addr + (reg))
25439 +#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) ( \
25440 + writel((value), ((a)->hw_addr + (reg) + ((offset) << 2))))
25442 +#define IXGBE_READ_REG_ARRAY(a, reg, offset) ( \
25443 + readl((a)->hw_addr + (reg) + ((offset) << 2)))
25446 +#define writeq(val, addr) writel((u32) (val), addr); \
25447 + writel((u32) (val >> 32), (addr + 4));
25450 +#define IXGBE_WRITE_REG64(a, reg, value) writeq((value), ((a)->hw_addr + (reg)))
25452 +#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS)
25454 +extern u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg);
25455 +extern void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value);
25456 +#define IXGBE_READ_PCIE_WORD ixgbe_read_pci_cfg_word
25457 +#define IXGBE_WRITE_PCIE_WORD ixgbe_write_pci_cfg_word
25458 +#define IXGBE_EEPROM_GRANT_ATTEMPS 100
25459 +#define IXGBE_HTONL(_i) htonl(_i)
25460 +#define IXGBE_HTONS(_i) htons(_i)
25462 +#endif /* _IXGBE_OSDEP_H_ */
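
These macros are what keeps the shared ixgbe_* code OS-neutral: every register
access compiles down to readl()/writel() against the BAR mapping in
hw->hw_addr. For a non-debug build the expansions look like this (shown as a
comment; the register choices are just examples):

    /*
     * IXGBE_WRITE_REG(hw, IXGBE_WUS, ~0);
     *     expands to    writel(~0, hw->hw_addr + IXGBE_WUS);
     *
     * val = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf), i);
     *     expands to    readl(hw->hw_addr + IXGBE_PFMBMEM(vf) + (i << 2));
     *
     * IXGBE_WRITE_FLUSH(hw) reads IXGBE_STATUS purely for its side effect:
     * the read forces any posted MMIO writes out to the device first.
     */
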
25463 diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_param.c linux-2.6.22-50/drivers/net/ixgbe/ixgbe_param.c
25464 --- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_param.c 1969-12-31 19:00:00.000000000 -0500
25465 +++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_param.c 2010-08-25 17:56:26.000000000 -0400
25467 +/*******************************************************************************
25469 + Intel 10 Gigabit PCI Express Linux driver
25470 + Copyright(c) 1999 - 2010 Intel Corporation.
25472 + This program is free software; you can redistribute it and/or modify it
25473 + under the terms and conditions of the GNU General Public License,
25474 + version 2, as published by the Free Software Foundation.
25476 + This program is distributed in the hope it will be useful, but WITHOUT
25477 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
25478 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
25481 + You should have received a copy of the GNU General Public License along with
25482 + this program; if not, write to the Free Software Foundation, Inc.,
25483 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
25485 + The full GNU General Public License is included in this distribution in
25486 + the file called "COPYING".
25488 + Contact Information:
25489 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25490 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25492 +*******************************************************************************/
25494 +#include <linux/types.h>
25495 +#include <linux/module.h>
25497 +#include "ixgbe.h"
25499 +/* This is the only thing that needs to be changed to adjust the
25500 + * maximum number of ports that the driver can manage.
25503 +#define IXGBE_MAX_NIC 32
25505 +#define OPTION_UNSET -1
25506 +#define OPTION_DISABLED 0
25507 +#define OPTION_ENABLED 1
25509 +#define STRINGIFY(foo) #foo /* magic for getting defines into strings */
25510 +#define XSTRINGIFY(bar) STRINGIFY(bar)
25512 +/* All parameters are treated the same, as an integer array of values.
25513 + * This macro just reduces the need to repeat the same declaration code
25514 + * over and over (plus this helps to avoid typo bugs).
25517 +#define IXGBE_PARAM_INIT { [0 ... IXGBE_MAX_NIC] = OPTION_UNSET }
25518 +#ifndef module_param_array
25519 +/* Module Parameters are always initialized to -1, so that the driver
25520 + * can tell the difference between no user specified value or the
25521 + * user asking for the default value.
25522 + * The true default values are loaded in when ixgbe_check_options is called.
25524 + * This is a GCC extension to ANSI C.
25525 + * See the item "Labeled Elements in Initializers" in the section
25526 + * "Extensions to the C Language Family" of the GCC documentation.
25529 +#define IXGBE_PARAM(X, desc) \
25530 + static const int __devinitdata X[IXGBE_MAX_NIC+1] = IXGBE_PARAM_INIT; \
25531 + MODULE_PARM(X, "1-" __MODULE_STRING(IXGBE_MAX_NIC) "i"); \
25532 + MODULE_PARM_DESC(X, desc);
25534 +#define IXGBE_PARAM(X, desc) \
25535 + static int __devinitdata X[IXGBE_MAX_NIC+1] = IXGBE_PARAM_INIT; \
25536 + static unsigned int num_##X; \
25537 + module_param_array_named(X, X, int, &num_##X, 0); \
25538 + MODULE_PARM_DESC(X, desc);
25541 +/* IntMode (Interrupt Mode)
25543 + * Valid Range: 0-2
25544 + * - 0 - Legacy Interrupt
25545 + * - 1 - MSI Interrupt
25546 + * - 2 - MSI-X Interrupt(s)
25548 + * Default Value: 2
25550 +IXGBE_PARAM(InterruptType, "Change Interrupt Mode (0=Legacy, 1=MSI, 2=MSI-X), default IntMode (deprecated)");
25551 +IXGBE_PARAM(IntMode, "Change Interrupt Mode (0=Legacy, 1=MSI, 2=MSI-X), default 2");
25552 +#define IXGBE_INT_LEGACY 0
25553 +#define IXGBE_INT_MSI 1
25554 +#define IXGBE_INT_MSIX 2
25555 +#define IXGBE_DEFAULT_INT IXGBE_INT_MSIX
25557 +IXGBE_PARAM(Node, "set the starting node to allocate memory on, default -1");
25559 +/* MQ - Multiple Queue enable/disable
25561 + * Valid Range: 0, 1
25562 + * - 0 - disables MQ
25563 + * - 1 - enables MQ
25565 + * Default Value: 1
25568 +IXGBE_PARAM(MQ, "Disable or enable Multiple Queues, default 1");
25570 +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
25571 +/* DCA - Direct Cache Access (DCA) Control
25573 + * This option allows the device to hint to DCA enabled processors
25574 + * which CPU should have its cache warmed with the data being
25575 + * transferred over PCIe. This can increase performance by reducing
25576 + * cache misses. ixgbe hardware supports DCA for:
25577 + * tx descriptor writeback
25578 + * rx descriptor writeback
25580 + * rx data header only (in packet split mode)
25582 + * enabling option 2 can cause cache thrash in some tests, particularly
25583 + * if the CPU is completely utilized
25585 + * Valid Range: 0 - 2
25586 + * - 0 - disables DCA
25587 + * - 1 - enables DCA
25588 + * - 2 - enables DCA with rx data included
25590 + * Default Value: 2
25593 +#define IXGBE_MAX_DCA 2
25595 +IXGBE_PARAM(DCA, "Disable or enable Direct Cache Access, 0=disabled, 1=descriptor only, 2=descriptor and data");
25598 +/* RSS - Receive-Side Scaling (RSS) Descriptor Queues
25600 + * Valid Range: 0-16
25601 + * - 0 - disables RSS
25602 + * - 1 - enables RSS and sets the Desc. Q's to min(16, num_online_cpus()).
25603 + * - 2-16 - enables RSS and sets the Desc. Q's to the specified value.
25605 + * Default Value: 1
25608 +IXGBE_PARAM(RSS, "Number of Receive-Side Scaling Descriptor Queues, default 1=number of cpus");
25610 +/* VMDQ - Virtual Machine Device Queues (VMDQ)
25612 + * Valid Range: 1-16
25613 + * - 1 Disables VMDQ by allocating only a single queue.
25614 + * - 2-16 - enables VMDQ and sets the Desc. Q's to the specified value.
25616 + * Default Value: 1
25619 +#define IXGBE_DEFAULT_NUM_VMDQ 8
25621 +IXGBE_PARAM(VMDQ, "Number of Virtual Machine Device Queues: 0/1 = disable, 2-16 enable (default=" XSTRINGIFY(IXGBE_DEFAULT_NUM_VMDQ) ")");
25623 +#ifdef CONFIG_PCI_IOV
25624 +/* max_vfs - SR I/O Virtualization
25626 + * Valid Range: 0-63
25627 + * - 0 Disables SR-IOV
25628 + * - 1 Enables SR-IOV to default number of VFs enabled
25629 + * - 2-63 - enables SR-IOV and sets the number of VFs enabled
25631 + * Default Value: 0
25634 +#define MAX_SRIOV_VFS 63
25636 +IXGBE_PARAM(max_vfs, "Number of Virtual Functions: 0 = disable (default), 1 = default settings, 2-" XSTRINGIFY(MAX_SRIOV_VFS) " = enable this many VFs");
25639 +/* Interrupt Throttle Rate (interrupts/sec)
25641 + * Valid Range: 956-488281 (0=off, 1=dynamic)
25643 + * Default Value: 8000
25645 +#define DEFAULT_ITR 8000
25646 +IXGBE_PARAM(InterruptThrottleRate, "Maximum interrupts per second, per vector, (956-488281), default 8000");
25647 +#define MAX_ITR IXGBE_MAX_INT_RATE
25648 +#define MIN_ITR IXGBE_MIN_INT_RATE
25650 +#ifndef IXGBE_NO_LLI
25651 +/* LLIPort (Low Latency Interrupt TCP Port)
25653 + * Valid Range: 0 - 65535
25655 + * Default Value: 0 (disabled)
25657 +IXGBE_PARAM(LLIPort, "Low Latency Interrupt TCP Port (0-65535)");
25659 +#define DEFAULT_LLIPORT 0
25660 +#define MAX_LLIPORT 0xFFFF
25661 +#define MIN_LLIPORT 0
25663 +/* LLIPush (Low Latency Interrupt on TCP Push flag)
25665 + * Valid Range: 0,1
25667 + * Default Value: 0 (disabled)
25669 +IXGBE_PARAM(LLIPush, "Low Latency Interrupt on TCP Push flag (0,1)");
25671 +#define DEFAULT_LLIPUSH 0
25672 +#define MAX_LLIPUSH 1
25673 +#define MIN_LLIPUSH 0
25675 +/* LLISize (Low Latency Interrupt on Packet Size)
25677 + * Valid Range: 0 - 1500
25679 + * Default Value: 0 (disabled)
25681 +IXGBE_PARAM(LLISize, "Low Latency Interrupt on Packet Size (0-1500)");
25683 +#define DEFAULT_LLISIZE 0
25684 +#define MAX_LLISIZE 1500
25685 +#define MIN_LLISIZE 0
25687 +/* LLIEType (Low Latency Interrupt Ethernet Type)
25689 + * Valid Range: 0 - 0x8fff
25691 + * Default Value: 0 (disabled)
25693 +IXGBE_PARAM(LLIEType, "Low Latency Interrupt Ethernet Protocol Type");
25695 +#define DEFAULT_LLIETYPE 0
25696 +#define MAX_LLIETYPE 0x8fff
25697 +#define MIN_LLIETYPE 0
25699 +/* LLIVLANP (Low Latency Interrupt on VLAN priority threshold)
25701 + * Valid Range: 0 - 7
25703 + * Default Value: 0 (disabled)
25705 +IXGBE_PARAM(LLIVLANP, "Low Latency Interrupt on VLAN priority threshold");
25707 +#define DEFAULT_LLIVLANP 0
25708 +#define MAX_LLIVLANP 7
25709 +#define MIN_LLIVLANP 0
25711 +#endif /* IXGBE_NO_LLI */
25714 + * Valid Range: 0-2 0 = 1buf_mode_always, 1 = ps_mode_always and 2 = optimal
25716 + * Default Value: 2
25718 +IXGBE_PARAM(RxBufferMode, "0=1 descriptor per packet,\n"
25719 + "\t\t\t1=use packet split, multiple descriptors per jumbo frame\n"
25720 + "\t\t\t2 (default)=use 1buf mode for 1500 mtu, packet split for jumbo");
25722 +#define IXGBE_RXBUFMODE_1BUF_ALWAYS 0
25723 +#define IXGBE_RXBUFMODE_PS_ALWAYS 1
25724 +#define IXGBE_RXBUFMODE_OPTIMAL 2
25725 +#define IXGBE_DEFAULT_RXBUFMODE IXGBE_RXBUFMODE_OPTIMAL
25728 +/* Flow Director filtering mode
25730 + * Valid Range: 0-2 0 = off, 1 = Hashing (ATR), and 2 = perfect filters
25732 + * Default Value: 1 (ATR)
25734 +IXGBE_PARAM(FdirMode, "Flow Director filtering modes:\n"
25735 + "\t\t\t0 = Filtering off\n"
25736 + "\t\t\t1 = Signature Hashing filters (SW ATR)\n"
25737 + "\t\t\t2 = Perfect Filters");
25739 +#define IXGBE_FDIR_FILTER_OFF 0
25740 +#define IXGBE_FDIR_FILTER_HASH 1
25741 +#define IXGBE_FDIR_FILTER_PERFECT 2
25742 +#define IXGBE_DEFAULT_FDIR_FILTER IXGBE_FDIR_FILTER_HASH
25744 +/* Flow Director packet buffer allocation level
25746 + * Valid Range: 0-2 0 = 8k hash/2k perfect, 1 = 16k hash/4k perfect,
25747 + * 2 = 32k hash/8k perfect
25749 + * Default Value: 0
25751 +IXGBE_PARAM(FdirPballoc, "Flow Director packet buffer allocation level:\n"
25752 + "\t\t\t0 = 8k hash filters or 2k perfect filters\n"
25753 + "\t\t\t1 = 16k hash filters or 4k perfect filters\n"
25754 + "\t\t\t2 = 32k hash filters or 8k perfect filters");
25756 +#define IXGBE_FDIR_PBALLOC_64K 0
25757 +#define IXGBE_FDIR_PBALLOC_128K 1
25758 +#define IXGBE_FDIR_PBALLOC_256K 2
25759 +#define IXGBE_DEFAULT_FDIR_PBALLOC IXGBE_FDIR_PBALLOC_64K
25761 +/* Software ATR packet sample rate
25763 + * Valid Range: 0-100 0 = off, 1-100 = rate of Tx packet inspection
25765 + * Default Value: 20
25767 +IXGBE_PARAM(AtrSampleRate, "Software ATR Tx packet sample rate");
25769 +#define IXGBE_MAX_ATR_SAMPLE_RATE 100
25770 +#define IXGBE_MIN_ATR_SAMPLE_RATE 1
25771 +#define IXGBE_ATR_SAMPLE_RATE_OFF 0
25772 +#define IXGBE_DEFAULT_ATR_SAMPLE_RATE 20
25773 +#endif /* HAVE_TX_MQ */
25775 +/* FCoE - Fibre Channel over Ethernet Offload Enable/Disable
25777 + * Valid Range: 0, 1
25778 + * - 0 - disables FCoE Offload
25779 + * - 1 - enables FCoE Offload
25781 + * Default Value: 1
25783 +IXGBE_PARAM(FCoE, "Disable or enable FCoE Offload, default 1");
25784 +#endif /* IXGBE_FCOE */
25785 +struct ixgbe_option {
25786 + enum { enable_option, range_option, list_option } type;
25787 + const char *name;
25791 + struct { /* range_option info */
25795 + struct { /* list_option info */
25797 + const struct ixgbe_opt_list {
25805 +static int __devinit ixgbe_validate_option(unsigned int *value,
25806 + struct ixgbe_option *opt)
25808 + if (*value == OPTION_UNSET) {
25809 + *value = opt->def;
25813 + switch (opt->type) {
25814 + case enable_option:
25815 + switch (*value) {
25816 + case OPTION_ENABLED:
25817 + printk(KERN_INFO "ixgbe: %s Enabled\n", opt->name);
25819 + case OPTION_DISABLED:
25820 + printk(KERN_INFO "ixgbe: %s Disabled\n", opt->name);
25824 + case range_option:
25825 + if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
25826 + printk(KERN_INFO "ixgbe: %s set to %d\n", opt->name, *value);
25830 + case list_option: {
25832 + const struct ixgbe_opt_list *ent;
25834 + for (i = 0; i < opt->arg.l.nr; i++) {
25835 + ent = &opt->arg.l.p[i];
25836 + if (*value == ent->i) {
25837 + if (ent->str[0] != '\0')
25838 + printk(KERN_INFO "%s\n", ent->str);
25848 + printk(KERN_INFO "ixgbe: Invalid %s specified (%d), %s\n",
25849 + opt->name, *value, opt->err);
25850 + *value = opt->def;
25854 +#define LIST_LEN(l) (sizeof(l) / sizeof(l[0]))
25857 + * ixgbe_check_options - Range Checking for Command Line Parameters
25858 + * @adapter: board private structure
25860 + * This routine checks all command line parameters for valid user
25861 + * input. If an invalid value is given, or if no user specified
25862 + * value exists, a default value is used. The final value is stored
25863 + * in a variable in the adapter structure.
25865 +void __devinit ixgbe_check_options(struct ixgbe_adapter *adapter)
25867 + int bd = adapter->bd_number;
25868 + u32 *aflags = &adapter->flags;
25869 + struct ixgbe_ring_feature *feature = adapter->ring_feature;
25871 + if (bd >= IXGBE_MAX_NIC) {
25872 + printk(KERN_NOTICE
25873 + "Warning: no configuration for board #%d\n", bd);
25874 + printk(KERN_NOTICE "Using defaults for all values\n");
25875 +#ifndef module_param_array
25876 + bd = IXGBE_MAX_NIC;
25880 + { /* Interrupt Mode */
25881 + unsigned int int_mode;
25882 + static struct ixgbe_option opt = {
25883 + .type = range_option,
25884 + .name = "Interrupt Mode",
25886 + "using default of "__MODULE_STRING(IXGBE_DEFAULT_INT),
25887 + .def = IXGBE_DEFAULT_INT,
25888 + .arg = { .r = { .min = IXGBE_INT_LEGACY,
25889 + .max = IXGBE_INT_MSIX}}
25892 +#ifdef module_param_array
25893 + if (num_IntMode > bd || num_InterruptType > bd) {
25895 + int_mode = IntMode[bd];
25896 + if (int_mode == OPTION_UNSET)
25897 + int_mode = InterruptType[bd];
25898 + ixgbe_validate_option(&int_mode, &opt);
25899 + switch (int_mode) {
25900 + case IXGBE_INT_MSIX:
25901 + if (!(*aflags & IXGBE_FLAG_MSIX_CAPABLE))
25903 + "Ignoring MSI-X setting; "
25904 + "support unavailable\n");
25906 + case IXGBE_INT_MSI:
25907 + if (!(*aflags & IXGBE_FLAG_MSI_CAPABLE)) {
25909 + "Ignoring MSI setting; "
25910 + "support unavailable\n");
25912 + *aflags &= ~IXGBE_FLAG_MSIX_CAPABLE;
25913 + *aflags &= ~IXGBE_FLAG_DCB_CAPABLE;
25916 + case IXGBE_INT_LEGACY:
25918 + *aflags &= ~IXGBE_FLAG_MSIX_CAPABLE;
25919 + *aflags &= ~IXGBE_FLAG_MSI_CAPABLE;
25920 + *aflags &= ~IXGBE_FLAG_DCB_CAPABLE;
25923 +#ifdef module_param_array
25925 + /* default settings */
25926 + if (opt.def == IXGBE_INT_MSIX &&
25927 + *aflags & IXGBE_FLAG_MSIX_CAPABLE) {
25928 + *aflags |= IXGBE_FLAG_MSIX_CAPABLE;
25929 + *aflags |= IXGBE_FLAG_MSI_CAPABLE;
25930 + } else if (opt.def == IXGBE_INT_MSI &&
25931 + *aflags & IXGBE_FLAG_MSI_CAPABLE) {
25932 + *aflags &= ~IXGBE_FLAG_MSIX_CAPABLE;
25933 + *aflags |= IXGBE_FLAG_MSI_CAPABLE;
25934 + *aflags &= ~IXGBE_FLAG_DCB_CAPABLE;
25936 + *aflags &= ~IXGBE_FLAG_MSIX_CAPABLE;
25937 + *aflags &= ~IXGBE_FLAG_MSI_CAPABLE;
25938 + *aflags &= ~IXGBE_FLAG_DCB_CAPABLE;
25943 + { /* Multiple Queue Support */
25944 + static struct ixgbe_option opt = {
25945 + .type = enable_option,
25946 + .name = "Multiple Queue Support",
25947 + .err = "defaulting to Enabled",
25948 + .def = OPTION_ENABLED
25951 +#ifdef module_param_array
25952 + if (num_MQ > bd) {
25954 + unsigned int mq = MQ[bd];
25955 + ixgbe_validate_option(&mq, &opt);
25957 + *aflags |= IXGBE_FLAG_MQ_CAPABLE;
25959 + *aflags &= ~IXGBE_FLAG_MQ_CAPABLE;
25960 +#ifdef module_param_array
25962 + if (opt.def == OPTION_ENABLED)
25963 + *aflags |= IXGBE_FLAG_MQ_CAPABLE;
25965 + *aflags &= ~IXGBE_FLAG_MQ_CAPABLE;
25968 + /* Check Interoperability */
25969 + if ((*aflags & IXGBE_FLAG_MQ_CAPABLE) &&
25970 + !(*aflags & IXGBE_FLAG_MSIX_CAPABLE)) {
25971 + DPRINTK(PROBE, INFO,
25972 + "Multiple queues are not supported while MSI-X "
25973 + "is disabled. Disabling Multiple Queues.\n");
25974 + *aflags &= ~IXGBE_FLAG_MQ_CAPABLE;
25977 +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
25978 + { /* Direct Cache Access (DCA) */
25979 + static struct ixgbe_option opt = {
25980 + .type = range_option,
25981 + .name = "Direct Cache Access (DCA)",
25982 + .err = "defaulting to Enabled",
25983 + .def = IXGBE_MAX_DCA,
25984 + .arg = { .r = { .min = OPTION_DISABLED,
25985 + .max = IXGBE_MAX_DCA}}
25987 + unsigned int dca = opt.def;
25989 +#ifdef module_param_array
25990 + if (num_DCA > bd) {
25993 + ixgbe_validate_option(&dca, &opt);
25995 + *aflags &= ~IXGBE_FLAG_DCA_CAPABLE;
25997 + /* Check Interoperability */
25998 + if (!(*aflags & IXGBE_FLAG_DCA_CAPABLE)) {
25999 + DPRINTK(PROBE, INFO, "DCA is disabled\n");
26000 + *aflags &= ~IXGBE_FLAG_DCA_ENABLED;
26003 + if (dca == IXGBE_MAX_DCA) {
26004 + DPRINTK(PROBE, INFO,
26005 + "DCA enabled for rx data\n");
26006 + adapter->flags |= IXGBE_FLAG_DCA_ENABLED_DATA;
26008 +#ifdef module_param_array
26010 + /* make sure to clear the capability flag if the
26011 + * option is disabled by default above */
26012 + if (opt.def == OPTION_DISABLED)
26013 + *aflags &= ~IXGBE_FLAG_DCA_CAPABLE;
26016 + if (dca == IXGBE_MAX_DCA)
26017 + adapter->flags |= IXGBE_FLAG_DCA_ENABLED_DATA;
26019 +#endif /* CONFIG_DCA or CONFIG_DCA_MODULE */
26020 + { /* Receive-Side Scaling (RSS) */
26021 + static struct ixgbe_option opt = {
26022 + .type = range_option,
26023 + .name = "Receive-Side Scaling (RSS)",
26024 + .err = "using default.",
26025 + .def = OPTION_ENABLED,
26026 + .arg = { .r = { .min = OPTION_DISABLED,
26027 + .max = IXGBE_MAX_RSS_INDICES}}
26029 + unsigned int rss = RSS[bd];
26031 +#ifdef module_param_array
26032 + if (num_RSS > bd) {
26034 + if (rss != OPTION_ENABLED)
26035 + ixgbe_validate_option(&rss, &opt);
26037 + * we cannot use an else since validate option may
26038 + * have changed the state of RSS
26040 + if (rss == OPTION_ENABLED) {
26042 + * Base it off num_online_cpus() with
26043 + * a hardware limit cap.
26045 + rss = min(IXGBE_MAX_RSS_INDICES,
26046 + (int)num_online_cpus());
26048 + feature[RING_F_RSS].indices = rss;
26050 + *aflags |= IXGBE_FLAG_RSS_ENABLED;
26052 + *aflags &= ~IXGBE_FLAG_RSS_ENABLED;
26053 +#ifdef module_param_array
26055 + if (opt.def == OPTION_DISABLED) {
26056 + *aflags &= ~IXGBE_FLAG_RSS_ENABLED;
26058 + rss = min(IXGBE_MAX_RSS_INDICES,
26059 + (int)num_online_cpus());
26060 + feature[RING_F_RSS].indices = rss;
26062 + *aflags |= IXGBE_FLAG_RSS_ENABLED;
26064 + *aflags &= ~IXGBE_FLAG_RSS_ENABLED;
26068 + /* Check Interoperability */
26069 + if (*aflags & IXGBE_FLAG_RSS_ENABLED) {
26070 + if (!(*aflags & IXGBE_FLAG_RSS_CAPABLE)) {
26071 + DPRINTK(PROBE, INFO,
26072 + "RSS is not supported on this "
26073 + "hardware. Disabling RSS.\n");
26074 + *aflags &= ~IXGBE_FLAG_RSS_ENABLED;
26075 + feature[RING_F_RSS].indices = 0;
26076 + } else if (!(*aflags & IXGBE_FLAG_MQ_CAPABLE)) {
26077 + DPRINTK(PROBE, INFO,
26078 + "RSS is not supported while multiple "
26079 + "queues are disabled. "
26080 + "Disabling RSS.\n");
26081 + *aflags &= ~IXGBE_FLAG_RSS_ENABLED;
26082 + *aflags &= ~IXGBE_FLAG_DCB_CAPABLE;
26083 + feature[RING_F_RSS].indices = 0;
26087 + { /* Virtual Machine Device Queues (VMDQ) */
26088 + static struct ixgbe_option opt = {
26089 + .type = range_option,
26090 + .name = "Virtual Machine Device Queues (VMDQ)",
26091 + .err = "defaulting to Disabled",
26092 + .def = OPTION_DISABLED,
26093 + .arg = { .r = { .min = OPTION_DISABLED,
26094 + .max = IXGBE_MAX_VMDQ_INDICES
26098 +#ifdef module_param_array
26099 + if (num_VMDQ > bd) {
26101 + unsigned int vmdq = VMDQ[bd];
26102 + ixgbe_validate_option(&vmdq, &opt);
26103 + feature[RING_F_VMDQ].indices = vmdq;
26104 + adapter->flags2 |= IXGBE_FLAG2_VMDQ_DEFAULT_OVERRIDE;
26105 + /* zero or one both mean disabled from our driver's
26108 + *aflags |= IXGBE_FLAG_VMDQ_ENABLED;
26110 + *aflags &= ~IXGBE_FLAG_VMDQ_ENABLED;
26111 +#ifdef module_param_array
26113 + if (opt.def == OPTION_DISABLED) {
26114 + *aflags &= ~IXGBE_FLAG_VMDQ_ENABLED;
26116 + feature[RING_F_VMDQ].indices = IXGBE_DEFAULT_NUM_VMDQ;
26117 + *aflags |= IXGBE_FLAG_VMDQ_ENABLED;
26121 + /* Check Interoperability */
26122 + if (*aflags & IXGBE_FLAG_VMDQ_ENABLED) {
26123 + if (!(*aflags & IXGBE_FLAG_VMDQ_CAPABLE)) {
26124 + DPRINTK(PROBE, INFO,
26125 + "VMDQ is not supported on this "
26126 + "hardware. Disabling VMDQ.\n");
26127 + *aflags &= ~IXGBE_FLAG_VMDQ_ENABLED;
26128 + feature[RING_F_VMDQ].indices = 0;
26129 + } else if (!(*aflags & IXGBE_FLAG_MQ_CAPABLE)) {
26130 + DPRINTK(PROBE, INFO,
26131 + "VMDQ is not supported while multiple "
26132 + "queues are disabled. "
26133 + "Disabling VMDQ.\n");
26134 + *aflags &= ~IXGBE_FLAG_VMDQ_ENABLED;
26135 + feature[RING_F_VMDQ].indices = 0;
26138 + if (adapter->hw.mac.type == ixgbe_mac_82598EB)
26139 + feature[RING_F_VMDQ].indices =
26140 + min(feature[RING_F_VMDQ].indices, 16);
26142 + /* Disable RSS when using VMDQ mode */
26143 + *aflags &= ~IXGBE_FLAG_RSS_CAPABLE;
26144 + *aflags &= ~IXGBE_FLAG_RSS_ENABLED;
26147 +#ifdef CONFIG_PCI_IOV
26148 + { /* Single Root I/O Virtualization (SR-IOV) */
26149 + static struct ixgbe_option opt = {
26150 + .type = range_option,
26151 + .name = "I/O Virtualization (IOV)",
26152 + .err = "defaulting to Disabled",
26153 + .def = OPTION_DISABLED,
26154 + .arg = { .r = { .min = OPTION_DISABLED,
26155 + .max = IXGBE_MAX_VF_FUNCTIONS}}
26158 +#ifdef module_param_array
26159 + if (num_max_vfs > bd) {
26161 + unsigned int vfs = max_vfs[bd];
26162 + ixgbe_validate_option(&vfs, &opt);
26163 + adapter->num_vfs = vfs;
26165 + *aflags |= IXGBE_FLAG_SRIOV_ENABLED;
26167 + *aflags &= ~IXGBE_FLAG_SRIOV_ENABLED;
26168 +#ifdef module_param_array
26170 + if (opt.def == OPTION_DISABLED) {
26171 + adapter->num_vfs = 0;
26172 + *aflags &= ~IXGBE_FLAG_SRIOV_ENABLED;
26174 + adapter->num_vfs = opt.def;
26175 + *aflags |= IXGBE_FLAG_SRIOV_ENABLED;
26180 + /* Check Interoperability */
26181 + if (*aflags & IXGBE_FLAG_SRIOV_ENABLED) {
26182 + if (!(*aflags & IXGBE_FLAG_SRIOV_CAPABLE)) {
26183 + DPRINTK(PROBE, INFO,
26184 + "IOV is not supported on this "
26185 + "hardware. Disabling IOV.\n");
26186 + *aflags &= ~IXGBE_FLAG_SRIOV_ENABLED;
26187 + adapter->num_vfs = 0;
26188 + } else if (!(*aflags & IXGBE_FLAG_MQ_CAPABLE)) {
26189 + DPRINTK(PROBE, INFO,
26190 + "IOV is not supported while multiple "
26191 + "queues are disabled. "
26192 + "Disabling IOV.\n");
26193 + *aflags &= ~IXGBE_FLAG_SRIOV_ENABLED;
26194 + adapter->num_vfs = 0;
26196 + *aflags &= ~IXGBE_FLAG_RSS_CAPABLE;
26197 + adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE;
26201 +#endif /* CONFIG_PCI_IOV */
26202 + { /* Interrupt Throttling Rate */
26203 + static struct ixgbe_option opt = {
26204 + .type = range_option,
26205 + .name = "Interrupt Throttling Rate (ints/sec)",
26206 + .err = "using default of "__MODULE_STRING(DEFAULT_ITR),
26207 + .def = DEFAULT_ITR,
26208 + .arg = { .r = { .min = MIN_ITR,
26209 + .max = MAX_ITR }}
26212 +#ifdef module_param_array
26213 + if (num_InterruptThrottleRate > bd) {
26215 + u32 eitr = InterruptThrottleRate[bd];
26218 + DPRINTK(PROBE, INFO, "%s turned off\n",
26221 + * zero is a special value; we don't want to
26222 + * turn off ITR completely, just set it to an
26223 + * insane interrupt rate
26225 + adapter->rx_eitr_param = IXGBE_MAX_INT_RATE;
26226 + adapter->rx_itr_setting = 0;
26227 + adapter->tx_itr_setting = 0;
26230 + DPRINTK(PROBE, INFO, "dynamic interrupt "
26231 + "throttling enabled\n");
26232 + adapter->rx_eitr_param = 20000;
26233 + adapter->tx_eitr_param =
26234 + adapter->rx_eitr_param >> 1;
26235 + adapter->rx_itr_setting = 1;
26236 + adapter->tx_itr_setting = 1;
26239 + ixgbe_validate_option(&eitr, &opt);
26240 + adapter->rx_eitr_param = eitr;
26241 + adapter->tx_eitr_param = (eitr >> 1);
26242 + /* the first bit is used as control */
26243 + adapter->rx_itr_setting = eitr & ~1;
26244 + adapter->tx_itr_setting = (eitr >> 1) & ~1;
26247 +#ifdef module_param_array
26249 + adapter->rx_eitr_param = DEFAULT_ITR;
26250 + adapter->rx_itr_setting = DEFAULT_ITR & ~1;
26251 + adapter->tx_eitr_param = (DEFAULT_ITR >> 1);
26252 + adapter->tx_itr_setting = (DEFAULT_ITR >> 1) & ~1;
26255 + /* Check Interoperability */
26256 + if (adapter->rx_itr_setting == 0 &&
26257 + adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) {
26258 + /* itr ==0 and RSC are mutually exclusive */
26259 + adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE;
26260 + adapter->netdev->features &= ~NETIF_F_LRO;
26261 + DPRINTK(PROBE, INFO,
26262 + "InterruptThrottleRate set to 0, disabling RSC\n");
26265 +#ifndef IXGBE_NO_LLI
26266 + { /* Low Latency Interrupt TCP Port*/
26267 + static struct ixgbe_option opt = {
26268 + .type = range_option,
26269 + .name = "Low Latency Interrupt TCP Port",
26270 + .err = "using default of "
26271 + __MODULE_STRING(DEFAULT_LLIPORT),
26272 + .def = DEFAULT_LLIPORT,
26273 + .arg = { .r = { .min = MIN_LLIPORT,
26274 + .max = MAX_LLIPORT }}
26277 +#ifdef module_param_array
26278 + if (num_LLIPort > bd) {
26280 + adapter->lli_port = LLIPort[bd];
26281 + if (adapter->lli_port) {
26282 + ixgbe_validate_option(&adapter->lli_port, &opt);
26284 + DPRINTK(PROBE, INFO, "%s turned off\n",
26287 +#ifdef module_param_array
26289 + adapter->lli_port = opt.def;
26293 + { /* Low Latency Interrupt on Packet Size */
26294 + static struct ixgbe_option opt = {
26295 + .type = range_option,
26296 + .name = "Low Latency Interrupt on Packet Size",
26297 + .err = "using default of "
26298 + __MODULE_STRING(DEFAULT_LLISIZE),
26299 + .def = DEFAULT_LLISIZE,
26300 + .arg = { .r = { .min = MIN_LLISIZE,
26301 + .max = MAX_LLISIZE }}
26304 +#ifdef module_param_array
26305 + if (num_LLISize > bd) {
26307 + adapter->lli_size = LLISize[bd];
26308 + if (adapter->lli_size) {
26309 + ixgbe_validate_option(&adapter->lli_size, &opt);
26311 + DPRINTK(PROBE, INFO, "%s turned off\n",
26314 +#ifdef module_param_array
26316 + adapter->lli_size = opt.def;
26320 + { /*Low Latency Interrupt on TCP Push flag*/
26321 + static struct ixgbe_option opt = {
26322 + .type = enable_option,
26323 + .name = "Low Latency Interrupt on TCP Push flag",
26324 + .err = "defaulting to Disabled",
26325 + .def = OPTION_DISABLED
26328 +#ifdef module_param_array
26329 + if (num_LLIPush > bd) {
26331 + unsigned int lli_push = LLIPush[bd];
26332 + ixgbe_validate_option(&lli_push, &opt);
26334 + *aflags |= IXGBE_FLAG_LLI_PUSH;
26336 + *aflags &= ~IXGBE_FLAG_LLI_PUSH;
26337 +#ifdef module_param_array
26339 + if (opt.def == OPTION_ENABLED)
26340 + *aflags |= IXGBE_FLAG_LLI_PUSH;
26342 + *aflags &= ~IXGBE_FLAG_LLI_PUSH;
26346 + { /* Low Latency Interrupt EtherType*/
26347 + static struct ixgbe_option opt = {
26348 + .type = range_option,
26349 + .name = "Low Latency Interrupt on Ethernet Protocol Type",
26350 + .err = "using default of "
26351 + __MODULE_STRING(DEFAULT_LLIETYPE),
26352 + .def = DEFAULT_LLIETYPE,
26353 + .arg = { .r = { .min = MIN_LLIETYPE,
26354 + .max = MAX_LLIETYPE }}
26357 +#ifdef module_param_array
26358 + if (num_LLIEType > bd) {
26360 + adapter->lli_etype = LLIEType[bd];
26361 + if (adapter->lli_etype) {
26362 + ixgbe_validate_option(&adapter->lli_etype, &opt);
26364 + DPRINTK(PROBE, INFO, "%s turned off\n",
26367 +#ifdef module_param_array
26369 + adapter->lli_etype = opt.def;
26373 + { /* LLI VLAN Priority */
26374 + static struct ixgbe_option opt = {
26375 + .type = range_option,
26376 + .name = "Low Latency Interrupt on VLAN priority threashold",
26377 + .err = "using default of "
26378 + __MODULE_STRING(DEFAULT_LLIVLANP),
26379 + .def = DEFAULT_LLIVLANP,
26380 + .arg = { .r = { .min = MIN_LLIVLANP,
26381 + .max = MAX_LLIVLANP }}
26384 +#ifdef module_param_array
26385 + if (num_LLIVLANP > bd) {
26387 + adapter->lli_vlan_pri = LLIVLANP[bd];
26388 + if (adapter->lli_vlan_pri) {
26389 + ixgbe_validate_option(&adapter->lli_vlan_pri, &opt);
26391 + DPRINTK(PROBE, INFO, "%s turned off\n",
26394 +#ifdef module_param_array
26396 + adapter->lli_vlan_pri = opt.def;
26400 +#endif /* IXGBE_NO_LLI */
26401 + { /* Rx buffer mode */
26402 + unsigned int rx_buf_mode;
26403 + static struct ixgbe_option opt = {
26404 + .type = range_option,
26405 + .name = "Rx buffer mode",
26406 + .err = "using default of "
26407 + __MODULE_STRING(IXGBE_DEFAULT_RXBUFMODE),
26408 + .def = IXGBE_DEFAULT_RXBUFMODE,
26409 + .arg = {.r = {.min = IXGBE_RXBUFMODE_1BUF_ALWAYS,
26410 + .max = IXGBE_RXBUFMODE_OPTIMAL}}
26413 +#ifdef module_param_array
26414 + if (num_RxBufferMode > bd) {
26416 + rx_buf_mode = RxBufferMode[bd];
26417 + ixgbe_validate_option(&rx_buf_mode, &opt);
26418 + switch (rx_buf_mode) {
26419 + case IXGBE_RXBUFMODE_OPTIMAL:
26420 + *aflags |= IXGBE_FLAG_RX_1BUF_CAPABLE;
26421 + *aflags |= IXGBE_FLAG_RX_PS_CAPABLE;
26423 + case IXGBE_RXBUFMODE_PS_ALWAYS:
26424 + *aflags |= IXGBE_FLAG_RX_PS_CAPABLE;
26426 + case IXGBE_RXBUFMODE_1BUF_ALWAYS:
26427 + *aflags |= IXGBE_FLAG_RX_1BUF_CAPABLE;
26432 +#ifdef module_param_array
26434 + *aflags |= IXGBE_FLAG_RX_1BUF_CAPABLE;
26435 + *aflags |= IXGBE_FLAG_RX_PS_CAPABLE;
26440 + { /* Flow Director filtering mode */
26441 + unsigned int fdir_filter_mode;
26442 + static struct ixgbe_option opt = {
26443 + .type = range_option,
26444 + .name = "Flow Director filtering mode",
26445 + .err = "using default of "
26446 + __MODULE_STRING(IXGBE_DEFAULT_FDIR_FILTER),
26447 + .def = IXGBE_DEFAULT_FDIR_FILTER,
26448 + .arg = {.r = {.min = IXGBE_FDIR_FILTER_OFF,
26449 + .max = IXGBE_FDIR_FILTER_PERFECT}}
26452 + *aflags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
26453 + *aflags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
26454 + if (adapter->hw.mac.type == ixgbe_mac_82598EB)
26455 + goto no_flow_director;
26456 + if (num_FdirMode > bd) {
26457 + fdir_filter_mode = FdirMode[bd];
26458 + ixgbe_validate_option(&fdir_filter_mode, &opt);
26460 + switch (fdir_filter_mode) {
26461 + case IXGBE_FDIR_FILTER_OFF:
26462 + DPRINTK(PROBE, INFO, "Flow Director disabled\n");
26464 + case IXGBE_FDIR_FILTER_HASH:
26465 + *aflags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
26466 + *aflags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
26467 + feature[RING_F_FDIR].indices =
26468 + IXGBE_MAX_FDIR_INDICES;
26469 + DPRINTK(PROBE, INFO,
26470 + "Flow Director hash filtering enabled\n");
26472 + case IXGBE_FDIR_FILTER_PERFECT:
26473 +#ifdef NETIF_F_NTUPLE
26474 + *aflags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
26475 + *aflags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
26476 + feature[RING_F_FDIR].indices =
26477 + IXGBE_MAX_FDIR_INDICES;
26478 + DPRINTK(PROBE, INFO,
26479 + "Flow Director perfect filtering enabled\n");
26480 +#else /* NETIF_F_NTUPLE */
26481 + DPRINTK(PROBE, INFO, "No ethtool support for "
26482 + "Flow Director perfect filtering. "
26483 + "Defaulting to hash filtering.\n");
26484 + *aflags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
26485 + *aflags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
26486 + feature[RING_F_FDIR].indices =
26487 + IXGBE_MAX_FDIR_INDICES;
26488 +#endif /* NETIF_F_NTUPLE */
26494 + if (opt.def == IXGBE_FDIR_FILTER_OFF) {
26495 + *aflags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
26496 + *aflags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
26497 + feature[RING_F_FDIR].indices = 0;
26498 + DPRINTK(PROBE, INFO,
26499 + "Flow Director hash filtering disabled\n");
26501 + *aflags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
26502 + feature[RING_F_FDIR].indices = IXGBE_MAX_FDIR_INDICES;
26503 + DPRINTK(PROBE, INFO,
26504 + "Flow Director hash filtering enabled\n");
26507 + /* Check interoperability */
26508 + if ((*aflags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
26509 + (*aflags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
26510 + if (!(*aflags & IXGBE_FLAG_MQ_CAPABLE)) {
26511 + DPRINTK(PROBE, INFO,
26512 + "Flow Director is not supported "
26513 + "while multiple queues are disabled. "
26514 + "Disabling Flow Director\n");
26515 + *aflags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
26516 + *aflags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
26520 + /* empty code line with semi-colon */ ;
26522 + { /* Flow Director packet buffer allocation */
26523 + unsigned int fdir_pballoc_mode;
26524 + static struct ixgbe_option opt = {
26525 + .type = range_option,
26526 + .name = "Flow Director packet buffer allocation",
26527 + .err = "using default of "
26528 + __MODULE_STRING(IXGBE_DEFAULT_FDIR_PBALLOC),
26529 + .def = IXGBE_DEFAULT_FDIR_PBALLOC,
26530 + .arg = {.r = {.min = IXGBE_FDIR_PBALLOC_64K,
26531 + .max = IXGBE_FDIR_PBALLOC_256K}}
26533 + char pstring[10];
26535 + if ((adapter->hw.mac.type == ixgbe_mac_82598EB) ||
26536 + (!(*aflags & (IXGBE_FLAG_FDIR_HASH_CAPABLE |
26537 + IXGBE_FLAG_FDIR_PERFECT_CAPABLE))))
26538 + goto no_fdir_pballoc;
26539 + if (num_FdirPballoc > bd) {
26540 + fdir_pballoc_mode = FdirPballoc[bd];
26541 + ixgbe_validate_option(&fdir_pballoc_mode, &opt);
26542 + switch (fdir_pballoc_mode) {
26543 + case IXGBE_FDIR_PBALLOC_64K:
26544 + adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
26545 + sprintf(pstring, "64kB");
26547 + case IXGBE_FDIR_PBALLOC_128K:
26548 + adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_128K;
26549 + sprintf(pstring, "128kB");
26551 + case IXGBE_FDIR_PBALLOC_256K:
26552 + adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_256K;
26553 + sprintf(pstring, "256kB");
26558 + DPRINTK(PROBE, INFO,
26559 + "Flow Director allocated %s of packet buffer\n",
26562 + adapter->fdir_pballoc = opt.def;
26563 + DPRINTK(PROBE, INFO,
26564 + "Flow Director allocated 64kB of packet buffer\n");
26567 + /* empty code line with semi-colon */ ;
26569 + { /* Flow Director ATR Tx sample packet rate */
26570 + static struct ixgbe_option opt = {
26571 + .type = range_option,
26572 + .name = "Software ATR Tx packet sample rate",
26573 + .err = "using default of "
26574 + __MODULE_STRING(IXGBE_DEFAULT_ATR_SAMPLE_RATE),
26575 + .def = IXGBE_DEFAULT_ATR_SAMPLE_RATE,
26576 + .arg = {.r = {.min = IXGBE_ATR_SAMPLE_RATE_OFF,
26577 + .max = IXGBE_MAX_ATR_SAMPLE_RATE}}
26579 + static const char atr_string[] =
26580 + "ATR Tx Packet sample rate set to";
26582 + adapter->atr_sample_rate = IXGBE_ATR_SAMPLE_RATE_OFF;
26583 + if (adapter->hw.mac.type == ixgbe_mac_82598EB)
26584 + goto no_fdir_sample;
26586 + /* no sample rate for perfect filtering */
26587 + if (*aflags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
26588 + goto no_fdir_sample;
26589 + if (num_AtrSampleRate > bd) {
26590 + /* Only enable the sample rate if hashing (ATR) is on */
26591 + if (*aflags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
26592 + adapter->atr_sample_rate = AtrSampleRate[bd];
26594 + if (adapter->atr_sample_rate) {
26595 + ixgbe_validate_option(&adapter->atr_sample_rate,
26597 + DPRINTK(PROBE, INFO, "%s %d\n", atr_string,
26598 + adapter->atr_sample_rate);
26601 + /* Only enable the sample rate if hashing (ATR) is on */
26602 + if (*aflags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
26603 + adapter->atr_sample_rate = opt.def;
26605 + DPRINTK(PROBE, INFO, "%s default of %d\n", atr_string,
26606 + adapter->atr_sample_rate);
26609 + /* empty code line with semi-colon */ ;
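+/*
+ * Illustrative usage sketch (values are hypothetical): like the other
+ * per-port options above, FdirPballoc and AtrSampleRate take one
+ * comma-separated entry per supported board, e.g.
+ *
+ *	modprobe ixgbe FdirPballoc=1,1 AtrSampleRate=20,20
+ */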
26611 +#endif /* HAVE_TX_MQ */
26614 + *aflags &= ~IXGBE_FLAG_FCOE_CAPABLE;
26616 + switch (adapter->hw.mac.type) {
26617 + case ixgbe_mac_82599EB: {
26618 + struct ixgbe_option opt = {
26619 + .type = enable_option,
26620 + .name = "Enable/Disable FCoE offload",
26621 + .err = "defaulting to Enabled",
26622 + .def = OPTION_ENABLED
26624 +#ifdef module_param_array
26625 + if (num_FCoE > bd) {
26627 + unsigned int fcoe = FCoE[bd];
26629 + ixgbe_validate_option(&fcoe, &opt);
26631 + *aflags |= IXGBE_FLAG_FCOE_CAPABLE;
26632 +#ifdef module_param_array
26634 + if (opt.def == OPTION_ENABLED)
26635 + *aflags |= IXGBE_FLAG_FCOE_CAPABLE;
26638 +#ifdef CONFIG_PCI_IOV
26639 + if (*aflags & IXGBE_FLAG_SRIOV_ENABLED)
26640 + *aflags &= ~IXGBE_FLAG_FCOE_CAPABLE;
26642 + DPRINTK(PROBE, INFO, "FCoE Offload feature %sabled\n",
26643 + (*aflags & IXGBE_FLAG_FCOE_CAPABLE) ?
26651 +#endif /* IXGBE_FCOE */
26652 + { /* Node assignment */
26653 + static struct ixgbe_option opt = {
26654 + .type = range_option,
26655 + .name = "Node to start on",
26656 +#ifdef HAVE_EARLY_VMALLOC_NODE
26657 + .err = "defaulting to 0",
26660 + .err = "defaulting to -1",
26663 + .arg = { .r = { .min = 0,
26664 + .max = (MAX_NUMNODES - 1)}}
26666 + int node_param = opt.def;
26668 + /* if the default was zero then we need to set the
26669 + * default value to an online node, which is not
26670 + * necessarily zero, and the constant initializer
26671 + * above can't take first_online_node */
26672 + if (node_param == 0)
26673 + /* must set opt.def for validate */
26674 + opt.def = node_param = first_online_node;
26675 +#ifdef module_param_array
26676 + if (num_Node > bd) {
26678 + node_param = Node[bd];
26679 + ixgbe_validate_option((uint *)&node_param, &opt);
26681 + if (node_param != OPTION_UNSET) {
26682 + DPRINTK(PROBE, INFO, "node set to %d\n", node_param);
26684 +#ifdef module_param_array
26687 + /* check sanity of the value */
26688 + if (node_param != -1 && !node_online(node_param)) {
26689 + DPRINTK(PROBE, INFO,
26690 + "ignoring node set to invalid value %d\n",
26692 + node_param = opt.def;
26695 + adapter->node = node_param;
26699 diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_phy.c linux-2.6.22-50/drivers/net/ixgbe/ixgbe_phy.c
26700 --- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_phy.c 1969-12-31 19:00:00.000000000 -0500
26701 +++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_phy.c 2010-08-25 17:56:26.000000000 -0400
26703 +/*******************************************************************************
26705 + Intel 10 Gigabit PCI Express Linux driver
26706 + Copyright(c) 1999 - 2010 Intel Corporation.
26708 + This program is free software; you can redistribute it and/or modify it
26709 + under the terms and conditions of the GNU General Public License,
26710 + version 2, as published by the Free Software Foundation.
26712 + This program is distributed in the hope it will be useful, but WITHOUT
26713 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
26714 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
26717 + You should have received a copy of the GNU General Public License along with
26718 + this program; if not, write to the Free Software Foundation, Inc.,
26719 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
26721 + The full GNU General Public License is included in this distribution in
26722 + the file called "COPYING".
26724 + Contact Information:
26725 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
26726 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26728 +*******************************************************************************/
26730 +#include "ixgbe_api.h"
26731 +#include "ixgbe_common.h"
26732 +#include "ixgbe_phy.h"
26734 +static void ixgbe_i2c_start(struct ixgbe_hw *hw);
26735 +static void ixgbe_i2c_stop(struct ixgbe_hw *hw);
26736 +static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data);
26737 +static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data);
26738 +static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw);
26739 +static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data);
26740 +static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data);
26741 +static s32 ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
26742 +static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
26743 +static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data);
26744 +static bool ixgbe_get_i2c_data(u32 *i2cctl);
26745 +void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw);
26748 + * ixgbe_init_phy_ops_generic - Inits PHY function ptrs
26749 + * @hw: pointer to the hardware structure
26751 + * Initialize the function pointers.
26753 +s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw)
26755 + struct ixgbe_phy_info *phy = &hw->phy;
26758 + phy->ops.identify = &ixgbe_identify_phy_generic;
26759 + phy->ops.reset = &ixgbe_reset_phy_generic;
26760 + phy->ops.read_reg = &ixgbe_read_phy_reg_generic;
26761 + phy->ops.write_reg = &ixgbe_write_phy_reg_generic;
26762 + phy->ops.setup_link = &ixgbe_setup_phy_link_generic;
26763 + phy->ops.setup_link_speed = &ixgbe_setup_phy_link_speed_generic;
26764 + phy->ops.check_link = NULL;
26765 + phy->ops.get_firmware_version = ixgbe_get_phy_firmware_version_generic;
26766 + phy->ops.read_i2c_byte = &ixgbe_read_i2c_byte_generic;
26767 + phy->ops.write_i2c_byte = &ixgbe_write_i2c_byte_generic;
26768 + phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic;
26769 + phy->ops.write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic;
26770 + phy->ops.i2c_bus_clear = &ixgbe_i2c_bus_clear;
26771 + phy->ops.identify_sfp = &ixgbe_identify_sfp_module_generic;
26772 + phy->sfp_type = ixgbe_sfp_type_unknown;
26773 + phy->ops.check_overtemp = &ixgbe_tn_check_overtemp;
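+	/*
+	 * Sketch, not part of this patch: callers reach the PHY through
+	 * these function pointers rather than calling the generic
+	 * routines directly, so MAC-specific init code can override
+	 * individual entries, e.g.:
+	 *
+	 *	u16 id_high;
+	 *	hw->phy.ops.identify(hw);
+	 *	hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH,
+	 *			     IXGBE_MDIO_PMA_PMD_DEV_TYPE, &id_high);
+	 */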
26778 + * ixgbe_identify_phy_generic - Get physical layer module
26779 + * @hw: pointer to hardware structure
26781 + * Determines the physical layer module found on the current adapter.
26783 +s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
26785 + s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
26787 + u16 ext_ability = 0;
26789 + if (hw->phy.type == ixgbe_phy_unknown) {
26790 + for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
26791 + if (ixgbe_validate_phy_addr(hw, phy_addr)) {
26792 + hw->phy.addr = phy_addr;
26793 + ixgbe_get_phy_id(hw);
26795 + ixgbe_get_phy_type_from_id(hw->phy.id);
26797 + if (hw->phy.type == ixgbe_phy_unknown) {
26798 + hw->phy.ops.read_reg(hw,
26799 + IXGBE_MDIO_PHY_EXT_ABILITY,
26800 + IXGBE_MDIO_PMA_PMD_DEV_TYPE,
26802 + if (ext_ability &
26803 + IXGBE_MDIO_PHY_10GBASET_ABILITY ||
26805 + IXGBE_MDIO_PHY_1000BASET_ABILITY)
26807 + ixgbe_phy_cu_unknown;
26810 + ixgbe_phy_generic;
26818 + hw->phy.addr = 0;
26827 + * ixgbe_validate_phy_addr - Determines if the PHY address is valid
26828 + * @hw: pointer to hardware structure
26831 +bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr)
26834 + bool valid = false;
26836 + hw->phy.addr = phy_addr;
26837 + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH,
26838 + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_id);
26840 + if (phy_id != 0xFFFF && phy_id != 0x0)
26847 + * ixgbe_get_phy_id - Get the PHY ID
26848 + * @hw: pointer to hardware structure
26851 +s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
26854 + u16 phy_id_high = 0;
26855 + u16 phy_id_low = 0;
26857 + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH,
26858 + IXGBE_MDIO_PMA_PMD_DEV_TYPE,
26861 + if (status == 0) {
26862 + hw->phy.id = (u32)(phy_id_high << 16);
26863 + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_LOW,
26864 + IXGBE_MDIO_PMA_PMD_DEV_TYPE,
26866 + hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK);
26867 + hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK);
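+		/*
+		 * Worked example with illustrative values, assuming the
+		 * usual IXGBE_PHY_REVISION_MASK of 0xFFFFFFF0:
+		 * phy_id_high = 0x0141 and phy_id_low = 0x0CC2 yield
+		 * hw->phy.id = 0x01410CC0 and hw->phy.revision = 0x2.
+		 */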
26873 + * ixgbe_get_phy_type_from_id - Get the phy type
26874 + * @hw: pointer to hardware structure
26877 +enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
26879 + enum ixgbe_phy_type phy_type;
26881 + switch (phy_id) {
26882 + case TN1010_PHY_ID:
26883 + phy_type = ixgbe_phy_tn;
26885 + case AQ1002_PHY_ID:
26886 + phy_type = ixgbe_phy_aq;
26888 + case QT2022_PHY_ID:
26889 + phy_type = ixgbe_phy_qt;
26892 + phy_type = ixgbe_phy_nl;
26895 + phy_type = ixgbe_phy_unknown;
26899 + hw_dbg(hw, "phy type found is %d\n", phy_type);
26904 + * ixgbe_reset_phy_generic - Performs a PHY reset
26905 + * @hw: pointer to hardware structure
26907 +s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
26913 + if (hw->phy.type == ixgbe_phy_unknown)
26914 + status = ixgbe_identify_phy_generic(hw);
26916 + if (status != 0 || hw->phy.type == ixgbe_phy_none)
26919 + if (!hw->phy.reset_if_overtemp &&
26920 + (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw))) {
26921 + /* Don't reset PHY if it's shut down due to overtemp. */
26926 + * Perform soft PHY reset to the PHY_XS.
26927 + * This will cause a soft reset to the PHY
26929 + hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
26930 + IXGBE_MDIO_PHY_XS_DEV_TYPE,
26931 + IXGBE_MDIO_PHY_XS_RESET);
26934 + * Poll for reset bit to self-clear indicating reset is complete.
26935 + * Some PHYs could take up to 3 seconds to complete and need about
26936 + * 1.7 usec delay after the reset is complete.
26938 + for (i = 0; i < 30; i++) {
26940 + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
26941 + IXGBE_MDIO_PHY_XS_DEV_TYPE, &ctrl);
26942 + if (!(ctrl & IXGBE_MDIO_PHY_XS_RESET)) {
26948 + if (ctrl & IXGBE_MDIO_PHY_XS_RESET) {
26949 + status = IXGBE_ERR_RESET_FAILED;
26950 + hw_dbg(hw, "PHY reset polling failed to complete.\n");
26958 + * ixgbe_read_phy_reg_generic - Reads a value from a specified PHY register
26959 + * @hw: pointer to hardware structure
26960 + * @reg_addr: 32 bit address of PHY register to read
+ * @device_type: 5 bit device type
26961 + * @phy_data: Pointer to read data from PHY register
26963 +s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
26964 + u32 device_type, u16 *phy_data)
26972 + if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
26973 + gssr = IXGBE_GSSR_PHY1_SM;
26975 + gssr = IXGBE_GSSR_PHY0_SM;
26977 + if (ixgbe_acquire_swfw_sync(hw, gssr) != 0)
26978 + status = IXGBE_ERR_SWFW_SYNC;
26980 + if (status == 0) {
26981 + /* Setup and write the address cycle command */
26982 + command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
26983 + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
26984 + (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
26985 + (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
26987 + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
26990 + * Check every 10 usec to see if the address cycle completed.
26991 + * The MDI Command bit will clear when the operation is
26994 + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
26997 + command = IXGBE_READ_REG(hw, IXGBE_MSCA);
26999 + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
27003 + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
27004 + hw_dbg(hw, "PHY address command did not complete.\n");
27005 + status = IXGBE_ERR_PHY;
27008 + if (status == 0) {
27010 + * Address cycle complete, setup and write the read
27013 + command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
27014 + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
27015 + (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
27016 + (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));
27018 + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
27021 + * Check every 10 usec to see if the address cycle
27022 + * completed. The MDI Command bit will clear when the
27023 + * operation is complete
27025 + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
27028 + command = IXGBE_READ_REG(hw, IXGBE_MSCA);
27030 + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
27034 + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
27035 + hw_dbg(hw, "PHY read command didn't complete\n");
27036 + status = IXGBE_ERR_PHY;
27039 + * Read operation is complete. Get the data
27042 + data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
27043 + data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
27044 + *phy_data = (u16)(data);
27048 + ixgbe_release_swfw_sync(hw, gssr);
27055 + * ixgbe_write_phy_reg_generic - Writes a value to specified PHY register
27056 + * @hw: pointer to hardware structure
27057 + * @reg_addr: 32 bit PHY register to write
27058 + * @device_type: 5 bit device type
27059 + * @phy_data: Data to write to the PHY register
27061 +s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
27062 + u32 device_type, u16 phy_data)
27069 + if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
27070 + gssr = IXGBE_GSSR_PHY1_SM;
27072 + gssr = IXGBE_GSSR_PHY0_SM;
27074 + if (ixgbe_acquire_swfw_sync(hw, gssr) != 0)
27075 + status = IXGBE_ERR_SWFW_SYNC;
27077 + if (status == 0) {
27078 + /* Put the data in the MDI single read and write data register */
27079 + IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);
27081 + /* Setup and write the address cycle command */
27082 + command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
27083 + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
27084 + (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
27085 + (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
27087 + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
27090 + * Check every 10 usec to see if the address cycle completed.
27091 + * The MDI Command bit will clear when the operation is
27094 + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
27097 + command = IXGBE_READ_REG(hw, IXGBE_MSCA);
27099 + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
27103 + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
27104 + hw_dbg(hw, "PHY address cmd didn't complete\n");
27105 + status = IXGBE_ERR_PHY;
27108 + if (status == 0) {
27110 + * Address cycle complete, setup and write the write
27113 + command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
27114 + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
27115 + (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
27116 + (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));
27118 + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
27121 + * Check every 10 usec to see if the address cycle
27122 + * completed. The MDI Command bit will clear when the
27123 + * operation is complete
27125 + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
27128 + command = IXGBE_READ_REG(hw, IXGBE_MSCA);
27130 + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
27134 + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
27135 + hw_dbg(hw, "PHY address cmd didn't complete\n");
27136 + status = IXGBE_ERR_PHY;
27140 + ixgbe_release_swfw_sync(hw, gssr);
27147 + * ixgbe_setup_phy_link_generic - Set and restart autoneg
27148 + * @hw: pointer to hardware structure
27150 + * Restarts autonegotiation on the PHY and waits for completion.
27152 +s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
27156 + u32 max_time_out = 10;
27157 + u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
27158 + bool autoneg = false;
27159 + ixgbe_link_speed speed;
27161 + ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);
27163 + if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
27164 + /* Set or unset auto-negotiation 10G advertisement */
27165 + hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
27166 + IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
27169 + autoneg_reg &= ~IXGBE_MII_10GBASE_T_ADVERTISE;
27170 + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
27171 + autoneg_reg |= IXGBE_MII_10GBASE_T_ADVERTISE;
27173 + hw->phy.ops.write_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
27174 + IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
27178 + if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
27179 + /* Set or unset auto-negotiation 1G advertisement */
27180 + hw->phy.ops.read_reg(hw,
27181 + IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
27182 + IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
27185 + autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE;
27186 + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
27187 + autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE;
27189 + hw->phy.ops.write_reg(hw,
27190 + IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
27191 + IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
27195 + if (speed & IXGBE_LINK_SPEED_100_FULL) {
27196 + /* Set or unset auto-negotiation 100M advertisement */
27197 + hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
27198 + IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
27201 + autoneg_reg &= ~IXGBE_MII_100BASE_T_ADVERTISE;
27202 + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
27203 + autoneg_reg |= IXGBE_MII_100BASE_T_ADVERTISE;
27205 + hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
27206 + IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
27210 + /* Restart PHY autonegotiation and wait for completion */
27211 + hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
27212 + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
27214 + autoneg_reg |= IXGBE_MII_RESTART;
27216 + hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
27217 + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
27219 + /* Wait for autonegotiation to finish */
27220 + for (time_out = 0; time_out < max_time_out; time_out++) {
27222 + /* Check if autonegotiation has completed */
27223 + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
27224 + IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
27227 + autoneg_reg &= IXGBE_MII_AUTONEG_COMPLETE;
27228 + if (autoneg_reg == IXGBE_MII_AUTONEG_COMPLETE) {
27233 + if (time_out == max_time_out) {
27234 + status = IXGBE_ERR_LINK_SETUP;
27235 + hw_dbg(hw, "ixgbe_setup_phy_link_generic: time out");
27242 + * ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities
27243 + * @hw: pointer to hardware structure
27244 + * @speed: new link speed
27245 + * @autoneg: true if autonegotiation enabled
27247 +s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
27248 + ixgbe_link_speed speed,
27250 + bool autoneg_wait_to_complete)
27254 + * Clear autoneg_advertised and set new values based on input link
27257 + hw->phy.autoneg_advertised = 0;
27259 + if (speed & IXGBE_LINK_SPEED_10GB_FULL)
27260 + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
27262 + if (speed & IXGBE_LINK_SPEED_1GB_FULL)
27263 + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
27265 + if (speed & IXGBE_LINK_SPEED_100_FULL)
27266 + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
27268 + /* Setup link based on the new speed settings */
27269 + hw->phy.ops.setup_link(hw);
27275 + * ixgbe_get_copper_link_capabilities_generic - Determines link capabilities
27276 + * @hw: pointer to hardware structure
27277 + * @speed: pointer to link speed
27278 + * @autoneg: boolean auto-negotiation value
27280 + * Determines the link capabilities by reading the AUTOC register.
27282 +s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
27283 + ixgbe_link_speed *speed,
27286 + s32 status = IXGBE_ERR_LINK_SETUP;
27287 + u16 speed_ability;
27292 + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY,
27293 + IXGBE_MDIO_PMA_PMD_DEV_TYPE,
27296 + if (status == 0) {
27297 + if (speed_ability & IXGBE_MDIO_PHY_SPEED_10G)
27298 + *speed |= IXGBE_LINK_SPEED_10GB_FULL;
27299 + if (speed_ability & IXGBE_MDIO_PHY_SPEED_1G)
27300 + *speed |= IXGBE_LINK_SPEED_1GB_FULL;
27301 + if (speed_ability & IXGBE_MDIO_PHY_SPEED_100M)
27302 + *speed |= IXGBE_LINK_SPEED_100_FULL;
27309 + * ixgbe_check_phy_link_tnx - Determine link and speed status
27310 + * @hw: pointer to hardware structure
27312 + * Reads the VS1 register to determine if link is up and the current speed for
27315 +s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
27320 + u32 max_time_out = 10;
27321 + u16 phy_link = 0;
27322 + u16 phy_speed = 0;
27323 + u16 phy_data = 0;
27325 + /* Initialize speed and link to default case */
27326 + *link_up = false;
27327 + *speed = IXGBE_LINK_SPEED_10GB_FULL;
27330 + * Check current speed and link status of the PHY register.
27331 + * This is a vendor specific register and may have to
27332 + * be changed for other copper PHYs.
27334 + for (time_out = 0; time_out < max_time_out; time_out++) {
27336 + status = hw->phy.ops.read_reg(hw,
27337 + IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS,
27338 + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
27340 + phy_link = phy_data &
27341 + IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS;
27342 + phy_speed = phy_data &
27343 + IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS;
27344 + if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) {
27347 + IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS)
27348 + *speed = IXGBE_LINK_SPEED_1GB_FULL;
27357 + * ixgbe_setup_phy_link_tnx - Set and restart autoneg
27358 + * @hw: pointer to hardware structure
27360 + * Restarts autonegotiation on the PHY and waits for completion.
27362 +s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
27366 + u32 max_time_out = 10;
27367 + u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
27368 + bool autoneg = false;
27369 + ixgbe_link_speed speed;
27371 + ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);
27373 + if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
27374 + /* Set or unset auto-negotiation 10G advertisement */
27375 + hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
27376 + IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
27379 + autoneg_reg &= ~IXGBE_MII_10GBASE_T_ADVERTISE;
27380 + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
27381 + autoneg_reg |= IXGBE_MII_10GBASE_T_ADVERTISE;
27383 + hw->phy.ops.write_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
27384 + IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
27388 + if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
27389 + /* Set or unset auto-negotiation 1G advertisement */
27390 + hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG,
27391 + IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
27394 + autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;
27395 + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
27396 + autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;
27398 + hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG,
27399 + IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
27403 + if (speed & IXGBE_LINK_SPEED_100_FULL) {
27404 + /* Set or unset auto-negotiation 100M advertisement */
27405 + hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
27406 + IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
27409 + autoneg_reg &= ~IXGBE_MII_100BASE_T_ADVERTISE;
27410 + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
27411 + autoneg_reg |= IXGBE_MII_100BASE_T_ADVERTISE;
27413 + hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
27414 + IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
27418 + /* Restart PHY autonegotiation and wait for completion */
27419 + hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
27420 + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
27422 + autoneg_reg |= IXGBE_MII_RESTART;
27424 + hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
27425 + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
27427 + /* Wait for autonegotiation to finish */
27428 + for (time_out = 0; time_out < max_time_out; time_out++) {
27430 + /* Check if autonegotiation has completed */
27431 + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
27432 + IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
27435 + autoneg_reg &= IXGBE_MII_AUTONEG_COMPLETE;
27436 + if (autoneg_reg == IXGBE_MII_AUTONEG_COMPLETE) {
27441 + if (time_out == max_time_out) {
27442 + status = IXGBE_ERR_LINK_SETUP;
27443 + hw_dbg(hw, "ixgbe_setup_phy_link_tnx: time out");
27451 + * ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version
27452 + * @hw: pointer to hardware structure
27453 + * @firmware_version: pointer to the PHY Firmware Version
27455 +s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
27456 + u16 *firmware_version)
27460 + status = hw->phy.ops.read_reg(hw, TNX_FW_REV,
27461 + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
27462 + firmware_version);
27469 + * ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version
27470 + * @hw: pointer to hardware structure
27471 + * @firmware_version: pointer to the PHY Firmware Version
27473 +s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
27474 + u16 *firmware_version)
27478 + status = hw->phy.ops.read_reg(hw, AQ_FW_REV,
27479 + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
27480 + firmware_version);
27486 + * ixgbe_reset_phy_nl - Performs a PHY reset
27487 + * @hw: pointer to hardware structure
27489 +s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
27491 + u16 phy_offset, control, eword, edata, block_crc;
27492 + bool end_data = false;
27493 + u16 list_offset, data_offset;
27494 + u16 phy_data = 0;
27498 + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
27499 + IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data);
27501 + /* reset the PHY and poll for completion */
27502 + hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
27503 + IXGBE_MDIO_PHY_XS_DEV_TYPE,
27504 + (phy_data | IXGBE_MDIO_PHY_XS_RESET));
27506 + for (i = 0; i < 100; i++) {
27507 + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
27508 + IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data);
27509 + if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) == 0)
27514 + if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) != 0) {
27515 + hw_dbg(hw, "PHY reset did not complete.\n");
27516 + ret_val = IXGBE_ERR_PHY;
27520 + /* Get init offsets */
27521 + ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
27523 + if (ret_val != 0)
27526 + ret_val = hw->eeprom.ops.read(hw, data_offset, &block_crc);
27528 + while (!end_data) {
27530 + * Read control word from PHY init contents offset
27532 + ret_val = hw->eeprom.ops.read(hw, data_offset, &eword);
27533 + control = (eword & IXGBE_CONTROL_MASK_NL) >>
27534 + IXGBE_CONTROL_SHIFT_NL;
27535 + edata = eword & IXGBE_DATA_MASK_NL;
27536 + switch (control) {
27537 + case IXGBE_DELAY_NL:
27539 + hw_dbg(hw, "DELAY: %d MS\n", edata);
27542 + case IXGBE_DATA_NL:
27543 + hw_dbg(hw, "DATA: \n");
27545 + hw->eeprom.ops.read(hw, data_offset++,
27547 + for (i = 0; i < edata; i++) {
27548 + hw->eeprom.ops.read(hw, data_offset, &eword);
27549 + hw->phy.ops.write_reg(hw, phy_offset,
27550 + IXGBE_TWINAX_DEV, eword);
27551 + hw_dbg(hw, "Wrote %4.4x to %4.4x\n", eword,
27557 + case IXGBE_CONTROL_NL:
27559 + hw_dbg(hw, "CONTROL: \n");
27560 + if (edata == IXGBE_CONTROL_EOL_NL) {
27561 + hw_dbg(hw, "EOL\n");
27563 + } else if (edata == IXGBE_CONTROL_SOL_NL) {
27564 + hw_dbg(hw, "SOL\n");
27566 + hw_dbg(hw, "Bad control value\n");
27567 + ret_val = IXGBE_ERR_PHY;
27572 + hw_dbg(hw, "Bad control type\n");
27573 + ret_val = IXGBE_ERR_PHY;
27583 + * ixgbe_identify_sfp_module_generic - Identifies SFP modules
27584 + * @hw: pointer to hardware structure
27586 + * Searches for and identifies the SFP module and assigns appropriate PHY type.
27588 +s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
27590 + s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
27591 + u32 vendor_oui = 0;
27592 + enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
27593 + u8 identifier = 0;
27594 + u8 comp_codes_1g = 0;
27595 + u8 comp_codes_10g = 0;
27596 + u8 oui_bytes[3] = {0, 0, 0};
27597 + u8 cable_tech = 0;
27598 + u8 cable_spec = 0;
27599 + u16 enforce_sfp = 0;
27601 + if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) {
27602 + hw->phy.sfp_type = ixgbe_sfp_type_not_present;
27603 + status = IXGBE_ERR_SFP_NOT_PRESENT;
27607 + status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER,
27610 + if (status == IXGBE_ERR_SFP_NOT_PRESENT || status == IXGBE_ERR_I2C) {
27611 + status = IXGBE_ERR_SFP_NOT_PRESENT;
27612 + hw->phy.sfp_type = ixgbe_sfp_type_not_present;
27613 + if (hw->phy.type != ixgbe_phy_nl) {
27615 + hw->phy.type = ixgbe_phy_unknown;
27620 + /* LAN ID is needed for sfp_type determination */
27621 + hw->mac.ops.set_lan_id(hw);
27623 + if (identifier != IXGBE_SFF_IDENTIFIER_SFP) {
27624 + hw->phy.type = ixgbe_phy_sfp_unsupported;
27625 + status = IXGBE_ERR_SFP_NOT_SUPPORTED;
27627 + hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_1GBE_COMP_CODES,
27629 + hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_10GBE_COMP_CODES,
27630 + &comp_codes_10g);
27631 + hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_CABLE_TECHNOLOGY,
27639 + * 3 SFP_DA_CORE0 - 82599-specific
27640 + * 4 SFP_DA_CORE1 - 82599-specific
27641 + * 5 SFP_SR/LR_CORE0 - 82599-specific
27642 + * 6 SFP_SR/LR_CORE1 - 82599-specific
27643 + * 7 SFP_act_lmt_DA_CORE0 - 82599-specific
27644 + * 8 SFP_act_lmt_DA_CORE1 - 82599-specific
27645 + * 9 SFP_1g_cu_CORE0 - 82599-specific
27646 + * 10 SFP_1g_cu_CORE1 - 82599-specific
27648 + if (hw->mac.type == ixgbe_mac_82598EB) {
27649 + if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
27650 + hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
27651 + else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
27652 + hw->phy.sfp_type = ixgbe_sfp_type_sr;
27653 + else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
27654 + hw->phy.sfp_type = ixgbe_sfp_type_lr;
27656 + hw->phy.sfp_type = ixgbe_sfp_type_unknown;
27657 + } else if (hw->mac.type == ixgbe_mac_82599EB) {
27658 + if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
27659 + if (hw->bus.lan_id == 0)
27660 + hw->phy.sfp_type =
27661 + ixgbe_sfp_type_da_cu_core0;
27663 + hw->phy.sfp_type =
27664 + ixgbe_sfp_type_da_cu_core1;
27665 + } else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) {
27666 + hw->phy.ops.read_i2c_eeprom(
27667 + hw, IXGBE_SFF_CABLE_SPEC_COMP,
27670 + IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) {
27671 + if (hw->bus.lan_id == 0)
27672 + hw->phy.sfp_type =
27673 + ixgbe_sfp_type_da_act_lmt_core0;
27675 + hw->phy.sfp_type =
27676 + ixgbe_sfp_type_da_act_lmt_core1;
27678 + hw->phy.sfp_type =
27679 + ixgbe_sfp_type_unknown;
27680 + } else if (comp_codes_10g &
27681 + (IXGBE_SFF_10GBASESR_CAPABLE |
27682 + IXGBE_SFF_10GBASELR_CAPABLE)) {
27683 + if (hw->bus.lan_id == 0)
27684 + hw->phy.sfp_type =
27685 + ixgbe_sfp_type_srlr_core0;
27687 + hw->phy.sfp_type =
27688 + ixgbe_sfp_type_srlr_core1;
27689 + } else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) {
27690 + if (hw->bus.lan_id == 0)
27691 + hw->phy.sfp_type =
27692 + ixgbe_sfp_type_1g_cu_core0;
27694 + hw->phy.sfp_type =
27695 + ixgbe_sfp_type_1g_cu_core1;
27697 + hw->phy.sfp_type = ixgbe_sfp_type_unknown;
27701 + if (hw->phy.sfp_type != stored_sfp_type)
27702 + hw->phy.sfp_setup_needed = true;
27704 + /* Determine if the SFP+ PHY is dual speed or not. */
27705 + hw->phy.multispeed_fiber = false;
27706 + if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
27707 + (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
27708 + ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
27709 + (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)))
27710 + hw->phy.multispeed_fiber = true;
27712 + /* Determine PHY vendor */
27713 + if (hw->phy.type != ixgbe_phy_nl) {
27714 + hw->phy.id = identifier;
27715 + hw->phy.ops.read_i2c_eeprom(hw,
27716 + IXGBE_SFF_VENDOR_OUI_BYTE0,
27718 + hw->phy.ops.read_i2c_eeprom(hw,
27719 + IXGBE_SFF_VENDOR_OUI_BYTE1,
27721 + hw->phy.ops.read_i2c_eeprom(hw,
27722 + IXGBE_SFF_VENDOR_OUI_BYTE2,
27726 + ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
27727 + (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
27728 + (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT));
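+		/*
+		 * e.g. oui_bytes = {0x00, 0x17, 0x6A} assembles to
+		 * 0x00176A00, which is IXGBE_SFF_VENDOR_OUI_AVAGO.
+		 */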
27730 + switch (vendor_oui) {
27731 + case IXGBE_SFF_VENDOR_OUI_TYCO:
27732 + if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
27734 + ixgbe_phy_sfp_passive_tyco;
27736 + case IXGBE_SFF_VENDOR_OUI_FTL:
27737 + if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
27738 + hw->phy.type = ixgbe_phy_sfp_ftl_active;
27740 + hw->phy.type = ixgbe_phy_sfp_ftl;
27742 + case IXGBE_SFF_VENDOR_OUI_AVAGO:
27743 + hw->phy.type = ixgbe_phy_sfp_avago;
27745 + case IXGBE_SFF_VENDOR_OUI_INTEL:
27746 + hw->phy.type = ixgbe_phy_sfp_intel;
27749 + if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
27751 + ixgbe_phy_sfp_passive_unknown;
27752 + else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
27754 + ixgbe_phy_sfp_active_unknown;
27756 + hw->phy.type = ixgbe_phy_sfp_unknown;
27761 + /* Allow any DA cable vendor */
27762 + if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE |
27763 + IXGBE_SFF_DA_ACTIVE_CABLE)) {
27768 + /* Verify supported 1G SFP modules */
27769 + if (comp_codes_10g == 0 &&
27770 + !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
27771 + hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0)) {
27772 + hw->phy.type = ixgbe_phy_sfp_unsupported;
27773 + status = IXGBE_ERR_SFP_NOT_SUPPORTED;
27777 + /* Anything else 82598-based is supported */
27778 + if (hw->mac.type == ixgbe_mac_82598EB) {
27783 + ixgbe_get_device_caps(hw, &enforce_sfp);
27784 + if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) &&
27785 + !((hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0) ||
27786 + (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1))) {
27787 + /* Make sure we're a supported PHY type */
27788 + if (hw->phy.type == ixgbe_phy_sfp_intel) {
27791 + hw_dbg(hw, "SFP+ module not supported\n");
27792 + hw->phy.type = ixgbe_phy_sfp_unsupported;
27793 + status = IXGBE_ERR_SFP_NOT_SUPPORTED;
27805 + * ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence
27806 + * @hw: pointer to hardware structure
27807 + * @list_offset: offset to the SFP ID list
27808 + * @data_offset: offset to the SFP data block
27810 + * Checks the MAC's EEPROM to see if it supports a given SFP+ module type
27811 + * and, if so, returns the offsets to the PHY init sequence block.
27813 +s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
27814 + u16 *list_offset,
27815 + u16 *data_offset)
27818 + u16 sfp_type = hw->phy.sfp_type;
27820 + if (hw->phy.sfp_type == ixgbe_sfp_type_unknown)
27821 + return IXGBE_ERR_SFP_NOT_SUPPORTED;
27823 + if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
27824 + return IXGBE_ERR_SFP_NOT_PRESENT;
27826 + if ((hw->device_id == IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) &&
27827 + (hw->phy.sfp_type == ixgbe_sfp_type_da_cu))
27828 + return IXGBE_ERR_SFP_NOT_SUPPORTED;
27831 + * Limiting active cables and 1G Phys must be initialized as
27834 + if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 ||
27835 + sfp_type == ixgbe_sfp_type_1g_cu_core0)
27836 + sfp_type = ixgbe_sfp_type_srlr_core0;
27837 + else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 ||
27838 + sfp_type == ixgbe_sfp_type_1g_cu_core1)
27839 + sfp_type = ixgbe_sfp_type_srlr_core1;
27841 + /* Read offset to PHY init contents */
27842 + hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset);
27844 + if ((!*list_offset) || (*list_offset == 0xFFFF))
27845 + return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;
27847 + /* Shift offset to first ID word */
27848 + (*list_offset)++;
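+	/*
+	 * Implied EEPROM layout: the word at IXGBE_PHY_INIT_OFFSET_NL
+	 * points at a list of (sfp_id, data_offset) word pairs terminated
+	 * by IXGBE_PHY_INIT_END_NL; the matching data_offset locates the
+	 * init sequence block that ixgbe_reset_phy_nl() walks.
+	 */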
27851 + * Find the matching SFP ID in the EEPROM
27852 + * and program the init sequence
27854 + hw->eeprom.ops.read(hw, *list_offset, &sfp_id);
27856 + while (sfp_id != IXGBE_PHY_INIT_END_NL) {
27857 + if (sfp_id == sfp_type) {
27858 + (*list_offset)++;
27859 + hw->eeprom.ops.read(hw, *list_offset, data_offset);
27860 + if ((!*data_offset) || (*data_offset == 0xFFFF)) {
27861 + hw_dbg(hw, "SFP+ module not supported\n");
27862 + return IXGBE_ERR_SFP_NOT_SUPPORTED;
27867 + (*list_offset) += 2;
27868 + if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id))
27869 + return IXGBE_ERR_PHY;
27873 + if (sfp_id == IXGBE_PHY_INIT_END_NL) {
27874 + hw_dbg(hw, "No matching SFP+ module found\n");
27875 + return IXGBE_ERR_SFP_NOT_SUPPORTED;
27882 + * ixgbe_read_i2c_eeprom_generic - Reads 8 bit EEPROM byte over I2C interface
27883 + * @hw: pointer to hardware structure
27884 + * @byte_offset: EEPROM byte offset to read
27885 + * @eeprom_data: value read
27887 + * Performs a byte read operation from the SFP module's EEPROM over the I2C interface.
27889 +s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
27892 + return hw->phy.ops.read_i2c_byte(hw, byte_offset,
27893 + IXGBE_I2C_EEPROM_DEV_ADDR,
27898 + * ixgbe_write_i2c_eeprom_generic - Writes 8 bit EEPROM byte over I2C interface
27899 + * @hw: pointer to hardware structure
27900 + * @byte_offset: EEPROM byte offset to write
27901 + * @eeprom_data: value to write
27903 + * Performs a byte write operation to the SFP module's EEPROM over the I2C interface.
27905 +s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
27908 + return hw->phy.ops.write_i2c_byte(hw, byte_offset,
27909 + IXGBE_I2C_EEPROM_DEV_ADDR,
27914 + * ixgbe_read_i2c_byte_generic - Reads one byte over I2C
27915 + * @hw: pointer to hardware structure
27916 + * @byte_offset: byte offset to read
27917 + * @data: value read
27919 + * Performs a byte read operation from the SFP module's EEPROM over the I2C
27920 + * interface at a specified device address.
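+ * The transaction sequence, sketched:
+ *	START, dev_addr+W, ACK, byte_offset, ACK,
+ *	repeated START, dev_addr+R, ACK, data byte in, NACK, STOP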
27922 +s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
27923 + u8 dev_addr, u8 *data)
27926 + u32 max_retry = 10;
27928 + u16 swfw_mask = 0;
27931 + if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
27932 + swfw_mask = IXGBE_GSSR_PHY1_SM;
27934 + swfw_mask = IXGBE_GSSR_PHY0_SM;
27938 + if (ixgbe_acquire_swfw_sync(hw, swfw_mask) != 0) {
27939 + status = IXGBE_ERR_SWFW_SYNC;
27940 + goto read_byte_out;
27943 + ixgbe_i2c_start(hw);
27945 + /* Device Address and write indication */
27946 + status = ixgbe_clock_out_i2c_byte(hw, dev_addr);
27950 + status = ixgbe_get_i2c_ack(hw);
27954 + status = ixgbe_clock_out_i2c_byte(hw, byte_offset);
27958 + status = ixgbe_get_i2c_ack(hw);
27962 + ixgbe_i2c_start(hw);
27964 + /* Device Address and read indication */
27965 + status = ixgbe_clock_out_i2c_byte(hw, (dev_addr | 0x1));
27969 + status = ixgbe_get_i2c_ack(hw);
27973 + status = ixgbe_clock_in_i2c_byte(hw, data);
27977 + status = ixgbe_clock_out_i2c_bit(hw, nack);
27981 + ixgbe_i2c_stop(hw);
27985 + ixgbe_release_swfw_sync(hw, swfw_mask);
27987 + ixgbe_i2c_bus_clear(hw);
27989 + if (retry < max_retry)
27990 + hw_dbg(hw, "I2C byte read error - Retrying.\n");
27992 + hw_dbg(hw, "I2C byte read error.\n");
27994 + } while (retry < max_retry);
27996 + ixgbe_release_swfw_sync(hw, swfw_mask);
28003 + * ixgbe_write_i2c_byte_generic - Writes one byte over I2C
28004 + * @hw: pointer to hardware structure
28005 + * @byte_offset: byte offset to write
28006 + * @data: value to write
28008 + * Performs a byte write operation to the SFP module's EEPROM over the I2C
28009 + * interface at a specified device address.
28011 +s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
28012 + u8 dev_addr, u8 data)
28015 + u32 max_retry = 1;
28017 + u16 swfw_mask = 0;
28019 + if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
28020 + swfw_mask = IXGBE_GSSR_PHY1_SM;
28022 + swfw_mask = IXGBE_GSSR_PHY0_SM;
28024 + if (ixgbe_acquire_swfw_sync(hw, swfw_mask) != 0) {
28025 + status = IXGBE_ERR_SWFW_SYNC;
28026 + goto write_byte_out;
28030 + ixgbe_i2c_start(hw);
28032 + status = ixgbe_clock_out_i2c_byte(hw, dev_addr);
28036 + status = ixgbe_get_i2c_ack(hw);
28040 + status = ixgbe_clock_out_i2c_byte(hw, byte_offset);
28044 + status = ixgbe_get_i2c_ack(hw);
28048 + status = ixgbe_clock_out_i2c_byte(hw, data);
28052 + status = ixgbe_get_i2c_ack(hw);
28056 + ixgbe_i2c_stop(hw);
28060 + ixgbe_i2c_bus_clear(hw);
28062 + if (retry < max_retry)
28063 + hw_dbg(hw, "I2C byte write error - Retrying.\n");
28065 + hw_dbg(hw, "I2C byte write error.\n");
28066 + } while (retry < max_retry);
28068 + ixgbe_release_swfw_sync(hw, swfw_mask);
28075 + * ixgbe_i2c_start - Sets I2C start condition
28076 + * @hw: pointer to hardware structure
28078 + * Sets I2C start condition (High -> Low on SDA while SCL is High)
28080 +static void ixgbe_i2c_start(struct ixgbe_hw *hw)
28082 + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
28084 + /* Start condition must begin with data and clock high */
28085 + ixgbe_set_i2c_data(hw, &i2cctl, 1);
28086 + ixgbe_raise_i2c_clk(hw, &i2cctl);
28088 + /* Setup time for start condition (4.7us) */
28089 + udelay(IXGBE_I2C_T_SU_STA);
28091 + ixgbe_set_i2c_data(hw, &i2cctl, 0);
28093 + /* Hold time for start condition (4us) */
28094 + udelay(IXGBE_I2C_T_HD_STA);
28096 + ixgbe_lower_i2c_clk(hw, &i2cctl);
28098 + /* Minimum low period of clock is 4.7 us */
28099 + udelay(IXGBE_I2C_T_LOW);
28104 + * ixgbe_i2c_stop - Sets I2C stop condition
28105 + * @hw: pointer to hardware structure
28107 + * Sets I2C stop condition (Low -> High on SDA while SCL is High)
28109 +static void ixgbe_i2c_stop(struct ixgbe_hw *hw)
28111 + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
28113 + /* Stop condition must begin with data low and clock high */
28114 + ixgbe_set_i2c_data(hw, &i2cctl, 0);
28115 + ixgbe_raise_i2c_clk(hw, &i2cctl);
28117 + /* Setup time for stop condition (4us) */
28118 + udelay(IXGBE_I2C_T_SU_STO);
28120 + ixgbe_set_i2c_data(hw, &i2cctl, 1);
28122 + /* bus free time between stop and start (4.7us)*/
28123 + udelay(IXGBE_I2C_T_BUF);
28127 + * ixgbe_clock_in_i2c_byte - Clocks in one byte via I2C
28128 + * @hw: pointer to hardware structure
28129 + * @data: data byte to clock in
28131 + * Clocks in one byte data via I2C data/clock
28133 +static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data)
28139 + for (i = 7; i >= 0; i--) {
28140 + status = ixgbe_clock_in_i2c_bit(hw, &bit);
28151 + * ixgbe_clock_out_i2c_byte - Clocks out one byte via I2C
28152 + * @hw: pointer to hardware structure
28153 + * @data: data byte clocked out
28155 + * Clocks out one byte data via I2C data/clock
28157 +static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
28164 + for (i = 7; i >= 0; i--) {
28165 + bit = (data >> i) & 0x1;
28166 + status = ixgbe_clock_out_i2c_bit(hw, bit);
28172 + /* Release SDA line (set high) */
28173 + i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
28174 + i2cctl |= IXGBE_I2C_DATA_OUT;
28175 + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, i2cctl);
28181 + * ixgbe_get_i2c_ack - Polls for I2C ACK
28182 + * @hw: pointer to hardware structure
28184 + * Clocks in one bit via I2C data/clock and polls for the slave's ACK
28186 +static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
28190 + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
28191 + u32 timeout = 10;
28194 + status = ixgbe_raise_i2c_clk(hw, &i2cctl);
28199 + /* Minimum high period of clock is 4us */
28200 + udelay(IXGBE_I2C_T_HIGH);
28202 + /* Poll for ACK. Note that ACK in I2C spec is
28203 + * transition from 1 to 0 */
28204 + for (i = 0; i < timeout; i++) {
28205 + i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
28206 + ack = ixgbe_get_i2c_data(&i2cctl);
28214 + hw_dbg(hw, "I2C ack was not received.\n");
28215 + status = IXGBE_ERR_I2C;
28218 + ixgbe_lower_i2c_clk(hw, &i2cctl);
28220 + /* Minimum low period of clock is 4.7 us */
28221 + udelay(IXGBE_I2C_T_LOW);
28228 + * ixgbe_clock_in_i2c_bit - Clocks in one bit via I2C data/clock
28229 + * @hw: pointer to hardware structure
28230 + * @data: read data value
28232 + * Clocks in one bit via I2C data/clock
28234 +static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
28237 + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
28239 + status = ixgbe_raise_i2c_clk(hw, &i2cctl);
28241 + /* Minimum high period of clock is 4us */
28242 + udelay(IXGBE_I2C_T_HIGH);
28244 + i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
28245 + *data = ixgbe_get_i2c_data(&i2cctl);
28247 + ixgbe_lower_i2c_clk(hw, &i2cctl);
28249 + /* Minimum low period of clock is 4.7 us */
28250 + udelay(IXGBE_I2C_T_LOW);
28256 + * ixgbe_clock_out_i2c_bit - Clocks out one bit via I2C data/clock
28257 + * @hw: pointer to hardware structure
28258 + * @data: data value to write
28260 + * Clocks out one bit via I2C data/clock
28262 +static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
28265 + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
28267 + status = ixgbe_set_i2c_data(hw, &i2cctl, data);
28268 + if (status == 0) {
28269 + status = ixgbe_raise_i2c_clk(hw, &i2cctl);
28271 + /* Minimum high period of clock is 4us */
28272 + udelay(IXGBE_I2C_T_HIGH);
28274 + ixgbe_lower_i2c_clk(hw, &i2cctl);
28276 + /* Minimum low period of clock is 4.7 us.
28277 + * This also takes care of the data hold time.
28279 + udelay(IXGBE_I2C_T_LOW);
28281 + status = IXGBE_ERR_I2C;
28282 + hw_dbg(hw, "I2C data was not set to %X\n", data);
28288 + * ixgbe_raise_i2c_clk - Raises the I2C SCL clock
28289 + * @hw: pointer to hardware structure
28290 + * @i2cctl: Current value of I2CCTL register
28292 + * Raises the I2C clock line '0'->'1'
28294 +static s32 ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
28298 + *i2cctl |= IXGBE_I2C_CLK_OUT;
28300 + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
28302 + /* SCL rise time (1000ns) */
28303 + udelay(IXGBE_I2C_T_RISE);
28309 + * ixgbe_lower_i2c_clk - Lowers the I2C SCL clock
28310 + * @hw: pointer to hardware structure
28311 + * @i2cctl: Current value of I2CCTL register
28313 + * Lowers the I2C clock line '1'->'0'
28315 +static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
28318 + *i2cctl &= ~IXGBE_I2C_CLK_OUT;
28320 + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
28322 + /* SCL fall time (300ns) */
28323 + udelay(IXGBE_I2C_T_FALL);
28327 + * ixgbe_set_i2c_data - Sets the I2C data bit
28328 + * @hw: pointer to hardware structure
28329 + * @i2cctl: Current value of I2CCTL register
28330 + * @data: I2C data value (0 or 1) to set
28332 + * Sets the I2C data bit
28334 +static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
28339 + *i2cctl |= IXGBE_I2C_DATA_OUT;
28341 + *i2cctl &= ~IXGBE_I2C_DATA_OUT;
28343 + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
28345 + /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */
28346 + udelay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA);
28348 + /* Verify data was set correctly */
28349 + *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
28350 + if (data != ixgbe_get_i2c_data(i2cctl)) {
28351 + status = IXGBE_ERR_I2C;
28352 + hw_dbg(hw, "Error - I2C data was not set to %X.\n", data);
28359 + * ixgbe_get_i2c_data - Reads the I2C SDA data bit
28361 + * @i2cctl: Current value of I2CCTL register
28363 + * Returns the I2C data bit value
28365 +static bool ixgbe_get_i2c_data(u32 *i2cctl)
28369 + if (*i2cctl & IXGBE_I2C_DATA_IN)
28378 + * ixgbe_i2c_bus_clear - Clears the I2C bus
28379 + * @hw: pointer to hardware structure
28381 + * Clears the I2C bus by sending nine clock pulses.
28382 + * Used when data line is stuck low.
28384 +void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
28386 + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
28389 + ixgbe_i2c_start(hw);
28391 + ixgbe_set_i2c_data(hw, &i2cctl, 1);
28393 + for (i = 0; i < 9; i++) {
28394 + ixgbe_raise_i2c_clk(hw, &i2cctl);
28396 + /* Min high period of clock is 4us */
28397 + udelay(IXGBE_I2C_T_HIGH);
28399 + ixgbe_lower_i2c_clk(hw, &i2cctl);
28401 + /* Min low period of clock is 4.7us*/
28402 + udelay(IXGBE_I2C_T_LOW);
28405 + ixgbe_i2c_start(hw);
28407 + /* Put the i2c bus back to default state */
28408 + ixgbe_i2c_stop(hw);
28412 + * ixgbe_tn_check_overtemp - Checks if an overtemp event occurred.
28413 + * @hw: pointer to hardware structure
28415 + * Checks if the LASI temp alarm status was triggered due to overtemp
28417 +s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
28420 + u16 phy_data = 0;
28422 + if (hw->device_id != IXGBE_DEV_ID_82599_T3_LOM)
28425 + /* Check that the LASI temp alarm status was triggered */
28426 + hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG,
28427 + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_data);
28429 + if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM))
28432 + status = IXGBE_ERR_OVERTEMP;
28437 diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_phy.h linux-2.6.22-50/drivers/net/ixgbe/ixgbe_phy.h
28438 --- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_phy.h 1969-12-31 19:00:00.000000000 -0500
28439 +++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_phy.h 2010-08-25 17:56:26.000000000 -0400
28441 +/*******************************************************************************
28443 + Intel 10 Gigabit PCI Express Linux driver
28444 + Copyright(c) 1999 - 2010 Intel Corporation.
28446 + This program is free software; you can redistribute it and/or modify it
28447 + under the terms and conditions of the GNU General Public License,
28448 + version 2, as published by the Free Software Foundation.
28450 + This program is distributed in the hope it will be useful, but WITHOUT
28451 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
28452 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
28455 + You should have received a copy of the GNU General Public License along with
28456 + this program; if not, write to the Free Software Foundation, Inc.,
28457 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
28459 + The full GNU General Public License is included in this distribution in
28460 + the file called "COPYING".
28462 + Contact Information:
28463 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
28464 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
28466 +*******************************************************************************/
28468 +#ifndef _IXGBE_PHY_H_
28469 +#define _IXGBE_PHY_H_
28471 +#include "ixgbe_type.h"
28472 +#define IXGBE_I2C_EEPROM_DEV_ADDR 0xA0
28474 +/* EEPROM byte offsets */
28475 +#define IXGBE_SFF_IDENTIFIER 0x0
28476 +#define IXGBE_SFF_IDENTIFIER_SFP 0x3
28477 +#define IXGBE_SFF_VENDOR_OUI_BYTE0 0x25
28478 +#define IXGBE_SFF_VENDOR_OUI_BYTE1 0x26
28479 +#define IXGBE_SFF_VENDOR_OUI_BYTE2 0x27
28480 +#define IXGBE_SFF_1GBE_COMP_CODES 0x6
28481 +#define IXGBE_SFF_10GBE_COMP_CODES 0x3
28482 +#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8
28483 +#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C
28486 +#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4
28487 +#define IXGBE_SFF_DA_ACTIVE_CABLE 0x8
28488 +#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4
28489 +#define IXGBE_SFF_1GBASESX_CAPABLE 0x1
28490 +#define IXGBE_SFF_1GBASELX_CAPABLE 0x2
28491 +#define IXGBE_SFF_1GBASET_CAPABLE 0x8
28492 +#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
28493 +#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
28494 +#define IXGBE_I2C_EEPROM_READ_MASK 0x100
28495 +#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3
28496 +#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0
28497 +#define IXGBE_I2C_EEPROM_STATUS_PASS 0x1
28498 +#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2
28499 +#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
28501 +/* Flow control defines */
28502 +#define IXGBE_TAF_SYM_PAUSE 0x400
28503 +#define IXGBE_TAF_ASM_PAUSE 0x800
28505 +/* Bit-shift macros */
28506 +#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 24
28507 +#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 16
28508 +#define IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT 8
28510 +/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */
28511 +#define IXGBE_SFF_VENDOR_OUI_TYCO 0x00407600
28512 +#define IXGBE_SFF_VENDOR_OUI_FTL 0x00906500
28513 +#define IXGBE_SFF_VENDOR_OUI_AVAGO 0x00176A00
28514 +#define IXGBE_SFF_VENDOR_OUI_INTEL 0x001B2100
28516 +/* I2C SDA and SCL timing parameters for standard mode */
28517 +#define IXGBE_I2C_T_HD_STA 4
28518 +#define IXGBE_I2C_T_LOW 5
28519 +#define IXGBE_I2C_T_HIGH 4
28520 +#define IXGBE_I2C_T_SU_STA 5
28521 +#define IXGBE_I2C_T_HD_DATA 5
28522 +#define IXGBE_I2C_T_SU_DATA 1
28523 +#define IXGBE_I2C_T_RISE 1
28524 +#define IXGBE_I2C_T_FALL 1
28525 +#define IXGBE_I2C_T_SU_STO 4
28526 +#define IXGBE_I2C_T_BUF 5
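+/* The timing values above are microsecond counts passed to udelay() */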
28528 +#define IXGBE_TN_LASI_STATUS_REG 0x9005
28529 +#define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008
28532 +s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
28533 +bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr);
28534 +enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
28535 +s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
28536 +s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
28537 +s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
28538 +s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
28539 + u32 device_type, u16 *phy_data);
28540 +s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
28541 + u32 device_type, u16 phy_data);
28542 +s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw);
28543 +s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
28544 + ixgbe_link_speed speed,
28546 + bool autoneg_wait_to_complete);
28547 +s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
28548 + ixgbe_link_speed *speed,
28551 +/* PHY specific */
28552 +s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
28553 + ixgbe_link_speed *speed,
28555 +s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw);
28556 +s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
28557 + u16 *firmware_version);
28558 +s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
28559 + u16 *firmware_version);
28561 +s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
28562 +s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
28563 +s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
28564 + u16 *list_offset,
28565 + u16 *data_offset);
28566 +s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw);
28567 +s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
28568 + u8 dev_addr, u8 *data);
28569 +s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
28570 + u8 dev_addr, u8 data);
28571 +s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
28572 + u8 *eeprom_data);
28573 +s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
28575 +#endif /* _IXGBE_PHY_H_ */
28576 diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_sriov.c linux-2.6.22-50/drivers/net/ixgbe/ixgbe_sriov.c
28577 --- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_sriov.c 1969-12-31 19:00:00.000000000 -0500
28578 +++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_sriov.c 2010-08-25 17:56:26.000000000 -0400
28580 +/*******************************************************************************
28582 + Intel 10 Gigabit PCI Express Linux driver
28583 + Copyright(c) 1999 - 2010 Intel Corporation.
28585 + This program is free software; you can redistribute it and/or modify it
28586 + under the terms and conditions of the GNU General Public License,
28587 + version 2, as published by the Free Software Foundation.
28589 + This program is distributed in the hope it will be useful, but WITHOUT
28590 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
28591 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
28594 + You should have received a copy of the GNU General Public License along with
28595 + this program; if not, write to the Free Software Foundation, Inc.,
28596 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
28598 + The full GNU General Public License is included in this distribution in
28599 + the file called "COPYING".
28601 + Contact Information:
28602 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
28603 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
28605 +*******************************************************************************/
28608 +#include <linux/types.h>
28609 +#include <linux/module.h>
28610 +#include <linux/pci.h>
28611 +#include <linux/netdevice.h>
28612 +#include <linux/vmalloc.h>
28613 +#include <linux/string.h>
28614 +#include <linux/in.h>
28615 +#include <linux/ip.h>
28616 +#include <linux/tcp.h>
28617 +#include <linux/ipv6.h>
28618 +#ifdef NETIF_F_HW_VLAN_TX
28619 +#include <linux/if_vlan.h>
28622 +#include "ixgbe.h"
28624 +#include "ixgbe_sriov.h"
28626 +int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
28627 + int entries, u16 *hash_list, u32 vf)
28629 + struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
28630 + struct ixgbe_hw *hw = &adapter->hw;
28636 + /* only so many hash values supported */
28637 + entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES);
28639 + /* salt away the number of multicast addresses assigned
28640 + * to this VF for later use to restore when the PF multicast
28643 + vfinfo->num_vf_mc_hashes = entries;
28645 + /* VFs are limited to using the MTA hash table for their multicast
28647 + for (i = 0; i < entries; i++) {
28648 + vfinfo->vf_mc_hashes[i] = hash_list[i];
28651 + for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
28652 + vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F;
28653 + vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F;
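+		/*
+		 * Worked example: hash 0x234 gives vector_reg =
+		 * (0x234 >> 5) & 0x7F = 17 and vector_bit = 0x234 & 0x1F = 20,
+		 * i.e. bit 20 of MTA[17] in the 128-entry table.
+		 */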
28654 + mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
28655 + mta_reg |= (1 << vector_bit);
28656 + IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
28662 +void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
28664 + struct ixgbe_hw *hw = &adapter->hw;
28665 + struct vf_data_storage *vfinfo;
28671 + for (i = 0; i < adapter->num_vfs; i++) {
28672 + u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(i));
28673 + vfinfo = &adapter->vfinfo[i];
28674 + for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) {
28675 + hw->addr_ctrl.mta_in_use++;
28676 + vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F;
28677 + vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F;
28678 + mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
28679 + mta_reg |= (1 << vector_bit);
28680 + IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
28682 + if (vfinfo->num_vf_mc_hashes)
28683 + vmolr |= IXGBE_VMOLR_ROMPE;
+ else
28685 + vmolr &= ~IXGBE_VMOLR_ROMPE;
28686 + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr);
28690 +int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf)
28692 + return ixgbe_set_vfta(&adapter->hw, vid, vf, (bool)add);
28695 +void ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf)
28699 +void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
28701 + u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
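+ /* BAM accepts broadcast frames; AUPE accepts untagged packets
+ * (bit names as used in the 82599 datasheet) */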
28702 + vmolr |= IXGBE_VMOLR_BAM;
+ if (aupe)
28704 + vmolr |= IXGBE_VMOLR_AUPE;
+ else
28706 + vmolr &= ~IXGBE_VMOLR_AUPE;
28707 + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
28710 +static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter, u32 vid, u32 vf)
28712 + struct ixgbe_hw *hw = &adapter->hw;
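+ /* a non-zero vid arms default-VLAN insertion on this VF's transmit
+ * path; writing 0 turns it off (VMVIR semantics as described in the
+ * 82599 datasheet) */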
+ if (vid)
28715 + IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf),
28716 + (vid | IXGBE_VMVIR_VLANA_DEFAULT));
+ else
28718 + IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
28721 +inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
28723 + struct ixgbe_hw *hw = &adapter->hw;
28724 + int rar_entry = hw->mac.num_rar_entries - (vf + 1);
28726 + /* reset offloads to defaults */
28727 + if (adapter->vfinfo[vf].pf_vlan) {
28728 + ixgbe_set_vf_vlan(adapter, true,
28729 + adapter->vfinfo[vf].pf_vlan, vf);
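+ /* VMVIR takes a full 802.1Q tag image: VID in bits 11:0 and
+ * priority (PCP) in bits 15:13, hence the << 13 below */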
28730 + ixgbe_set_vmvir(adapter,
28731 + (adapter->vfinfo[vf].pf_vlan |
28732 + (adapter->vfinfo[vf].pf_qos << 13)), vf);
28733 + ixgbe_set_vmolr(hw, vf, false);
28735 + ixgbe_set_vmvir(adapter, 0, vf);
28736 + ixgbe_set_vmolr(hw, vf, true);
28740 + /* reset multicast table array for vf */
28741 + adapter->vfinfo[vf].num_vf_mc_hashes = 0;
28743 + /* Flush and reset the mta with the new values */
28744 + ixgbe_set_rx_mode(adapter->netdev);
28746 + hw->mac.ops.clear_rar(hw, rar_entry);
28749 +int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
28750 + int vf, unsigned char *mac_addr)
28752 + struct ixgbe_hw *hw = &adapter->hw;
28753 + int rar_entry = hw->mac.num_rar_entries - (vf + 1);
28755 + memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, 6);
28756 + hw->mac.ops.set_rar(hw, rar_entry, mac_addr, vf, IXGBE_RAH_AV);
28761 +int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
28763 + unsigned char vf_mac_addr[6];
28764 + struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
28765 + unsigned int vfn = (event_mask & 0x3f);
28767 + bool enable = ((event_mask & 0x10000000U) != 0);
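+ /* event_mask encoding, per the masks used here: the low six bits
+ * carry the VF number and bit 28 (0x10000000) signals enable */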
+ if (enable) {
28770 + random_ether_addr(vf_mac_addr);
28771 + DPRINTK(PROBE, INFO, "IOV: VF %d is enabled "
28772 + "mac %02X:%02X:%02X:%02X:%02X:%02X\n",
28774 + vf_mac_addr[0], vf_mac_addr[1], vf_mac_addr[2],
28775 + vf_mac_addr[3], vf_mac_addr[4], vf_mac_addr[5]);
28776 + /* Store away the VF "permanent" MAC address, it will ask
+ * for it later */
28779 + memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6);
28785 +inline void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
28787 + struct ixgbe_hw *hw = &adapter->hw;
28789 + u32 reg_offset, vf_shift;
28791 + vf_shift = vf % 32;
28792 + reg_offset = vf / 32;
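+ /* VFRE/VFTE are banks of 32-bit registers holding one enable bit
+ * per VF: vf / 32 picks the register and vf % 32 the bit within it */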
28794 + /* enable transmit and receive for vf */
28795 + reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
28796 + reg |= (1 << vf_shift);
28797 + IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
28799 + reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
28800 + reg |= (1 << vf_shift);
28801 + IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
28803 + ixgbe_vf_reset_event(adapter, vf);
28806 +static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
28808 + u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
28809 + u32 msgbuf[mbx_size];
28810 + struct ixgbe_hw *hw = &adapter->hw;
28816 + retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
+ if (retval)
28819 + printk(KERN_ERR "Error receiving message from VF\n");
28821 + /* this is a message we already processed, do nothing */
28822 + if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
+ /*
28826 + * until the vf completes a virtual function reset it should not be
28827 + * allowed to start any configuration.
+ */
28830 + if (msgbuf[0] == IXGBE_VF_RESET) {
28831 + unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
28832 + u8 *new_mac = (u8 *)(&msgbuf[1]);
28833 + adapter->vfinfo[vf].clear_to_send = false;
28834 + ixgbe_vf_reset_msg(adapter, vf);
28835 + adapter->vfinfo[vf].clear_to_send = true;
28837 + if (is_valid_ether_addr(new_mac) &&
28838 + !adapter->vfinfo[vf].pf_set_mac)
28839 + ixgbe_set_vf_mac(adapter, vf, vf_mac);
+ else
28841 + ixgbe_set_vf_mac(adapter,
28842 + vf, adapter->vfinfo[vf].vf_mac_addresses);
28844 + /* reply to reset with ack and vf mac address */
28845 + msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
28846 + memcpy(new_mac, vf_mac, IXGBE_ETH_LENGTH_OF_ADDRESS);
28847 + /* Piggyback the multicast filter type so VF can compute the
28848 + * correct vectors */
28849 + msgbuf[3] = hw->mac.mc_filter_type;
28850 + ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);
28855 + if (!adapter->vfinfo[vf].clear_to_send) {
28856 + msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
28857 + ixgbe_write_mbx(hw, msgbuf, 1, vf);
28861 + switch ((msgbuf[0] & 0xFFFF)) {
28862 + case IXGBE_VF_SET_MAC_ADDR:
28863 + DPRINTK(PROBE, INFO, "Set MAC msg received from vf %d\n", vf);
28865 + u8 *new_mac = ((u8 *)(&msgbuf[1]));
28866 + if (is_valid_ether_addr(new_mac))
28867 + ixgbe_set_vf_mac(adapter, vf, new_mac);
28872 + case IXGBE_VF_SET_MULTICAST:
28873 + entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
28874 + >> IXGBE_VT_MSGINFO_SHIFT;
28875 + hash_list = (u16 *)&msgbuf[1];
28876 + retval = ixgbe_set_vf_multicasts(adapter, entries,
+ hash_list, vf);
28879 + case IXGBE_VF_SET_LPE:
28880 + DPRINTK(PROBE, INFO, "Set LPE msg received from vf %d\n", vf);
28881 + ixgbe_set_vf_lpe(adapter, msgbuf);
28883 + case IXGBE_VF_SET_VLAN:
28884 + add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
28885 + >> IXGBE_VT_MSGINFO_SHIFT;
28886 + vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
28887 + retval = ixgbe_set_vf_vlan(adapter, add, vid, vf);
28890 + DPRINTK(PROBE, ERR, "Unhandled Msg %8.8x\n", msgbuf[0]);
28891 + retval = IXGBE_ERR_MBX;
28895 + /* notify the VF of the results of what it sent us */
+ if (retval)
28897 + msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
+ else
28899 + msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;
28901 + msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;
28903 + ixgbe_write_mbx(hw, msgbuf, 1, vf);
28908 +static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf)
28910 + struct ixgbe_hw *hw = &adapter->hw;
28911 + u32 msg = IXGBE_VT_MSGTYPE_NACK;
28913 + /* if device isn't clear to send it shouldn't be reading either */
28914 + if (!adapter->vfinfo[vf].clear_to_send)
28915 + ixgbe_write_mbx(hw, &msg, 1, vf);
28918 +void ixgbe_msg_task(struct ixgbe_adapter *adapter)
28920 + struct ixgbe_hw *hw = &adapter->hw;
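+ /* the ixgbe_check_for_* mailbox helpers return 0 when the
+ * corresponding event is pending, hence the negated tests below */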
28923 + for (vf = 0; vf < adapter->num_vfs; vf++) {
28924 + /* process any reset requests */
28925 + if (!ixgbe_check_for_rst(hw, vf))
28926 + ixgbe_vf_reset_event(adapter, vf);
28928 + /* process any messages pending */
28929 + if (!ixgbe_check_for_msg(hw, vf))
28930 + ixgbe_rcv_msg_from_vf(adapter, vf);
28932 + /* process any acks */
28933 + if (!ixgbe_check_for_ack(hw, vf))
28934 + ixgbe_rcv_ack_from_vf(adapter, vf);
28938 +void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter)
28940 + struct ixgbe_hw *hw = &adapter->hw;
28942 + /* disable transmit and receive for all vfs */
28943 + IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0);
28944 + IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0);
28946 + IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0);
28947 + IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
28950 +void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter)
28952 + struct ixgbe_hw *hw = &adapter->hw;
28956 + for (i = 0 ; i < adapter->num_vfs; i++) {
28957 + ping = IXGBE_PF_CONTROL_MSG;
28958 + if (adapter->vfinfo[i].clear_to_send)
28959 + ping |= IXGBE_VT_MSGTYPE_CTS;
28960 + ixgbe_write_mbx(hw, &ping, 1, i);
28965 +#ifdef HAVE_IPLINK_VF_CONFIG
28966 +int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
28968 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
28969 + if (!is_valid_ether_addr(mac) || (vf >= adapter->num_vfs))
28971 + adapter->vfinfo[vf].pf_set_mac = true;
28972 + dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
28973 + dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
28974 + " change effective.");
28975 + if (test_bit(__IXGBE_DOWN, &adapter->state)) {
28976 + dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
28977 + " but the PF device is not up.\n");
28978 + dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
28979 + " attempting to use the VF device.\n");
28981 + return ixgbe_set_vf_mac(adapter, vf, mac);
28984 +int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
28987 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
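+ /* 802.1Q bounds: the VID field is 12 bits (0-4095) and the priority
+ * field 3 bits (0-7), which is what the check below enforces */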
28989 + if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
28991 + if (vlan || qos) {
28992 + err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
28995 + ixgbe_set_vmvir(adapter, vlan | (qos << 13), vf);
28996 + ixgbe_set_vmolr(&adapter->hw, vf, false);
28997 + adapter->vfinfo[vf].pf_vlan = vlan;
28998 + adapter->vfinfo[vf].pf_qos = qos;
28999 + dev_info(&adapter->pdev->dev,
29000 + "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
29001 + if (test_bit(__IXGBE_DOWN, &adapter->state)) {
29002 + dev_warn(&adapter->pdev->dev,
29003 + "The VF VLAN has been set,"
29004 + " but the PF device is not up.\n");
29005 + dev_warn(&adapter->pdev->dev,
29006 + "Bring the PF device up before"
29007 + " attempting to use the VF device.\n");
29010 + err = ixgbe_set_vf_vlan(adapter, false,
29011 + adapter->vfinfo[vf].pf_vlan, vf);
29012 + ixgbe_set_vmvir(adapter, vlan, vf);
29013 + ixgbe_set_vmolr(&adapter->hw, vf, true);
29014 + adapter->vfinfo[vf].pf_vlan = 0;
29015 + adapter->vfinfo[vf].pf_qos = 0;
29021 +int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
29023 + return -EOPNOTSUPP;
29026 +int ixgbe_ndo_get_vf_config(struct net_device *netdev,
29027 + int vf, struct ifla_vf_info *ivi)
29029 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
29030 + if (vf >= adapter->num_vfs)
29033 + memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN);
29034 + ivi->tx_rate = 0;
29035 + ivi->vlan = adapter->vfinfo[vf].pf_vlan;
29036 + ivi->qos = adapter->vfinfo[vf].pf_qos;
29041 diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_sriov.h linux-2.6.22-50/drivers/net/ixgbe/ixgbe_sriov.h
29042 --- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_sriov.h 1969-12-31 19:00:00.000000000 -0500
29043 +++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_sriov.h 2010-08-25 17:56:26.000000000 -0400
29045 +/*******************************************************************************
29047 + Intel 10 Gigabit PCI Express Linux driver
29048 + Copyright(c) 1999 - 2010 Intel Corporation.
29050 + This program is free software; you can redistribute it and/or modify it
29051 + under the terms and conditions of the GNU General Public License,
29052 + version 2, as published by the Free Software Foundation.
29054 + This program is distributed in the hope it will be useful, but WITHOUT
29055 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
29056 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
29059 + You should have received a copy of the GNU General Public License along with
29060 + this program; if not, write to the Free Software Foundation, Inc.,
29061 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
29063 + The full GNU General Public License is included in this distribution in
29064 + the file called "COPYING".
29066 + Contact Information:
29067 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
29068 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
29070 +*******************************************************************************/
29073 +#ifndef _IXGBE_SRIOV_H_
29074 +#define _IXGBE_SRIOV_H_
29076 +int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
29077 + int entries, u16 *hash_list, u32 vf);
29078 +void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
29079 +int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf);
29080 +void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe);
29081 +void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf);
29082 +void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf);
29083 +void ixgbe_msg_task(struct ixgbe_adapter *adapter);
29084 +int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
29085 + int vf, unsigned char *mac_addr);
29086 +int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
29087 +void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
29088 +void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
29089 +#ifdef HAVE_IPLINK_VF_CONFIG
29090 +int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
29091 +int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
+ u8 qos);
29093 +int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
29094 +int ixgbe_ndo_get_vf_config(struct net_device *netdev,
29095 + int vf, struct ifla_vf_info *ivi);
29097 +void ixgbe_dump_registers(struct ixgbe_adapter *adapter);
29099 +#endif /* _IXGBE_SRIOV_H_ */
29101 diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_sysfs.c linux-2.6.22-50/drivers/net/ixgbe/ixgbe_sysfs.c
29102 --- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_sysfs.c 1969-12-31 19:00:00.000000000 -0500
29103 +++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_sysfs.c 2010-08-25 17:56:26.000000000 -0400
29105 +/*******************************************************************************
29107 + Intel 10 Gigabit PCI Express Linux driver
29108 + Copyright(c) 1999 - 2010 Intel Corporation.
29110 + This program is free software; you can redistribute it and/or modify it
29111 + under the terms and conditions of the GNU General Public License,
29112 + version 2, as published by the Free Software Foundation.
29114 + This program is distributed in the hope it will be useful, but WITHOUT
29115 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
29116 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
29119 + You should have received a copy of the GNU General Public License along with
29120 + this program; if not, write to the Free Software Foundation, Inc.,
29121 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
29123 + The full GNU General Public License is included in this distribution in
29124 + the file called "COPYING".
29126 + Contact Information:
29127 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
29128 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
29130 +*******************************************************************************/
29132 +#include "ixgbe.h"
29135 +#include <linux/sysfs.h>
29136 +#include <linux/device.h>
29137 +#include <linux/netdevice.h>
29139 +/* Ethernet payload size for FCoE to be able to carry full sized FC Frames
29140 + * 14 byte FCoE header + 24 byte FC header + 2112 max payload + 4 byte CRC
29141 + * + 4 byte FCoE trailing encapsulation = 2158
29142 + * This is the Ethernet payload, replacing the default of 1500, and does
29143 + * not include Ethernet headers, VLAN tags, or Ethernet CRC.
+ */
29145 +#define IXGBE_FCOE_MTU 2158
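+/* ixgbe_show_fcoe_mtu below exposes this value read-only via sysfs;
+ * registered on the netdev's device node it would show up as
+ * /sys/class/net/<ifname>/fcoe-mtu (path assumed from the standard
+ * net class layout) */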
29147 +static ssize_t ixgbe_show_fcoe_mtu(struct device *dev, struct device_attribute *attr, char *buf)
29149 + return sprintf(buf, "%d\n", IXGBE_FCOE_MTU);
29152 +static struct device_attribute ixgbe_attrs[] = {
29153 + __ATTR(fcoe-mtu, S_IRUGO, ixgbe_show_fcoe_mtu, NULL),
29156 +int ixgbe_sysfs_create(struct ixgbe_adapter *adapter)
29158 + struct net_device *netdev = adapter->netdev;
29162 + for (i = 0 ; i < ARRAY_SIZE(ixgbe_attrs); i++) {
29163 + err = device_create_file(&netdev->dev, &ixgbe_attrs[i]);
29171 + device_remove_file(&netdev->dev, &ixgbe_attrs[i]);
29175 +void ixgbe_sysfs_remove(struct ixgbe_adapter *adapter)
29177 + struct net_device *netdev = adapter->netdev;
29180 + for (i = 0 ; i < ARRAY_SIZE(ixgbe_attrs); i++)
29181 + device_remove_file(&netdev->dev, &ixgbe_attrs[i]);
29183 +#endif /* IXGBE_FCOE */
29185 diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_type.h linux-2.6.22-50/drivers/net/ixgbe/ixgbe_type.h
29186 --- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_type.h 1969-12-31 19:00:00.000000000 -0500
29187 +++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_type.h 2010-08-25 17:56:26.000000000 -0400
29189 +/*******************************************************************************
29191 + Intel 10 Gigabit PCI Express Linux driver
29192 + Copyright(c) 1999 - 2010 Intel Corporation.
29194 + This program is free software; you can redistribute it and/or modify it
29195 + under the terms and conditions of the GNU General Public License,
29196 + version 2, as published by the Free Software Foundation.
29198 + This program is distributed in the hope it will be useful, but WITHOUT
29199 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
29200 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
29203 + You should have received a copy of the GNU General Public License along with
29204 + this program; if not, write to the Free Software Foundation, Inc.,
29205 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
29207 + The full GNU General Public License is included in this distribution in
29208 + the file called "COPYING".
29210 + Contact Information:
29211 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
29212 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
29214 +*******************************************************************************/
29216 +#ifndef _IXGBE_TYPE_H_
29217 +#define _IXGBE_TYPE_H_
29219 +#include "ixgbe_osdep.h"
29223 +#define IXGBE_INTEL_VENDOR_ID 0x8086
29226 +#define IXGBE_DEV_ID_82598 0x10B6
29227 +#define IXGBE_DEV_ID_82598_BX 0x1508
29228 +#define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6
29229 +#define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7
29230 +#define IXGBE_DEV_ID_82598AT 0x10C8
29231 +#define IXGBE_DEV_ID_82598AT2 0x150B
29232 +#define IXGBE_DEV_ID_82598EB_SFP_LOM 0x10DB
29233 +#define IXGBE_DEV_ID_82598EB_CX4 0x10DD
29234 +#define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC
29235 +#define IXGBE_DEV_ID_82598_DA_DUAL_PORT 0x10F1
29236 +#define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM 0x10E1
29237 +#define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4
29238 +#define IXGBE_DEV_ID_82599_KX4 0x10F7
29239 +#define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514
29240 +#define IXGBE_DEV_ID_82599_KR 0x1517
29241 +#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8
29242 +#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C
29243 +#define IXGBE_DEV_ID_82599_CX4 0x10F9
29244 +#define IXGBE_DEV_ID_82599_SFP 0x10FB
29245 +#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
29246 +#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
29247 +#define IXGBE_DEV_ID_82599_T3_LOM 0x151C
29249 +/* General Registers */
29250 +#define IXGBE_CTRL 0x00000
29251 +#define IXGBE_STATUS 0x00008
29252 +#define IXGBE_CTRL_EXT 0x00018
29253 +#define IXGBE_ESDP 0x00020
29254 +#define IXGBE_EODSDP 0x00028
29255 +#define IXGBE_I2CCTL 0x00028
29256 +#define IXGBE_LEDCTL 0x00200
29257 +#define IXGBE_FRTIMER 0x00048
29258 +#define IXGBE_TCPTIMER 0x0004C
29259 +#define IXGBE_CORESPARE 0x00600
29260 +#define IXGBE_EXVET 0x05078
29262 +/* NVM Registers */
29263 +#define IXGBE_EEC 0x10010
29264 +#define IXGBE_EERD 0x10014
29265 +#define IXGBE_EEWR 0x10018
29266 +#define IXGBE_FLA 0x1001C
29267 +#define IXGBE_EEMNGCTL 0x10110
29268 +#define IXGBE_EEMNGDATA 0x10114
29269 +#define IXGBE_FLMNGCTL 0x10118
29270 +#define IXGBE_FLMNGDATA 0x1011C
29271 +#define IXGBE_FLMNGCNT 0x10120
29272 +#define IXGBE_FLOP 0x1013C
29273 +#define IXGBE_GRC 0x10200
29275 +/* General Receive Control */
29276 +#define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */
29277 +#define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM */
29279 +#define IXGBE_VPDDIAG0 0x10204
29280 +#define IXGBE_VPDDIAG1 0x10208
29282 +/* I2CCTL Bit Masks */
29283 +#define IXGBE_I2C_CLK_IN 0x00000001
29284 +#define IXGBE_I2C_CLK_OUT 0x00000002
29285 +#define IXGBE_I2C_DATA_IN 0x00000004
29286 +#define IXGBE_I2C_DATA_OUT 0x00000008
29288 +/* Interrupt Registers */
29289 +#define IXGBE_EICR 0x00800
29290 +#define IXGBE_EICS 0x00808
29291 +#define IXGBE_EIMS 0x00880
29292 +#define IXGBE_EIMC 0x00888
29293 +#define IXGBE_EIAC 0x00810
29294 +#define IXGBE_EIAM 0x00890
29295 +#define IXGBE_EICS_EX(_i) (0x00A90 + (_i) * 4)
29296 +#define IXGBE_EIMS_EX(_i) (0x00AA0 + (_i) * 4)
29297 +#define IXGBE_EIMC_EX(_i) (0x00AB0 + (_i) * 4)
29298 +#define IXGBE_EIAM_EX(_i) (0x00AD0 + (_i) * 4)
29299 +/* 82599 EITR is only 12 bits, with the lower 3 always zero */
+/*
29301 + * 82598 EITR is 16 bits, but the limits below are based on the max
29302 + * supported across all ixgbe hardware
+ */
29304 +#define IXGBE_MAX_INT_RATE 488281
29305 +#define IXGBE_MIN_INT_RATE 956
29306 +#define IXGBE_MAX_EITR 0x00000FF8
29307 +#define IXGBE_MIN_EITR 8
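+/*
+ * Consistency check on the limits above, assuming the 2.048 usec
+ * interval unit they imply: interval = (EITR >> 3) * 2.048 usec and
+ * rate = 1e6 / interval, so EITR 0xFF8 -> 511 * 2.048 ~ 1046 usec
+ * -> ~956 ints/sec, while EITR 8 -> 2.048 usec -> ~488281 ints/sec.
+ */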
29308 +#define IXGBE_EITR(_i) (((_i) <= 23) ? (0x00820 + ((_i) * 4)) : \
29309 + (0x012300 + (((_i) - 24) * 4)))
29310 +#define IXGBE_EITR_ITR_INT_MASK 0x00000FF8
29311 +#define IXGBE_EITR_LLI_MOD 0x00008000
29312 +#define IXGBE_EITR_CNT_WDIS 0x80000000
29313 +#define IXGBE_IVAR(_i) (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */
29314 +#define IXGBE_IVAR_MISC 0x00A00 /* misc MSI-X interrupt causes */
29315 +#define IXGBE_EITRSEL 0x00894
29316 +#define IXGBE_MSIXT 0x00000 /* MSI-X Table. 0x0000 - 0x01C */
29317 +#define IXGBE_MSIXPBA 0x02000 /* MSI-X Pending bit array */
29318 +#define IXGBE_PBACL(_i) (((_i) == 0) ? (0x11068) : (0x110C0 + ((_i) * 4)))
29319 +#define IXGBE_GPIE 0x00898
29321 +/* Flow Control Registers */
29322 +#define IXGBE_FCADBUL 0x03210
29323 +#define IXGBE_FCADBUH 0x03214
29324 +#define IXGBE_FCAMACL 0x04328
29325 +#define IXGBE_FCAMACH 0x0432C
29326 +#define IXGBE_FCRTH_82599(_i) (0x03260 + ((_i) * 4)) /* 8 of these (0-7) */
29327 +#define IXGBE_FCRTL_82599(_i) (0x03220 + ((_i) * 4)) /* 8 of these (0-7) */
29328 +#define IXGBE_PFCTOP 0x03008
29329 +#define IXGBE_FCTTV(_i) (0x03200 + ((_i) * 4)) /* 4 of these (0-3) */
29330 +#define IXGBE_FCRTL(_i) (0x03220 + ((_i) * 8)) /* 8 of these (0-7) */
29331 +#define IXGBE_FCRTH(_i) (0x03260 + ((_i) * 8)) /* 8 of these (0-7) */
29332 +#define IXGBE_FCRTV 0x032A0
29333 +#define IXGBE_FCCFG 0x03D00
29334 +#define IXGBE_TFCS 0x0CE00
29336 +/* Receive DMA Registers */
29337 +#define IXGBE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : \
29338 + (0x0D000 + ((_i - 64) * 0x40)))
29339 +#define IXGBE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : \
29340 + (0x0D004 + ((_i - 64) * 0x40)))
29341 +#define IXGBE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : \
29342 + (0x0D008 + ((_i - 64) * 0x40)))
29343 +#define IXGBE_RDH(_i) (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : \
29344 + (0x0D010 + ((_i - 64) * 0x40)))
29345 +#define IXGBE_RDT(_i) (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : \
29346 + (0x0D018 + ((_i - 64) * 0x40)))
29347 +#define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \
29348 + (0x0D028 + ((_i - 64) * 0x40)))
29349 +#define IXGBE_RSCCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \
29350 + (0x0D02C + ((_i - 64) * 0x40)))
29351 +#define IXGBE_RSCDBU 0x03028
29352 +#define IXGBE_RDDCC 0x02F20
29353 +#define IXGBE_RXMEMWRAP 0x03190
29354 +#define IXGBE_STARCTRL 0x03024
+/*
29356 + * Split and Replication Receive Control Registers
29357 + * 00-15 : 0x02100 + n*4
29358 + * 16-63 : 0x01014 + n*0x40
29359 + * 64-127: 0x0D014 + (n-64)*0x40
+ */
29361 +#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \
29362 + (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \
29363 + (0x0D014 + ((_i - 64) * 0x40))))
+/*
29365 + * Rx DCA Control Register:
29366 + * 00-15 : 0x02200 + n*4
29367 + * 16-63 : 0x0100C + n*0x40
29368 + * 64-127: 0x0D00C + (n-64)*0x40
+ */
29370 +#define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \
29371 + (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \
29372 + (0x0D00C + ((_i - 64) * 0x40))))
29373 +#define IXGBE_RDRXCTL 0x02F00
29374 +#define IXGBE_RDRXCTL_RSC_PUSH 0x80
29375 +#define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4))
29376 + /* 8 of these 0x03C00 - 0x03C1C */
29377 +#define IXGBE_RXCTRL 0x03000
29378 +#define IXGBE_DROPEN 0x03D04
29379 +#define IXGBE_RXPBSIZE_SHIFT 10
29381 +/* Receive Registers */
29382 +#define IXGBE_RXCSUM 0x05000
29383 +#define IXGBE_RFCTL 0x05008
29384 +#define IXGBE_DRECCCTL 0x02F08
29385 +#define IXGBE_DRECCCTL_DISABLE 0
29387 +/* Multicast Table Array - 128 entries */
29388 +#define IXGBE_MTA(_i) (0x05200 + ((_i) * 4))
29389 +#define IXGBE_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
29390 + (0x0A200 + ((_i) * 8)))
29391 +#define IXGBE_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
29392 + (0x0A204 + ((_i) * 8)))
29393 +#define IXGBE_MPSAR_LO(_i) (0x0A600 + ((_i) * 8))
29394 +#define IXGBE_MPSAR_HI(_i) (0x0A604 + ((_i) * 8))
29395 +/* Packet split receive type */
29396 +#define IXGBE_PSRTYPE(_i) (((_i) <= 15) ? (0x05480 + ((_i) * 4)) : \
29397 + (0x0EA00 + ((_i) * 4)))
29398 +/* array of 4096 1-bit vlan filters */
29399 +#define IXGBE_VFTA(_i) (0x0A000 + ((_i) * 4))
29400 +/* array of 4096 4-bit vlan vmdq indices */
29401 +#define IXGBE_VFTAVIND(_j, _i) (0x0A200 + ((_j) * 0x200) + ((_i) * 4))
29402 +#define IXGBE_FCTRL 0x05080
29403 +#define IXGBE_VLNCTRL 0x05088
29404 +#define IXGBE_MCSTCTRL 0x05090
29405 +#define IXGBE_MRQC 0x05818
29406 +#define IXGBE_SAQF(_i) (0x0E000 + ((_i) * 4)) /* Source Address Queue Filter */
29407 +#define IXGBE_DAQF(_i) (0x0E200 + ((_i) * 4)) /* Dest. Address Queue Filter */
29408 +#define IXGBE_SDPQF(_i) (0x0E400 + ((_i) * 4)) /* Src Dest. Addr Queue Filter */
29409 +#define IXGBE_FTQF(_i) (0x0E600 + ((_i) * 4)) /* Five Tuple Queue Filter */
29410 +#define IXGBE_ETQF(_i) (0x05128 + ((_i) * 4)) /* EType Queue Filter */
29411 +#define IXGBE_ETQS(_i) (0x0EC00 + ((_i) * 4)) /* EType Queue Select */
29412 +#define IXGBE_SYNQF 0x0EC30 /* SYN Packet Queue Filter */
29413 +#define IXGBE_RQTC 0x0EC70
29414 +#define IXGBE_MTQC 0x08120
29415 +#define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4)) /* 64 of these (0-63) */
29416 +#define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4)) /* 128 of these (0-127) */
29417 +#define IXGBE_VMVIR(_i) (0x08000 + ((_i) * 4)) /* 64 of these (0-63) */
29418 +#define IXGBE_VT_CTL 0x051B0
29419 +#define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4))
29420 +#define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4))
29421 +#define IXGBE_QDE 0x2F04
29422 +#define IXGBE_VMOLR(_i) (0x0F000 + ((_i) * 4)) /* 64 total */
29423 +#define IXGBE_UTA(_i) (0x0F400 + ((_i) * 4))
29424 +#define IXGBE_VMRCTL(_i) (0x0F600 + ((_i) * 4))
29425 +#define IXGBE_VMRVLAN(_i) (0x0F610 + ((_i) * 4))
29426 +#define IXGBE_VMRVM(_i) (0x0F630 + ((_i) * 4))
29427 +#define IXGBE_L34T_IMIR(_i) (0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/
29428 +#define IXGBE_LLITHRESH 0x0EC90
29429 +#define IXGBE_IMIR(_i) (0x05A80 + ((_i) * 4)) /* 8 of these (0-7) */
29430 +#define IXGBE_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* 8 of these (0-7) */
29431 +#define IXGBE_IMIRVP 0x05AC0
29432 +#define IXGBE_VMD_CTL 0x0581C
29433 +#define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */
29434 +#define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */
29436 +/* Flow Director registers */
29437 +#define IXGBE_FDIRCTRL 0x0EE00
29438 +#define IXGBE_FDIRHKEY 0x0EE68
29439 +#define IXGBE_FDIRSKEY 0x0EE6C
29440 +#define IXGBE_FDIRDIP4M 0x0EE3C
29441 +#define IXGBE_FDIRSIP4M 0x0EE40
29442 +#define IXGBE_FDIRTCPM 0x0EE44
29443 +#define IXGBE_FDIRUDPM 0x0EE48
29444 +#define IXGBE_FDIRIP6M 0x0EE74
29445 +#define IXGBE_FDIRM 0x0EE70
29447 +/* Flow Director Stats registers */
29448 +#define IXGBE_FDIRFREE 0x0EE38
29449 +#define IXGBE_FDIRLEN 0x0EE4C
29450 +#define IXGBE_FDIRUSTAT 0x0EE50
29451 +#define IXGBE_FDIRFSTAT 0x0EE54
29452 +#define IXGBE_FDIRMATCH 0x0EE58
29453 +#define IXGBE_FDIRMISS 0x0EE5C
29455 +/* Flow Director Programming registers */
29456 +#define IXGBE_FDIRSIPv6(_i) (0x0EE0C + ((_i) * 4)) /* 3 of these (0-2) */
29457 +#define IXGBE_FDIRIPSA 0x0EE18
29458 +#define IXGBE_FDIRIPDA 0x0EE1C
29459 +#define IXGBE_FDIRPORT 0x0EE20
29460 +#define IXGBE_FDIRVLAN 0x0EE24
29461 +#define IXGBE_FDIRHASH 0x0EE28
29462 +#define IXGBE_FDIRCMD 0x0EE2C
29464 +/* Transmit DMA registers */
29465 +#define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40)) /* 32 of these (0-31)*/
29466 +#define IXGBE_TDBAH(_i) (0x06004 + ((_i) * 0x40))
29467 +#define IXGBE_TDLEN(_i) (0x06008 + ((_i) * 0x40))
29468 +#define IXGBE_TDH(_i) (0x06010 + ((_i) * 0x40))
29469 +#define IXGBE_TDT(_i) (0x06018 + ((_i) * 0x40))
29470 +#define IXGBE_TXDCTL(_i) (0x06028 + ((_i) * 0x40))
29471 +#define IXGBE_TDWBAL(_i) (0x06038 + ((_i) * 0x40))
29472 +#define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40))
29473 +#define IXGBE_DTXCTL 0x07E00
29475 +#define IXGBE_DMATXCTL 0x04A80
29476 +#define IXGBE_PFDTXGSWC 0x08220
29477 +#define IXGBE_DTXMXSZRQ 0x08100
29478 +#define IXGBE_DTXTCPFLGL 0x04A88
29479 +#define IXGBE_DTXTCPFLGH 0x04A8C
29480 +#define IXGBE_LBDRPEN 0x0CA00
29481 +#define IXGBE_TXPBTHRESH(_i) (0x04950 + ((_i) * 4)) /* 8 of these 0 - 7 */
29483 +#define IXGBE_DMATXCTL_TE 0x1 /* Transmit Enable */
29484 +#define IXGBE_DMATXCTL_NS 0x2 /* No Snoop LSO hdr buffer */
29485 +#define IXGBE_DMATXCTL_GDV 0x8 /* Global Double VLAN */
29486 +#define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */
29488 +#define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */
29489 +#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */
29490 +/* Tx DCA Control register : 128 of these (0-127) */
29491 +#define IXGBE_DCA_TXCTRL_82599(_i) (0x0600C + ((_i) * 0x40))
29492 +#define IXGBE_TIPG 0x0CB00
29493 +#define IXGBE_TXPBSIZE(_i) (0x0CC00 + ((_i) * 4)) /* 8 of these */
29494 +#define IXGBE_MNGTXMAP 0x0CD10
29495 +#define IXGBE_TIPG_FIBER_DEFAULT 3
29496 +#define IXGBE_TXPBSIZE_SHIFT 10
29498 +/* Wake up registers */
29499 +#define IXGBE_WUC 0x05800
29500 +#define IXGBE_WUFC 0x05808
29501 +#define IXGBE_WUS 0x05810
29502 +#define IXGBE_IPAV 0x05838
29503 +#define IXGBE_IP4AT 0x05840 /* IPv4 table 0x5840-0x5858 */
29504 +#define IXGBE_IP6AT 0x05880 /* IPv6 table 0x5880-0x588F */
29506 +#define IXGBE_WUPL 0x05900
29507 +#define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */
29508 +#define IXGBE_FHFT(_n) (0x09000 + (_n * 0x100)) /* Flex host filter table */
29509 +#define IXGBE_FHFT_EXT(_n) (0x09800 + (_n * 0x100)) /* Ext Flexible Host
29510 + * Filter Table */
29512 +#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX 4
29513 +#define IXGBE_EXT_FLEXIBLE_FILTER_COUNT_MAX 2
29515 +/* Each Flexible Filter is at most 128 (0x80) bytes in length */
29516 +#define IXGBE_FLEXIBLE_FILTER_SIZE_MAX 128
29517 +#define IXGBE_FHFT_LENGTH_OFFSET 0xFC /* Length byte in FHFT */
29518 +#define IXGBE_FHFT_LENGTH_MASK 0x0FF /* Length in lower byte */
29520 +/* Definitions for power management and wakeup registers */
29521 +/* Wake Up Control */
29522 +#define IXGBE_WUC_PME_EN 0x00000002 /* PME Enable */
29523 +#define IXGBE_WUC_PME_STATUS 0x00000004 /* PME Status */
29524 +#define IXGBE_WUC_WKEN 0x00000010 /* Enable PE_WAKE_N pin assertion */
29526 +/* Wake Up Filter Control */
29527 +#define IXGBE_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
29528 +#define IXGBE_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
29529 +#define IXGBE_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
29530 +#define IXGBE_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
29531 +#define IXGBE_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
29532 +#define IXGBE_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
29533 +#define IXGBE_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
29534 +#define IXGBE_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */
29535 +#define IXGBE_WUFC_MNG 0x00000100 /* Directed Mgmt Packet Wakeup Enable */
29537 +#define IXGBE_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */
29538 +#define IXGBE_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
29539 +#define IXGBE_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */
29540 +#define IXGBE_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */
29541 +#define IXGBE_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */
29542 +#define IXGBE_WUFC_FLX4 0x00100000 /* Flexible Filter 4 Enable */
29543 +#define IXGBE_WUFC_FLX5 0x00200000 /* Flexible Filter 5 Enable */
29544 +#define IXGBE_WUFC_FLX_FILTERS 0x000F0000 /* Mask for 4 flex filters */
29545 +#define IXGBE_WUFC_EXT_FLX_FILTERS 0x00300000 /* Mask for Ext. flex filters */
29546 +#define IXGBE_WUFC_ALL_FILTERS 0x003F00FF /* Mask for all wakeup filters */
29547 +#define IXGBE_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */
29549 +/* Wake Up Status */
29550 +#define IXGBE_WUS_LNKC IXGBE_WUFC_LNKC
29551 +#define IXGBE_WUS_MAG IXGBE_WUFC_MAG
29552 +#define IXGBE_WUS_EX IXGBE_WUFC_EX
29553 +#define IXGBE_WUS_MC IXGBE_WUFC_MC
29554 +#define IXGBE_WUS_BC IXGBE_WUFC_BC
29555 +#define IXGBE_WUS_ARP IXGBE_WUFC_ARP
29556 +#define IXGBE_WUS_IPV4 IXGBE_WUFC_IPV4
29557 +#define IXGBE_WUS_IPV6 IXGBE_WUFC_IPV6
29558 +#define IXGBE_WUS_MNG IXGBE_WUFC_MNG
29559 +#define IXGBE_WUS_FLX0 IXGBE_WUFC_FLX0
29560 +#define IXGBE_WUS_FLX1 IXGBE_WUFC_FLX1
29561 +#define IXGBE_WUS_FLX2 IXGBE_WUFC_FLX2
29562 +#define IXGBE_WUS_FLX3 IXGBE_WUFC_FLX3
29563 +#define IXGBE_WUS_FLX4 IXGBE_WUFC_FLX4
29564 +#define IXGBE_WUS_FLX5 IXGBE_WUFC_FLX5
29565 +#define IXGBE_WUS_FLX_FILTERS IXGBE_WUFC_FLX_FILTERS
29567 +/* Wake Up Packet Length */
29568 +#define IXGBE_WUPL_LENGTH_MASK 0xFFFF
29570 +/* DCB registers */
29571 +#define IXGBE_RMCS 0x03D00
29572 +#define IXGBE_DPMCS 0x07F40
29573 +#define IXGBE_PDPMCS 0x0CD00
29574 +#define IXGBE_RUPPBMR 0x050A0
29575 +#define IXGBE_RT2CR(_i) (0x03C20 + ((_i) * 4)) /* 8 of these (0-7) */
29576 +#define IXGBE_RT2SR(_i) (0x03C40 + ((_i) * 4)) /* 8 of these (0-7) */
29577 +#define IXGBE_TDTQ2TCCR(_i) (0x0602C + ((_i) * 0x40)) /* 8 of these (0-7) */
29578 +#define IXGBE_TDTQ2TCSR(_i) (0x0622C + ((_i) * 0x40)) /* 8 of these (0-7) */
29579 +#define IXGBE_TDPT2TCCR(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */
29580 +#define IXGBE_TDPT2TCSR(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */
29583 +/* Security Control Registers */
29584 +#define IXGBE_SECTXCTRL 0x08800
29585 +#define IXGBE_SECTXSTAT 0x08804
29586 +#define IXGBE_SECTXBUFFAF 0x08808
29587 +#define IXGBE_SECTXMINIFG 0x08810
29589 +#define IXGBE_SECRXCTRL 0x08D00
29590 +#define IXGBE_SECRXSTAT 0x08D04
29592 +/* Security Bit Fields and Masks */
29593 +#define IXGBE_SECTXCTRL_SECTX_DIS 0x00000001
29594 +#define IXGBE_SECTXCTRL_TX_DIS 0x00000002
29595 +#define IXGBE_SECTXCTRL_STORE_FORWARD 0x00000004
29597 +#define IXGBE_SECTXSTAT_SECTX_RDY 0x00000001
29598 +#define IXGBE_SECTXSTAT_ECC_TXERR 0x00000002
29600 +#define IXGBE_SECRXCTRL_SECRX_DIS 0x00000001
29601 +#define IXGBE_SECRXCTRL_RX_DIS 0x00000002
29603 +#define IXGBE_SECRXSTAT_SECRX_RDY 0x00000001
29604 +#define IXGBE_SECRXSTAT_ECC_RXERR 0x00000002
29606 +/* LinkSec (MacSec) Registers */
29607 +#define IXGBE_LSECTXCAP 0x08A00
29608 +#define IXGBE_LSECRXCAP 0x08F00
29609 +#define IXGBE_LSECTXCTRL 0x08A04
29610 +#define IXGBE_LSECTXSCL 0x08A08 /* SCI Low */
29611 +#define IXGBE_LSECTXSCH 0x08A0C /* SCI High */
29612 +#define IXGBE_LSECTXSA 0x08A10
29613 +#define IXGBE_LSECTXPN0 0x08A14
29614 +#define IXGBE_LSECTXPN1 0x08A18
29615 +#define IXGBE_LSECTXKEY0(_n) (0x08A1C + (4 * (_n))) /* 4 of these (0-3) */
29616 +#define IXGBE_LSECTXKEY1(_n) (0x08A2C + (4 * (_n))) /* 4 of these (0-3) */
29617 +#define IXGBE_LSECRXCTRL 0x08F04
29618 +#define IXGBE_LSECRXSCL 0x08F08
29619 +#define IXGBE_LSECRXSCH 0x08F0C
29620 +#define IXGBE_LSECRXSA(_i) (0x08F10 + (4 * (_i))) /* 2 of these (0-1) */
29621 +#define IXGBE_LSECRXPN(_i) (0x08F18 + (4 * (_i))) /* 2 of these (0-1) */
29622 +#define IXGBE_LSECRXKEY(_n, _m) (0x08F20 + ((0x10 * (_n)) + (4 * (_m))))
29623 +#define IXGBE_LSECTXUT 0x08A3C /* OutPktsUntagged */
29624 +#define IXGBE_LSECTXPKTE 0x08A40 /* OutPktsEncrypted */
29625 +#define IXGBE_LSECTXPKTP 0x08A44 /* OutPktsProtected */
29626 +#define IXGBE_LSECTXOCTE 0x08A48 /* OutOctetsEncrypted */
29627 +#define IXGBE_LSECTXOCTP 0x08A4C /* OutOctetsProtected */
29628 +#define IXGBE_LSECRXUT 0x08F40 /* InPktsUntagged/InPktsNoTag */
29629 +#define IXGBE_LSECRXOCTD 0x08F44 /* InOctetsDecrypted */
29630 +#define IXGBE_LSECRXOCTV 0x08F48 /* InOctetsValidated */
29631 +#define IXGBE_LSECRXBAD 0x08F4C /* InPktsBadTag */
29632 +#define IXGBE_LSECRXNOSCI 0x08F50 /* InPktsNoSci */
29633 +#define IXGBE_LSECRXUNSCI 0x08F54 /* InPktsUnknownSci */
29634 +#define IXGBE_LSECRXUNCH 0x08F58 /* InPktsUnchecked */
29635 +#define IXGBE_LSECRXDELAY 0x08F5C /* InPktsDelayed */
29636 +#define IXGBE_LSECRXLATE 0x08F60 /* InPktsLate */
29637 +#define IXGBE_LSECRXOK(_n) (0x08F64 + (0x04 * (_n))) /* InPktsOk */
29638 +#define IXGBE_LSECRXINV(_n) (0x08F6C + (0x04 * (_n))) /* InPktsInvalid */
29639 +#define IXGBE_LSECRXNV(_n) (0x08F74 + (0x04 * (_n))) /* InPktsNotValid */
29640 +#define IXGBE_LSECRXUNSA 0x08F7C /* InPktsUnusedSa */
29641 +#define IXGBE_LSECRXNUSA 0x08F80 /* InPktsNotUsingSa */
29643 +/* LinkSec (MacSec) Bit Fields and Masks */
29644 +#define IXGBE_LSECTXCAP_SUM_MASK 0x00FF0000
29645 +#define IXGBE_LSECTXCAP_SUM_SHIFT 16
29646 +#define IXGBE_LSECRXCAP_SUM_MASK 0x00FF0000
29647 +#define IXGBE_LSECRXCAP_SUM_SHIFT 16
29649 +#define IXGBE_LSECTXCTRL_EN_MASK 0x00000003
29650 +#define IXGBE_LSECTXCTRL_DISABLE 0x0
29651 +#define IXGBE_LSECTXCTRL_AUTH 0x1
29652 +#define IXGBE_LSECTXCTRL_AUTH_ENCRYPT 0x2
29653 +#define IXGBE_LSECTXCTRL_AISCI 0x00000020
29654 +#define IXGBE_LSECTXCTRL_PNTHRSH_MASK 0xFFFFFF00
29655 +#define IXGBE_LSECTXCTRL_RSV_MASK 0x000000D8
29657 +#define IXGBE_LSECRXCTRL_EN_MASK 0x0000000C
29658 +#define IXGBE_LSECRXCTRL_EN_SHIFT 2
29659 +#define IXGBE_LSECRXCTRL_DISABLE 0x0
29660 +#define IXGBE_LSECRXCTRL_CHECK 0x1
29661 +#define IXGBE_LSECRXCTRL_STRICT 0x2
29662 +#define IXGBE_LSECRXCTRL_DROP 0x3
29663 +#define IXGBE_LSECRXCTRL_PLSH 0x00000040
29664 +#define IXGBE_LSECRXCTRL_RP 0x00000080
29665 +#define IXGBE_LSECRXCTRL_RSV_MASK 0xFFFFFF33
29667 +/* IpSec Registers */
29668 +#define IXGBE_IPSTXIDX 0x08900
29669 +#define IXGBE_IPSTXSALT 0x08904
29670 +#define IXGBE_IPSTXKEY(_i) (0x08908 + (4 * (_i))) /* 4 of these (0-3) */
29671 +#define IXGBE_IPSRXIDX 0x08E00
29672 +#define IXGBE_IPSRXIPADDR(_i) (0x08E04 + (4 * (_i))) /* 4 of these (0-3) */
29673 +#define IXGBE_IPSRXSPI 0x08E14
29674 +#define IXGBE_IPSRXIPIDX 0x08E18
29675 +#define IXGBE_IPSRXKEY(_i) (0x08E1C + (4 * (_i))) /* 4 of these (0-3) */
29676 +#define IXGBE_IPSRXSALT 0x08E2C
29677 +#define IXGBE_IPSRXMOD 0x08E30
29679 +#define IXGBE_SECTXCTRL_STORE_FORWARD_ENABLE 0x4
29681 +/* DCB registers */
29682 +#define IXGBE_RTRPCS 0x02430
29683 +#define IXGBE_RTTDCS 0x04900
29684 +#define IXGBE_RTTDCS_ARBDIS 0x00000040 /* DCB arbiter disable */
29685 +#define IXGBE_RTTPCS 0x0CD00
29686 +#define IXGBE_RTRUP2TC 0x03020
29687 +#define IXGBE_RTTUP2TC 0x0C800
29688 +#define IXGBE_RTRPT4C(_i) (0x02140 + ((_i) * 4)) /* 8 of these (0-7) */
29689 +#define IXGBE_RTRPT4S(_i) (0x02160 + ((_i) * 4)) /* 8 of these (0-7) */
29690 +#define IXGBE_RTTDT2C(_i) (0x04910 + ((_i) * 4)) /* 8 of these (0-7) */
29691 +#define IXGBE_RTTDT2S(_i) (0x04930 + ((_i) * 4)) /* 8 of these (0-7) */
29692 +#define IXGBE_RTTPT2C(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */
29693 +#define IXGBE_RTTPT2S(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */
29694 +#define IXGBE_RTTDQSEL 0x04904
29695 +#define IXGBE_RTTDT1C 0x04908
29696 +#define IXGBE_RTTDT1S 0x0490C
29697 +#define IXGBE_RTTDTECC 0x04990
29698 +#define IXGBE_RTTDTECC_NO_BCN 0x00000100
29700 +#define IXGBE_RTTBCNRC 0x04984
29703 +/* FCoE DMA Context Registers */
29704 +#define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */
29705 +#define IXGBE_FCPTRH 0x02414 /* FC USer Desc. PTR High */
29706 +#define IXGBE_FCBUFF 0x02418 /* FC Buffer Control */
29707 +#define IXGBE_FCDMARW 0x02420 /* FC Receive DMA RW */
29708 +#define IXGBE_FCINVST0 0x03FC0 /* FC Invalid DMA Context Status Reg 0 */
29709 +#define IXGBE_FCINVST(_i) (IXGBE_FCINVST0 + ((_i) * 4))
29710 +#define IXGBE_FCBUFF_VALID (1 << 0) /* DMA Context Valid */
29711 +#define IXGBE_FCBUFF_BUFFSIZE (3 << 3) /* User Buffer Size */
29712 +#define IXGBE_FCBUFF_WRCONTX (1 << 7) /* 0: Initiator, 1: Target */
29713 +#define IXGBE_FCBUFF_BUFFCNT 0x0000ff00 /* Number of User Buffers */
29714 +#define IXGBE_FCBUFF_OFFSET 0xffff0000 /* User Buffer Offset */
29715 +#define IXGBE_FCBUFF_BUFFSIZE_SHIFT 3
29716 +#define IXGBE_FCBUFF_BUFFCNT_SHIFT 8
29717 +#define IXGBE_FCBUFF_OFFSET_SHIFT 16
29718 +#define IXGBE_FCDMARW_WE (1 << 14) /* Write enable */
29719 +#define IXGBE_FCDMARW_RE (1 << 15) /* Read enable */
29720 +#define IXGBE_FCDMARW_FCOESEL 0x000001ff /* FC X_ID: 11 bits */
29721 +#define IXGBE_FCDMARW_LASTSIZE 0xffff0000 /* Last User Buffer Size */
29722 +#define IXGBE_FCDMARW_LASTSIZE_SHIFT 16
29723 +/* FCoE SOF/EOF */
29724 +#define IXGBE_TEOFF 0x04A94 /* Tx FC EOF */
29725 +#define IXGBE_TSOFF 0x04A98 /* Tx FC SOF */
29726 +#define IXGBE_REOFF 0x05158 /* Rx FC EOF */
29727 +#define IXGBE_RSOFF 0x051F8 /* Rx FC SOF */
29728 +/* FCoE Filter Context Registers */
29729 +#define IXGBE_FCFLT 0x05108 /* FC FLT Context */
29730 +#define IXGBE_FCFLTRW 0x05110 /* FC Filter RW Control */
29731 +#define IXGBE_FCPARAM 0x051d8 /* FC Offset Parameter */
29732 +#define IXGBE_FCFLT_VALID (1 << 0) /* Filter Context Valid */
29733 +#define IXGBE_FCFLT_FIRST (1 << 1) /* Filter First */
29734 +#define IXGBE_FCFLT_SEQID 0x00ff0000 /* Sequence ID */
29735 +#define IXGBE_FCFLT_SEQCNT 0xff000000 /* Sequence Count */
29736 +#define IXGBE_FCFLTRW_RVALDT (1 << 13) /* Fast Re-Validation */
29737 +#define IXGBE_FCFLTRW_WE (1 << 14) /* Write Enable */
29738 +#define IXGBE_FCFLTRW_RE (1 << 15) /* Read Enable */
29739 +/* FCoE Receive Control */
29740 +#define IXGBE_FCRXCTRL 0x05100 /* FC Receive Control */
29741 +#define IXGBE_FCRXCTRL_FCOELLI (1 << 0) /* Low latency interrupt */
29742 +#define IXGBE_FCRXCTRL_SAVBAD (1 << 1) /* Save Bad Frames */
29743 +#define IXGBE_FCRXCTRL_FRSTRDH (1 << 2) /* EN 1st Read Header */
29744 +#define IXGBE_FCRXCTRL_LASTSEQH (1 << 3) /* EN Last Header in Seq */
29745 +#define IXGBE_FCRXCTRL_ALLH (1 << 4) /* EN All Headers */
29746 +#define IXGBE_FCRXCTRL_FRSTSEQH (1 << 5) /* EN 1st Seq. Header */
29747 +#define IXGBE_FCRXCTRL_ICRC (1 << 6) /* Ignore Bad FC CRC */
29748 +#define IXGBE_FCRXCTRL_FCCRCBO (1 << 7) /* FC CRC Byte Ordering */
29749 +#define IXGBE_FCRXCTRL_FCOEVER 0x00000f00 /* FCoE Version: 4 bits */
29750 +#define IXGBE_FCRXCTRL_FCOEVER_SHIFT 8
29751 +/* FCoE Redirection */
29752 +#define IXGBE_FCRECTL 0x0ED00 /* FC Redirection Control */
29753 +#define IXGBE_FCRETA0 0x0ED10 /* FC Redirection Table 0 */
29754 +#define IXGBE_FCRETA(_i) (IXGBE_FCRETA0 + ((_i) * 4)) /* FCoE Redir */
29755 +#define IXGBE_FCRECTL_ENA 0x1 /* FCoE Redir Table Enable */
29756 +#define IXGBE_FCRETA_SIZE 8 /* Max entries in FCRETA */
29757 +#define IXGBE_FCRETA_ENTRY_MASK 0x0000007f /* 7 bits for the queue index */
29759 +/* Stats registers */
29760 +#define IXGBE_CRCERRS 0x04000
29761 +#define IXGBE_ILLERRC 0x04004
29762 +#define IXGBE_ERRBC 0x04008
29763 +#define IXGBE_MSPDC 0x04010
29764 +#define IXGBE_MPC(_i) (0x03FA0 + ((_i) * 4)) /* 8 of these 3FA0-3FBC*/
29765 +#define IXGBE_MLFC 0x04034
29766 +#define IXGBE_MRFC 0x04038
29767 +#define IXGBE_RLEC 0x04040
29768 +#define IXGBE_LXONTXC 0x03F60
29769 +#define IXGBE_LXONRXC 0x0CF60
29770 +#define IXGBE_LXOFFTXC 0x03F68
29771 +#define IXGBE_LXOFFRXC 0x0CF68
29772 +#define IXGBE_LXONRXCNT 0x041A4
29773 +#define IXGBE_LXOFFRXCNT 0x041A8
29774 +#define IXGBE_PXONRXCNT(_i) (0x04140 + ((_i) * 4)) /* 8 of these */
29775 +#define IXGBE_PXOFFRXCNT(_i) (0x04160 + ((_i) * 4)) /* 8 of these */
29776 +#define IXGBE_PXON2OFFCNT(_i) (0x03240 + ((_i) * 4)) /* 8 of these */
29777 +#define IXGBE_PXONTXC(_i) (0x03F00 + ((_i) * 4)) /* 8 of these 3F00-3F1C*/
29778 +#define IXGBE_PXONRXC(_i) (0x0CF00 + ((_i) * 4)) /* 8 of these CF00-CF1C*/
29779 +#define IXGBE_PXOFFTXC(_i) (0x03F20 + ((_i) * 4)) /* 8 of these 3F20-3F3C*/
29780 +#define IXGBE_PXOFFRXC(_i) (0x0CF20 + ((_i) * 4)) /* 8 of these CF20-CF3C*/
29781 +#define IXGBE_PRC64 0x0405C
29782 +#define IXGBE_PRC127 0x04060
29783 +#define IXGBE_PRC255 0x04064
29784 +#define IXGBE_PRC511 0x04068
29785 +#define IXGBE_PRC1023 0x0406C
29786 +#define IXGBE_PRC1522 0x04070
29787 +#define IXGBE_GPRC 0x04074
29788 +#define IXGBE_BPRC 0x04078
29789 +#define IXGBE_MPRC 0x0407C
29790 +#define IXGBE_GPTC 0x04080
29791 +#define IXGBE_GORCL 0x04088
29792 +#define IXGBE_GORCH 0x0408C
29793 +#define IXGBE_GOTCL 0x04090
29794 +#define IXGBE_GOTCH 0x04094
29795 +#define IXGBE_RNBC(_i) (0x03FC0 + ((_i) * 4)) /* 8 of these 3FC0-3FDC*/
29796 +#define IXGBE_RUC 0x040A4
29797 +#define IXGBE_RFC 0x040A8
29798 +#define IXGBE_ROC 0x040AC
29799 +#define IXGBE_RJC 0x040B0
29800 +#define IXGBE_MNGPRC 0x040B4
29801 +#define IXGBE_MNGPDC 0x040B8
29802 +#define IXGBE_MNGPTC 0x0CF90
29803 +#define IXGBE_TORL 0x040C0
29804 +#define IXGBE_TORH 0x040C4
29805 +#define IXGBE_TPR 0x040D0
29806 +#define IXGBE_TPT 0x040D4
29807 +#define IXGBE_PTC64 0x040D8
29808 +#define IXGBE_PTC127 0x040DC
29809 +#define IXGBE_PTC255 0x040E0
29810 +#define IXGBE_PTC511 0x040E4
29811 +#define IXGBE_PTC1023 0x040E8
29812 +#define IXGBE_PTC1522 0x040EC
29813 +#define IXGBE_MPTC 0x040F0
29814 +#define IXGBE_BPTC 0x040F4
29815 +#define IXGBE_XEC 0x04120
29816 +#define IXGBE_SSVPC 0x08780
29818 +#define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4))
29819 +#define IXGBE_TQSMR(_i) (((_i) <= 7) ? (0x07300 + ((_i) * 4)) : \
29820 + (0x08600 + ((_i) * 4)))
29821 +#define IXGBE_TQSM(_i) (0x08600 + ((_i) * 4))
29823 +#define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */
29824 +#define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */
29825 +#define IXGBE_QBRC(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */
29826 +#define IXGBE_QBTC(_i) (0x06034 + ((_i) * 0x40)) /* 16 of these */
29827 +#define IXGBE_QBRC_L(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */
29828 +#define IXGBE_QBRC_H(_i) (0x01038 + ((_i) * 0x40)) /* 16 of these */
29829 +#define IXGBE_QPRDC(_i) (0x01430 + ((_i) * 0x40)) /* 16 of these */
29830 +#define IXGBE_QBTC_L(_i) (0x08700 + ((_i) * 0x8)) /* 16 of these */
29831 +#define IXGBE_QBTC_H(_i) (0x08704 + ((_i) * 0x8)) /* 16 of these */
29832 +#define IXGBE_FCCRC 0x05118 /* Count of Good Eth CRC w/ Bad FC CRC */
29833 +#define IXGBE_FCOERPDC 0x0241C /* FCoE Rx Packets Dropped Count */
29834 +#define IXGBE_FCLAST 0x02424 /* FCoE Last Error Count */
29835 +#define IXGBE_FCOEPRC 0x02428 /* Number of FCoE Packets Received */
29836 +#define IXGBE_FCOEDWRC 0x0242C /* Number of FCoE DWords Received */
29837 +#define IXGBE_FCOEPTC 0x08784 /* Number of FCoE Packets Transmitted */
29838 +#define IXGBE_FCOEDWTC 0x08788 /* Number of FCoE DWords Transmitted */
29839 +#define IXGBE_FCCRC_CNT_MASK 0x0000FFFF /* CRC_CNT: bit 0 - 15 */
29840 +#define IXGBE_FCLAST_CNT_MASK 0x0000FFFF /* Last_CNT: bit 0 - 15 */
29843 +#define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */
29844 +#define IXGBE_MFUTP(_i) (0x05030 + ((_i) * 4)) /* 8 of these (0-7) */
29845 +#define IXGBE_MANC 0x05820
29846 +#define IXGBE_MFVAL 0x05824
29847 +#define IXGBE_MANC2H 0x05860
29848 +#define IXGBE_MDEF(_i) (0x05890 + ((_i) * 4)) /* 8 of these (0-7) */
29849 +#define IXGBE_MIPAF 0x058B0
29850 +#define IXGBE_MMAL(_i) (0x05910 + ((_i) * 8)) /* 4 of these (0-3) */
29851 +#define IXGBE_MMAH(_i) (0x05914 + ((_i) * 8)) /* 4 of these (0-3) */
29852 +#define IXGBE_FTFT 0x09400 /* 0x9400-0x97FC */
29853 +#define IXGBE_METF(_i) (0x05190 + ((_i) * 4)) /* 4 of these (0-3) */
29854 +#define IXGBE_MDEF_EXT(_i) (0x05160 + ((_i) * 4)) /* 8 of these (0-7) */
29855 +#define IXGBE_LSWFW 0x15014
29857 +/* ARC Subsystem registers */
29858 +#define IXGBE_HICR 0x15F00
29859 +#define IXGBE_FWSTS 0x15F0C
29860 +#define IXGBE_HSMC0R 0x15F04
29861 +#define IXGBE_HSMC1R 0x15F08
29862 +#define IXGBE_SWSR 0x15F10
29863 +#define IXGBE_HFDR 0x15FE8
29864 +#define IXGBE_FLEX_MNG 0x15800 /* 0x15800 - 0x15EFC */
29866 +/* PCI-E registers */
29867 +#define IXGBE_GCR 0x11000
29868 +#define IXGBE_GTV 0x11004
29869 +#define IXGBE_FUNCTAG 0x11008
29870 +#define IXGBE_GLT 0x1100C
29871 +#define IXGBE_GSCL_1 0x11010
29872 +#define IXGBE_GSCL_2 0x11014
29873 +#define IXGBE_GSCL_3 0x11018
29874 +#define IXGBE_GSCL_4 0x1101C
29875 +#define IXGBE_GSCN_0 0x11020
29876 +#define IXGBE_GSCN_1 0x11024
29877 +#define IXGBE_GSCN_2 0x11028
29878 +#define IXGBE_GSCN_3 0x1102C
29879 +#define IXGBE_FACTPS 0x10150
29880 +#define IXGBE_PCIEANACTL 0x11040
29881 +#define IXGBE_SWSM 0x10140
29882 +#define IXGBE_FWSM 0x10148
29883 +#define IXGBE_GSSR 0x10160
29884 +#define IXGBE_MREVID 0x11064
29885 +#define IXGBE_DCA_ID 0x11070
29886 +#define IXGBE_DCA_CTRL 0x11074
29887 +#define IXGBE_SWFW_SYNC IXGBE_GSSR
29889 +/* PCI-E registers 82599-Specific */
29890 +#define IXGBE_GCR_EXT 0x11050
29891 +#define IXGBE_GSCL_5_82599 0x11030
29892 +#define IXGBE_GSCL_6_82599 0x11034
29893 +#define IXGBE_GSCL_7_82599 0x11038
29894 +#define IXGBE_GSCL_8_82599 0x1103C
29895 +#define IXGBE_PHYADR_82599 0x11040
29896 +#define IXGBE_PHYDAT_82599 0x11044
29897 +#define IXGBE_PHYCTL_82599 0x11048
29898 +#define IXGBE_PBACLR_82599 0x11068
29899 +#define IXGBE_CIAA_82599 0x11088
29900 +#define IXGBE_CIAD_82599 0x1108C
29901 +#define IXGBE_INTRPT_CSR_82599 0x110B0
29902 +#define IXGBE_INTRPT_MASK_82599 0x110B8
29903 +#define IXGBE_CDQ_MBR_82599 0x110B4
29904 +#define IXGBE_MISC_REG_82599 0x110F0
29905 +#define IXGBE_ECC_CTRL_0_82599 0x11100
29906 +#define IXGBE_ECC_CTRL_1_82599 0x11104
29907 +#define IXGBE_ECC_STATUS_82599 0x110E0
29908 +#define IXGBE_BAR_CTRL_82599 0x110F4
29910 +/* PCI Express Control */
29911 +#define IXGBE_GCR_CMPL_TMOUT_MASK 0x0000F000
29912 +#define IXGBE_GCR_CMPL_TMOUT_10ms 0x00001000
29913 +#define IXGBE_GCR_CMPL_TMOUT_RESEND 0x00010000
29914 +#define IXGBE_GCR_CAP_VER2 0x00040000
29916 +#define IXGBE_GCR_EXT_MSIX_EN 0x80000000
29917 +#define IXGBE_GCR_EXT_VT_MODE_16 0x00000001
29918 +#define IXGBE_GCR_EXT_VT_MODE_32 0x00000002
29919 +#define IXGBE_GCR_EXT_VT_MODE_64 0x00000003
29920 +#define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \
29921 + IXGBE_GCR_EXT_VT_MODE_64)
29922 +/* Time Sync Registers */
29923 +#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */
29924 +#define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */
29925 +#define IXGBE_RXSTMPL 0x051E8 /* Rx timestamp Low - RO */
29926 +#define IXGBE_RXSTMPH 0x051A4 /* Rx timestamp High - RO */
29927 +#define IXGBE_RXSATRL 0x051A0 /* Rx timestamp attribute low - RO */
29928 +#define IXGBE_RXSATRH 0x051A8 /* Rx timestamp attribute high - RO */
29929 +#define IXGBE_RXMTRL 0x05120 /* RX message type register low - RW */
29930 +#define IXGBE_TXSTMPL 0x08C04 /* Tx timestamp value Low - RO */
29931 +#define IXGBE_TXSTMPH 0x08C08 /* Tx timestamp value High - RO */
29932 +#define IXGBE_SYSTIML 0x08C0C /* System time register Low - RO */
29933 +#define IXGBE_SYSTIMH 0x08C10 /* System time register High - RO */
29934 +#define IXGBE_TIMINCA 0x08C14 /* Increment attributes register - RW */
29935 +#define IXGBE_RXUDP 0x08C1C /* Time Sync Rx UDP Port - RW */
29937 +/* Diagnostic Registers */
29938 +#define IXGBE_RDSTATCTL 0x02C20
29939 +#define IXGBE_RDSTAT(_i) (0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */
29940 +#define IXGBE_RDHMPN 0x02F08
29941 +#define IXGBE_RIC_DW(_i) (0x02F10 + ((_i) * 4))
29942 +#define IXGBE_RDPROBE 0x02F20
29943 +#define IXGBE_RDMAM 0x02F30
29944 +#define IXGBE_RDMAD 0x02F34
29945 +#define IXGBE_TDSTATCTL 0x07C20
29946 +#define IXGBE_TDSTAT(_i) (0x07C00 + ((_i) * 4)) /* 0x07C00 - 0x07C1C */
29947 +#define IXGBE_TDHMPN 0x07F08
29948 +#define IXGBE_TDHMPN2 0x082FC
29949 +#define IXGBE_TXDESCIC 0x082CC
29950 +#define IXGBE_TIC_DW(_i) (0x07F10 + ((_i) * 4))
29951 +#define IXGBE_TIC_DW2(_i) (0x082B0 + ((_i) * 4))
29952 +#define IXGBE_TDPROBE 0x07F20
29953 +#define IXGBE_TXBUFCTRL 0x0C600
29954 +#define IXGBE_TXBUFDATA0 0x0C610
29955 +#define IXGBE_TXBUFDATA1 0x0C614
29956 +#define IXGBE_TXBUFDATA2 0x0C618
29957 +#define IXGBE_TXBUFDATA3 0x0C61C
29958 +#define IXGBE_RXBUFCTRL 0x03600
29959 +#define IXGBE_RXBUFDATA0 0x03610
29960 +#define IXGBE_RXBUFDATA1 0x03614
29961 +#define IXGBE_RXBUFDATA2 0x03618
29962 +#define IXGBE_RXBUFDATA3 0x0361C
29963 +#define IXGBE_PCIE_DIAG(_i) (0x11090 + ((_i) * 4)) /* 8 of these */
29964 +#define IXGBE_RFVAL 0x050A4
29965 +#define IXGBE_MDFTC1 0x042B8
29966 +#define IXGBE_MDFTC2 0x042C0
29967 +#define IXGBE_MDFTFIFO1 0x042C4
29968 +#define IXGBE_MDFTFIFO2 0x042C8
29969 +#define IXGBE_MDFTS 0x042CC
29970 +#define IXGBE_RXDATAWRPTR(_i) (0x03700 + ((_i) * 4)) /* 8 of these 3700-370C*/
29971 +#define IXGBE_RXDESCWRPTR(_i) (0x03710 + ((_i) * 4)) /* 8 of these 3710-371C*/
29972 +#define IXGBE_RXDATARDPTR(_i) (0x03720 + ((_i) * 4)) /* 8 of these 3720-372C*/
29973 +#define IXGBE_RXDESCRDPTR(_i) (0x03730 + ((_i) * 4)) /* 8 of these 3730-373C*/
29974 +#define IXGBE_TXDATAWRPTR(_i) (0x0C700 + ((_i) * 4)) /* 8 of these C700-C70C*/
29975 +#define IXGBE_TXDESCWRPTR(_i) (0x0C710 + ((_i) * 4)) /* 8 of these C710-C71C*/
29976 +#define IXGBE_TXDATARDPTR(_i) (0x0C720 + ((_i) * 4)) /* 8 of these C720-C72C*/
29977 +#define IXGBE_TXDESCRDPTR(_i) (0x0C730 + ((_i) * 4)) /* 8 of these C730-C73C*/
29978 +#define IXGBE_PCIEECCCTL 0x1106C
29979 +#define IXGBE_RXWRPTR(_i) (0x03100 + ((_i) * 4)) /* 8 of these 3100-310C*/
29980 +#define IXGBE_RXUSED(_i) (0x03120 + ((_i) * 4)) /* 8 of these 3120-312C*/
29981 +#define IXGBE_RXRDPTR(_i) (0x03140 + ((_i) * 4)) /* 8 of these 3140-314C*/
29982 +#define IXGBE_RXRDWRPTR(_i) (0x03160 + ((_i) * 4)) /* 8 of these 3160-310C*/
29983 +#define IXGBE_TXWRPTR(_i) (0x0C100 + ((_i) * 4)) /* 8 of these C100-C10C*/
29984 +#define IXGBE_TXUSED(_i) (0x0C120 + ((_i) * 4)) /* 8 of these C120-C12C*/
29985 +#define IXGBE_TXRDPTR(_i) (0x0C140 + ((_i) * 4)) /* 8 of these C140-C14C*/
29986 +#define IXGBE_TXRDWRPTR(_i) (0x0C160 + ((_i) * 4)) /* 8 of these C160-C10C*/
29987 +#define IXGBE_PCIEECCCTL0 0x11100
29988 +#define IXGBE_PCIEECCCTL1 0x11104
29989 +#define IXGBE_RXDBUECC 0x03F70
29990 +#define IXGBE_TXDBUECC 0x0CF70
29991 +#define IXGBE_RXDBUEST 0x03F74
29992 +#define IXGBE_TXDBUEST 0x0CF74
29993 +#define IXGBE_PBTXECC 0x0C300
29994 +#define IXGBE_PBRXECC 0x03300
29995 +#define IXGBE_GHECCR 0x110B0
29997 +/* MAC Registers */
29998 +#define IXGBE_PCS1GCFIG 0x04200
29999 +#define IXGBE_PCS1GLCTL 0x04208
30000 +#define IXGBE_PCS1GLSTA 0x0420C
30001 +#define IXGBE_PCS1GDBG0 0x04210
30002 +#define IXGBE_PCS1GDBG1 0x04214
30003 +#define IXGBE_PCS1GANA 0x04218
30004 +#define IXGBE_PCS1GANLP 0x0421C
30005 +#define IXGBE_PCS1GANNP 0x04220
30006 +#define IXGBE_PCS1GANLPNP 0x04224
30007 +#define IXGBE_HLREG0 0x04240
30008 +#define IXGBE_HLREG1 0x04244
30009 +#define IXGBE_PAP 0x04248
30010 +#define IXGBE_MACA 0x0424C
30011 +#define IXGBE_APAE 0x04250
30012 +#define IXGBE_ARD 0x04254
30013 +#define IXGBE_AIS 0x04258
30014 +#define IXGBE_MSCA 0x0425C
30015 +#define IXGBE_MSRWD 0x04260
30016 +#define IXGBE_MLADD 0x04264
30017 +#define IXGBE_MHADD 0x04268
30018 +#define IXGBE_MAXFRS 0x04268
30019 +#define IXGBE_TREG 0x0426C
30020 +#define IXGBE_PCSS1 0x04288
30021 +#define IXGBE_PCSS2 0x0428C
30022 +#define IXGBE_XPCSS 0x04290
30023 +#define IXGBE_MFLCN 0x04294
30024 +#define IXGBE_SERDESC 0x04298
30025 +#define IXGBE_MACS 0x0429C
30026 +#define IXGBE_AUTOC 0x042A0
30027 +#define IXGBE_LINKS 0x042A4
30028 +#define IXGBE_LINKS2 0x04324
30029 +#define IXGBE_AUTOC2 0x042A8
30030 +#define IXGBE_AUTOC3 0x042AC
30031 +#define IXGBE_ANLP1 0x042B0
30032 +#define IXGBE_ANLP2 0x042B4
30033 +#define IXGBE_ATLASCTL 0x04800
30034 +#define IXGBE_MMNGC 0x042D0
30035 +#define IXGBE_ANLPNP1 0x042D4
30036 +#define IXGBE_ANLPNP2 0x042D8
30037 +#define IXGBE_KRPCSFC 0x042E0
30038 +#define IXGBE_KRPCSS 0x042E4
30039 +#define IXGBE_FECS1 0x042E8
30040 +#define IXGBE_FECS2 0x042EC
30041 +#define IXGBE_SMADARCTL 0x14F10
30042 +#define IXGBE_MPVC 0x04318
30043 +#define IXGBE_SGMIIC 0x04314
30045 +/* Copper Pond 2 link timeout */
30046 +#define IXGBE_VALIDATE_LINK_READY_TIMEOUT 50
30048 +/* Omer CORECTL */
30049 +#define IXGBE_CORECTL 0x014F00
30051 +#define IXGBE_BARCTRL 0x110F4
30052 +#define IXGBE_BARCTRL_FLSIZE 0x0700
30053 +#define IXGBE_BARCTRL_FLSIZE_SHIFT 8
30054 +#define IXGBE_BARCTRL_CSRSIZE 0x2000
30056 +/* RSCCTL Bit Masks */
30057 +#define IXGBE_RSCCTL_RSCEN 0x01
30058 +#define IXGBE_RSCCTL_MAXDESC_1 0x00
30059 +#define IXGBE_RSCCTL_MAXDESC_4 0x04
30060 +#define IXGBE_RSCCTL_MAXDESC_8 0x08
30061 +#define IXGBE_RSCCTL_MAXDESC_16 0x0C
30063 +/* RSCDBU Bit Masks */
30064 +#define IXGBE_RSCDBU_RSCSMALDIS_MASK 0x0000007F
30065 +#define IXGBE_RSCDBU_RSCACKDIS 0x00000080
30067 +/* RDRXCTL Bit Masks */
30068 +#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000 /* Rx Desc Min Threshold Size */
30069 +#define IXGBE_RDRXCTL_CRCSTRIP 0x00000002 /* CRC Strip */
30070 +#define IXGBE_RDRXCTL_MVMEN 0x00000020
30071 +#define IXGBE_RDRXCTL_DMAIDONE 0x00000008 /* DMA init cycle done */
30072 +#define IXGBE_RDRXCTL_AGGDIS 0x00010000 /* Aggregation disable */
30073 +#define IXGBE_RDRXCTL_RSCFRSTSIZE 0x003E0000 /* RSC First packet size */
30074 +#define IXGBE_RDRXCTL_RSCLLIDIS 0x00800000 /* Disable RSC compl on LLI */
30075 +#define IXGBE_RDRXCTL_RSCACKC 0x02000000 /* must be set to 1 when RSC enabled */
30076 +#define IXGBE_RDRXCTL_FCOE_WRFIX 0x04000000 /* must be set to 1 when RSC enabled */
30078 +/* RQTC Bit Masks and Shifts */
30079 +#define IXGBE_RQTC_SHIFT_TC(_i) ((_i) * 4)
30080 +#define IXGBE_RQTC_TC0_MASK (0x7 << 0)
30081 +#define IXGBE_RQTC_TC1_MASK (0x7 << 4)
30082 +#define IXGBE_RQTC_TC2_MASK (0x7 << 8)
30083 +#define IXGBE_RQTC_TC3_MASK (0x7 << 12)
30084 +#define IXGBE_RQTC_TC4_MASK (0x7 << 16)
30085 +#define IXGBE_RQTC_TC5_MASK (0x7 << 20)
30086 +#define IXGBE_RQTC_TC6_MASK (0x7 << 24)
30087 +#define IXGBE_RQTC_TC7_MASK (0x7 << 28)
30089 +/* PSRTYPE.RQPL Bit masks and shift */
30090 +#define IXGBE_PSRTYPE_RQPL_MASK 0x7
30091 +#define IXGBE_PSRTYPE_RQPL_SHIFT 29
30093 +/* CTRL Bit Masks */
30094 +#define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Master Disable bit */
30095 +#define IXGBE_CTRL_LNK_RST 0x00000008 /* Link Reset. Resets everything. */
30096 +#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */
30098 +/* FACTPS */
30099 +#define IXGBE_FACTPS_LFS 0x40000000 /* LAN Function Select */
30101 +/* MHADD Bit Masks */
30102 +#define IXGBE_MHADD_MFS_MASK 0xFFFF0000
30103 +#define IXGBE_MHADD_MFS_SHIFT 16
30105 +/* Extended Device Control */
30106 +#define IXGBE_CTRL_EXT_PFRSTD 0x00004000 /* Physical Function Reset Done */
30107 +#define IXGBE_CTRL_EXT_NS_DIS 0x00010000 /* No Snoop disable */
30108 +#define IXGBE_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
30109 +#define IXGBE_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
30111 +/* Direct Cache Access (DCA) definitions */
30112 +#define IXGBE_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */
30113 +#define IXGBE_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */
30115 +#define IXGBE_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */
30116 +#define IXGBE_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */
30118 +#define IXGBE_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
30119 +#define IXGBE_DCA_RXCTRL_CPUID_MASK_82599 0xFF000000 /* Rx CPUID Mask */
30120 +#define IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599 24 /* Rx CPUID Shift */
30121 +#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
30122 +#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
30123 +#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
30124 +#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */
30125 +#define IXGBE_DCA_RXCTRL_DESC_WRO_EN (1 << 13) /* DCA Rx wr Desc Relax Order */
30126 +#define IXGBE_DCA_RXCTRL_DESC_HSRO_EN (1 << 15) /* DCA Rx Split Header RO */
30128 +#define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
30129 +#define IXGBE_DCA_TXCTRL_CPUID_MASK_82599 0xFF000000 /* Tx CPUID Mask */
30130 +#define IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599 24 /* Tx CPUID Shift */
30131 +#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
30132 +#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
30133 +#define IXGBE_DCA_MAX_QUEUES_82598 16 /* DCA regs only on 16 queues */
30135 +/* MSCA Bit Masks */
30136 +#define IXGBE_MSCA_NP_ADDR_MASK 0x0000FFFF /* MDI Address (new protocol) */
30137 +#define IXGBE_MSCA_NP_ADDR_SHIFT 0
30138 +#define IXGBE_MSCA_DEV_TYPE_MASK 0x001F0000 /* Device Type (new protocol) */
30139 +#define IXGBE_MSCA_DEV_TYPE_SHIFT 16 /* Register Address (old protocol) */
30140 +#define IXGBE_MSCA_PHY_ADDR_MASK 0x03E00000 /* PHY Address mask */
30141 +#define IXGBE_MSCA_PHY_ADDR_SHIFT 21 /* PHY Address shift*/
30142 +#define IXGBE_MSCA_OP_CODE_MASK 0x0C000000 /* OP CODE mask */
30143 +#define IXGBE_MSCA_OP_CODE_SHIFT 26 /* OP CODE shift */
30144 +#define IXGBE_MSCA_ADDR_CYCLE 0x00000000 /* OP CODE 00 (addr cycle) */
30145 +#define IXGBE_MSCA_WRITE 0x04000000 /* OP CODE 01 (write) */
30146 +#define IXGBE_MSCA_READ 0x08000000 /* OP CODE 10 (read) */
30147 +#define IXGBE_MSCA_READ_AUTOINC 0x0C000000 /* OP CODE 11 (read, auto inc)*/
30148 +#define IXGBE_MSCA_ST_CODE_MASK 0x30000000 /* ST Code mask */
30149 +#define IXGBE_MSCA_ST_CODE_SHIFT 28 /* ST Code shift */
30150 +#define IXGBE_MSCA_NEW_PROTOCOL 0x00000000 /* ST CODE 00 (new protocol) */
30151 +#define IXGBE_MSCA_OLD_PROTOCOL 0x10000000 /* ST CODE 01 (old protocol) */
30152 +#define IXGBE_MSCA_MDI_COMMAND 0x40000000 /* Initiate MDI command */
30153 +#define IXGBE_MSCA_MDI_IN_PROG_EN 0x80000000 /* MDI in progress enable */
30155 +/* MSRWD bit masks */
30156 +#define IXGBE_MSRWD_WRITE_DATA_MASK 0x0000FFFF
30157 +#define IXGBE_MSRWD_WRITE_DATA_SHIFT 0
30158 +#define IXGBE_MSRWD_READ_DATA_MASK 0xFFFF0000
30159 +#define IXGBE_MSRWD_READ_DATA_SHIFT 16
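/*
 * A minimal sketch of how the MSCA/MSRWD fields above combine for a
 * clause-45 ("new protocol") MDIO register read, patterned on the
 * driver's generic PHY read path. IXGBE_WRITE_REG()/IXGBE_READ_REG()
 * are the driver's CSR accessors and hw->phy.addr the probed PHY
 * address; the completion polls on IXGBE_MSCA_MDI_COMMAND are elided.
 */
static u16 mdio_read_sketch(struct ixgbe_hw *hw, u32 reg_addr, u32 dev_type)
{
	u32 command;

	/* Address cycle (OP CODE 00): latch the 16-bit register address */
	command = (reg_addr & IXGBE_MSCA_NP_ADDR_MASK) |
		  (dev_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
		  (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
		  IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_NEW_PROTOCOL |
		  IXGBE_MSCA_MDI_COMMAND;
	IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
	/* ... poll until IXGBE_MSCA_MDI_COMMAND clears ... */

	/* Read cycle (OP CODE 10): same addressing, then fetch the data */
	command = (reg_addr & IXGBE_MSCA_NP_ADDR_MASK) |
		  (dev_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
		  (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
		  IXGBE_MSCA_READ | IXGBE_MSCA_NEW_PROTOCOL |
		  IXGBE_MSCA_MDI_COMMAND;
	IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
	/* ... poll again; the result lands in MSRWD[31:16] ... */

	return (u16)(IXGBE_READ_REG(hw, IXGBE_MSRWD) >>
		     IXGBE_MSRWD_READ_DATA_SHIFT);
}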
30161 +/* Atlas registers */
30162 +#define IXGBE_ATLAS_PDN_LPBK 0x24
30163 +#define IXGBE_ATLAS_PDN_10G 0xB
30164 +#define IXGBE_ATLAS_PDN_1G 0xC
30165 +#define IXGBE_ATLAS_PDN_AN 0xD
30167 +/* Atlas bit masks */
30168 +#define IXGBE_ATLASCTL_WRITE_CMD 0x00010000
30169 +#define IXGBE_ATLAS_PDN_TX_REG_EN 0x10
30170 +#define IXGBE_ATLAS_PDN_TX_10G_QL_ALL 0xF0
30171 +#define IXGBE_ATLAS_PDN_TX_1G_QL_ALL 0xF0
30172 +#define IXGBE_ATLAS_PDN_TX_AN_QL_ALL 0xF0
30174 +/* Omer bit masks */
30175 +#define IXGBE_CORECTL_WRITE_CMD 0x00010000
30177 +/* Device Type definitions for new protocol MDIO commands */
30178 +#define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1
30179 +#define IXGBE_MDIO_PCS_DEV_TYPE 0x3
30180 +#define IXGBE_MDIO_PHY_XS_DEV_TYPE 0x4
30181 +#define IXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7
30182 +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Device 30 */
30183 +#define IXGBE_TWINAX_DEV 1
30185 +#define IXGBE_MDIO_COMMAND_TIMEOUT 100 /* PHY Timeout for 1 GB mode */
30187 +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL 0x0 /* VS1 Control Reg */
30188 +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS 0x1 /* VS1 Status Reg */
30189 +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS 0x0008 /* 1 = Link Up */
30190 +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS 0x0010 /* 0 - 10G, 1 - 1G */
30191 +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_10G_SPEED 0x0018
30192 +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_1G_SPEED 0x0010
30194 +#define IXGBE_MDIO_AUTO_NEG_CONTROL 0x0 /* AUTO_NEG Control Reg */
30195 +#define IXGBE_MDIO_AUTO_NEG_STATUS 0x1 /* AUTO_NEG Status Reg */
30196 +#define IXGBE_MDIO_AUTO_NEG_ADVT 0x10 /* AUTO_NEG Advt Reg */
30197 +#define IXGBE_MDIO_AUTO_NEG_LP 0x13 /* AUTO_NEG LP Status Reg */
30198 +#define IXGBE_MDIO_PHY_XS_CONTROL 0x0 /* PHY_XS Control Reg */
30199 +#define IXGBE_MDIO_PHY_XS_RESET 0x8000 /* PHY_XS Reset */
30200 +#define IXGBE_MDIO_PHY_ID_HIGH 0x2 /* PHY ID High Reg*/
30201 +#define IXGBE_MDIO_PHY_ID_LOW 0x3 /* PHY ID Low Reg*/
30202 +#define IXGBE_MDIO_PHY_SPEED_ABILITY 0x4 /* Speed Ability Reg */
30203 +#define IXGBE_MDIO_PHY_SPEED_10G 0x0001 /* 10G capable */
30204 +#define IXGBE_MDIO_PHY_SPEED_1G 0x0010 /* 1G capable */
30205 +#define IXGBE_MDIO_PHY_SPEED_100M 0x0020 /* 100M capable */
30206 +#define IXGBE_MDIO_PHY_EXT_ABILITY 0xB /* Ext Ability Reg */
30207 +#define IXGBE_MDIO_PHY_10GBASET_ABILITY 0x0004 /* 10GBaseT capable */
30208 +#define IXGBE_MDIO_PHY_1000BASET_ABILITY 0x0020 /* 1000BaseT capable */
30209 +#define IXGBE_MDIO_PHY_100BASETX_ABILITY 0x0080 /* 100BaseTX capable */
30210 +#define IXGBE_MDIO_PHY_SET_LOW_POWER_MODE 0x0800 /* Set low power mode */
30212 +#define IXGBE_MDIO_PMA_PMD_CONTROL_ADDR 0x0000 /* PMA/PMD Control Reg */
30213 +#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */
30214 +#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */
30215 +#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */
30217 +/* MII clause 22/28 definitions */
30218 +#define IXGBE_MDIO_PHY_LOW_POWER_MODE 0x0800
30220 +#define IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG 0x20 /* 10G Control Reg */
30221 +#define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */
30222 +#define IXGBE_MII_AUTONEG_XNP_TX_REG 0x17 /* 1G XNP Transmit */
30223 +#define IXGBE_MII_AUTONEG_ADVERTISE_REG 0x10 /* 100M Advertisement */
30224 +#define IXGBE_MII_10GBASE_T_ADVERTISE 0x1000 /* full duplex, bit:12*/
30225 +#define IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX 0x4000 /* full duplex, bit:14*/
30226 +#define IXGBE_MII_1GBASE_T_ADVERTISE 0x8000 /* full duplex, bit:15*/
30227 +#define IXGBE_MII_100BASE_T_ADVERTISE 0x0100 /* full duplex, bit:8 */
30228 +#define IXGBE_MII_RESTART 0x200
30229 +#define IXGBE_MII_AUTONEG_COMPLETE 0x20
30230 +#define IXGBE_MII_AUTONEG_LINK_UP 0x04
30231 +#define IXGBE_MII_AUTONEG_REG 0x0
30233 +#define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0
30234 +#define IXGBE_MAX_PHY_ADDR 32
30236 +/* PHY IDs */
30237 +#define TN1010_PHY_ID 0x00A19410
30238 +#define TNX_FW_REV 0xB
30239 +#define AQ1002_PHY_ID 0x03A1B420
30240 +#define AQ_FW_REV 0x20
30241 +#define QT2022_PHY_ID 0x0043A400
30242 +#define ATH_PHY_ID 0x03429050
30244 +/* PHY Types */
30245 +#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0
30247 +/* Special PHY Init Routine */
30248 +#define IXGBE_PHY_INIT_OFFSET_NL 0x002B
30249 +#define IXGBE_PHY_INIT_END_NL 0xFFFF
30250 +#define IXGBE_CONTROL_MASK_NL 0xF000
30251 +#define IXGBE_DATA_MASK_NL 0x0FFF
30252 +#define IXGBE_CONTROL_SHIFT_NL 12
30253 +#define IXGBE_DELAY_NL 0
30254 +#define IXGBE_DATA_NL 1
30255 +#define IXGBE_CONTROL_NL 0x000F
30256 +#define IXGBE_CONTROL_EOL_NL 0x0FFF
30257 +#define IXGBE_CONTROL_SOL_NL 0x0000
30259 +/* General purpose Interrupt Enable */
30260 +#define IXGBE_SDP0_GPIEN 0x00000001 /* SDP0 */
30261 +#define IXGBE_SDP1_GPIEN 0x00000002 /* SDP1 */
30262 +#define IXGBE_SDP2_GPIEN 0x00000004 /* SDP2 */
30263 +#define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */
30264 +#define IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */
30265 +#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */
30266 +#define IXGBE_GPIE_EIAME 0x40000000
30267 +#define IXGBE_GPIE_PBA_SUPPORT 0x80000000
30268 +#define IXGBE_GPIE_RSC_DELAY_SHIFT 11
30269 +#define IXGBE_GPIE_VTMODE_MASK 0x0000C000 /* VT Mode Mask */
30270 +#define IXGBE_GPIE_VTMODE_16 0x00004000 /* 16 VFs 8 queues per VF */
30271 +#define IXGBE_GPIE_VTMODE_32 0x00008000 /* 32 VFs 4 queues per VF */
30272 +#define IXGBE_GPIE_VTMODE_64 0x0000C000 /* 64 VFs 2 queues per VF */
30274 +/* Transmit Flow Control status */
30275 +#define IXGBE_TFCS_TXOFF 0x00000001
30276 +#define IXGBE_TFCS_TXOFF0 0x00000100
30277 +#define IXGBE_TFCS_TXOFF1 0x00000200
30278 +#define IXGBE_TFCS_TXOFF2 0x00000400
30279 +#define IXGBE_TFCS_TXOFF3 0x00000800
30280 +#define IXGBE_TFCS_TXOFF4 0x00001000
30281 +#define IXGBE_TFCS_TXOFF5 0x00002000
30282 +#define IXGBE_TFCS_TXOFF6 0x00004000
30283 +#define IXGBE_TFCS_TXOFF7 0x00008000
30285 +/* TCP Timer */
30286 +#define IXGBE_TCPTIMER_KS 0x00000100
30287 +#define IXGBE_TCPTIMER_COUNT_ENABLE 0x00000200
30288 +#define IXGBE_TCPTIMER_COUNT_FINISH 0x00000400
30289 +#define IXGBE_TCPTIMER_LOOP 0x00000800
30290 +#define IXGBE_TCPTIMER_DURATION_MASK 0x000000FF
30292 +/* HLREG0 Bit Masks */
30293 +#define IXGBE_HLREG0_TXCRCEN 0x00000001 /* bit 0 */
30294 +#define IXGBE_HLREG0_RXCRCSTRP 0x00000002 /* bit 1 */
30295 +#define IXGBE_HLREG0_JUMBOEN 0x00000004 /* bit 2 */
30296 +#define IXGBE_HLREG0_TXPADEN 0x00000400 /* bit 10 */
30297 +#define IXGBE_HLREG0_TXPAUSEEN 0x00001000 /* bit 12 */
30298 +#define IXGBE_HLREG0_RXPAUSEEN 0x00004000 /* bit 14 */
30299 +#define IXGBE_HLREG0_LPBK 0x00008000 /* bit 15 */
30300 +#define IXGBE_HLREG0_MDCSPD 0x00010000 /* bit 16 */
30301 +#define IXGBE_HLREG0_CONTMDC 0x00020000 /* bit 17 */
30302 +#define IXGBE_HLREG0_CTRLFLTR 0x00040000 /* bit 18 */
30303 +#define IXGBE_HLREG0_PREPEND 0x00F00000 /* bits 20-23 */
30304 +#define IXGBE_HLREG0_PRIPAUSEEN 0x01000000 /* bit 24 */
30305 +#define IXGBE_HLREG0_RXPAUSERECDA 0x06000000 /* bits 25-26 */
30306 +#define IXGBE_HLREG0_RXLNGTHERREN 0x08000000 /* bit 27 */
30307 +#define IXGBE_HLREG0_RXPADSTRIPEN 0x10000000 /* bit 28 */
30309 +/* VMD_CTL bitmasks */
30310 +#define IXGBE_VMD_CTL_VMDQ_EN 0x00000001
30311 +#define IXGBE_VMD_CTL_VMDQ_FILTER 0x00000002
30313 +/* VT_CTL bitmasks */
30314 +#define IXGBE_VT_CTL_DIS_DEFPL 0x20000000 /* disable default pool */
30315 +#define IXGBE_VT_CTL_REPLEN 0x40000000 /* replication enabled */
30316 +#define IXGBE_VT_CTL_VT_ENABLE 0x00000001 /* Enable VT Mode */
30317 +#define IXGBE_VT_CTL_POOL_SHIFT 7
30318 +#define IXGBE_VT_CTL_POOL_MASK (0x3F << IXGBE_VT_CTL_POOL_SHIFT)
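/*
 * A sketch of how the VT_CTL fields are typically combined when the
 * virtualization pools are brought up (assuming the IXGBE_VT_CTL
 * register offset defined earlier in this header): enable VT mode and
 * replication, and select the PF's default pool.
 */
static void vt_ctl_sketch(struct ixgbe_hw *hw, u32 default_pool)
{
	u32 vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;

	vt_ctl |= (default_pool << IXGBE_VT_CTL_POOL_SHIFT) &
		  IXGBE_VT_CTL_POOL_MASK;
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
}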
30320 +/* VMOLR bitmasks */
30321 +#define IXGBE_VMOLR_AUPE 0x01000000 /* accept untagged packets */
30322 +#define IXGBE_VMOLR_ROMPE 0x02000000 /* accept packets in MTA tbl */
30323 +#define IXGBE_VMOLR_ROPE 0x04000000 /* accept packets in UC tbl */
30324 +#define IXGBE_VMOLR_BAM 0x08000000 /* accept broadcast packets */
30325 +#define IXGBE_VMOLR_MPE 0x10000000 /* multicast promiscuous */
30327 +/* VFRE bitmask */
30328 +#define IXGBE_VFRE_ENABLE_ALL 0xFFFFFFFF
30330 +#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
30332 +/* RDHMPN and TDHMPN bitmasks */
30333 +#define IXGBE_RDHMPN_RDICADDR 0x007FF800
30334 +#define IXGBE_RDHMPN_RDICRDREQ 0x00800000
30335 +#define IXGBE_RDHMPN_RDICADDR_SHIFT 11
30336 +#define IXGBE_TDHMPN_TDICADDR 0x003FF800
30337 +#define IXGBE_TDHMPN_TDICRDREQ 0x00800000
30338 +#define IXGBE_TDHMPN_TDICADDR_SHIFT 11
30340 +#define IXGBE_RDMAM_MEM_SEL_SHIFT 13
30341 +#define IXGBE_RDMAM_DWORD_SHIFT 9
30342 +#define IXGBE_RDMAM_DESC_COMP_FIFO 1
30343 +#define IXGBE_RDMAM_DFC_CMD_FIFO 2
30344 +#define IXGBE_RDMAM_RSC_HEADER_ADDR 3
30345 +#define IXGBE_RDMAM_TCN_STATUS_RAM 4
30346 +#define IXGBE_RDMAM_WB_COLL_FIFO 5
30347 +#define IXGBE_RDMAM_QSC_CNT_RAM 6
30348 +#define IXGBE_RDMAM_QSC_FCOE_RAM 7
30349 +#define IXGBE_RDMAM_QSC_QUEUE_CNT 8
30350 +#define IXGBE_RDMAM_QSC_QUEUE_RAM 0xA
30351 +#define IXGBE_RDMAM_QSC_RSC_RAM 0xB
30352 +#define IXGBE_RDMAM_DESC_COM_FIFO_RANGE 135
30353 +#define IXGBE_RDMAM_DESC_COM_FIFO_COUNT 4
30354 +#define IXGBE_RDMAM_DFC_CMD_FIFO_RANGE 48
30355 +#define IXGBE_RDMAM_DFC_CMD_FIFO_COUNT 7
30356 +#define IXGBE_RDMAM_RSC_HEADER_ADDR_RANGE 32
30357 +#define IXGBE_RDMAM_RSC_HEADER_ADDR_COUNT 4
30358 +#define IXGBE_RDMAM_TCN_STATUS_RAM_RANGE 256
30359 +#define IXGBE_RDMAM_TCN_STATUS_RAM_COUNT 9
30360 +#define IXGBE_RDMAM_WB_COLL_FIFO_RANGE 8
30361 +#define IXGBE_RDMAM_WB_COLL_FIFO_COUNT 4
30362 +#define IXGBE_RDMAM_QSC_CNT_RAM_RANGE 64
30363 +#define IXGBE_RDMAM_QSC_CNT_RAM_COUNT 4
30364 +#define IXGBE_RDMAM_QSC_FCOE_RAM_RANGE 512
30365 +#define IXGBE_RDMAM_QSC_FCOE_RAM_COUNT 5
30366 +#define IXGBE_RDMAM_QSC_QUEUE_CNT_RANGE 32
30367 +#define IXGBE_RDMAM_QSC_QUEUE_CNT_COUNT 4
30368 +#define IXGBE_RDMAM_QSC_QUEUE_RAM_RANGE 128
30369 +#define IXGBE_RDMAM_QSC_QUEUE_RAM_COUNT 8
30370 +#define IXGBE_RDMAM_QSC_RSC_RAM_RANGE 32
30371 +#define IXGBE_RDMAM_QSC_RSC_RAM_COUNT 8
30373 +#define IXGBE_TXDESCIC_READY 0x80000000
30375 +/* Receive Checksum Control */
30376 +#define IXGBE_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */
30377 +#define IXGBE_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
30379 +/* FCRTL Bit Masks */
30380 +#define IXGBE_FCRTL_XONE 0x80000000 /* XON enable */
30381 +#define IXGBE_FCRTH_FCEN 0x80000000 /* Packet buffer fc enable */
30383 +/* PAP bit masks*/
30384 +#define IXGBE_PAP_TXPAUSECNT_MASK 0x0000FFFF /* Pause counter mask */
30386 +/* RMCS Bit Masks */
30387 +#define IXGBE_RMCS_RRM 0x00000002 /* Receive Recycle Mode enable */
30388 +/* Receive Arbitration Control: 0 Round Robin, 1 DFP */
30389 +#define IXGBE_RMCS_RAC 0x00000004
30390 +#define IXGBE_RMCS_DFP IXGBE_RMCS_RAC /* Deficit Fixed Priority ena */
30391 +#define IXGBE_RMCS_TFCE_802_3X 0x00000008 /* Tx Priority FC ena */
30392 +#define IXGBE_RMCS_TFCE_PRIORITY 0x00000010 /* Tx Priority FC ena */
30393 +#define IXGBE_RMCS_ARBDIS 0x00000040 /* Arbitration disable bit */
30395 +/* FCCFG Bit Masks */
30396 +#define IXGBE_FCCFG_TFCE_802_3X 0x00000008 /* Tx link FC enable */
30397 +#define IXGBE_FCCFG_TFCE_PRIORITY 0x00000010 /* Tx priority FC enable */
30399 +/* Interrupt register bitmasks */
30401 +/* Extended Interrupt Cause Read */
30402 +#define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */
30403 +#define IXGBE_EICR_FLOW_DIR 0x00010000 /* FDir Exception */
30404 +#define IXGBE_EICR_RX_MISS 0x00020000 /* Packet Buffer Overrun */
30405 +#define IXGBE_EICR_PCI 0x00040000 /* PCI Exception */
30406 +#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */
30407 +#define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */
30408 +#define IXGBE_EICR_LINKSEC 0x00200000 /* PN Threshold */
30409 +#define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */
30410 +#define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */
30411 +#define IXGBE_EICR_GPI_SDP1 0x02000000 /* Gen Purpose Interrupt on SDP1 */
30412 +#define IXGBE_EICR_GPI_SDP2 0x04000000 /* Gen Purpose Interrupt on SDP2 */
30413 +#define IXGBE_EICR_ECC 0x10000000 /* ECC Error */
30414 +#define IXGBE_EICR_PBUR 0x10000000 /* Packet Buffer Handler Error */
30415 +#define IXGBE_EICR_DHER 0x20000000 /* Descriptor Handler Error */
30416 +#define IXGBE_EICR_TCP_TIMER 0x40000000 /* TCP Timer */
30417 +#define IXGBE_EICR_OTHER 0x80000000 /* Interrupt Cause Active */
30419 +/* Extended Interrupt Cause Set */
30420 +#define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
30421 +#define IXGBE_EICS_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */
30422 +#define IXGBE_EICS_RX_MISS IXGBE_EICR_RX_MISS /* Pkt Buffer Overrun */
30423 +#define IXGBE_EICS_PCI IXGBE_EICR_PCI /* PCI Exception */
30424 +#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
30425 +#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */
30426 +#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
30427 +#define IXGBE_EICS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
30428 +#define IXGBE_EICS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
30429 +#define IXGBE_EICS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
30430 +#define IXGBE_EICS_ECC IXGBE_EICR_ECC /* ECC Error */
30431 +#define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
30432 +#define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */
30433 +#define IXGBE_EICS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
30434 +#define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
30436 +/* Extended Interrupt Mask Set */
30437 +#define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
30438 +#define IXGBE_EIMS_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */
30439 +#define IXGBE_EIMS_RX_MISS IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */
30440 +#define IXGBE_EIMS_PCI IXGBE_EICR_PCI /* PCI Exception */
30441 +#define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
30442 +#define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */
30443 +#define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
30444 +#define IXGBE_EIMS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
30445 +#define IXGBE_EIMS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
30446 +#define IXGBE_EIMS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
30447 +#define IXGBE_EIMS_ECC IXGBE_EICR_ECC /* ECC Error */
30448 +#define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
30449 +#define IXGBE_EIMS_DHER IXGBE_EICR_DHER /* Descr Handler Error */
30450 +#define IXGBE_EIMS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
30451 +#define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
30453 +/* Extended Interrupt Mask Clear */
30454 +#define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
30455 +#define IXGBE_EIMC_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */
30456 +#define IXGBE_EIMC_RX_MISS IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */
30457 +#define IXGBE_EIMC_PCI IXGBE_EICR_PCI /* PCI Exception */
30458 +#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
30459 +#define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */
30460 +#define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
30461 +#define IXGBE_EIMC_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
30462 +#define IXGBE_EIMC_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
30463 +#define IXGBE_EIMC_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
30464 +#define IXGBE_EIMC_ECC IXGBE_EICR_ECC /* ECC Error */
30465 +#define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
30466 +#define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Err */
30467 +#define IXGBE_EIMC_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
30468 +#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
30470 +#define IXGBE_EIMS_ENABLE_MASK ( \
30471 + IXGBE_EIMS_RTX_QUEUE | \
30472 + IXGBE_EIMS_LSC | \
30473 + IXGBE_EIMS_TCP_TIMER | \
30474 + IXGBE_EIMS_OTHER)
30476 +/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
30477 +#define IXGBE_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */
30478 +#define IXGBE_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */
30479 +#define IXGBE_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */
30480 +#define IXGBE_IMIREXT_CTRL_URG 0x00002000 /* Check URG bit in header */
30481 +#define IXGBE_IMIREXT_CTRL_ACK 0x00004000 /* Check ACK bit in header */
30482 +#define IXGBE_IMIREXT_CTRL_PSH 0x00008000 /* Check PSH bit in header */
30483 +#define IXGBE_IMIREXT_CTRL_RST 0x00010000 /* Check RST bit in header */
30484 +#define IXGBE_IMIREXT_CTRL_SYN 0x00020000 /* Check SYN bit in header */
30485 +#define IXGBE_IMIREXT_CTRL_FIN 0x00040000 /* Check FIN bit in header */
30486 +#define IXGBE_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of control bits */
30487 +#define IXGBE_IMIR_SIZE_BP_82599 0x00001000 /* Packet size bypass */
30488 +#define IXGBE_IMIR_CTRL_URG_82599 0x00002000 /* Check URG bit in header */
30489 +#define IXGBE_IMIR_CTRL_ACK_82599 0x00004000 /* Check ACK bit in header */
30490 +#define IXGBE_IMIR_CTRL_PSH_82599 0x00008000 /* Check PSH bit in header */
30491 +#define IXGBE_IMIR_CTRL_RST_82599 0x00010000 /* Check RST bit in header */
30492 +#define IXGBE_IMIR_CTRL_SYN_82599 0x00020000 /* Check SYN bit in header */
30493 +#define IXGBE_IMIR_CTRL_FIN_82599 0x00040000 /* Check FIN bit in header */
30494 +#define IXGBE_IMIR_CTRL_BP_82599 0x00080000 /* Bypass check of control bits */
30495 +#define IXGBE_IMIR_LLI_EN_82599 0x00100000 /* Enables low latency Int */
30496 +#define IXGBE_IMIR_RX_QUEUE_MASK_82599 0x0000007F /* Rx Queue Mask */
30497 +#define IXGBE_IMIR_RX_QUEUE_SHIFT_82599 21 /* Rx Queue Shift */
30498 +#define IXGBE_IMIRVP_PRIORITY_MASK 0x00000007 /* VLAN priority mask */
30499 +#define IXGBE_IMIRVP_PRIORITY_EN 0x00000008 /* VLAN priority enable */
30501 +#define IXGBE_MAX_FTQF_FILTERS 128
30502 +#define IXGBE_FTQF_PROTOCOL_MASK 0x00000003
30503 +#define IXGBE_FTQF_PROTOCOL_TCP 0x00000000
30504 +#define IXGBE_FTQF_PROTOCOL_UDP 0x00000001
30505 +#define IXGBE_FTQF_PROTOCOL_SCTP 2
30506 +#define IXGBE_FTQF_PRIORITY_MASK 0x00000007
30507 +#define IXGBE_FTQF_PRIORITY_SHIFT 2
30508 +#define IXGBE_FTQF_POOL_MASK 0x0000003F
30509 +#define IXGBE_FTQF_POOL_SHIFT 8
30510 +#define IXGBE_FTQF_5TUPLE_MASK_MASK 0x0000001F
30511 +#define IXGBE_FTQF_5TUPLE_MASK_SHIFT 25
30512 +#define IXGBE_FTQF_SOURCE_ADDR_MASK 0x1E
30513 +#define IXGBE_FTQF_DEST_ADDR_MASK 0x1D
30514 +#define IXGBE_FTQF_SOURCE_PORT_MASK 0x1B
30515 +#define IXGBE_FTQF_DEST_PORT_MASK 0x17
30516 +#define IXGBE_FTQF_PROTOCOL_COMP_MASK 0x0F
30517 +#define IXGBE_FTQF_POOL_MASK_EN 0x40000000
30518 +#define IXGBE_FTQF_QUEUE_ENABLE 0x80000000
30520 +/* Interrupt clear mask */
30521 +#define IXGBE_IRQ_CLEAR_MASK 0xFFFFFFFF
30523 +/* Interrupt Vector Allocation Registers */
30524 +#define IXGBE_IVAR_REG_NUM 25
30525 +#define IXGBE_IVAR_REG_NUM_82599 64
30526 +#define IXGBE_IVAR_TXRX_ENTRY 96
30527 +#define IXGBE_IVAR_RX_ENTRY 64
30528 +#define IXGBE_IVAR_RX_QUEUE(_i) (0 + (_i))
30529 +#define IXGBE_IVAR_TX_QUEUE(_i) (64 + (_i))
30530 +#define IXGBE_IVAR_TX_ENTRY 32
30532 +#define IXGBE_IVAR_TCP_TIMER_INDEX 96 /* 0 based index */
30533 +#define IXGBE_IVAR_OTHER_CAUSES_INDEX 97 /* 0 based index */
30535 +#define IXGBE_MSIX_VECTOR(_i) (0 + (_i))
30537 +#define IXGBE_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */
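/*
 * A sketch of IVAR programming in the style of the driver's set-IVAR
 * helpers: each 32-bit IVAR register carries four 8-bit entries, and
 * an entry takes effect only once IXGBE_IVAR_ALLOC_VAL is set.
 * Assumes the IXGBE_IVAR(_i) register array defined earlier in this
 * header.
 */
static void set_ivar_sketch(struct ixgbe_hw *hw, u32 int_alloc_entry,
			    u8 msix_vector)
{
	u32 index = (int_alloc_entry >> 2) & 0x1F;	/* IVAR register */
	u32 shift = (int_alloc_entry & 0x3) * 8;	/* byte lane */
	u32 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));

	ivar &= ~((u32)0xFF << shift);
	ivar |= ((u32)(msix_vector | IXGBE_IVAR_ALLOC_VAL)) << shift;
	IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
}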
30539 +/* ETYPE Queue Filter/Select Bit Masks */
30540 +#define IXGBE_MAX_ETQF_FILTERS 8
30541 +#define IXGBE_ETQF_FCOE 0x08000000 /* bit 27 */
30542 +#define IXGBE_ETQF_BCN 0x10000000 /* bit 28 */
30543 +#define IXGBE_ETQF_1588 0x40000000 /* bit 30 */
30544 +#define IXGBE_ETQF_FILTER_EN 0x80000000 /* bit 31 */
30545 +#define IXGBE_ETQF_POOL_ENABLE (1 << 26) /* bit 26 */
30547 +#define IXGBE_ETQS_RX_QUEUE 0x007F0000 /* bits 22:16 */
30548 +#define IXGBE_ETQS_RX_QUEUE_SHIFT 16
30549 +#define IXGBE_ETQS_LLI 0x20000000 /* bit 29 */
30550 +#define IXGBE_ETQS_QUEUE_EN 0x80000000 /* bit 31 */
30552 +/*
30553 + * ETQF filter list: one static filter per filter consumer. This is
30554 + * to avoid filter collisions later. Add new filters
30555 + * here!!
30556 + *
30557 + * Current filters:
30558 + * EAPOL 802.1x (0x888e): Filter 0
30559 + * FCoE (0x8906): Filter 2
30560 + * 1588 (0x88f7): Filter 3
30561 + * FIP (0x8914): Filter 4
30562 + */
30563 +#define IXGBE_ETQF_FILTER_EAPOL 0
30564 +#define IXGBE_ETQF_FILTER_FCOE 2
30565 +#define IXGBE_ETQF_FILTER_1588 3
30566 +#define IXGBE_ETQF_FILTER_FIP 4
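/*
 * A sketch of how one of these static filter slots is programmed,
 * using the 1588 slot as the example (assuming the IXGBE_ETQF(_i) and
 * IXGBE_ETQS(_i) register arrays defined earlier in this header):
 * match EtherType 0x88F7, mark it as 1588 traffic, and steer matches
 * to a fixed Rx queue.
 */
static void etqf_1588_sketch(struct ixgbe_hw *hw, u32 rx_queue)
{
	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588),
			0x88F7 | IXGBE_ETQF_1588 | IXGBE_ETQF_FILTER_EN);
	IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_1588),
			(rx_queue << IXGBE_ETQS_RX_QUEUE_SHIFT) |
			IXGBE_ETQS_QUEUE_EN);
}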
30567 +/* VLAN Control Bit Masks */
30568 +#define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */
30569 +#define IXGBE_VLNCTRL_CFI 0x10000000 /* bit 28 */
30570 +#define IXGBE_VLNCTRL_CFIEN 0x20000000 /* bit 29 */
30571 +#define IXGBE_VLNCTRL_VFE 0x40000000 /* bit 30 */
30572 +#define IXGBE_VLNCTRL_VME 0x80000000 /* bit 31 */
30574 +/* VLAN pool filtering masks */
30575 +#define IXGBE_VLVF_VIEN 0x80000000 /* filter is valid */
30576 +#define IXGBE_VLVF_ENTRIES 64
30577 +#define IXGBE_VLVF_VLANID_MASK 0x00000FFF
30578 +/* Per VF Port VLAN insertion rules */
30579 +#define IXGBE_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */
30580 +#define IXGBE_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */
30582 +#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */
30584 +/* STATUS Bit Masks */
30585 +#define IXGBE_STATUS_LAN_ID 0x0000000C /* LAN ID */
30586 +#define IXGBE_STATUS_LAN_ID_SHIFT 2 /* LAN ID Shift*/
30587 +#define IXGBE_STATUS_GIO 0x00080000 /* GIO Master Enable Status */
30589 +#define IXGBE_STATUS_LAN_ID_0 0x00000000 /* LAN ID 0 */
30590 +#define IXGBE_STATUS_LAN_ID_1 0x00000004 /* LAN ID 1 */
30592 +/* ESDP Bit Masks */
30593 +#define IXGBE_ESDP_SDP0 0x00000001 /* SDP0 Data Value */
30594 +#define IXGBE_ESDP_SDP1 0x00000002 /* SDP1 Data Value */
30595 +#define IXGBE_ESDP_SDP2 0x00000004 /* SDP2 Data Value */
30596 +#define IXGBE_ESDP_SDP3 0x00000008 /* SDP3 Data Value */
30597 +#define IXGBE_ESDP_SDP4 0x00000010 /* SDP4 Data Value */
30598 +#define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */
30599 +#define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data Value */
30600 +#define IXGBE_ESDP_SDP4_DIR 0x00000004 /* SDP4 IO direction */
30601 +#define IXGBE_ESDP_SDP5_DIR 0x00002000 /* SDP5 IO direction */
30603 +/* LEDCTL Bit Masks */
30604 +#define IXGBE_LED_IVRT_BASE 0x00000040
30605 +#define IXGBE_LED_BLINK_BASE 0x00000080
30606 +#define IXGBE_LED_MODE_MASK_BASE 0x0000000F
30607 +#define IXGBE_LED_OFFSET(_base, _i) (_base << (8 * (_i)))
30608 +#define IXGBE_LED_MODE_SHIFT(_i) (8*(_i))
30609 +#define IXGBE_LED_IVRT(_i) IXGBE_LED_OFFSET(IXGBE_LED_IVRT_BASE, _i)
30610 +#define IXGBE_LED_BLINK(_i) IXGBE_LED_OFFSET(IXGBE_LED_BLINK_BASE, _i)
30611 +#define IXGBE_LED_MODE_MASK(_i) IXGBE_LED_OFFSET(IXGBE_LED_MODE_MASK_BASE, _i)
30613 +/* LED modes */
30614 +#define IXGBE_LED_LINK_UP 0x0
30615 +#define IXGBE_LED_LINK_10G 0x1
30616 +#define IXGBE_LED_MAC 0x2
30617 +#define IXGBE_LED_FILTER 0x3
30618 +#define IXGBE_LED_LINK_ACTIVE 0x4
30619 +#define IXGBE_LED_LINK_1G 0x5
30620 +#define IXGBE_LED_ON 0xE
30621 +#define IXGBE_LED_OFF 0xF
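/*
 * A sketch of forcing a LED on with the LEDCTL helpers above, in the
 * shape of the driver's generic led-on routine (IXGBE_LEDCTL is the
 * register offset defined earlier in this header): clear the blink
 * bit and the 4-bit mode field for LED `index`, then write the
 * "always on" mode into it.
 */
static void led_on_sketch(struct ixgbe_hw *hw, u32 index)
{
	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);

	led_reg &= ~IXGBE_LED_BLINK(index);
	led_reg &= ~IXGBE_LED_MODE_MASK(index);
	led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
}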
30623 +/* AUTOC Bit Masks */
30624 +#define IXGBE_AUTOC_KX4_KX_SUPP_MASK 0xC0000000
30625 +#define IXGBE_AUTOC_KX4_SUPP 0x80000000
30626 +#define IXGBE_AUTOC_KX_SUPP 0x40000000
30627 +#define IXGBE_AUTOC_PAUSE 0x30000000
30628 +#define IXGBE_AUTOC_ASM_PAUSE 0x20000000
30629 +#define IXGBE_AUTOC_SYM_PAUSE 0x10000000
30630 +#define IXGBE_AUTOC_RF 0x08000000
30631 +#define IXGBE_AUTOC_PD_TMR 0x06000000
30632 +#define IXGBE_AUTOC_AN_RX_LOOSE 0x01000000
30633 +#define IXGBE_AUTOC_AN_RX_DRIFT 0x00800000
30634 +#define IXGBE_AUTOC_AN_RX_ALIGN 0x007C0000
30635 +#define IXGBE_AUTOC_FECA 0x00040000
30636 +#define IXGBE_AUTOC_FECR 0x00020000
30637 +#define IXGBE_AUTOC_KR_SUPP 0x00010000
30638 +#define IXGBE_AUTOC_AN_RESTART 0x00001000
30639 +#define IXGBE_AUTOC_FLU 0x00000001
30640 +#define IXGBE_AUTOC_LMS_SHIFT 13
30641 +#define IXGBE_AUTOC_LMS_10G_SERIAL (0x3 << IXGBE_AUTOC_LMS_SHIFT)
30642 +#define IXGBE_AUTOC_LMS_KX4_KX_KR (0x4 << IXGBE_AUTOC_LMS_SHIFT)
30643 +#define IXGBE_AUTOC_LMS_SGMII_1G_100M (0x5 << IXGBE_AUTOC_LMS_SHIFT)
30644 +#define IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT)
30645 +#define IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII (0x7 << IXGBE_AUTOC_LMS_SHIFT)
30646 +#define IXGBE_AUTOC_LMS_MASK (0x7 << IXGBE_AUTOC_LMS_SHIFT)
30647 +#define IXGBE_AUTOC_LMS_1G_LINK_NO_AN (0x0 << IXGBE_AUTOC_LMS_SHIFT)
30648 +#define IXGBE_AUTOC_LMS_10G_LINK_NO_AN (0x1 << IXGBE_AUTOC_LMS_SHIFT)
30649 +#define IXGBE_AUTOC_LMS_1G_AN (0x2 << IXGBE_AUTOC_LMS_SHIFT)
30650 +#define IXGBE_AUTOC_LMS_KX4_AN (0x4 << IXGBE_AUTOC_LMS_SHIFT)
30651 +#define IXGBE_AUTOC_LMS_KX4_AN_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT)
30652 +#define IXGBE_AUTOC_LMS_ATTACH_TYPE (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
30654 +#define IXGBE_AUTOC_1G_PMA_PMD_MASK 0x00000200
30655 +#define IXGBE_AUTOC_1G_PMA_PMD_SHIFT 9
30656 +#define IXGBE_AUTOC_10G_PMA_PMD_MASK 0x00000180
30657 +#define IXGBE_AUTOC_10G_PMA_PMD_SHIFT 7
30658 +#define IXGBE_AUTOC_10G_XAUI (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
30659 +#define IXGBE_AUTOC_10G_KX4 (0x1 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
30660 +#define IXGBE_AUTOC_10G_CX4 (0x2 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
30661 +#define IXGBE_AUTOC_1G_BX (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
30662 +#define IXGBE_AUTOC_1G_KX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
30663 +#define IXGBE_AUTOC_1G_SFI (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
30664 +#define IXGBE_AUTOC_1G_KX_BX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
30666 +#define IXGBE_AUTOC2_UPPER_MASK 0xFFFF0000
30667 +#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK 0x00030000
30668 +#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT 16
30669 +#define IXGBE_AUTOC2_10G_KR (0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
30670 +#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
30671 +#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
30674 +/* LINKS Bit Masks */
30675 +#define IXGBE_LINKS_KX_AN_COMP 0x80000000
30676 +#define IXGBE_LINKS_UP 0x40000000
30677 +#define IXGBE_LINKS_SPEED 0x20000000
30678 +#define IXGBE_LINKS_MODE 0x18000000
30679 +#define IXGBE_LINKS_RX_MODE 0x06000000
30680 +#define IXGBE_LINKS_TX_MODE 0x01800000
30681 +#define IXGBE_LINKS_XGXS_EN 0x00400000
30682 +#define IXGBE_LINKS_SGMII_EN 0x02000000
30683 +#define IXGBE_LINKS_PCS_1G_EN 0x00200000
30684 +#define IXGBE_LINKS_1G_AN_EN 0x00100000
30685 +#define IXGBE_LINKS_KX_AN_IDLE 0x00080000
30686 +#define IXGBE_LINKS_1G_SYNC 0x00040000
30687 +#define IXGBE_LINKS_10G_ALIGN 0x00020000
30688 +#define IXGBE_LINKS_10G_LANE_SYNC 0x00017000
30689 +#define IXGBE_LINKS_TL_FAULT 0x00001000
30690 +#define IXGBE_LINKS_SIGNAL 0x00000F00
30692 +#define IXGBE_LINKS_SPEED_82599 0x30000000
30693 +#define IXGBE_LINKS_SPEED_10G_82599 0x30000000
30694 +#define IXGBE_LINKS_SPEED_1G_82599 0x20000000
30695 +#define IXGBE_LINKS_SPEED_100_82599 0x10000000
30696 +#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */
30697 +#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */
30699 +#define IXGBE_LINKS2_AN_SUPPORTED 0x00000040
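/*
 * A sketch of the LINKS decode performed by check-link style helpers:
 * LINKS.UP gates everything, and on 82599 the two speed bits select
 * 100M/1G/10G. Assumes the ixgbe_link_speed values defined elsewhere
 * in this header.
 */
static bool links_decode_sketch(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
{
	u32 links = IXGBE_READ_REG(hw, IXGBE_LINKS);

	switch (links & IXGBE_LINKS_SPEED_82599) {
	case IXGBE_LINKS_SPEED_10G_82599:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_1G_82599:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	default:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		break;
	}
	return (links & IXGBE_LINKS_UP) != 0;
}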
30701 +/* PCS1GLSTA Bit Masks */
30702 +#define IXGBE_PCS1GLSTA_LINK_OK 1
30703 +#define IXGBE_PCS1GLSTA_SYNK_OK 0x10
30704 +#define IXGBE_PCS1GLSTA_AN_COMPLETE 0x10000
30705 +#define IXGBE_PCS1GLSTA_AN_PAGE_RX 0x20000
30706 +#define IXGBE_PCS1GLSTA_AN_TIMED_OUT 0x40000
30707 +#define IXGBE_PCS1GLSTA_AN_REMOTE_FAULT 0x80000
30708 +#define IXGBE_PCS1GLSTA_AN_ERROR_RWS 0x100000
30710 +#define IXGBE_PCS1GANA_SYM_PAUSE 0x80
30711 +#define IXGBE_PCS1GANA_ASM_PAUSE 0x100
30713 +/* PCS1GLCTL Bit Masks */
30714 +#define IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN 0x00040000 /* PCS 1G autoneg to en */
30715 +#define IXGBE_PCS1GLCTL_FLV_LINK_UP 1
30716 +#define IXGBE_PCS1GLCTL_FORCE_LINK 0x20
30717 +#define IXGBE_PCS1GLCTL_LOW_LINK_LATCH 0x40
30718 +#define IXGBE_PCS1GLCTL_AN_ENABLE 0x10000
30719 +#define IXGBE_PCS1GLCTL_AN_RESTART 0x20000
30721 +/* ANLP1 Bit Masks */
30722 +#define IXGBE_ANLP1_PAUSE 0x0C00
30723 +#define IXGBE_ANLP1_SYM_PAUSE 0x0400
30724 +#define IXGBE_ANLP1_ASM_PAUSE 0x0800
30725 +#define IXGBE_ANLP1_AN_STATE_MASK 0x000f0000
30727 +/* SW Semaphore Register bitmasks */
30728 +#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
30729 +#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
30730 +#define IXGBE_SWSM_WMNG 0x00000004 /* Wake MNG Clock */
30731 +#define IXGBE_SWFW_REGSMP 0x80000000 /* Register Semaphore bit 31 */
30733 +/* SW_FW_SYNC/GSSR definitions */
30734 +#define IXGBE_GSSR_EEP_SM 0x0001
30735 +#define IXGBE_GSSR_PHY0_SM 0x0002
30736 +#define IXGBE_GSSR_PHY1_SM 0x0004
30737 +#define IXGBE_GSSR_MAC_CSR_SM 0x0008
30738 +#define IXGBE_GSSR_FLASH_SM 0x0010
30740 +/* EEC Register */
30741 +#define IXGBE_EEC_SK 0x00000001 /* EEPROM Clock */
30742 +#define IXGBE_EEC_CS 0x00000002 /* EEPROM Chip Select */
30743 +#define IXGBE_EEC_DI 0x00000004 /* EEPROM Data In */
30744 +#define IXGBE_EEC_DO 0x00000008 /* EEPROM Data Out */
30745 +#define IXGBE_EEC_FWE_MASK 0x00000030 /* FLASH Write Enable */
30746 +#define IXGBE_EEC_FWE_DIS 0x00000010 /* Disable FLASH writes */
30747 +#define IXGBE_EEC_FWE_EN 0x00000020 /* Enable FLASH writes */
30748 +#define IXGBE_EEC_FWE_SHIFT 4
30749 +#define IXGBE_EEC_REQ 0x00000040 /* EEPROM Access Request */
30750 +#define IXGBE_EEC_GNT 0x00000080 /* EEPROM Access Grant */
30751 +#define IXGBE_EEC_PRES 0x00000100 /* EEPROM Present */
30752 +#define IXGBE_EEC_ARD 0x00000200 /* EEPROM Auto Read Done */
30753 +#define IXGBE_EEC_FLUP 0x00800000 /* Flash update command */
30754 +#define IXGBE_EEC_FLUDONE 0x04000000 /* Flash update done */
30755 +/* EEPROM Addressing bits based on type (0-small, 1-large) */
30756 +#define IXGBE_EEC_ADDR_SIZE 0x00000400
30757 +#define IXGBE_EEC_SIZE 0x00007800 /* EEPROM Size */
30759 +#define IXGBE_EEC_SIZE_SHIFT 11
30760 +#define IXGBE_EEPROM_WORD_SIZE_BASE_SHIFT 6
30761 +#define IXGBE_EEPROM_OPCODE_BITS 8
30763 +/* Checksum and EEPROM pointers */
30764 +#define IXGBE_PBANUM_PTR_GUARD 0xFAFA
30765 +#define IXGBE_EEPROM_CHECKSUM 0x3F
30766 +#define IXGBE_EEPROM_SUM 0xBABA
30767 +#define IXGBE_PCIE_ANALOG_PTR 0x03
30768 +#define IXGBE_ATLAS0_CONFIG_PTR 0x04
30769 +#define IXGBE_PHY_PTR 0x04
30770 +#define IXGBE_ATLAS1_CONFIG_PTR 0x05
30771 +#define IXGBE_OPTION_ROM_PTR 0x05
30772 +#define IXGBE_PCIE_GENERAL_PTR 0x06
30773 +#define IXGBE_PCIE_CONFIG0_PTR 0x07
30774 +#define IXGBE_PCIE_CONFIG1_PTR 0x08
30775 +#define IXGBE_CORE0_PTR 0x09
30776 +#define IXGBE_CORE1_PTR 0x0A
30777 +#define IXGBE_MAC0_PTR 0x0B
30778 +#define IXGBE_MAC1_PTR 0x0C
30779 +#define IXGBE_CSR0_CONFIG_PTR 0x0D
30780 +#define IXGBE_CSR1_CONFIG_PTR 0x0E
30781 +#define IXGBE_FW_PTR 0x0F
30782 +#define IXGBE_PBANUM0_PTR 0x15
30783 +#define IXGBE_PBANUM1_PTR 0x16
30784 +#define IXGBE_SAN_MAC_ADDR_PTR 0x28
30785 +#define IXGBE_DEVICE_CAPS 0x2C
30786 +#define IXGBE_SERIAL_NUMBER_MAC_ADDR 0x11
30787 +#define IXGBE_PCIE_MSIX_82599_CAPS 0x72
30788 +#define IXGBE_PCIE_MSIX_82598_CAPS 0x62
30790 +/* MSI-X capability fields masks */
30791 +#define IXGBE_PCIE_MSIX_TBL_SZ_MASK 0x7FF
30793 +/* Legacy EEPROM word offsets */
30794 +#define IXGBE_ISCSI_BOOT_CAPS 0x0033
30795 +#define IXGBE_ISCSI_SETUP_PORT_0 0x0030
30796 +#define IXGBE_ISCSI_SETUP_PORT_1 0x0034
30798 +/* EEPROM Commands - SPI */
30799 +#define IXGBE_EEPROM_MAX_RETRY_SPI 5000 /* Max wait 5ms for RDY signal */
30800 +#define IXGBE_EEPROM_STATUS_RDY_SPI 0x01
30801 +#define IXGBE_EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */
30802 +#define IXGBE_EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */
30803 +#define IXGBE_EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = addr bit-8 */
30804 +#define IXGBE_EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Ena latch */
30805 +/* EEPROM reset Write Enable latch */
30806 +#define IXGBE_EEPROM_WRDI_OPCODE_SPI 0x04
30807 +#define IXGBE_EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status reg */
30808 +#define IXGBE_EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status reg */
30809 +#define IXGBE_EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */
30810 +#define IXGBE_EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */
30811 +#define IXGBE_EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */
30813 +/* EEPROM Read Register */
30814 +#define IXGBE_EEPROM_RW_REG_DATA 16 /* data offset in EEPROM read reg */
30815 +#define IXGBE_EEPROM_RW_REG_DONE 2 /* Offset to READ done bit */
30816 +#define IXGBE_EEPROM_RW_REG_START 1 /* First bit to start operation */
30817 +#define IXGBE_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
30818 +#define IXGBE_NVM_POLL_WRITE 1 /* Flag for polling for write complete */
30819 +#define IXGBE_NVM_POLL_READ 0 /* Flag for polling for read complete */
30821 +#define IXGBE_ETH_LENGTH_OF_ADDRESS 6
30823 +#ifndef IXGBE_EEPROM_GRANT_ATTEMPTS
30824 +#define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */
30825 +#endif
30827 +#ifndef IXGBE_EERD_EEWR_ATTEMPTS
30828 +/* Number of 5-microsecond intervals we wait for an EERD read or
30829 + * EEWR write to complete */
30830 +#define IXGBE_EERD_EEWR_ATTEMPTS 100000
30831 +#endif
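/*
 * A sketch of an EERD word read built from the EEPROM_RW_* offsets
 * below, patterned on the driver's generic EERD read: kick off the
 * read, busy-wait on the done bit (udelay() is the kernel's
 * microsecond delay), and pull the word out of the top half of the
 * register. IXGBE_EERD and IXGBE_ERR_EEPROM are defined elsewhere in
 * this header.
 */
static s32 eerd_read_sketch(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
	u32 eerd = ((u32)offset << IXGBE_EEPROM_RW_ADDR_SHIFT) |
		   IXGBE_EEPROM_RW_REG_START;
	u32 i;

	IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
	for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
		eerd = IXGBE_READ_REG(hw, IXGBE_EERD);
		if (eerd & IXGBE_EEPROM_RW_REG_DONE) {
			*data = (u16)(eerd >> IXGBE_EEPROM_RW_REG_DATA);
			return 0;
		}
		udelay(5);
	}
	return IXGBE_ERR_EEPROM;
}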
30833 +#ifndef IXGBE_FLUDONE_ATTEMPTS
30834 +/* Number of attempts we wait for a flash update to complete */
30835 +#define IXGBE_FLUDONE_ATTEMPTS 20000
30836 +#endif
30838 +#define IXGBE_PCIE_CTRL2 0x5 /* PCIe Control 2 Offset */
30839 +#define IXGBE_PCIE_CTRL2_DUMMY_ENABLE 0x8 /* Dummy Function Enable */
30840 +#define IXGBE_PCIE_CTRL2_LAN_DISABLE 0x2 /* LAN PCI Disable */
30841 +#define IXGBE_PCIE_CTRL2_DISABLE_SELECT 0x1 /* LAN Disable Select */
30843 +#define IXGBE_SAN_MAC_ADDR_PORT0_OFFSET 0x0
30844 +#define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3
30845 +#define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1
30846 +#define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2
30847 +#define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4
30848 +#define IXGBE_FW_PATCH_VERSION_4 0x7
30849 +#define IXGBE_FCOE_IBA_CAPS_BLK_PTR 0x33 /* iSCSI/FCOE block */
30850 +#define IXGBE_FCOE_IBA_CAPS_FCOE 0x20 /* FCOE flags */
30851 +#define IXGBE_ISCSI_FCOE_BLK_PTR 0x17 /* iSCSI/FCOE block */
30852 +#define IXGBE_ISCSI_FCOE_FLAGS_OFFSET 0x0 /* FCOE flags */
30853 +#define IXGBE_ISCSI_FCOE_FLAGS_ENABLE 0x1 /* FCOE flags enable bit */
30854 +#define IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR 0x27 /* Alt. SAN MAC block */
30855 +#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET 0x0 /* Alt. SAN MAC capability */
30856 +#define IXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt. SAN MAC 0 offset */
30857 +#define IXGBE_ALT_SAN_MAC_ADDR_PORT1_OFFSET 0x4 /* Alt. SAN MAC 1 offset */
30858 +#define IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET 0x7 /* Alt. WWNN prefix offset */
30859 +#define IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET 0x8 /* Alt. WWPN prefix offset */
30860 +#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC 0x0 /* Alt. SAN MAC exists */
30861 +#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt. WWN base exists */
30863 +/* PCI Bus Info */
30864 +#define IXGBE_PCI_DEVICE_STATUS 0xAA
30865 +#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020
30866 +#define IXGBE_PCI_LINK_STATUS 0xB2
30867 +#define IXGBE_PCI_DEVICE_CONTROL2 0xC8
30868 +#define IXGBE_PCI_LINK_WIDTH 0x3F0
30869 +#define IXGBE_PCI_LINK_WIDTH_1 0x10
30870 +#define IXGBE_PCI_LINK_WIDTH_2 0x20
30871 +#define IXGBE_PCI_LINK_WIDTH_4 0x40
30872 +#define IXGBE_PCI_LINK_WIDTH_8 0x80
30873 +#define IXGBE_PCI_LINK_SPEED 0xF
30874 +#define IXGBE_PCI_LINK_SPEED_2500 0x1
30875 +#define IXGBE_PCI_LINK_SPEED_5000 0x2
30876 +#define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E
30877 +#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80
30878 +#define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005
30880 +/* Number of 100 microseconds we wait for PCI Express master disable */
30881 +#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800
30883 +/* Check whether an address is multicast. This is a little-endian specific check. */
30884 +#define IXGBE_IS_MULTICAST(Address) \
30885 + (bool)(((u8 *)(Address))[0] & ((u8)0x01))
30887 +/* Check whether an address is broadcast. */
30888 +#define IXGBE_IS_BROADCAST(Address) \
30889 + ((((u8 *)(Address))[0] == ((u8)0xff)) && \
30890 + (((u8 *)(Address))[1] == ((u8)0xff)))
30892 +/* RAH */
30893 +#define IXGBE_RAH_VIND_MASK 0x003C0000
30894 +#define IXGBE_RAH_VIND_SHIFT 18
30895 +#define IXGBE_RAH_AV 0x80000000
30896 +#define IXGBE_CLEAR_VMDQ_ALL 0xFFFFFFFF
30898 +/* Header split receive */
30899 +#define IXGBE_RFCTL_ISCSI_DIS 0x00000001
30900 +#define IXGBE_RFCTL_ISCSI_DWC_MASK 0x0000003E
30901 +#define IXGBE_RFCTL_ISCSI_DWC_SHIFT 1
30902 +#define IXGBE_RFCTL_NFSW_DIS 0x00000040
30903 +#define IXGBE_RFCTL_NFSR_DIS 0x00000080
30904 +#define IXGBE_RFCTL_NFS_VER_MASK 0x00000300
30905 +#define IXGBE_RFCTL_NFS_VER_SHIFT 8
30906 +#define IXGBE_RFCTL_NFS_VER_2 0
30907 +#define IXGBE_RFCTL_NFS_VER_3 1
30908 +#define IXGBE_RFCTL_NFS_VER_4 2
30909 +#define IXGBE_RFCTL_IPV6_DIS 0x00000400
30910 +#define IXGBE_RFCTL_IPV6_XSUM_DIS 0x00000800
30911 +#define IXGBE_RFCTL_IPFRSP_DIS 0x00004000
30912 +#define IXGBE_RFCTL_IPV6_EX_DIS 0x00010000
30913 +#define IXGBE_RFCTL_NEW_IPV6_EXT_DIS 0x00020000
30915 +/* Transmit Config masks */
30916 +#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */
30917 +#define IXGBE_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. write-back flushing */
30918 +#define IXGBE_TXDCTL_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */
30919 +/* Enable short packet padding to 64 bytes */
30920 +#define IXGBE_TX_PAD_ENABLE 0x00000400
30921 +#define IXGBE_JUMBO_FRAME_ENABLE 0x00000004 /* Allow jumbo frames */
30922 +/* This allows for 16K packets + 4K for VLAN */
30923 +#define IXGBE_MAX_FRAME_SZ 0x40040000
30925 +#define IXGBE_TDWBAL_HEAD_WB_ENABLE 0x1 /* Tx head write-back enable */
30926 +#define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2 /* Tx seq# write-back enable */
30928 +/* Receive Config masks */
30929 +#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */
30930 +#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */
30931 +#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
30932 +#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */
30934 +#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */
30935 +#define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/
30936 +#define IXGBE_FCTRL_UPE 0x00000200 /* Unicast Promiscuous Ena */
30937 +#define IXGBE_FCTRL_BAM 0x00000400 /* Broadcast Accept Mode */
30938 +#define IXGBE_FCTRL_PMCF 0x00001000 /* Pass MAC Control Frames */
30939 +#define IXGBE_FCTRL_DPF 0x00002000 /* Discard Pause Frame */
30940 +/* Receive Priority Flow Control Enable */
30941 +#define IXGBE_FCTRL_RPFCE 0x00004000
30942 +#define IXGBE_FCTRL_RFCE 0x00008000 /* Receive Flow Control Ena */
30943 +#define IXGBE_MFLCN_PMCF 0x00000001 /* Pass MAC Control Frames */
30944 +#define IXGBE_MFLCN_DPF 0x00000002 /* Discard Pause Frame */
30945 +#define IXGBE_MFLCN_RPFCE 0x00000004 /* Receive Priority FC Enable */
30946 +#define IXGBE_MFLCN_RFCE 0x00000008 /* Receive FC Enable */
30948 +/* Multiple Receive Queue Control */
30949 +#define IXGBE_MRQC_RSSEN 0x00000001 /* RSS Enable */
30950 +#define IXGBE_MRQC_MRQE_MASK 0xF /* Bits 3:0 */
30951 +#define IXGBE_MRQC_RT8TCEN 0x00000002 /* 8 TC no RSS */
30952 +#define IXGBE_MRQC_RT4TCEN 0x00000003 /* 4 TC no RSS */
30953 +#define IXGBE_MRQC_RTRSS8TCEN 0x00000004 /* 8 TC w/ RSS */
30954 +#define IXGBE_MRQC_RTRSS4TCEN 0x00000005 /* 4 TC w/ RSS */
30955 +#define IXGBE_MRQC_VMDQEN 0x00000008 /* VMDq2 64 pools no RSS */
30956 +#define IXGBE_MRQC_VMDQRSS32EN 0x0000000A /* VMDq2 32 pools w/ RSS */
30957 +#define IXGBE_MRQC_VMDQRSS64EN 0x0000000B /* VMDq2 64 pools w/ RSS */
30958 +#define IXGBE_MRQC_VMDQRT8TCEN 0x0000000C /* VMDq2/RT 16 pool 8 TC */
30959 +#define IXGBE_MRQC_VMDQRT4TCEN 0x0000000D /* VMDq2/RT 32 pool 4 TC */
30960 +#define IXGBE_MRQC_RSS_FIELD_MASK 0xFFFF0000
30961 +#define IXGBE_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
30962 +#define IXGBE_MRQC_RSS_FIELD_IPV4 0x00020000
30963 +#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP 0x00040000
30964 +#define IXGBE_MRQC_RSS_FIELD_IPV6_EX 0x00080000
30965 +#define IXGBE_MRQC_RSS_FIELD_IPV6 0x00100000
30966 +#define IXGBE_MRQC_RSS_FIELD_IPV6_TCP 0x00200000
30967 +#define IXGBE_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
30968 +#define IXGBE_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
30969 +#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP 0x01000000
30970 +#define IXGBE_MRQC_L3L4TXSWEN 0x00008000
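/*
 * A sketch of a typical MRQC value for plain RSS hashing over
 * TCP/IPv4 and TCP/IPv6, comparable in shape to the driver's RSS
 * setup (IXGBE_MRQC is the register offset defined earlier in this
 * header): the field-select bits are simply OR'd on top of the MRQE
 * mode in bits 3:0.
 */
static void mrqc_rss_sketch(struct ixgbe_hw *hw)
{
	u32 mrqc = IXGBE_MRQC_RSSEN |
		   IXGBE_MRQC_RSS_FIELD_IPV4 |
		   IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
		   IXGBE_MRQC_RSS_FIELD_IPV6 |
		   IXGBE_MRQC_RSS_FIELD_IPV6_TCP;

	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
}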
30972 +/* Queue Drop Enable */
30973 +#define IXGBE_QDE_ENABLE 0x00000001
30974 +#define IXGBE_QDE_IDX_MASK 0x00007F00
30975 +#define IXGBE_QDE_IDX_SHIFT 8
30977 +#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
30978 +#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
30979 +#define IXGBE_TXD_CMD_EOP 0x01000000 /* End of Packet */
30980 +#define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
30981 +#define IXGBE_TXD_CMD_IC 0x04000000 /* Insert Checksum */
30982 +#define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */
30983 +#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
30984 +#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
30985 +#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */
30987 +#define IXGBE_RXDADV_IPSEC_STATUS_SECP 0x00020000
30988 +#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000
30989 +#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000
30990 +#define IXGBE_RXDADV_IPSEC_ERROR_AUTH_FAILED 0x18000000
30991 +#define IXGBE_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000
30992 +/* Multiple Transmit Queue Command Register */
30993 +#define IXGBE_MTQC_RT_ENA 0x1 /* DCB Enable */
30994 +#define IXGBE_MTQC_VT_ENA 0x2 /* VMDQ2 Enable */
30995 +#define IXGBE_MTQC_64Q_1PB 0x0 /* 64 queues, 1 packet buffer */
30996 +#define IXGBE_MTQC_32VF 0x8 /* 4 TX Queues per pool w/32VF's */
30997 +#define IXGBE_MTQC_64VF 0x4 /* 2 TX Queues per pool w/64VF's */
30998 +#define IXGBE_MTQC_4TC_4TQ 0x8 /* 4 TC if RT_ENA and VT_ENA */
30999 +#define IXGBE_MTQC_8TC_8TQ 0xC /* 8 TC if RT_ENA or 8 TQ if VT_ENA */
31001 +/* Receive Descriptor bit definitions */
31002 +#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */
31003 +#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */
31004 +#define IXGBE_RXD_STAT_FLM 0x04 /* FDir Match */
31005 +#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
31006 +#define IXGBE_RXDADV_NEXTP_MASK 0x000FFFF0 /* Next Descriptor Index */
31007 +#define IXGBE_RXDADV_NEXTP_SHIFT 0x00000004
31008 +#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
31009 +#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */
31010 +#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
31011 +#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */
31012 +#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */
31013 +#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */
31014 +#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */
31015 +#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */
31016 +#define IXGBE_RXD_STAT_LLINT 0x800 /* Pkt caused Low Latency Interrupt */
31017 +#define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */
31018 +#define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */
31019 +#define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */
31020 +#define IXGBE_RXD_STAT_ACK 0x8000 /* ACK Packet indication */
31021 +#define IXGBE_RXD_ERR_CE 0x01 /* CRC Error */
31022 +#define IXGBE_RXD_ERR_LE 0x02 /* Length Error */
31023 +#define IXGBE_RXD_ERR_PE 0x08 /* Packet Error */
31024 +#define IXGBE_RXD_ERR_OSE 0x10 /* Oversize Error */
31025 +#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */
31026 +#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */
31027 +#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */
31028 +#define IXGBE_RXDADV_ERR_MASK 0xfff00000 /* RDESC.ERRORS mask */
31029 +#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */
31030 +#define IXGBE_RXDADV_ERR_FCEOFE 0x80000000 /* FCoEFe/IPE */
31031 +#define IXGBE_RXDADV_ERR_FCERR 0x00700000 /* FCERR/FDIRERR */
31032 +#define IXGBE_RXDADV_ERR_FDIR_LEN 0x00100000 /* FDIR Length error */
31033 +#define IXGBE_RXDADV_ERR_FDIR_DROP 0x00200000 /* FDIR Drop error */
31034 +#define IXGBE_RXDADV_ERR_FDIR_COLL 0x00400000 /* FDIR Collision error */
31035 +#define IXGBE_RXDADV_ERR_HBO 0x00800000 /* Header Buffer Overflow */
31036 +#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */
31037 +#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */
31038 +#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */
31039 +#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */
31040 +#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */
31041 +#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */
31042 +#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */
31043 +#define IXGBE_RXD_VLAN_ID_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
31044 +#define IXGBE_RXD_PRI_MASK 0xE000 /* Priority is in upper 3 bits */
31045 +#define IXGBE_RXD_PRI_SHIFT 13
31046 +#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */
31047 +#define IXGBE_RXD_CFI_SHIFT 12
31049 +#define IXGBE_RXDADV_STAT_DD IXGBE_RXD_STAT_DD /* Done */
31050 +#define IXGBE_RXDADV_STAT_EOP IXGBE_RXD_STAT_EOP /* End of Packet */
31051 +#define IXGBE_RXDADV_STAT_FLM IXGBE_RXD_STAT_FLM /* FDir Match */
31052 +#define IXGBE_RXDADV_STAT_VP IXGBE_RXD_STAT_VP /* IEEE VLAN Pkt */
31053 +#define IXGBE_RXDADV_STAT_MASK 0x000fffff /* Stat/NEXTP: bit 0-19 */
31054 +#define IXGBE_RXDADV_STAT_FCEOFS 0x00000040 /* FCoE EOF/SOF Stat */
31055 +#define IXGBE_RXDADV_STAT_FCSTAT 0x00000030 /* FCoE Pkt Stat */
31056 +#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */
31057 +#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */
31058 +#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */
31059 +#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */
31061 +/* PSRTYPE bit definitions */
31062 +#define IXGBE_PSRTYPE_TCPHDR 0x00000010
31063 +#define IXGBE_PSRTYPE_UDPHDR 0x00000020
31064 +#define IXGBE_PSRTYPE_IPV4HDR 0x00000100
31065 +#define IXGBE_PSRTYPE_IPV6HDR 0x00000200
31066 +#define IXGBE_PSRTYPE_L2HDR 0x00001000
31068 +/* SRRCTL bit definitions */
31069 +#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* packet buffer size, in 1 KB units */
31070 +#define IXGBE_SRRCTL_RDMTS_SHIFT 22
31071 +#define IXGBE_SRRCTL_RDMTS_MASK 0x01C00000
31072 +#define IXGBE_SRRCTL_DROP_EN 0x10000000
31073 +#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F
31074 +#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00
31075 +#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000
31076 +#define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
31077 +#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000
31078 +#define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
31079 +#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
31080 +#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000
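/*
 * A sketch of per-queue SRRCTL setup along the lines of the driver's
 * ring configuration (IXGBE_SRRCTL(_i) is the register array defined
 * earlier in this header): advanced one-buffer descriptors with a
 * 2 KB packet buffer, BSIZEPKT being expressed in 1 KB units.
 */
static void srrctl_sketch(struct ixgbe_hw *hw, u32 reg_idx)
{
	u32 srrctl = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
}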
31082 +#define IXGBE_RXDPS_HDRSTAT_HDRSP 0x00008000
31083 +#define IXGBE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF
31085 +#define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F
31086 +#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0
31087 +#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0
31088 +#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0
31089 +#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000
31090 +#define IXGBE_RXDADV_RSCCNT_SHIFT 17
31091 +#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5
31092 +#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000
31093 +#define IXGBE_RXDADV_SPH 0x8000
31095 +/* RSS Hash results */
31096 +#define IXGBE_RXDADV_RSSTYPE_NONE 0x00000000
31097 +#define IXGBE_RXDADV_RSSTYPE_IPV4_TCP 0x00000001
31098 +#define IXGBE_RXDADV_RSSTYPE_IPV4 0x00000002
31099 +#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP 0x00000003
31100 +#define IXGBE_RXDADV_RSSTYPE_IPV6_EX 0x00000004
31101 +#define IXGBE_RXDADV_RSSTYPE_IPV6 0x00000005
31102 +#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006
31103 +#define IXGBE_RXDADV_RSSTYPE_IPV4_UDP 0x00000007
31104 +#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP 0x00000008
31105 +#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009
31107 +/* RSS Packet Types as indicated in the receive descriptor. */
31108 +#define IXGBE_RXDADV_PKTTYPE_NONE 0x00000000
31109 +#define IXGBE_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPv4 hdr present */
31110 +#define IXGBE_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPv4 hdr + extensions */
31111 +#define IXGBE_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPv6 hdr present */
31112 +#define IXGBE_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPv6 hdr + extensions */
31113 +#define IXGBE_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */
31114 +#define IXGBE_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */
31115 +#define IXGBE_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */
31116 +#define IXGBE_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */
31117 +#define IXGBE_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */
31118 +#define IXGBE_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */
31119 +#define IXGBE_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */
31120 +#define IXGBE_RXDADV_PKTTYPE_ETQF 0x00008000 /* PKTTYPE is ETQF index */
31121 +#define IXGBE_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */
31122 +#define IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */
31124 +/* Security Processing bit Indication */
31125 +#define IXGBE_RXDADV_LNKSEC_STATUS_SECP 0x00020000
31126 +#define IXGBE_RXDADV_LNKSEC_ERROR_NO_SA_MATCH 0x08000000
31127 +#define IXGBE_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000
31128 +#define IXGBE_RXDADV_LNKSEC_ERROR_BIT_MASK 0x18000000
31129 +#define IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG 0x18000000
31131 +/* Masks to determine if packets should be dropped due to frame errors */
31132 +#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
31133 + IXGBE_RXD_ERR_CE | \
31134 + IXGBE_RXD_ERR_LE | \
31135 + IXGBE_RXD_ERR_PE | \
31136 + IXGBE_RXD_ERR_OSE | \
31137 + IXGBE_RXD_ERR_USE)
31139 +#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \
31140 + IXGBE_RXDADV_ERR_CE | \
31141 + IXGBE_RXDADV_ERR_LE | \
31142 + IXGBE_RXDADV_ERR_PE | \
31143 + IXGBE_RXDADV_ERR_OSE | \
31144 + IXGBE_RXDADV_ERR_USE)
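/*
 * A sketch of the Rx clean-up test these masks exist for: if any hard
 * frame error is flagged in a completed advanced descriptor's error
 * field, the buffer is dropped instead of being handed up the stack.
 */
static bool rx_desc_frame_error_sketch(u32 staterr)
{
	return (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0;
}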
31146 +/* Multicast bit mask */
31147 +#define IXGBE_MCSTCTRL_MFE 0x4
31149 +/* The number of transmit and receive descriptors must be a multiple of 8 */
31150 +#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8
31151 +#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8
31152 +#define IXGBE_REQ_TX_BUFFER_GRANULARITY 1024
31154 +/* Vlan-specific macros */
31155 +#define IXGBE_RX_DESC_SPECIAL_VLAN_MASK 0x0FFF /* VLAN ID in lower 12 bits */
31156 +#define IXGBE_RX_DESC_SPECIAL_PRI_MASK 0xE000 /* Priority in upper 3 bits */
31157 +#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */
31158 +#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT
31160 +/* SR-IOV specific macros */
31161 +#define IXGBE_MBVFICR_INDEX(vf_number) (vf_number >> 4)
31162 +#define IXGBE_MBVFICR(_i) (0x00710 + (_i * 4))
31163 +#define IXGBE_VFLRE(_i) (((_i & 1) ? 0x001C0 : 0x00600))
31164 +#define IXGBE_VFLREC(_i) (0x00700 + (_i * 4))
31165 +/* Translated register #defines */
31166 +#define IXGBE_PVFCTRL(P) (0x00300 + (4 * P))
31167 +#define IXGBE_PVFSTATUS(P) (0x00008 + (0 * P))
31168 +#define IXGBE_PVFLINKS(P) (0x042A4 + (0 * P))
31169 +#define IXGBE_PVFRTIMER(P) (0x00048 + (0 * P))
31170 +#define IXGBE_PVFMAILBOX(P) (0x04C00 + (4 * P))
31171 +#define IXGBE_PVFRXMEMWRAP(P) (0x03190 + (0 * P))
31172 +#define IXGBE_PVTEICR(P) (0x00B00 + (4 * P))
31173 +#define IXGBE_PVTEICS(P) (0x00C00 + (4 * P))
31174 +#define IXGBE_PVTEIMS(P) (0x00D00 + (4 * P))
31175 +#define IXGBE_PVTEIMC(P) (0x00E00 + (4 * P))
31176 +#define IXGBE_PVTEIAC(P) (0x00F00 + (4 * P))
31177 +#define IXGBE_PVTEIAM(P) (0x04D00 + (4 * P))
31178 +#define IXGBE_PVTEITR(P) (((P) < 24) ? (0x00820 + ((P) * 4)) : \
31179 + (0x012300 + (((P) - 24) * 4)))
31180 +#define IXGBE_PVTIVAR(P) (0x12500 + (4 * P))
31181 +#define IXGBE_PVTIVAR_MISC(P) (0x04E00 + (4 * P))
31182 +#define IXGBE_PVTRSCINT(P) (0x12000 + (4 * P))
31183 +#define IXGBE_VFPBACL(P) (0x110C8 + (4 * P))
31184 +#define IXGBE_PVFRDBAL(P) ((P < 64) ? (0x01000 + (0x40 * P)) \
31185 + : (0x0D000 + (0x40 * (P - 64))))
31186 +#define IXGBE_PVFRDBAH(P) ((P < 64) ? (0x01004 + (0x40 * P)) \
31187 + : (0x0D004 + (0x40 * (P - 64))))
31188 +#define IXGBE_PVFRDLEN(P) ((P < 64) ? (0x01008 + (0x40 * P)) \
31189 + : (0x0D008 + (0x40 * (P - 64))))
31190 +#define IXGBE_PVFRDH(P) ((P < 64) ? (0x01010 + (0x40 * P)) \
31191 + : (0x0D010 + (0x40 * (P - 64))))
31192 +#define IXGBE_PVFRDT(P) ((P < 64) ? (0x01018 + (0x40 * P)) \
31193 + : (0x0D018 + (0x40 * (P - 64))))
31194 +#define IXGBE_PVFRXDCTL(P) ((P < 64) ? (0x01028 + (0x40 * P)) \
31195 + : (0x0D028 + (0x40 * (P - 64))))
31196 +#define IXGBE_PVFSRRCTL(P) ((P < 64) ? (0x01014 + (0x40 * P)) \
31197 + : (0x0D014 + (0x40 * (P - 64))))
31198 +#define IXGBE_PVFPSRTYPE(P) (0x0EA00 + (4 * P))
31199 +#define IXGBE_PVFTDBAL(P) (0x06000 + (0x40 * P))
31200 +#define IXGBE_PVFTDBAH(P) (0x06004 + (0x40 * P))
31201 +#define IXGBE_PVFTDLEN(P) (0x06008 + (0x40 * P))
31202 +#define IXGBE_PVFTDH(P) (0x06010 + (0x40 * P))
31203 +#define IXGBE_PVFTDT(P) (0x06018 + (0x40 * P))
31204 +#define IXGBE_PVFTXDCTL(P) (0x06028 + (0x40 * P))
31205 +#define IXGBE_PVFTDWBAL(P) (0x06038 + (0x40 * P))
31206 +#define IXGBE_PVFTDWBAH(P) (0x0603C + (0x40 * P))
31207 +#define IXGBE_PVFDCA_RXCTRL(P) ((P < 64) ? (0x0100C + (0x40 * P)) \
31208 + : (0x0D00C + (0x40 * (P - 64))))
31209 +#define IXGBE_PVFDCA_TXCTRL(P) (0x0600C + (0x40 * P))
31210 +#define IXGBE_PVFGPRC(x) (0x0101C + (0x40 * x))
31211 +#define IXGBE_PVFGPTC(x) (0x08300 + (0x04 * x))
31212 +#define IXGBE_PVFGORC_LSB(x) (0x01020 + (0x40 * x))
31213 +#define IXGBE_PVFGORC_MSB(x) (0x0D020 + (0x40 * x))
31214 +#define IXGBE_PVFGOTC_LSB(x) (0x08400 + (0x08 * x))
31215 +#define IXGBE_PVFGOTC_MSB(x) (0x08404 + (0x08 * x))
31216 +#define IXGBE_PVFMPRC(x) (0x0D01C + (0x40 * x))
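The per-queue RX macros branch on P < 64 because the hardware splits the 128 queue register sets across two banks. Worked examples (the values follow directly from the macros above; IXGBE_WRITE_REG is the driver's usual register accessor):

    /*   IXGBE_PVFRDT(2)  == 0x01018 + 0x40 * 2         == 0x01098
     *   IXGBE_PVFRDT(70) == 0x0D018 + 0x40 * (70 - 64) == 0x0D198 */
    IXGBE_WRITE_REG(hw, IXGBE_PVFRDT(queue), tail);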
31218 +/* Little Endian defines */
31220 +#define __le16 u16
31223 +#define __le32 u32
31226 +#define __le64 u64
31230 +/* Big Endian defines */
31231 +#define __be16 u16
31232 +#define __be32 u32
31233 +#define __be64 u64
31236 +enum ixgbe_fdir_pballoc_type {
31237 + IXGBE_FDIR_PBALLOC_64K = 0,
31238 + IXGBE_FDIR_PBALLOC_128K,
31239 + IXGBE_FDIR_PBALLOC_256K,
31241 +#define IXGBE_FDIR_PBALLOC_SIZE_SHIFT 16
31243 +/* Flow Director register values */
31244 +#define IXGBE_FDIRCTRL_PBALLOC_64K 0x00000001
31245 +#define IXGBE_FDIRCTRL_PBALLOC_128K 0x00000002
31246 +#define IXGBE_FDIRCTRL_PBALLOC_256K 0x00000003
31247 +#define IXGBE_FDIRCTRL_INIT_DONE 0x00000008
31248 +#define IXGBE_FDIRCTRL_PERFECT_MATCH 0x00000010
31249 +#define IXGBE_FDIRCTRL_REPORT_STATUS 0x00000020
31250 +#define IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS 0x00000080
31251 +#define IXGBE_FDIRCTRL_DROP_Q_SHIFT 8
31252 +#define IXGBE_FDIRCTRL_FLEX_SHIFT 16
31253 +#define IXGBE_FDIRCTRL_SEARCHLIM 0x00800000
31254 +#define IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT 24
31255 +#define IXGBE_FDIRCTRL_FULL_THRESH_MASK 0xF0000000
31256 +#define IXGBE_FDIRCTRL_FULL_THRESH_SHIFT 28
31258 +#define IXGBE_FDIRTCPM_DPORTM_SHIFT 16
31259 +#define IXGBE_FDIRUDPM_DPORTM_SHIFT 16
31260 +#define IXGBE_FDIRIP6M_DIPM_SHIFT 16
31261 +#define IXGBE_FDIRM_VLANID 0x00000001
31262 +#define IXGBE_FDIRM_VLANP 0x00000002
31263 +#define IXGBE_FDIRM_POOL 0x00000004
31264 +#define IXGBE_FDIRM_L4P 0x00000008
31265 +#define IXGBE_FDIRM_FLEX 0x00000010
31266 +#define IXGBE_FDIRM_DIPv6 0x00000020
31268 +#define IXGBE_FDIRFREE_FREE_MASK 0xFFFF
31269 +#define IXGBE_FDIRFREE_FREE_SHIFT 0
31270 +#define IXGBE_FDIRFREE_COLL_MASK 0x7FFF0000
31271 +#define IXGBE_FDIRFREE_COLL_SHIFT 16
31272 +#define IXGBE_FDIRLEN_MAXLEN_MASK 0x3F
31273 +#define IXGBE_FDIRLEN_MAXLEN_SHIFT 0
31274 +#define IXGBE_FDIRLEN_MAXHASH_MASK 0x7FFF0000
31275 +#define IXGBE_FDIRLEN_MAXHASH_SHIFT 16
31276 +#define IXGBE_FDIRUSTAT_ADD_MASK 0xFFFF
31277 +#define IXGBE_FDIRUSTAT_ADD_SHIFT 0
31278 +#define IXGBE_FDIRUSTAT_REMOVE_MASK 0xFFFF0000
31279 +#define IXGBE_FDIRUSTAT_REMOVE_SHIFT 16
31280 +#define IXGBE_FDIRFSTAT_FADD_MASK 0x00FF
31281 +#define IXGBE_FDIRFSTAT_FADD_SHIFT 0
31282 +#define IXGBE_FDIRFSTAT_FREMOVE_MASK 0xFF00
31283 +#define IXGBE_FDIRFSTAT_FREMOVE_SHIFT 8
31284 +#define IXGBE_FDIRPORT_DESTINATION_SHIFT 16
31285 +#define IXGBE_FDIRVLAN_FLEX_SHIFT 16
31286 +#define IXGBE_FDIRHASH_BUCKET_VALID_SHIFT 15
31287 +#define IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT 16
31289 +#define IXGBE_FDIRCMD_CMD_MASK 0x00000003
31290 +#define IXGBE_FDIRCMD_CMD_ADD_FLOW 0x00000001
31291 +#define IXGBE_FDIRCMD_CMD_REMOVE_FLOW 0x00000002
31292 +#define IXGBE_FDIRCMD_CMD_QUERY_REM_FILT 0x00000003
31293 +#define IXGBE_FDIRCMD_CMD_QUERY_REM_HASH 0x00000007
31294 +#define IXGBE_FDIRCMD_FILTER_UPDATE 0x00000008
31295 +#define IXGBE_FDIRCMD_IPv6DMATCH 0x00000010
31296 +#define IXGBE_FDIRCMD_L4TYPE_UDP 0x00000020
31297 +#define IXGBE_FDIRCMD_L4TYPE_TCP 0x00000040
31298 +#define IXGBE_FDIRCMD_L4TYPE_SCTP 0x00000060
31299 +#define IXGBE_FDIRCMD_IPV6 0x00000080
31300 +#define IXGBE_FDIRCMD_CLEARHT 0x00000100
31301 +#define IXGBE_FDIRCMD_DROP 0x00000200
31302 +#define IXGBE_FDIRCMD_INT 0x00000400
31303 +#define IXGBE_FDIRCMD_LAST 0x00000800
31304 +#define IXGBE_FDIRCMD_COLLISION 0x00001000
31305 +#define IXGBE_FDIRCMD_QUEUE_EN 0x00008000
31306 +#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16
31307 +#define IXGBE_FDIRCMD_VT_POOL_SHIFT 24
31308 +#define IXGBE_FDIR_INIT_DONE_POLL 10
31309 +#define IXGBE_FDIRCMD_CMD_POLL 10
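A hedged sketch of how the command bits above combine when adding a TCP flow filter that steers matches to RX queue qidx; the hash computation and the companion FDIRHASH register write are omitted here:

    u32 fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW |
                  IXGBE_FDIRCMD_FILTER_UPDATE |
                  IXGBE_FDIRCMD_L4TYPE_TCP |
                  IXGBE_FDIRCMD_LAST |
                  IXGBE_FDIRCMD_QUEUE_EN |
                  ((u32)qidx << IXGBE_FDIRCMD_RX_QUEUE_SHIFT);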
31311 +/* Transmit Descriptor - Legacy */
31312 +struct ixgbe_legacy_tx_desc {
31313 + u64 buffer_addr; /* Address of the descriptor's data buffer */
31317 + __le16 length; /* Data buffer length */
31318 + u8 cso; /* Checksum offset */
31319 + u8 cmd; /* Descriptor control */
31325 + u8 status; /* Descriptor status */
31326 + u8 css; /* Checksum start */
31332 +/* Transmit Descriptor - Advanced */
31333 +union ixgbe_adv_tx_desc {
31335 + __le64 buffer_addr; /* Address of descriptor's data buf */
31336 + __le32 cmd_type_len;
31337 + __le32 olinfo_status;
31340 + __le64 rsvd; /* Reserved */
31341 + __le32 nxtseq_seed;
31346 +/* Receive Descriptor - Legacy */
31347 +struct ixgbe_legacy_rx_desc {
31348 + __le64 buffer_addr; /* Address of the descriptor's data buffer */
31349 + __le16 length; /* Length of data DMAed into data buffer */
31350 + __le16 csum; /* Packet checksum */
31351 + u8 status; /* Descriptor status */
31352 + u8 errors; /* Descriptor Errors */
31356 +/* Receive Descriptor - Advanced */
31357 +union ixgbe_adv_rx_desc {
31359 + __le64 pkt_addr; /* Packet buffer address */
31360 + __le64 hdr_addr; /* Header buffer address */
31367 + __le16 pkt_info; /* RSS, Pkt type */
31368 + __le16 hdr_info; /* Splithdr, hdrlen */
31372 + __le32 rss; /* RSS Hash */
31374 + __le16 ip_id; /* IP id */
31375 + __le16 csum; /* Packet Checksum */
31380 + __le32 status_error; /* ext status/error */
31381 + __le16 length; /* Packet length */
31382 + __le16 vlan; /* VLAN tag */
31384 + } wb; /* writeback */
31387 +/* Context descriptors */
31388 +struct ixgbe_adv_tx_context_desc {
31389 + __le32 vlan_macip_lens;
31390 + __le32 seqnum_seed;
31391 + __le32 type_tucmd_mlhl;
31392 + __le32 mss_l4len_idx;
31395 +/* Adv Transmit Descriptor Config Masks */
31396 +#define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buf length(bytes) */
31397 +#define IXGBE_ADVTXD_MAC_LINKSEC 0x00040000 /* Insert LinkSec */
31398 +#define IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK 0x000003FF /* IPSec SA index */
31399 +#define IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK 0x000001FF /* IPSec ESP length */
31400 +#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */
31401 +#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */
31402 +#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
31403 +#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */
31404 +#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */
31405 +#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */
31406 +#define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */
31407 +#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */
31408 +#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */
31409 +#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
31410 +#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */
31411 +#define IXGBE_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED pres in WB */
31412 +#define IXGBE_ADVTXD_STAT_RSV 0x0000000C /* STA Reserved */
31413 +#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */
31414 +#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */
31415 +#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */
31416 +#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \
31417 + IXGBE_ADVTXD_POPTS_SHIFT)
31418 +#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \
31419 + IXGBE_ADVTXD_POPTS_SHIFT)
31420 +#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */
31421 +#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */
31422 +#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */
31423 +#define IXGBE_ADVTXD_POPTS_ISCO_FULL 0x00001800 /* 1st&Last TSO-full iSCSI PDU */
31424 +#define IXGBE_ADVTXD_POPTS_RSV 0x00002000 /* POPTS Reserved */
31425 +#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
31426 +#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
31427 +#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */
31428 +#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
31429 +#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */
31430 +#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */
31431 +#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
31432 +#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */
31433 +#define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /* Req requires Markers and CRC */
31434 +#define IXGBE_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */
31435 +#define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */
31436 +#define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000 /* ESP Encrypt Enable */
31437 +#define IXGBE_ADVTXT_TUCMD_FCOE 0x00008000 /* FCoE Frame Type */
31438 +#define IXGBE_ADVTXD_FCOEF_EOF_MASK (0x3 << 10) /* FC EOF index */
31439 +#define IXGBE_ADVTXD_FCOEF_SOF ((1 << 2) << 10) /* FC SOF index */
31440 +#define IXGBE_ADVTXD_FCOEF_PARINC ((1 << 3) << 10) /* Rel_Off in F_CTL */
31441 +#define IXGBE_ADVTXD_FCOEF_ORIE ((1 << 4) << 10) /* Orientation: End */
31442 +#define IXGBE_ADVTXD_FCOEF_ORIS ((1 << 5) << 10) /* Orientation: Start */
31443 +#define IXGBE_ADVTXD_FCOEF_EOF_N (0x0 << 10) /* 00: EOFn */
31444 +#define IXGBE_ADVTXD_FCOEF_EOF_T (0x1 << 10) /* 01: EOFt */
31445 +#define IXGBE_ADVTXD_FCOEF_EOF_NI (0x2 << 10) /* 10: EOFni */
31446 +#define IXGBE_ADVTXD_FCOEF_EOF_A (0x3 << 10) /* 11: EOFa */
31447 +#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
31448 +#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
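A sketch of composing one advanced data descriptor for a single-buffer packet of len bytes; the read/wb member names are assumed to match the upstream layout of union ixgbe_adv_tx_desc shown earlier:

    u32 cmd_type = len |
                   IXGBE_ADVTXD_DTYP_DATA |
                   IXGBE_ADVTXD_DCMD_DEXT |  /* advanced (not legacy) format */
                   IXGBE_ADVTXD_DCMD_IFCS |
                   IXGBE_ADVTXD_DCMD_EOP |
                   IXGBE_ADVTXD_DCMD_RS;
    u32 olinfo = len << IXGBE_ADVTXD_PAYLEN_SHIFT;

    tx_desc->read.buffer_addr   = cpu_to_le64(dma_addr);
    tx_desc->read.cmd_type_len  = cpu_to_le32(cmd_type);
    tx_desc->read.olinfo_status = cpu_to_le32(olinfo);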
31450 +/* Autonegotiation advertised speeds */
31451 +typedef u32 ixgbe_autoneg_advertised;
31453 +typedef u32 ixgbe_link_speed;
31454 +#define IXGBE_LINK_SPEED_UNKNOWN 0
31455 +#define IXGBE_LINK_SPEED_100_FULL 0x0008
31456 +#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
31457 +#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
31458 +#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \
31459 + IXGBE_LINK_SPEED_10GB_FULL)
31460 +#define IXGBE_LINK_SPEED_82599_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \
31461 + IXGBE_LINK_SPEED_1GB_FULL | \
31462 + IXGBE_LINK_SPEED_10GB_FULL)
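A sketch of the guard a link-setup path can apply with these masks, rejecting any requested speed the 82598 cannot advertise (IXGBE_ERR_LINK_SETUP is defined later in this header):

    if (speed & ~IXGBE_LINK_SPEED_82598_AUTONEG)
            return IXGBE_ERR_LINK_SETUP;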
31465 +/* Physical layer type */
31466 +typedef u32 ixgbe_physical_layer;
31467 +#define IXGBE_PHYSICAL_LAYER_UNKNOWN 0
31468 +#define IXGBE_PHYSICAL_LAYER_10GBASE_T 0x0001
31469 +#define IXGBE_PHYSICAL_LAYER_1000BASE_T 0x0002
31470 +#define IXGBE_PHYSICAL_LAYER_100BASE_TX 0x0004
31471 +#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x0008
31472 +#define IXGBE_PHYSICAL_LAYER_10GBASE_LR 0x0010
31473 +#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x0020
31474 +#define IXGBE_PHYSICAL_LAYER_10GBASE_SR 0x0040
31475 +#define IXGBE_PHYSICAL_LAYER_10GBASE_KX4 0x0080
31476 +#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4 0x0100
31477 +#define IXGBE_PHYSICAL_LAYER_1000BASE_KX 0x0200
31478 +#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x0400
31479 +#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x0800
31480 +#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000
31481 +#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000
31484 +/* Software ATR hash keys */
31485 +#define IXGBE_ATR_BUCKET_HASH_KEY 0xE214AD3D
31486 +#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x14364D17
31488 +/* Software ATR input stream offsets and masks */
31489 +#define IXGBE_ATR_VLAN_OFFSET 0
31490 +#define IXGBE_ATR_SRC_IPV6_OFFSET 2
31491 +#define IXGBE_ATR_SRC_IPV4_OFFSET 14
31492 +#define IXGBE_ATR_DST_IPV6_OFFSET 18
31493 +#define IXGBE_ATR_DST_IPV4_OFFSET 30
31494 +#define IXGBE_ATR_SRC_PORT_OFFSET 34
31495 +#define IXGBE_ATR_DST_PORT_OFFSET 36
31496 +#define IXGBE_ATR_FLEX_BYTE_OFFSET 38
31497 +#define IXGBE_ATR_VM_POOL_OFFSET 40
31498 +#define IXGBE_ATR_L4TYPE_OFFSET 41
31500 +#define IXGBE_ATR_L4TYPE_MASK 0x3
31501 +#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4
31502 +#define IXGBE_ATR_L4TYPE_UDP 0x1
31503 +#define IXGBE_ATR_L4TYPE_TCP 0x2
31504 +#define IXGBE_ATR_L4TYPE_SCTP 0x3
31505 +#define IXGBE_ATR_HASH_MASK 0x7fff
31507 +/* Flow Director ATR input struct. */
31508 +struct ixgbe_atr_input {
31509 + /* Byte layout in order, all values with MSB first:
31511 + * vlan_id - 2 bytes
31512 + * src_ip - 16 bytes
31513 + * dst_ip - 16 bytes
31514 + * src_port - 2 bytes
31515 + * dst_port - 2 bytes
31516 + * flex_bytes - 2 bytes
31517 + * vm_pool - 1 byte
31518 + * l4type - 1 byte
31520 + u8 byte_stream[42];
31523 +struct ixgbe_atr_input_masks {
31526 + u16 src_port_mask;
31527 + u16 dst_port_mask;
31528 + u16 vlan_id_mask;
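Given the fixed byte offsets and the MSB-first layout documented in struct ixgbe_atr_input above, a field setter reduces to two byte stores; a sketch for the destination port, following the documented byte order:

    static void atr_set_dst_port(struct ixgbe_atr_input *input, u16 dst_port)
    {
            input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET]     = dst_port >> 8;
            input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1] = dst_port & 0xFF;
    }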
31533 + * Unavailable: The FCoE Boot Option ROM is not present in the flash.
31534 + * Disabled: Present; boot order is not set for any targets on the port.
31535 + * Enabled: Present; boot order is set for at least one target on the port.
31537 +enum ixgbe_fcoe_boot_status {
31538 + ixgbe_fcoe_bootstatus_disabled = 0,
31539 + ixgbe_fcoe_bootstatus_enabled = 1,
31540 + ixgbe_fcoe_bootstatus_unavailable = 0xFFFF
31543 +enum ixgbe_eeprom_type {
31544 + ixgbe_eeprom_uninitialized = 0,
31545 + ixgbe_eeprom_spi,
31547 + ixgbe_eeprom_none /* No NVM support */
31550 +enum ixgbe_mac_type {
31551 + ixgbe_mac_unknown = 0,
31552 + ixgbe_mac_82598EB,
31553 + ixgbe_mac_82599EB,
31557 +enum ixgbe_phy_type {
31558 + ixgbe_phy_unknown = 0,
31562 + ixgbe_phy_cu_unknown,
31566 + ixgbe_phy_sfp_passive_tyco,
31567 + ixgbe_phy_sfp_passive_unknown,
31568 + ixgbe_phy_sfp_active_unknown,
31569 + ixgbe_phy_sfp_avago,
31570 + ixgbe_phy_sfp_ftl,
31571 + ixgbe_phy_sfp_ftl_active,
31572 + ixgbe_phy_sfp_unknown,
31573 + ixgbe_phy_sfp_intel,
31574 + ixgbe_phy_sfp_unsupported, /* Enforce bit set with unsupported module */
31575 + ixgbe_phy_generic
31579 + * SFP+ module type IDs:
31586 + * 3 SFP_DA_CU_CORE0 - 82599-specific
31587 + * 4 SFP_DA_CU_CORE1 - 82599-specific
31588 + * 5 SFP_SR/LR_CORE0 - 82599-specific
31589 + * 6 SFP_SR/LR_CORE1 - 82599-specific
31591 +enum ixgbe_sfp_type {
31592 + ixgbe_sfp_type_da_cu = 0,
31593 + ixgbe_sfp_type_sr = 1,
31594 + ixgbe_sfp_type_lr = 2,
31595 + ixgbe_sfp_type_da_cu_core0 = 3,
31596 + ixgbe_sfp_type_da_cu_core1 = 4,
31597 + ixgbe_sfp_type_srlr_core0 = 5,
31598 + ixgbe_sfp_type_srlr_core1 = 6,
31599 + ixgbe_sfp_type_da_act_lmt_core0 = 7,
31600 + ixgbe_sfp_type_da_act_lmt_core1 = 8,
31601 + ixgbe_sfp_type_1g_cu_core0 = 9,
31602 + ixgbe_sfp_type_1g_cu_core1 = 10,
31603 + ixgbe_sfp_type_not_present = 0xFFFE,
31604 + ixgbe_sfp_type_unknown = 0xFFFF
31607 +enum ixgbe_media_type {
31608 + ixgbe_media_type_unknown = 0,
31609 + ixgbe_media_type_fiber,
31610 + ixgbe_media_type_copper,
31611 + ixgbe_media_type_backplane,
31612 + ixgbe_media_type_cx4,
31613 + ixgbe_media_type_virtual
31616 +/* Flow Control Settings */
31617 +enum ixgbe_fc_mode {
31618 + ixgbe_fc_none = 0,
31619 + ixgbe_fc_rx_pause,
31620 + ixgbe_fc_tx_pause,
31628 +/* Smart Speed Settings */
31629 +#define IXGBE_SMARTSPEED_MAX_RETRIES 3
31630 +enum ixgbe_smart_speed {
31631 + ixgbe_smart_speed_auto = 0,
31632 + ixgbe_smart_speed_on,
31633 + ixgbe_smart_speed_off
31636 +/* PCI bus types */
31637 +enum ixgbe_bus_type {
31638 + ixgbe_bus_type_unknown = 0,
31639 + ixgbe_bus_type_pci,
31640 + ixgbe_bus_type_pcix,
31641 + ixgbe_bus_type_pci_express,
31642 + ixgbe_bus_type_reserved
31645 +/* PCI bus speeds */
31646 +enum ixgbe_bus_speed {
31647 + ixgbe_bus_speed_unknown = 0,
31648 + ixgbe_bus_speed_33 = 33,
31649 + ixgbe_bus_speed_66 = 66,
31650 + ixgbe_bus_speed_100 = 100,
31651 + ixgbe_bus_speed_120 = 120,
31652 + ixgbe_bus_speed_133 = 133,
31653 + ixgbe_bus_speed_2500 = 2500,
31654 + ixgbe_bus_speed_5000 = 5000,
31655 + ixgbe_bus_speed_reserved
31658 +/* PCI bus widths */
31659 +enum ixgbe_bus_width {
31660 + ixgbe_bus_width_unknown = 0,
31661 + ixgbe_bus_width_pcie_x1 = 1,
31662 + ixgbe_bus_width_pcie_x2 = 2,
31663 + ixgbe_bus_width_pcie_x4 = 4,
31664 + ixgbe_bus_width_pcie_x8 = 8,
31665 + ixgbe_bus_width_32 = 32,
31666 + ixgbe_bus_width_64 = 64,
31667 + ixgbe_bus_width_reserved
31670 +struct ixgbe_addr_filter_info {
31671 + u32 num_mc_addrs;
31672 + u32 rar_used_count;
31674 + u32 overflow_promisc;
31675 + bool user_set_promisc;
31678 +/* Bus parameters */
31679 +struct ixgbe_bus_info {
31680 + enum ixgbe_bus_speed speed;
31681 + enum ixgbe_bus_width width;
31682 + enum ixgbe_bus_type type;
31688 +/* Flow control parameters */
31689 +struct ixgbe_fc_info {
31690 + u32 high_water; /* Flow Control High-water */
31691 + u32 low_water; /* Flow Control Low-water */
31692 + u16 pause_time; /* Flow Control Pause timer */
31693 + bool send_xon; /* Flow control send XON */
31694 + bool strict_ieee; /* Strict IEEE mode */
31695 + bool disable_fc_autoneg; /* Do not autonegotiate FC */
31696 + bool fc_was_autonegged; /* Is current_mode the result of autonegging? */
31697 + enum ixgbe_fc_mode current_mode; /* FC mode in effect */
31698 + enum ixgbe_fc_mode requested_mode; /* FC mode requested by caller */
31701 +/* Statistics counters collected by the MAC */
31702 +struct ixgbe_hw_stats {
31757 + u64 pxon2offc[8];
31758 + u64 fdirustat_add;
31759 + u64 fdirustat_remove;
31760 + u64 fdirfstat_fadd;
31761 + u64 fdirfstat_fremove;
31773 +/* forward declaration */
31776 +/* iterator type for walking multicast address lists */
31777 +typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr,
31780 +/* Function pointer table */
31781 +struct ixgbe_eeprom_operations {
31782 + s32 (*init_params)(struct ixgbe_hw *);
31783 + s32 (*read)(struct ixgbe_hw *, u16, u16 *);
31784 + s32 (*write)(struct ixgbe_hw *, u16, u16);
31785 + s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
31786 + s32 (*update_checksum)(struct ixgbe_hw *);
31787 + u16 (*calc_checksum)(struct ixgbe_hw *);
31790 +struct ixgbe_mac_operations {
31791 + s32 (*init_hw)(struct ixgbe_hw *);
31792 + s32 (*reset_hw)(struct ixgbe_hw *);
31793 + s32 (*start_hw)(struct ixgbe_hw *);
31794 + s32 (*clear_hw_cntrs)(struct ixgbe_hw *);
31795 + void (*enable_relaxed_ordering)(struct ixgbe_hw *);
31796 + enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
31797 + u32 (*get_supported_physical_layer)(struct ixgbe_hw *);
31798 + s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
31799 + s32 (*get_san_mac_addr)(struct ixgbe_hw *, u8 *);
31800 + s32 (*set_san_mac_addr)(struct ixgbe_hw *, u8 *);
31801 + s32 (*get_device_caps)(struct ixgbe_hw *, u16 *);
31802 + s32 (*get_wwn_prefix)(struct ixgbe_hw *, u16 *, u16 *);
31803 + s32 (*get_fcoe_boot_status)(struct ixgbe_hw *, u16 *);
31804 + s32 (*stop_adapter)(struct ixgbe_hw *);
31805 + s32 (*get_bus_info)(struct ixgbe_hw *);
31806 + void (*set_lan_id)(struct ixgbe_hw *);
31807 + s32 (*read_analog_reg8)(struct ixgbe_hw*, u32, u8*);
31808 + s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8);
31809 + s32 (*setup_sfp)(struct ixgbe_hw *);
31810 + s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
31811 + s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u16);
31812 + void (*release_swfw_sync)(struct ixgbe_hw *, u16);
31815 + void (*disable_tx_laser)(struct ixgbe_hw *);
31816 + void (*enable_tx_laser)(struct ixgbe_hw *);
31817 + void (*flap_tx_laser)(struct ixgbe_hw *);
31818 + s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
31819 + s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
31820 + s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
31824 + s32 (*led_on)(struct ixgbe_hw *, u32);
31825 + s32 (*led_off)(struct ixgbe_hw *, u32);
31826 + s32 (*blink_led_start)(struct ixgbe_hw *, u32);
31827 + s32 (*blink_led_stop)(struct ixgbe_hw *, u32);
31829 + /* RAR, Multicast, VLAN */
31830 + s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32);
31831 + s32 (*clear_rar)(struct ixgbe_hw *, u32);
31832 + s32 (*insert_mac_addr)(struct ixgbe_hw *, u8 *, u32);
31833 + s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32);
31834 + s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
31835 + s32 (*init_rx_addrs)(struct ixgbe_hw *);
31836 + s32 (*update_uc_addr_list)(struct ixgbe_hw *, u8 *, u32,
31837 + ixgbe_mc_addr_itr);
31838 + s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32,
31839 + ixgbe_mc_addr_itr);
31840 + s32 (*enable_mc)(struct ixgbe_hw *);
31841 + s32 (*disable_mc)(struct ixgbe_hw *);
31842 + s32 (*clear_vfta)(struct ixgbe_hw *);
31843 + s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
31844 + s32 (*init_uta_tables)(struct ixgbe_hw *);
31846 + /* Flow Control */
31847 + s32 (*fc_enable)(struct ixgbe_hw *, s32);
31850 +struct ixgbe_phy_operations {
31851 + s32 (*identify)(struct ixgbe_hw *);
31852 + s32 (*identify_sfp)(struct ixgbe_hw *);
31853 + s32 (*init)(struct ixgbe_hw *);
31854 + s32 (*reset)(struct ixgbe_hw *);
31855 + s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *);
31856 + s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16);
31857 + s32 (*setup_link)(struct ixgbe_hw *);
31858 + s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool,
31860 + s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *);
31861 + s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *);
31862 + s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *);
31863 + s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8);
31864 + s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
31865 + s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
31866 + void (*i2c_bus_clear)(struct ixgbe_hw *);
31867 + s32 (*check_overtemp)(struct ixgbe_hw *);
31870 +struct ixgbe_eeprom_info {
31871 + struct ixgbe_eeprom_operations ops;
31872 + enum ixgbe_eeprom_type type;
31873 + u32 semaphore_delay;
31875 + u16 address_bits;
31878 +#define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
31879 +struct ixgbe_mac_info {
31880 + struct ixgbe_mac_operations ops;
31881 + enum ixgbe_mac_type type;
31882 + u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
31883 + u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
31884 + u8 san_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
31885 + /* prefix for World Wide Node Name (WWNN) */
31887 + /* prefix for World Wide Port Name (WWPN) */
31889 +#define IXGBE_MAX_MTA 128
31890 + u32 mta_shadow[IXGBE_MAX_MTA];
31891 + s32 mc_filter_type;
31894 + u32 num_rar_entries;
31895 + u32 rar_highwater;
31897 + u32 max_tx_queues;
31898 + u32 max_rx_queues;
31899 + u32 max_msix_vectors;
31900 + bool msix_vectors_from_pcie;
31903 + bool orig_link_settings_stored;
31904 + bool autotry_restart;
31908 +struct ixgbe_phy_info {
31909 + struct ixgbe_phy_operations ops;
31910 + enum ixgbe_phy_type type;
31913 + enum ixgbe_sfp_type sfp_type;
31914 + bool sfp_setup_needed;
31916 + enum ixgbe_media_type media_type;
31917 + bool reset_disable;
31918 + ixgbe_autoneg_advertised autoneg_advertised;
31919 + enum ixgbe_smart_speed smart_speed;
31920 + bool smart_speed_active;
31921 + bool multispeed_fiber;
31922 + bool reset_if_overtemp;
31925 +#include "ixgbe_mbx.h"
31927 +struct ixgbe_mbx_operations {
31928 + void (*init_params)(struct ixgbe_hw *hw);
31929 + s32 (*read)(struct ixgbe_hw *, u32 *, u16, u16);
31930 + s32 (*write)(struct ixgbe_hw *, u32 *, u16, u16);
31931 + s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16, u16);
31932 + s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16);
31933 + s32 (*check_for_msg)(struct ixgbe_hw *, u16);
31934 + s32 (*check_for_ack)(struct ixgbe_hw *, u16);
31935 + s32 (*check_for_rst)(struct ixgbe_hw *, u16);
31938 +struct ixgbe_mbx_stats {
31947 +struct ixgbe_mbx_info {
31948 + struct ixgbe_mbx_operations ops;
31949 + struct ixgbe_mbx_stats stats;
31957 + u8 __iomem *hw_addr;
31959 + struct ixgbe_mac_info mac;
31960 + struct ixgbe_addr_filter_info addr_ctrl;
31961 + struct ixgbe_fc_info fc;
31962 + struct ixgbe_phy_info phy;
31963 + struct ixgbe_eeprom_info eeprom;
31964 + struct ixgbe_bus_info bus;
31965 + struct ixgbe_mbx_info mbx;
31968 + u16 subsystem_device_id;
31969 + u16 subsystem_vendor_id;
31971 + bool adapter_stopped;
31974 +#define ixgbe_call_func(hw, func, params, error) \
31975 + (func != NULL) ? func params : error
31979 +#define IXGBE_ERR_EEPROM -1
31980 +#define IXGBE_ERR_EEPROM_CHECKSUM -2
31981 +#define IXGBE_ERR_PHY -3
31982 +#define IXGBE_ERR_CONFIG -4
31983 +#define IXGBE_ERR_PARAM -5
31984 +#define IXGBE_ERR_MAC_TYPE -6
31985 +#define IXGBE_ERR_UNKNOWN_PHY -7
31986 +#define IXGBE_ERR_LINK_SETUP -8
31987 +#define IXGBE_ERR_ADAPTER_STOPPED -9
31988 +#define IXGBE_ERR_INVALID_MAC_ADDR -10
31989 +#define IXGBE_ERR_DEVICE_NOT_SUPPORTED -11
31990 +#define IXGBE_ERR_MASTER_REQUESTS_PENDING -12
31991 +#define IXGBE_ERR_INVALID_LINK_SETTINGS -13
31992 +#define IXGBE_ERR_AUTONEG_NOT_COMPLETE -14
31993 +#define IXGBE_ERR_RESET_FAILED -15
31994 +#define IXGBE_ERR_SWFW_SYNC -16
31995 +#define IXGBE_ERR_PHY_ADDR_INVALID -17
31996 +#define IXGBE_ERR_I2C -18
31997 +#define IXGBE_ERR_SFP_NOT_SUPPORTED -19
31998 +#define IXGBE_ERR_SFP_NOT_PRESENT -20
31999 +#define IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT -21
32000 +#define IXGBE_ERR_NO_SAN_ADDR_PTR -22
32001 +#define IXGBE_ERR_FDIR_REINIT_FAILED -23
32002 +#define IXGBE_ERR_EEPROM_VERSION -24
32003 +#define IXGBE_ERR_NO_SPACE -25
32004 +#define IXGBE_ERR_OVERTEMP -26
32005 +#define IXGBE_ERR_FC_NOT_NEGOTIATED -27
32006 +#define IXGBE_ERR_FC_NOT_SUPPORTED -28
32007 +#define IXGBE_ERR_FLOW_CONTROL -29
32008 +#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30
32009 +#define IXGBE_ERR_PBA_SECTION -31
32010 +#define IXGBE_ERR_INVALID_ARGUMENT -32
32011 +#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
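The ixgbe_call_func macro defined above is what lets the per-MAC ops tables leave optional entries NULL: each wrapper supplies the value to return when the pointer is unset. The driver's API shim uses the pattern like so:

    s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr)
    {
            return ixgbe_call_func(hw, hw->mac.ops.get_mac_addr,
                                   (hw, mac_addr), IXGBE_NOT_IMPLEMENTED);
    }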
32013 +#define UNREFERENCED_2PARAMETER(_p, _q) (_p); (_q);
32014 +#define UNREFERENCED_3PARAMETER(_p, _q, _r) (_p); (_q); (_r);
32015 +#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s) (_p); (_q); (_r); (_s);
32017 +#endif /* _IXGBE_TYPE_H_ */
32018 diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/kcompat.c linux-2.6.22-50/drivers/net/ixgbe/kcompat.c
32019 --- linux-2.6.22-40/drivers/net/ixgbe/kcompat.c 1969-12-31 19:00:00.000000000 -0500
32020 +++ linux-2.6.22-50/drivers/net/ixgbe/kcompat.c 2010-08-25 17:56:26.000000000 -0400
32022 +/*******************************************************************************
32024 + Intel 10 Gigabit PCI Express Linux driver
32025 + Copyright(c) 1999 - 2010 Intel Corporation.
32027 + This program is free software; you can redistribute it and/or modify it
32028 + under the terms and conditions of the GNU General Public License,
32029 + version 2, as published by the Free Software Foundation.
32031 + This program is distributed in the hope it will be useful, but WITHOUT
32032 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
32033 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
32036 + You should have received a copy of the GNU General Public License along with
32037 + this program; if not, write to the Free Software Foundation, Inc.,
32038 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
32040 + The full GNU General Public License is included in this distribution in
32041 + the file called "COPYING".
32043 + Contact Information:
32044 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
32045 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
32047 +*******************************************************************************/
32049 +#include "ixgbe.h"
32050 +#include "kcompat.h"
32052 +/*****************************************************************************/
32053 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) )
32054 +/* From lib/vsprintf.c */
32055 +#include <asm/div64.h>
32057 +static int skip_atoi(const char **s)
32061 + while (isdigit(**s))
32062 + i = i*10 + *((*s)++) - '0';
32066 +#define _kc_ZEROPAD 1 /* pad with zero */
32067 +#define _kc_SIGN 2 /* unsigned/signed long */
32068 +#define _kc_PLUS 4 /* show plus */
32069 +#define _kc_SPACE 8 /* space if plus */
32070 +#define _kc_LEFT 16 /* left justified */
32071 +#define _kc_SPECIAL 32 /* 0x */
32072 +#define _kc_LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */
32074 +static char * number(char * buf, char * end, long long num, int base, int size, int precision, int type)
32076 + char c,sign,tmp[66];
32077 + const char *digits;
32078 + const char small_digits[] = "0123456789abcdefghijklmnopqrstuvwxyz";
32079 + const char large_digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
32082 + digits = (type & _kc_LARGE) ? large_digits : small_digits;
32083 + if (type & _kc_LEFT)
32084 + type &= ~_kc_ZEROPAD;
32085 + if (base < 2 || base > 36)
32087 + c = (type & _kc_ZEROPAD) ? '0' : ' ';
32089 + if (type & _kc_SIGN) {
32094 + } else if (type & _kc_PLUS) {
32097 + } else if (type & _kc_SPACE) {
32102 + if (type & _kc_SPECIAL) {
32105 + else if (base == 8)
32111 + else while (num != 0)
32112 + tmp[i++] = digits[do_div(num,base)];
32113 + if (i > precision)
32115 + size -= precision;
32116 + if (!(type&(_kc_ZEROPAD+_kc_LEFT))) {
32117 + while(size-->0) {
32128 + if (type & _kc_SPECIAL) {
32133 + } else if (base==16) {
32138 + *buf = digits[33];
32142 + if (!(type & _kc_LEFT)) {
32143 + while (size-- > 0) {
32149 + while (i < precision--) {
32154 + while (i-- > 0) {
32159 + while (size-- > 0) {
32167 +int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
32170 + unsigned long long num;
32172 + char *str, *end, c;
32175 + int flags; /* flags to number() */
32177 + int field_width; /* width of output field */
32178 + int precision; /* min. # of digits for integers; max
32179 + number of chars for a string */
32180 + int qualifier; /* 'h', 'l', or 'L' for integer fields */
32181 + /* 'z' support added 23/7/1999 S.H. */
32182 + /* 'z' changed to 'Z' --davidm 1/25/99 */
32185 + end = buf + size - 1;
32187 + if (end < buf - 1) {
32188 + end = ((void *) -1);
32189 + size = end - buf + 1;
32192 + for (; *fmt ; ++fmt) {
32193 + if (*fmt != '%') {
32200 + /* process flags */
32203 + ++fmt; /* this also skips first '%' */
32205 + case '-': flags |= _kc_LEFT; goto repeat;
32206 + case '+': flags |= _kc_PLUS; goto repeat;
32207 + case ' ': flags |= _kc_SPACE; goto repeat;
32208 + case '#': flags |= _kc_SPECIAL; goto repeat;
32209 + case '0': flags |= _kc_ZEROPAD; goto repeat;
32212 + /* get field width */
32213 + field_width = -1;
32214 + if (isdigit(*fmt))
32215 + field_width = skip_atoi(&fmt);
32216 + else if (*fmt == '*') {
32218 + /* it's the next argument */
32219 + field_width = va_arg(args, int);
32220 + if (field_width < 0) {
32221 + field_width = -field_width;
32222 + flags |= _kc_LEFT;
32226 + /* get the precision */
32228 + if (*fmt == '.') {
32230 + if (isdigit(*fmt))
32231 + precision = skip_atoi(&fmt);
32232 + else if (*fmt == '*') {
32234 + /* it's the next argument */
32235 + precision = va_arg(args, int);
32237 + if (precision < 0)
32241 + /* get the conversion qualifier */
32243 + if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || *fmt =='Z') {
32244 + qualifier = *fmt;
32248 + /* default base */
32253 + if (!(flags & _kc_LEFT)) {
32254 + while (--field_width > 0) {
32260 + c = (unsigned char) va_arg(args, int);
32264 + while (--field_width > 0) {
32272 + s = va_arg(args, char *);
32276 + len = strnlen(s, precision);
32278 + if (!(flags & _kc_LEFT)) {
32279 + while (len < field_width--) {
32285 + for (i = 0; i < len; ++i) {
32290 + while (len < field_width--) {
32298 + if (field_width == -1) {
32299 + field_width = 2*sizeof(void *);
32300 + flags |= _kc_ZEROPAD;
32302 + str = number(str, end,
32303 + (unsigned long) va_arg(args, void *),
32304 + 16, field_width, precision, flags);
32310 + * What does C99 say about the overflow case here? */
32311 + if (qualifier == 'l') {
32312 + long * ip = va_arg(args, long *);
32313 + *ip = (str - buf);
32314 + } else if (qualifier == 'Z') {
32315 + size_t * ip = va_arg(args, size_t *);
32316 + *ip = (str - buf);
32318 + int * ip = va_arg(args, int *);
32319 + *ip = (str - buf);
32329 + /* integer number formats - set up the flags and "break" */
32335 + flags |= _kc_LARGE;
32342 + flags |= _kc_SIGN;
32359 + if (qualifier == 'L')
32360 + num = va_arg(args, long long);
32361 + else if (qualifier == 'l') {
32362 + num = va_arg(args, unsigned long);
32363 + if (flags & _kc_SIGN)
32364 + num = (signed long) num;
32365 + } else if (qualifier == 'Z') {
32366 + num = va_arg(args, size_t);
32367 + } else if (qualifier == 'h') {
32368 + num = (unsigned short) va_arg(args, int);
32369 + if (flags & _kc_SIGN)
32370 + num = (signed short) num;
32372 + num = va_arg(args, unsigned int);
32373 + if (flags & _kc_SIGN)
32374 + num = (signed int) num;
32376 + str = number(str, end, num, base,
32377 + field_width, precision, flags);
32381 + else if (size > 0)
32382 + /* don't write out a null byte if the buf size is zero */
32384 + /* the trailing null byte doesn't count towards the total
32390 +int _kc_snprintf(char * buf, size_t size, const char *fmt, ...)
32395 + va_start(args, fmt);
32396 + i = _kc_vsnprintf(buf,size,fmt,args);
32400 +#endif /* < 2.4.8 */
32402 +/*****************************************************************************/
32403 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,21) )
32405 +_kc_skb_pad(struct sk_buff *skb, int pad)
32407 + struct sk_buff *nskb;
32409 + /* If the skbuff is non-linear, tailroom is always zero. */
32410 + if(skb_tailroom(skb) >= pad)
32412 + memset(skb->data+skb->len, 0, pad);
32416 + nskb = skb_copy_expand(skb, skb_headroom(skb), skb_tailroom(skb) + pad, GFP_ATOMIC);
32419 + memset(nskb->data+nskb->len, 0, pad);
32422 +#endif /* < 2.4.21 */
32424 +/*****************************************************************************/
32425 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) )
32427 +/**************************************/
32428 +/* PCI DMA MAPPING */
32430 +#if defined(CONFIG_HIGHMEM)
32432 +#ifndef PCI_DRAM_OFFSET
32433 +#define PCI_DRAM_OFFSET 0
32437 +_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset,
32438 + size_t size, int direction)
32440 + return (((u64) (page - mem_map) << PAGE_SHIFT) + offset +
32441 + PCI_DRAM_OFFSET);
32444 +#else /* CONFIG_HIGHMEM */
32447 +_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset,
32448 + size_t size, int direction)
32450 + return pci_map_single(dev, (void *)page_address(page) + offset, size,
32454 +#endif /* CONFIG_HIGHMEM */
32457 +_kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size,
32460 + return pci_unmap_single(dev, dma_addr, size, direction);
32463 +#endif /* 2.4.13 => 2.4.3 */
32465 +/*****************************************************************************/
32466 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) )
32468 +/**************************************/
32469 +/* PCI DRIVER API */
32472 +_kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask)
32474 + if (!pci_dma_supported(dev, mask))
32476 + dev->dma_mask = mask;
32481 +_kc_pci_request_regions(struct pci_dev *dev, char *res_name)
32485 + for (i = 0; i < 6; i++) {
32486 + if (pci_resource_len(dev, i) == 0)
32489 + if (pci_resource_flags(dev, i) & IORESOURCE_IO) {
32490 + if (!request_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) {
32491 + pci_release_regions(dev);
32494 + } else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
32495 + if (!request_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) {
32496 + pci_release_regions(dev);
32505 +_kc_pci_release_regions(struct pci_dev *dev)
32509 + for (i = 0; i < 6; i++) {
32510 + if (pci_resource_len(dev, i) == 0)
32513 + if (pci_resource_flags(dev, i) & IORESOURCE_IO)
32514 + release_region(pci_resource_start(dev, i), pci_resource_len(dev, i));
32516 + else if (pci_resource_flags(dev, i) & IORESOURCE_MEM)
32517 + release_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i));
32521 +/**************************************/
32522 +/* NETWORK DRIVER API */
32524 +struct net_device *
32525 +_kc_alloc_etherdev(int sizeof_priv)
32527 + struct net_device *dev;
32530 + alloc_size = sizeof(*dev) + sizeof_priv + IFNAMSIZ + 31;
32531 + dev = kmalloc(alloc_size, GFP_KERNEL);
32534 + memset(dev, 0, alloc_size);
32537 + dev->priv = (void *) (((unsigned long)(dev + 1) + 31) & ~31);
32538 + dev->name[0] = '\0';
32539 + ether_setup(dev);
32545 +_kc_is_valid_ether_addr(u8 *addr)
32547 + const char zaddr[6] = { 0, };
32549 + return !(addr[0] & 1) && memcmp(addr, zaddr, 6);
32552 +#endif /* 2.4.3 => 2.4.0 */
32554 +/*****************************************************************************/
32555 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) )
32558 +_kc_pci_set_power_state(struct pci_dev *dev, int state)
32564 +_kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable)
32569 +#endif /* 2.4.6 => 2.4.3 */
32571 +/*****************************************************************************/
32572 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
32573 +void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page,
32574 + int off, int size)
32576 + skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
32577 + frag->page = page;
32578 + frag->page_offset = off;
32579 + frag->size = size;
32580 + skb_shinfo(skb)->nr_frags = i + 1;
32584 + * Original Copyright:
32585 + * find_next_bit.c: fallback find next bit implementation
32587 + * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
32588 + * Written by David Howells (dhowells@redhat.com)
32592 + * find_next_bit - find the next set bit in a memory region
32593 + * @addr: The address to base the search on
32594 + * @offset: The bitnumber to start searching at
32595 + * @size: The maximum size to search
32597 +unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
32598 + unsigned long offset)
32600 + const unsigned long *p = addr + BITOP_WORD(offset);
32601 + unsigned long result = offset & ~(BITS_PER_LONG-1);
32602 + unsigned long tmp;
32604 + if (offset >= size)
32607 + offset %= BITS_PER_LONG;
32610 + tmp &= (~0UL << offset);
32611 + if (size < BITS_PER_LONG)
32612 + goto found_first;
32614 + goto found_middle;
32615 + size -= BITS_PER_LONG;
32616 + result += BITS_PER_LONG;
32618 + while (size & ~(BITS_PER_LONG-1)) {
32619 + if ((tmp = *(p++)))
32620 + goto found_middle;
32621 + result += BITS_PER_LONG;
32622 + size -= BITS_PER_LONG;
32629 + tmp &= (~0UL >> (BITS_PER_LONG - size));
32630 + if (tmp == 0UL) /* Are any bits set? */
32631 + return result + size; /* Nope. */
32633 + return result + ffs(tmp);
32636 +#endif /* 2.6.0 => 2.4.6 */
32638 +/*****************************************************************************/
32639 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
32640 +int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...)
32645 + va_start(args, fmt);
32646 + i = vsnprintf(buf, size, fmt, args);
32648 + return (i >= size) ? (size - 1) : i;
32650 +#endif /* < 2.6.4 */
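The backport exists because scnprintf() reports how many characters were actually stored (at most size - 1), while vsnprintf() reports how many it would have needed. For example, formatting a six-character string into a four-byte buffer:

    char buf[4];
    int want = vsnprintf(buf, sizeof(buf), "%s", "abcdef");     /* want == 6 */
    int got  = _kc_scnprintf(buf, sizeof(buf), "%s", "abcdef"); /* got  == 3 */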
32652 +/*****************************************************************************/
32653 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) )
32654 +DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES) = {1};
32655 +#endif /* < 2.6.10 */
32657 +/*****************************************************************************/
32658 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) )
32659 +void *_kc_kzalloc(size_t size, int flags)
32661 + void *ret = kmalloc(size, flags);
32663 + memset(ret, 0, size);
32666 +#endif /* <= 2.6.13 */
32668 +/*****************************************************************************/
32669 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) )
32670 +int _kc_pci_save_state(struct pci_dev *pdev)
32672 + struct adapter_struct *adapter = pci_get_drvdata(pdev);
32673 + int size = PCI_CONFIG_SPACE_LEN, i;
32674 + u16 pcie_cap_offset, pcie_link_status;
32676 +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) )
32677 + /* no ->dev for 2.4 kernels */
32678 + WARN_ON(pdev->dev.driver_data == NULL);
32680 + pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP);
32681 + if (pcie_cap_offset) {
32682 + if (!pci_read_config_word(pdev,
32683 + pcie_cap_offset + PCIE_LINK_STATUS,
32684 + &pcie_link_status))
32685 + size = PCIE_CONFIG_SPACE_LEN;
32687 + pci_config_space_ich8lan();
32688 +#ifdef HAVE_PCI_ERS
32689 + if (adapter->config_space == NULL)
32691 + WARN_ON(adapter->config_space != NULL);
32693 + adapter->config_space = kmalloc(size, GFP_KERNEL);
32694 + if (!adapter->config_space) {
32695 + printk(KERN_ERR "Out of memory in pci_save_state\n");
32698 + for (i = 0; i < (size / 4); i++)
32699 + pci_read_config_dword(pdev, i * 4, &adapter->config_space[i]);
32703 +void _kc_pci_restore_state(struct pci_dev *pdev)
32705 + struct adapter_struct *adapter = pci_get_drvdata(pdev);
32706 + int size = PCI_CONFIG_SPACE_LEN, i;
32707 + u16 pcie_cap_offset;
32708 + u16 pcie_link_status;
32710 + if (adapter->config_space != NULL) {
32711 + pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP);
32712 + if (pcie_cap_offset &&
32713 + !pci_read_config_word(pdev,
32714 + pcie_cap_offset + PCIE_LINK_STATUS,
32715 + &pcie_link_status))
32716 + size = PCIE_CONFIG_SPACE_LEN;
32718 + pci_config_space_ich8lan();
32719 + for (i = 0; i < (size / 4); i++)
32720 + pci_write_config_dword(pdev, i * 4, adapter->config_space[i]);
32721 +#ifndef HAVE_PCI_ERS
32722 + kfree(adapter->config_space);
32723 + adapter->config_space = NULL;
32728 +#ifdef HAVE_PCI_ERS
32729 +void _kc_free_netdev(struct net_device *netdev)
32731 + struct adapter_struct *adapter = netdev_priv(netdev);
32733 + if (adapter->config_space != NULL)
32734 + kfree(adapter->config_space);
32735 +#ifdef CONFIG_SYSFS
32736 + if (netdev->reg_state == NETREG_UNINITIALIZED) {
32737 + kfree((char *)netdev - netdev->padded);
32739 + BUG_ON(netdev->reg_state != NETREG_UNREGISTERED);
32740 + netdev->reg_state = NETREG_RELEASED;
32741 + class_device_put(&netdev->class_dev);
32744 + kfree((char *)netdev - netdev->padded);
32748 +#endif /* <= 2.6.18 */
32750 +/*****************************************************************************/
32751 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) )
32752 +/* hexdump code taken from lib/hexdump.c */
32753 +static void _kc_hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
32754 + int groupsize, unsigned char *linebuf,
32755 + size_t linebuflen, bool ascii)
32757 + const u8 *ptr = buf;
32760 + int ascii_column;
32762 + if (rowsize != 16 && rowsize != 32)
32767 + if (len > rowsize) /* limit to one line at a time */
32769 + if ((len % groupsize) != 0) /* no mixed size output */
32772 + switch (groupsize) {
32774 + const u64 *ptr8 = buf;
32775 + int ngroups = len / groupsize;
32777 + for (j = 0; j < ngroups; j++)
32778 + lx += scnprintf((char *)(linebuf + lx), linebuflen - lx,
32779 + "%s%16.16llx", j ? " " : "",
32780 + (unsigned long long)*(ptr8 + j));
32781 + ascii_column = 17 * ngroups + 2;
32786 + const u32 *ptr4 = buf;
32787 + int ngroups = len / groupsize;
32789 + for (j = 0; j < ngroups; j++)
32790 + lx += scnprintf((char *)(linebuf + lx), linebuflen - lx,
32791 + "%s%8.8x", j ? " " : "", *(ptr4 + j));
32792 + ascii_column = 9 * ngroups + 2;
32797 + const u16 *ptr2 = buf;
32798 + int ngroups = len / groupsize;
32800 + for (j = 0; j < ngroups; j++)
32801 + lx += scnprintf((char *)(linebuf + lx), linebuflen - lx,
32802 + "%s%4.4x", j ? " " : "", *(ptr2 + j));
32803 + ascii_column = 5 * ngroups + 2;
32808 + for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) {
32810 + linebuf[lx++] = hex_asc(ch >> 4);
32811 + linebuf[lx++] = hex_asc(ch & 0x0f);
32812 + linebuf[lx++] = ' ';
32817 + ascii_column = 3 * rowsize + 2;
32823 + while (lx < (linebuflen - 1) && lx < (ascii_column - 1))
32824 + linebuf[lx++] = ' ';
32825 + for (j = 0; (j < len) && (lx + 2) < linebuflen; j++)
32826 + linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? ptr[j]
32829 + linebuf[lx++] = '\0';
32832 +void _kc_print_hex_dump(const char *level,
32833 + const char *prefix_str, int prefix_type,
32834 + int rowsize, int groupsize,
32835 + const void *buf, size_t len, bool ascii)
32837 + const u8 *ptr = buf;
32838 + int i, linelen, remaining = len;
32839 + unsigned char linebuf[200];
32841 + if (rowsize != 16 && rowsize != 32)
32844 + for (i = 0; i < len; i += rowsize) {
32845 + linelen = min(remaining, rowsize);
32846 + remaining -= rowsize;
32847 + _kc_hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize,
32848 + linebuf, sizeof(linebuf), ascii);
32850 + switch (prefix_type) {
32851 + case DUMP_PREFIX_ADDRESS:
32852 + printk("%s%s%*p: %s\n", level, prefix_str,
32853 + (int)(2 * sizeof(void *)), ptr + i, linebuf);
32855 + case DUMP_PREFIX_OFFSET:
32856 + printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf);
32859 + printk("%s%s%s\n", level, prefix_str, linebuf);
32864 +#endif /* < 2.6.22 */
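An example call, mirroring the upstream print_hex_dump() signature this backport provides: dump a 32-byte buffer at 16 bytes per row, grouped as single bytes, with offset prefixes and an ASCII column:

    u8 frame[32];
    _kc_print_hex_dump(KERN_DEBUG, "ixgbe: ", DUMP_PREFIX_OFFSET,
                       16, 1, frame, sizeof(frame), true);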
32866 +/*****************************************************************************/
32867 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) )
32868 +int ixgbe_dcb_netlink_register()
32873 +int ixgbe_dcb_netlink_unregister()
32878 +int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
32879 + struct ixgbe_dcb_config *dst_dcb_cfg, int tc_max)
32883 +#endif /* < 2.6.23 */
32885 +/*****************************************************************************/
32886 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
32888 +struct net_device *napi_to_poll_dev(struct napi_struct *napi)
32890 + struct adapter_q_vector *q_vector = container_of(napi,
32891 + struct adapter_q_vector,
32893 + return &q_vector->poll_dev;
32896 +int __kc_adapter_clean(struct net_device *netdev, int *budget)
32899 + int work_to_do = min(*budget, netdev->quota);
32900 + /* kcompat.h netif_napi_add puts napi struct in "fake netdev->priv" */
32901 + struct napi_struct *napi = netdev->priv;
32902 + work_done = napi->poll(napi, work_to_do);
32903 + *budget -= work_done;
32904 + netdev->quota -= work_done;
32905 + return (work_done >= work_to_do) ? 1 : 0;
32908 +#endif /* <= 2.6.24 */
32910 +/*****************************************************************************/
32911 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) )
32912 +void _kc_pci_disable_link_state(struct pci_dev *pdev, int state)
32914 + struct pci_dev *parent = pdev->bus->self;
32921 + pos = pci_find_capability(parent, PCI_CAP_ID_EXP);
32923 + pci_read_config_word(parent, pos + PCI_EXP_LNKCTL, &link_state);
32924 + link_state &= ~state;
32925 + pci_write_config_word(parent, pos + PCI_EXP_LNKCTL, link_state);
32928 +#endif /* < 2.6.26 */
32930 +/*****************************************************************************/
32931 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) )
32933 +void _kc_netif_tx_stop_all_queues(struct net_device *netdev)
32935 + struct adapter_struct *adapter = netdev_priv(netdev);
32938 + netif_stop_queue(netdev);
32939 + if (netif_is_multiqueue(netdev))
32940 + for (i = 0; i < adapter->num_tx_queues; i++)
32941 + netif_stop_subqueue(netdev, i);
32943 +void _kc_netif_tx_wake_all_queues(struct net_device *netdev)
32945 + struct adapter_struct *adapter = netdev_priv(netdev);
32948 + netif_wake_queue(netdev);
32949 + if (netif_is_multiqueue(netdev))
32950 + for (i = 0; i < adapter->num_tx_queues; i++)
32951 + netif_wake_subqueue(netdev, i);
32953 +void _kc_netif_tx_start_all_queues(struct net_device *netdev)
32955 + struct adapter_struct *adapter = netdev_priv(netdev);
32958 + netif_start_queue(netdev);
32959 + if (netif_is_multiqueue(netdev))
32960 + for (i = 0; i < adapter->num_tx_queues; i++)
32961 + netif_start_subqueue(netdev, i);
32963 +#endif /* HAVE_TX_MQ */
32964 +#endif /* < 2.6.27 */
32966 +/*****************************************************************************/
32967 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) )
32970 +_kc_pci_prepare_to_sleep(struct pci_dev *dev)
32972 + pci_power_t target_state;
32975 + target_state = pci_choose_state(dev, PMSG_SUSPEND);
32977 + pci_enable_wake(dev, target_state, true);
32979 + error = pci_set_power_state(dev, target_state);
32982 + pci_enable_wake(dev, target_state, false);
32988 +_kc_pci_wake_from_d3(struct pci_dev *dev, bool enable)
32992 + err = pci_enable_wake(dev, PCI_D3cold, enable);
32996 + err = pci_enable_wake(dev, PCI_D3hot, enable);
33001 +#endif /* < 2.6.28 */
33003 +/*****************************************************************************/
33004 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) )
33005 +#ifdef HAVE_NETDEV_SELECT_QUEUE
33006 +#include <net/ip.h>
33007 +static u32 _kc_simple_tx_hashrnd;
33008 +static u32 _kc_simple_tx_hashrnd_initialized;
33010 +u16 _kc_skb_tx_hash(struct net_device *dev, struct sk_buff *skb)
33012 + u32 addr1, addr2, ports;
33016 + if (unlikely(!_kc_simple_tx_hashrnd_initialized)) {
33017 + get_random_bytes(&_kc_simple_tx_hashrnd, 4);
33018 + _kc_simple_tx_hashrnd_initialized = 1;
33021 + switch (skb->protocol) {
33022 + case htons(ETH_P_IP):
33023 + if (!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)))
33024 + ip_proto = ip_hdr(skb)->protocol;
33025 + addr1 = ip_hdr(skb)->saddr;
33026 + addr2 = ip_hdr(skb)->daddr;
33027 + ihl = ip_hdr(skb)->ihl;
33029 +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
33030 + case htons(ETH_P_IPV6):
33031 + ip_proto = ipv6_hdr(skb)->nexthdr;
33032 + addr1 = ipv6_hdr(skb)->saddr.s6_addr32[3];
33033 + addr2 = ipv6_hdr(skb)->daddr.s6_addr32[3];
33042 + switch (ip_proto) {
33043 + case IPPROTO_TCP:
33044 + case IPPROTO_UDP:
33045 + case IPPROTO_DCCP:
33046 + case IPPROTO_ESP:
33048 + case IPPROTO_SCTP:
33049 + case IPPROTO_UDPLITE:
33050 + ports = *((u32 *) (skb_network_header(skb) + (ihl * 4)));
33058 + hash = jhash_3words(addr1, addr2, ports, _kc_simple_tx_hashrnd);
33060 + return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
33062 +#endif /* HAVE_NETDEV_SELECT_QUEUE */
33063 +#endif /* < 2.6.30 */
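The final multiply-and-shift maps the 32-bit jhash result uniformly onto [0, real_num_tx_queues) without a divide: with 8 TX queues and hash == 0x80000000, ((u64)0x80000000 * 8) >> 32 == 4, so the upper half of the hash range lands on the upper queues.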
33065 +/*****************************************************************************/
33066 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) ) || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
33067 +struct sk_buff *_kc_netdev_alloc_skb_ip_align(struct net_device *dev,
33068 + unsigned int length)
33070 + struct sk_buff *skb;
33072 + skb = alloc_skb(length + NET_SKB_PAD + NET_IP_ALIGN, GFP_ATOMIC);
33074 + if (NET_IP_ALIGN + NET_SKB_PAD)
33075 + skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD);
33080 +#endif /* < 2.6.33 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) */
33082 +/*****************************************************************************/
33083 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) )
33084 +int _kc_ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported)
33086 + unsigned long features = dev->features;
33088 + if (data & ~supported)
33091 +#ifdef NETIF_F_LRO
33092 + features &= ~NETIF_F_LRO;
33093 + if (data & ETH_FLAG_LRO)
33094 + features |= NETIF_F_LRO;
33096 +#ifdef NETIF_F_NTUPLE
33097 + features &= ~NETIF_F_NTUPLE;
33098 + if (data & ETH_FLAG_NTUPLE)
33099 + features |= NETIF_F_NTUPLE;
33101 +#ifdef NETIF_F_RXHASH
33102 + features &= ~NETIF_F_RXHASH;
33103 + if (data & ETH_FLAG_RXHASH)
33104 + features |= NETIF_F_RXHASH;
33107 + dev->features = features;
33111 +#endif /* < 2.6.36 */
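A driver's ethtool set_flags hook can delegate to this backport, declaring which flag bits it actually supports. A sketch; the hook name is illustrative, and the ETH_FLAG_* bits are assumed to be available on the kernels this path targets:

    static int ixgbe_kc_set_flags(struct net_device *netdev, u32 data)
    {
            return _kc_ethtool_op_set_flags(netdev, data,
                                            ETH_FLAG_LRO | ETH_FLAG_RXHASH);
    }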
33112 diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/kcompat_ethtool.c linux-2.6.22-50/drivers/net/ixgbe/kcompat_ethtool.c
33113 --- linux-2.6.22-40/drivers/net/ixgbe/kcompat_ethtool.c 1969-12-31 19:00:00.000000000 -0500
33114 +++ linux-2.6.22-50/drivers/net/ixgbe/kcompat_ethtool.c 2010-08-25 17:56:26.000000000 -0400
33116 +/*******************************************************************************
33118 + Intel 10 Gigabit PCI Express Linux driver
33119 + Copyright(c) 1999 - 2010 Intel Corporation.
33121 + This program is free software; you can redistribute it and/or modify it
33122 + under the terms and conditions of the GNU General Public License,
33123 + version 2, as published by the Free Software Foundation.
33125 + This program is distributed in the hope it will be useful, but WITHOUT
33126 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
33127 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
33130 + You should have received a copy of the GNU General Public License along with
33131 + this program; if not, write to the Free Software Foundation, Inc.,
33132 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
33134 + The full GNU General Public License is included in this distribution in
33135 + the file called "COPYING".
33137 + Contact Information:
33138 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
33139 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
33141 +*******************************************************************************/
33144 + * net/core/ethtool.c - Ethtool ioctl handler
33145 + * Copyright (c) 2003 Matthew Wilcox <matthew@wil.cx>
33147 + * This file is where we call all the ethtool_ops commands to get
33148 + * the information ethtool needs. We fall back to calling do_ioctl()
33149 + * for drivers which haven't been converted to ethtool_ops yet.
33151 + * It's GPL, stupid.
33153 + * Modification by sfeldma@pobox.com to work as backward compat
33154 + * solution for pre-ethtool_ops kernels.
33155 + * - copied struct ethtool_ops from ethtool.h
33156 + * - defined SET_ETHTOOL_OPS
33157 + * - put in some #ifndef NETIF_F_xxx wrappers
33158 + * - changes refs to dev->ethtool_ops to ethtool_ops
33159 + * - changed dev_ethtool to ethtool_ioctl
33160 + * - remove EXPORT_SYMBOL()s
33161 + * - added _kc_ prefix in built-in ethtool_op_xxx ops.
33164 +#include <linux/module.h>
33165 +#include <linux/types.h>
33166 +#include <linux/errno.h>
33167 +#include <linux/mii.h>
33168 +#include <linux/ethtool.h>
33169 +#include <linux/netdevice.h>
33170 +#include <asm/uaccess.h>
33172 +#include "kcompat.h"
33174 +#undef SUPPORTED_10000baseT_Full
33175 +#define SUPPORTED_10000baseT_Full (1 << 12)
33176 +#undef ADVERTISED_10000baseT_Full
33177 +#define ADVERTISED_10000baseT_Full (1 << 12)
33178 +#undef SPEED_10000
33179 +#define SPEED_10000 10000
33181 +#undef ethtool_ops
33182 +#define ethtool_ops _kc_ethtool_ops
33184 +struct _kc_ethtool_ops {
33185 + int (*get_settings)(struct net_device *, struct ethtool_cmd *);
33186 + int (*set_settings)(struct net_device *, struct ethtool_cmd *);
33187 + void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *);
33188 + int (*get_regs_len)(struct net_device *);
33189 + void (*get_regs)(struct net_device *, struct ethtool_regs *, void *);
33190 + void (*get_wol)(struct net_device *, struct ethtool_wolinfo *);
33191 + int (*set_wol)(struct net_device *, struct ethtool_wolinfo *);
33192 + u32 (*get_msglevel)(struct net_device *);
33193 + void (*set_msglevel)(struct net_device *, u32);
33194 + int (*nway_reset)(struct net_device *);
33195 + u32 (*get_link)(struct net_device *);
33196 + int (*get_eeprom_len)(struct net_device *);
33197 + int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *);
33198 + int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *);
33199 + int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *);
33200 + int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *);
33201 + void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *);
33202 + int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *);
33203 + void (*get_pauseparam)(struct net_device *,
33204 + struct ethtool_pauseparam*);
33205 + int (*set_pauseparam)(struct net_device *,
33206 + struct ethtool_pauseparam*);
33207 + u32 (*get_rx_csum)(struct net_device *);
33208 + int (*set_rx_csum)(struct net_device *, u32);
33209 + u32 (*get_tx_csum)(struct net_device *);
33210 + int (*set_tx_csum)(struct net_device *, u32);
33211 + u32 (*get_sg)(struct net_device *);
33212 + int (*set_sg)(struct net_device *, u32);
33213 + u32 (*get_tso)(struct net_device *);
33214 + int (*set_tso)(struct net_device *, u32);
33215 + int (*self_test_count)(struct net_device *);
33216 + void (*self_test)(struct net_device *, struct ethtool_test *, u64 *);
33217 + void (*get_strings)(struct net_device *, u32 stringset, u8 *);
33218 + int (*phys_id)(struct net_device *, u32);
33219 + int (*get_stats_count)(struct net_device *);
33220 + void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *,
33222 +} *ethtool_ops = NULL;
33224 +#undef SET_ETHTOOL_OPS
33225 +#define SET_ETHTOOL_OPS(netdev, ops) (ethtool_ops = (ops))
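+/*
+ * Usage sketch (hypothetical driver named foo): with this compat layer
+ * there is a single file-scope ops pointer, so registering a driver's
+ * table via SET_ETHTOOL_OPS above reduces to one assignment:
+ *
+ *	static struct ethtool_ops foo_ethtool_ops = {
+ *		.get_drvinfo = foo_get_drvinfo,
+ *		.get_link    = ethtool_op_get_link,
+ *	};
+ *	SET_ETHTOOL_OPS(netdev, &foo_ethtool_ops);
+ */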
33228 + * Some useful ethtool_ops methods that are device independent. If we find that
33229 + * all drivers want to do the same thing here, we can turn these into dev_()
33230 + * function calls.
33233 +#undef ethtool_op_get_link
33234 +#define ethtool_op_get_link _kc_ethtool_op_get_link
33235 +u32 _kc_ethtool_op_get_link(struct net_device *dev)
33237 + return netif_carrier_ok(dev) ? 1 : 0;
33240 +#undef ethtool_op_get_tx_csum
33241 +#define ethtool_op_get_tx_csum _kc_ethtool_op_get_tx_csum
33242 +u32 _kc_ethtool_op_get_tx_csum(struct net_device *dev)
33244 +#ifdef NETIF_F_IP_CSUM
33245 + return (dev->features & NETIF_F_IP_CSUM) != 0;
33251 +#undef ethtool_op_set_tx_csum
33252 +#define ethtool_op_set_tx_csum _kc_ethtool_op_set_tx_csum
33253 +int _kc_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
33255 +#ifdef NETIF_F_IP_CSUM
33257 +#ifdef NETIF_F_IPV6_CSUM
33258 + dev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
33260 + dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
33262 + dev->features |= NETIF_F_IP_CSUM;
33264 + dev->features &= ~NETIF_F_IP_CSUM;
33271 +#undef ethtool_op_get_sg
33272 +#define ethtool_op_get_sg _kc_ethtool_op_get_sg
33273 +u32 _kc_ethtool_op_get_sg(struct net_device *dev)
33276 + return (dev->features & NETIF_F_SG) != 0;
33282 +#undef ethtool_op_set_sg
33283 +#define ethtool_op_set_sg _kc_ethtool_op_set_sg
33284 +int _kc_ethtool_op_set_sg(struct net_device *dev, u32 data)
33288 + dev->features |= NETIF_F_SG;
33290 + dev->features &= ~NETIF_F_SG;
33296 +#undef ethtool_op_get_tso
33297 +#define ethtool_op_get_tso _kc_ethtool_op_get_tso
33298 +u32 _kc_ethtool_op_get_tso(struct net_device *dev)
33300 +#ifdef NETIF_F_TSO
33301 + return (dev->features & NETIF_F_TSO) != 0;
33307 +#undef ethtool_op_set_tso
33308 +#define ethtool_op_set_tso _kc_ethtool_op_set_tso
33309 +int _kc_ethtool_op_set_tso(struct net_device *dev, u32 data)
33311 +#ifdef NETIF_F_TSO
33313 + dev->features |= NETIF_F_TSO;
33315 + dev->features &= ~NETIF_F_TSO;
33321 +/* Handlers for each ethtool command */
33323 +static int ethtool_get_settings(struct net_device *dev, void *useraddr)
33325 + struct ethtool_cmd cmd = { ETHTOOL_GSET };
33328 + if (!ethtool_ops->get_settings)
33329 + return -EOPNOTSUPP;
33331 + err = ethtool_ops->get_settings(dev, &cmd);
33335 + if (copy_to_user(useraddr, &cmd, sizeof(cmd)))
33340 +static int ethtool_set_settings(struct net_device *dev, void *useraddr)
33342 + struct ethtool_cmd cmd;
33344 + if (!ethtool_ops->set_settings)
33345 + return -EOPNOTSUPP;
33347 + if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
33350 + return ethtool_ops->set_settings(dev, &cmd);
33353 +static int ethtool_get_drvinfo(struct net_device *dev, void *useraddr)
33355 + struct ethtool_drvinfo info;
33356 + struct ethtool_ops *ops = ethtool_ops;
33358 + if (!ops->get_drvinfo)
33359 + return -EOPNOTSUPP;
33361 + memset(&info, 0, sizeof(info));
33362 + info.cmd = ETHTOOL_GDRVINFO;
33363 + ops->get_drvinfo(dev, &info);
33365 + if (ops->self_test_count)
33366 + info.testinfo_len = ops->self_test_count(dev);
33367 + if (ops->get_stats_count)
33368 + info.n_stats = ops->get_stats_count(dev);
33369 + if (ops->get_regs_len)
33370 + info.regdump_len = ops->get_regs_len(dev);
33371 + if (ops->get_eeprom_len)
33372 + info.eedump_len = ops->get_eeprom_len(dev);
33374 + if (copy_to_user(useraddr, &info, sizeof(info)))
33379 +static int ethtool_get_regs(struct net_device *dev, char *useraddr)
33381 + struct ethtool_regs regs;
33382 + struct ethtool_ops *ops = ethtool_ops;
33386 + if (!ops->get_regs || !ops->get_regs_len)
33387 + return -EOPNOTSUPP;
33389 + if (copy_from_user(&regs, useraddr, sizeof(regs)))
33392 + reglen = ops->get_regs_len(dev);
33393 + if (regs.len > reglen)
33394 + regs.len = reglen;
33396 + regbuf = kmalloc(reglen, GFP_USER);
33400 + ops->get_regs(dev, &regs, regbuf);
33403 + if (copy_to_user(useraddr, &regs, sizeof(regs)))
33405 + useraddr += offsetof(struct ethtool_regs, data);
33406 + if (copy_to_user(useraddr, regbuf, reglen))
33415 +static int ethtool_get_wol(struct net_device *dev, char *useraddr)
33417 + struct ethtool_wolinfo wol = { ETHTOOL_GWOL };
33419 + if (!ethtool_ops->get_wol)
33420 + return -EOPNOTSUPP;
33422 + ethtool_ops->get_wol(dev, &wol);
33424 + if (copy_to_user(useraddr, &wol, sizeof(wol)))
33429 +static int ethtool_set_wol(struct net_device *dev, char *useraddr)
33431 + struct ethtool_wolinfo wol;
33433 + if (!ethtool_ops->set_wol)
33434 + return -EOPNOTSUPP;
33436 + if (copy_from_user(&wol, useraddr, sizeof(wol)))
33439 + return ethtool_ops->set_wol(dev, &wol);
33442 +static int ethtool_get_msglevel(struct net_device *dev, char *useraddr)
33444 + struct ethtool_value edata = { ETHTOOL_GMSGLVL };
33446 + if (!ethtool_ops->get_msglevel)
33447 + return -EOPNOTSUPP;
33449 + edata.data = ethtool_ops->get_msglevel(dev);
33451 + if (copy_to_user(useraddr, &edata, sizeof(edata)))
33456 +static int ethtool_set_msglevel(struct net_device *dev, char *useraddr)
33458 + struct ethtool_value edata;
33460 + if (!ethtool_ops->set_msglevel)
33461 + return -EOPNOTSUPP;
33463 + if (copy_from_user(&edata, useraddr, sizeof(edata)))
33466 + ethtool_ops->set_msglevel(dev, edata.data);
33470 +static int ethtool_nway_reset(struct net_device *dev)
33472 + if (!ethtool_ops->nway_reset)
33473 + return -EOPNOTSUPP;
33475 + return ethtool_ops->nway_reset(dev);
33478 +static int ethtool_get_link(struct net_device *dev, void *useraddr)
33480 + struct ethtool_value edata = { ETHTOOL_GLINK };
33482 + if (!ethtool_ops->get_link)
33483 + return -EOPNOTSUPP;
33485 + edata.data = ethtool_ops->get_link(dev);
33487 + if (copy_to_user(useraddr, &edata, sizeof(edata)))
33492 +static int ethtool_get_eeprom(struct net_device *dev, void *useraddr)
33494 + struct ethtool_eeprom eeprom;
33495 + struct ethtool_ops *ops = ethtool_ops;
33499 + if (!ops->get_eeprom || !ops->get_eeprom_len)
33500 + return -EOPNOTSUPP;
33502 + if (copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
33505 + /* Check for wrap and zero */
33506 + if (eeprom.offset + eeprom.len <= eeprom.offset)
33509 + /* Check for exceeding total eeprom len */
33510 + if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev))
33513 + data = kmalloc(eeprom.len, GFP_USER);
33518 + if (copy_from_user(data, useraddr + sizeof(eeprom), eeprom.len))
33521 + ret = ops->get_eeprom(dev, &eeprom, data);
33526 + if (copy_to_user(useraddr, &eeprom, sizeof(eeprom)))
33528 + if (copy_to_user(useraddr + sizeof(eeprom), data, eeprom.len))
33537 +static int ethtool_set_eeprom(struct net_device *dev, void *useraddr)
33539 + struct ethtool_eeprom eeprom;
33540 + struct ethtool_ops *ops = ethtool_ops;
33544 + if (!ops->set_eeprom || !ops->get_eeprom_len)
33545 + return -EOPNOTSUPP;
33547 + if (copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
33550 + /* Check for wrap and zero */
33551 + if (eeprom.offset + eeprom.len <= eeprom.offset)
33554 + /* Check for exceeding total eeprom len */
33555 + if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev))
33558 + data = kmalloc(eeprom.len, GFP_USER);
33563 + if (copy_from_user(data, useraddr + sizeof(eeprom), eeprom.len))
33566 + ret = ops->set_eeprom(dev, &eeprom, data);
33570 + if (copy_to_user(useraddr + sizeof(eeprom), data, eeprom.len))
33578 +static int ethtool_get_coalesce(struct net_device *dev, void *useraddr)
33580 + struct ethtool_coalesce coalesce = { ETHTOOL_GCOALESCE };
33582 + if (!ethtool_ops->get_coalesce)
33583 + return -EOPNOTSUPP;
33585 + ethtool_ops->get_coalesce(dev, &coalesce);
33587 + if (copy_to_user(useraddr, &coalesce, sizeof(coalesce)))
33592 +static int ethtool_set_coalesce(struct net_device *dev, void *useraddr)
33594 + struct ethtool_coalesce coalesce;
33596 + if (!ethtool_ops->set_coalesce)
33597 + return -EOPNOTSUPP;
33599 + if (copy_from_user(&coalesce, useraddr, sizeof(coalesce)))
33602 + return ethtool_ops->set_coalesce(dev, &coalesce);
33605 +static int ethtool_get_ringparam(struct net_device *dev, void *useraddr)
33607 + struct ethtool_ringparam ringparam = { ETHTOOL_GRINGPARAM };
33609 + if (!ethtool_ops->get_ringparam)
33610 + return -EOPNOTSUPP;
33612 + ethtool_ops->get_ringparam(dev, &ringparam);
33614 + if (copy_to_user(useraddr, &ringparam, sizeof(ringparam)))
33619 +static int ethtool_set_ringparam(struct net_device *dev, void *useraddr)
33621 + struct ethtool_ringparam ringparam;
33623 + if (!ethtool_ops->set_ringparam)
33624 + return -EOPNOTSUPP;
33626 + if (copy_from_user(&ringparam, useraddr, sizeof(ringparam)))
33629 + return ethtool_ops->set_ringparam(dev, &ringparam);
33632 +static int ethtool_get_pauseparam(struct net_device *dev, void *useraddr)
33634 + struct ethtool_pauseparam pauseparam = { ETHTOOL_GPAUSEPARAM };
33636 + if (!ethtool_ops->get_pauseparam)
33637 + return -EOPNOTSUPP;
33639 + ethtool_ops->get_pauseparam(dev, &pauseparam);
33641 + if (copy_to_user(useraddr, &pauseparam, sizeof(pauseparam)))
33646 +static int ethtool_set_pauseparam(struct net_device *dev, void *useraddr)
33648 + struct ethtool_pauseparam pauseparam;
33650 + if (!ethtool_ops->set_pauseparam)
33651 + return -EOPNOTSUPP;
33653 + if (copy_from_user(&pauseparam, useraddr, sizeof(pauseparam)))
33656 + return ethtool_ops->set_pauseparam(dev, &pauseparam);
33659 +static int ethtool_get_rx_csum(struct net_device *dev, char *useraddr)
33661 + struct ethtool_value edata = { ETHTOOL_GRXCSUM };
33663 + if (!ethtool_ops->get_rx_csum)
33664 + return -EOPNOTSUPP;
33666 + edata.data = ethtool_ops->get_rx_csum(dev);
33668 + if (copy_to_user(useraddr, &edata, sizeof(edata)))
33673 +static int ethtool_set_rx_csum(struct net_device *dev, char *useraddr)
33675 + struct ethtool_value edata;
33677 + if (!ethtool_ops->set_rx_csum)
33678 + return -EOPNOTSUPP;
33680 + if (copy_from_user(&edata, useraddr, sizeof(edata)))
33683 + ethtool_ops->set_rx_csum(dev, edata.data);
33687 +static int ethtool_get_tx_csum(struct net_device *dev, char *useraddr)
33689 + struct ethtool_value edata = { ETHTOOL_GTXCSUM };
33691 + if (!ethtool_ops->get_tx_csum)
33692 + return -EOPNOTSUPP;
33694 + edata.data = ethtool_ops->get_tx_csum(dev);
33696 + if (copy_to_user(useraddr, &edata, sizeof(edata)))
33701 +static int ethtool_set_tx_csum(struct net_device *dev, char *useraddr)
33703 + struct ethtool_value edata;
33705 + if (!ethtool_ops->set_tx_csum)
33706 + return -EOPNOTSUPP;
33708 + if (copy_from_user(&edata, useraddr, sizeof(edata)))
33711 + return ethtool_ops->set_tx_csum(dev, edata.data);
33714 +static int ethtool_get_sg(struct net_device *dev, char *useraddr)
33716 + struct ethtool_value edata = { ETHTOOL_GSG };
33718 + if (!ethtool_ops->get_sg)
33719 + return -EOPNOTSUPP;
33721 + edata.data = ethtool_ops->get_sg(dev);
33723 + if (copy_to_user(useraddr, &edata, sizeof(edata)))
33728 +static int ethtool_set_sg(struct net_device *dev, char *useraddr)
33730 + struct ethtool_value edata;
33732 + if (!ethtool_ops->set_sg)
33733 + return -EOPNOTSUPP;
33735 + if (copy_from_user(&edata, useraddr, sizeof(edata)))
33738 + return ethtool_ops->set_sg(dev, edata.data);
33741 +static int ethtool_get_tso(struct net_device *dev, char *useraddr)
33743 + struct ethtool_value edata = { ETHTOOL_GTSO };
33745 + if (!ethtool_ops->get_tso)
33746 + return -EOPNOTSUPP;
33748 + edata.data = ethtool_ops->get_tso(dev);
33750 + if (copy_to_user(useraddr, &edata, sizeof(edata)))
33755 +static int ethtool_set_tso(struct net_device *dev, char *useraddr)
33757 + struct ethtool_value edata;
33759 + if (!ethtool_ops->set_tso)
33760 + return -EOPNOTSUPP;
33762 + if (copy_from_user(&edata, useraddr, sizeof(edata)))
33765 + return ethtool_ops->set_tso(dev, edata.data);
33768 +static int ethtool_self_test(struct net_device *dev, char *useraddr)
33770 + struct ethtool_test test;
33771 + struct ethtool_ops *ops = ethtool_ops;
33775 + if (!ops->self_test || !ops->self_test_count)
33776 + return -EOPNOTSUPP;
33778 + if (copy_from_user(&test, useraddr, sizeof(test)))
33781 + test.len = ops->self_test_count(dev);
33782 + data = kmalloc(test.len * sizeof(u64), GFP_USER);
33786 + ops->self_test(dev, &test, data);
33789 + if (copy_to_user(useraddr, &test, sizeof(test)))
33791 + useraddr += sizeof(test);
33792 + if (copy_to_user(useraddr, data, test.len * sizeof(u64)))
33801 +static int ethtool_get_strings(struct net_device *dev, void *useraddr)
33803 + struct ethtool_gstrings gstrings;
33804 + struct ethtool_ops *ops = ethtool_ops;
33808 + if (!ops->get_strings)
33809 + return -EOPNOTSUPP;
33811 + if (copy_from_user(&gstrings, useraddr, sizeof(gstrings)))
33814 + switch (gstrings.string_set) {
33815 + case ETH_SS_TEST:
33816 + if (!ops->self_test_count)
33817 + return -EOPNOTSUPP;
33818 + gstrings.len = ops->self_test_count(dev);
33820 + case ETH_SS_STATS:
33821 + if (!ops->get_stats_count)
33822 + return -EOPNOTSUPP;
33823 + gstrings.len = ops->get_stats_count(dev);
33829 + data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
33833 + ops->get_strings(dev, gstrings.string_set, data);
33836 + if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
33838 + useraddr += sizeof(gstrings);
33839 + if (copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN))
33848 +static int ethtool_phys_id(struct net_device *dev, void *useraddr)
33850 + struct ethtool_value id;
33852 + if (!ethtool_ops->phys_id)
33853 + return -EOPNOTSUPP;
33855 + if (copy_from_user(&id, useraddr, sizeof(id)))
33858 + return ethtool_ops->phys_id(dev, id.data);
33861 +static int ethtool_get_stats(struct net_device *dev, void *useraddr)
33863 + struct ethtool_stats stats;
33864 + struct ethtool_ops *ops = ethtool_ops;
33868 + if (!ops->get_ethtool_stats || !ops->get_stats_count)
33869 + return -EOPNOTSUPP;
33871 + if (copy_from_user(&stats, useraddr, sizeof(stats)))
33874 + stats.n_stats = ops->get_stats_count(dev);
33875 + data = kmalloc(stats.n_stats * sizeof(u64), GFP_USER);
33879 + ops->get_ethtool_stats(dev, &stats, data);
33882 + if (copy_to_user(useraddr, &stats, sizeof(stats)))
33884 + useraddr += sizeof(stats);
33885 + if (copy_to_user(useraddr, data, stats.n_stats * sizeof(u64)))
33894 +/* The main entry point in this file. Called from net/core/dev.c */
33896 +#define ETHTOOL_OPS_COMPAT
33897 +int ethtool_ioctl(struct ifreq *ifr)
33899 + struct net_device *dev = __dev_get_by_name(ifr->ifr_name);
33900 + void *useraddr = (void *) ifr->ifr_data;
33904 + * XXX: This can be pushed down into the ethtool_* handlers that
33905 + * need it. Keep existing behavior for the moment.
33907 + if (!capable(CAP_NET_ADMIN))
33910 + if (!dev || !netif_device_present(dev))
33913 + if (copy_from_user(&ethcmd, useraddr, sizeof (ethcmd)))
33916 + switch (ethcmd) {
33917 + case ETHTOOL_GSET:
33918 + return ethtool_get_settings(dev, useraddr);
33919 + case ETHTOOL_SSET:
33920 + return ethtool_set_settings(dev, useraddr);
33921 + case ETHTOOL_GDRVINFO:
33922 + return ethtool_get_drvinfo(dev, useraddr);
33923 + case ETHTOOL_GREGS:
33924 + return ethtool_get_regs(dev, useraddr);
33925 + case ETHTOOL_GWOL:
33926 + return ethtool_get_wol(dev, useraddr);
33927 + case ETHTOOL_SWOL:
33928 + return ethtool_set_wol(dev, useraddr);
33929 + case ETHTOOL_GMSGLVL:
33930 + return ethtool_get_msglevel(dev, useraddr);
33931 + case ETHTOOL_SMSGLVL:
33932 + return ethtool_set_msglevel(dev, useraddr);
33933 + case ETHTOOL_NWAY_RST:
33934 + return ethtool_nway_reset(dev);
33935 + case ETHTOOL_GLINK:
33936 + return ethtool_get_link(dev, useraddr);
33937 + case ETHTOOL_GEEPROM:
33938 + return ethtool_get_eeprom(dev, useraddr);
33939 + case ETHTOOL_SEEPROM:
33940 + return ethtool_set_eeprom(dev, useraddr);
33941 + case ETHTOOL_GCOALESCE:
33942 + return ethtool_get_coalesce(dev, useraddr);
33943 + case ETHTOOL_SCOALESCE:
33944 + return ethtool_set_coalesce(dev, useraddr);
33945 + case ETHTOOL_GRINGPARAM:
33946 + return ethtool_get_ringparam(dev, useraddr);
33947 + case ETHTOOL_SRINGPARAM:
33948 + return ethtool_set_ringparam(dev, useraddr);
33949 + case ETHTOOL_GPAUSEPARAM:
33950 + return ethtool_get_pauseparam(dev, useraddr);
33951 + case ETHTOOL_SPAUSEPARAM:
33952 + return ethtool_set_pauseparam(dev, useraddr);
33953 + case ETHTOOL_GRXCSUM:
33954 + return ethtool_get_rx_csum(dev, useraddr);
33955 + case ETHTOOL_SRXCSUM:
33956 + return ethtool_set_rx_csum(dev, useraddr);
33957 + case ETHTOOL_GTXCSUM:
33958 + return ethtool_get_tx_csum(dev, useraddr);
33959 + case ETHTOOL_STXCSUM:
33960 + return ethtool_set_tx_csum(dev, useraddr);
33961 + case ETHTOOL_GSG:
33962 + return ethtool_get_sg(dev, useraddr);
33963 + case ETHTOOL_SSG:
33964 + return ethtool_set_sg(dev, useraddr);
33965 + case ETHTOOL_GTSO:
33966 + return ethtool_get_tso(dev, useraddr);
33967 + case ETHTOOL_STSO:
33968 + return ethtool_set_tso(dev, useraddr);
33969 + case ETHTOOL_TEST:
33970 + return ethtool_self_test(dev, useraddr);
33971 + case ETHTOOL_GSTRINGS:
33972 + return ethtool_get_strings(dev, useraddr);
33973 + case ETHTOOL_PHYS_ID:
33974 + return ethtool_phys_id(dev, useraddr);
33975 + case ETHTOOL_GSTATS:
33976 + return ethtool_get_stats(dev, useraddr);
33978 + return -EOPNOTSUPP;
33981 + return -EOPNOTSUPP;
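+/*
+ * Usage sketch (hypothetical driver named foo): pre-ethtool_ops kernels
+ * deliver SIOCETHTOOL through the driver's do_ioctl hook, which forwards
+ * the request to the compat entry point above:
+ *
+ *	static int foo_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+ *	{
+ *		switch (cmd) {
+ *		case SIOCETHTOOL:
+ *			return ethtool_ioctl(ifr);
+ *		default:
+ *			return -EOPNOTSUPP;
+ *		}
+ *	}
+ */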
33984 +#define mii_if_info _kc_mii_if_info
33985 +struct _kc_mii_if_info {
33989 + int reg_num_mask;
33991 + unsigned int full_duplex : 1; /* is full duplex? */
33992 + unsigned int force_media : 1; /* is autoneg. disabled? */
33994 + struct net_device *dev;
33995 + int (*mdio_read) (struct net_device *dev, int phy_id, int location);
33996 + void (*mdio_write) (struct net_device *dev, int phy_id, int location, int val);
33999 +struct ethtool_cmd;
34000 +struct mii_ioctl_data;
34002 +#undef mii_link_ok
34003 +#define mii_link_ok _kc_mii_link_ok
34004 +#undef mii_nway_restart
34005 +#define mii_nway_restart _kc_mii_nway_restart
34006 +#undef mii_ethtool_gset
34007 +#define mii_ethtool_gset _kc_mii_ethtool_gset
34008 +#undef mii_ethtool_sset
34009 +#define mii_ethtool_sset _kc_mii_ethtool_sset
34010 +#undef mii_check_link
34011 +#define mii_check_link _kc_mii_check_link
34012 +extern int _kc_mii_link_ok (struct mii_if_info *mii);
34013 +extern int _kc_mii_nway_restart (struct mii_if_info *mii);
34014 +extern int _kc_mii_ethtool_gset(struct mii_if_info *mii,
34015 + struct ethtool_cmd *ecmd);
34016 +extern int _kc_mii_ethtool_sset(struct mii_if_info *mii,
34017 + struct ethtool_cmd *ecmd);
34018 +extern void _kc_mii_check_link (struct mii_if_info *mii);
34019 +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,6) )
34020 +#undef generic_mii_ioctl
34021 +#define generic_mii_ioctl _kc_generic_mii_ioctl
34022 +extern int _kc_generic_mii_ioctl(struct mii_if_info *mii_if,
34023 + struct mii_ioctl_data *mii_data, int cmd,
34024 + unsigned int *duplex_changed);
34025 +#endif /* > 2.4.6 */
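+/*
+ * Setup sketch (hypothetical names): a driver fills a mii_if_info with its
+ * own MDIO accessors once at probe time; the _kc_mii_* helpers defined
+ * below then manage link state purely through those callbacks:
+ *
+ *	struct mii_if_info mii;
+ *
+ *	mii.dev = netdev;
+ *	mii.phy_id = 1;
+ *	mii.phy_id_mask = 0x1f;
+ *	mii.reg_num_mask = 0x1f;
+ *	mii.mdio_read = foo_mdio_read;
+ *	mii.mdio_write = foo_mdio_write;
+ *	mii_check_link(&mii);
+ */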
34028 +struct _kc_pci_dev_ext {
34029 + struct pci_dev *dev;
34030 + void *pci_drvdata;
34031 + struct pci_driver *driver;
34034 +struct _kc_net_dev_ext {
34035 + struct net_device *dev;
34036 + unsigned int carrier;
34040 +/**************************************/
34043 +int _kc_mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
34045 + struct net_device *dev = mii->dev;
34046 + u32 advert, bmcr, lpa, nego;
34048 + ecmd->supported =
34049 + (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
34050 + SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
34051 + SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII);
34053 + /* only supports twisted-pair */
34054 + ecmd->port = PORT_MII;
34056 + /* only supports internal transceiver */
34057 + ecmd->transceiver = XCVR_INTERNAL;
34059 + /* this isn't fully supported at higher layers */
34060 + ecmd->phy_address = mii->phy_id;
34062 + ecmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
34063 + advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE);
34064 + if (advert & ADVERTISE_10HALF)
34065 + ecmd->advertising |= ADVERTISED_10baseT_Half;
34066 + if (advert & ADVERTISE_10FULL)
34067 + ecmd->advertising |= ADVERTISED_10baseT_Full;
34068 + if (advert & ADVERTISE_100HALF)
34069 + ecmd->advertising |= ADVERTISED_100baseT_Half;
34070 + if (advert & ADVERTISE_100FULL)
34071 + ecmd->advertising |= ADVERTISED_100baseT_Full;
34073 + bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
34074 + lpa = mii->mdio_read(dev, mii->phy_id, MII_LPA);
34075 + if (bmcr & BMCR_ANENABLE) {
34076 + ecmd->advertising |= ADVERTISED_Autoneg;
34077 + ecmd->autoneg = AUTONEG_ENABLE;
34079 + nego = mii_nway_result(advert & lpa);
34080 + if (nego == LPA_100FULL || nego == LPA_100HALF)
34081 + ecmd->speed = SPEED_100;
34083 + ecmd->speed = SPEED_10;
34084 + if (nego == LPA_100FULL || nego == LPA_10FULL) {
34085 + ecmd->duplex = DUPLEX_FULL;
34086 + mii->full_duplex = 1;
34088 + ecmd->duplex = DUPLEX_HALF;
34089 + mii->full_duplex = 0;
34092 + ecmd->autoneg = AUTONEG_DISABLE;
34094 + ecmd->speed = (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
34095 + ecmd->duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
34098 + /* ignore maxtxpkt, maxrxpkt for now */
34103 +int _kc_mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
34105 + struct net_device *dev = mii->dev;
34107 + if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
34109 + if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
34111 + if (ecmd->port != PORT_MII)
34113 + if (ecmd->transceiver != XCVR_INTERNAL)
34115 + if (ecmd->phy_address != mii->phy_id)
34117 + if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE)
34120 + /* ignore supported, maxtxpkt, maxrxpkt */
34122 + if (ecmd->autoneg == AUTONEG_ENABLE) {
34123 + u32 bmcr, advert, tmp;
34125 + if ((ecmd->advertising & (ADVERTISED_10baseT_Half |
34126 + ADVERTISED_10baseT_Full |
34127 + ADVERTISED_100baseT_Half |
34128 + ADVERTISED_100baseT_Full)) == 0)
34131 + /* advertise only what has been requested */
34132 + advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE);
34133 + tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
34134 + if (ecmd->advertising & ADVERTISED_10baseT_Half)
34135 + tmp |= ADVERTISE_10HALF;
34136 + if (ecmd->advertising & ADVERTISED_10baseT_Full)
34137 + tmp |= ADVERTISE_10FULL;
34138 + if (ecmd->advertising & ADVERTISED_100baseT_Half)
34139 + tmp |= ADVERTISE_100HALF;
34140 + if (ecmd->advertising & ADVERTISED_100baseT_Full)
34141 + tmp |= ADVERTISE_100FULL;
34142 + if (advert != tmp) {
34143 + mii->mdio_write(dev, mii->phy_id, MII_ADVERTISE, tmp);
34144 + mii->advertising = tmp;
34147 + /* turn on autonegotiation, and force a renegotiate */
34148 + bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
34149 + bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
34150 + mii->mdio_write(dev, mii->phy_id, MII_BMCR, bmcr);
34152 + mii->force_media = 0;
34156 + /* turn off auto-negotiation, set speed and duplex */
34157 + bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
34158 + tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX);
34159 + if (ecmd->speed == SPEED_100)
34160 + tmp |= BMCR_SPEED100;
34161 + if (ecmd->duplex == DUPLEX_FULL) {
34162 + tmp |= BMCR_FULLDPLX;
34163 + mii->full_duplex = 1;
34165 + mii->full_duplex = 0;
34167 + mii->mdio_write(dev, mii->phy_id, MII_BMCR, tmp);
34169 + mii->force_media = 1;
34174 +int _kc_mii_link_ok (struct mii_if_info *mii)
34176 + /* first, a dummy read, needed to latch some MII phys */
34177 + mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR);
34178 + if (mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR) & BMSR_LSTATUS)
34183 +int _kc_mii_nway_restart (struct mii_if_info *mii)
34188 + /* if autoneg is off, it's an error */
34189 + bmcr = mii->mdio_read(mii->dev, mii->phy_id, MII_BMCR);
34191 + if (bmcr & BMCR_ANENABLE) {
34192 + bmcr |= BMCR_ANRESTART;
34193 + mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR, bmcr);
34200 +void _kc_mii_check_link (struct mii_if_info *mii)
34202 + int cur_link = mii_link_ok(mii);
34203 + int prev_link = netif_carrier_ok(mii->dev);
34205 + if (cur_link && !prev_link)
34206 + netif_carrier_on(mii->dev);
34207 + else if (prev_link && !cur_link)
34208 + netif_carrier_off(mii->dev);
34211 +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,6) )
34212 +int _kc_generic_mii_ioctl(struct mii_if_info *mii_if,
34213 + struct mii_ioctl_data *mii_data, int cmd,
34214 + unsigned int *duplex_chg_out)
34217 + unsigned int duplex_changed = 0;
34219 + if (duplex_chg_out)
34220 + *duplex_chg_out = 0;
34222 + mii_data->phy_id &= mii_if->phy_id_mask;
34223 + mii_data->reg_num &= mii_if->reg_num_mask;
34226 + case SIOCDEVPRIVATE: /* binary compat, remove in 2.5 */
34227 + case SIOCGMIIPHY:
34228 + mii_data->phy_id = mii_if->phy_id;
34229 + /* fall through */
34231 + case SIOCDEVPRIVATE + 1:/* binary compat, remove in 2.5 */
34232 + case SIOCGMIIREG:
34233 + mii_data->val_out =
34234 + mii_if->mdio_read(mii_if->dev, mii_data->phy_id,
34235 + mii_data->reg_num);
34238 + case SIOCDEVPRIVATE + 2:/* binary compat, remove in 2.5 */
34239 + case SIOCSMIIREG: {
34240 + u16 val = mii_data->val_in;
34242 + if (!capable(CAP_NET_ADMIN))
34245 + if (mii_data->phy_id == mii_if->phy_id) {
34246 + switch(mii_data->reg_num) {
34248 + unsigned int new_duplex = 0;
34249 + if (val & (BMCR_RESET|BMCR_ANENABLE))
34250 + mii_if->force_media = 0;
34252 + mii_if->force_media = 1;
34253 + if (mii_if->force_media &&
34254 + (val & BMCR_FULLDPLX))
34256 + if (mii_if->full_duplex != new_duplex) {
34257 + duplex_changed = 1;
34258 + mii_if->full_duplex = new_duplex;
34262 + case MII_ADVERTISE:
34263 + mii_if->advertising = val;
34271 + mii_if->mdio_write(mii_if->dev, mii_data->phy_id,
34272 + mii_data->reg_num, val);
34277 + rc = -EOPNOTSUPP;
34281 + if ((rc == 0) && (duplex_chg_out) && (duplex_changed))
34282 + *duplex_chg_out = 1;
34286 +#endif /* > 2.4.6 */
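+/*
+ * Usage sketch (hypothetical driver): the MII ioctls are typically routed
+ * here from the driver's do_ioctl handler, e.g.
+ *
+ *	case SIOCGMIIPHY:
+ *	case SIOCGMIIREG:
+ *	case SIOCSMIIREG:
+ *		return generic_mii_ioctl(&adapter->mii,
+ *					 (struct mii_ioctl_data *) &ifr->ifr_data,
+ *					 cmd, NULL);
+ */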
34288 diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/kcompat.h linux-2.6.22-50/drivers/net/ixgbe/kcompat.h
34289 --- linux-2.6.22-40/drivers/net/ixgbe/kcompat.h 1969-12-31 19:00:00.000000000 -0500
34290 +++ linux-2.6.22-50/drivers/net/ixgbe/kcompat.h 2010-08-25 17:56:26.000000000 -0400
34292 +/*******************************************************************************
34294 + Intel 10 Gigabit PCI Express Linux driver
34295 + Copyright(c) 1999 - 2010 Intel Corporation.
34297 + This program is free software; you can redistribute it and/or modify it
34298 + under the terms and conditions of the GNU General Public License,
34299 + version 2, as published by the Free Software Foundation.
34301 + This program is distributed in the hope it will be useful, but WITHOUT
34302 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
34303 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
34306 + You should have received a copy of the GNU General Public License along with
34307 + this program; if not, write to the Free Software Foundation, Inc.,
34308 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
34310 + The full GNU General Public License is included in this distribution in
34311 + the file called "COPYING".
34313 + Contact Information:
34314 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
34315 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
34317 +*******************************************************************************/
34319 +#ifndef _KCOMPAT_H_
34320 +#define _KCOMPAT_H_
34322 +#ifndef LINUX_VERSION_CODE
34323 +#include <linux/version.h>
34325 +#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))
34327 +#include <linux/init.h>
34328 +#include <linux/types.h>
34329 +#include <linux/errno.h>
34330 +#include <linux/module.h>
34331 +#include <linux/pci.h>
34332 +#include <linux/netdevice.h>
34333 +#include <linux/etherdevice.h>
34334 +#include <linux/skbuff.h>
34335 +#include <linux/ioport.h>
34336 +#include <linux/slab.h>
34337 +#include <linux/list.h>
34338 +#include <linux/delay.h>
34339 +#include <linux/sched.h>
34340 +#include <linux/in.h>
34341 +#include <linux/ip.h>
34342 +#include <linux/udp.h>
34343 +#include <linux/mii.h>
34344 +#include <asm/io.h>
34346 +/* NAPI enable/disable flags here */
34347 +/* enable NAPI for ixgbe by default */
34348 +#undef CONFIG_IXGBE_NAPI
34349 +#define CONFIG_IXGBE_NAPI
34351 +#ifdef CONFIG_IXGBE_NAPI
34354 +#endif /* CONFIG_IXGBE_NAPI */
34358 +#endif /* IXGBE_NAPI */
34359 +#ifdef IXGBE_NO_NAPI
34361 +#endif /* IXGBE_NO_NAPI */
34363 +#define adapter_struct ixgbe_adapter
34364 +#define adapter_q_vector ixgbe_q_vector
34366 +/* and finally set defines so that the code sees the changes */
34368 +#ifndef CONFIG_IXGBE_NAPI
34369 +#define CONFIG_IXGBE_NAPI
34372 +#undef CONFIG_IXGBE_NAPI
34375 +/* MSI compatibility code for all kernels and drivers */
34376 +#ifdef DISABLE_PCI_MSI
34377 +#undef CONFIG_PCI_MSI
34379 +#ifndef CONFIG_PCI_MSI
34380 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) )
34381 +struct msix_entry {
34382 + u16 vector; /* kernel uses to write allocated vector */
34383 + u16 entry; /* driver uses to specify entry, OS writes */
34386 +#undef pci_enable_msi
34387 +#define pci_enable_msi(a) -ENOTSUPP
34388 +#undef pci_disable_msi
34389 +#define pci_disable_msi(a) do {} while (0)
34390 +#undef pci_enable_msix
34391 +#define pci_enable_msix(a, b, c) -ENOTSUPP
34392 +#undef pci_disable_msix
34393 +#define pci_disable_msix(a) do {} while (0)
34394 +#define msi_remove_pci_irq_vectors(a) do {} while (0)
34395 +#endif /* CONFIG_PCI_MSI */
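+/*
+ * With these stubs, probe code can keep a single interrupt-setup path and
+ * silently fall back to legacy INTx when MSI support is compiled out
+ * (sketch, flag name hypothetical):
+ *
+ *	if (pci_enable_msi(adapter->pdev) == 0)
+ *		adapter->flags |= FOO_FLAG_MSI_ENABLED;
+ *	else
+ *		adapter->flags &= ~FOO_FLAG_MSI_ENABLED;
+ */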
34400 +#ifdef DISABLE_NET_POLL_CONTROLLER
34401 +#undef CONFIG_NET_POLL_CONTROLLER
34404 +#ifndef PMSG_SUSPEND
34405 +#define PMSG_SUSPEND 3
34408 +/* generic boolean compatibility */
34412 +#define FALSE false
34413 +#ifdef GCC_VERSION
34414 +#if ( GCC_VERSION < 3000 )
34415 +#define _Bool char
34418 +#define _Bool char
34421 +#define bool _Bool
34427 +/* kernels less than 2.4.14 don't have this */
34428 +#ifndef ETH_P_8021Q
34429 +#define ETH_P_8021Q 0x8100
34432 +#ifndef module_param
34433 +#define module_param(v,t,p) MODULE_PARM(v, "i");
34436 +#ifndef DMA_64BIT_MASK
34437 +#define DMA_64BIT_MASK 0xffffffffffffffffULL
34440 +#ifndef DMA_32BIT_MASK
34441 +#define DMA_32BIT_MASK 0x00000000ffffffffULL
34444 +#ifndef PCI_CAP_ID_EXP
34445 +#define PCI_CAP_ID_EXP 0x10
34448 +#ifndef PCIE_LINK_STATE_L0S
34449 +#define PCIE_LINK_STATE_L0S 1
34451 +#ifndef PCIE_LINK_STATE_L1
34452 +#define PCIE_LINK_STATE_L1 2
34456 +#ifdef CONFIG_IA64
34457 +#define mmiowb() asm volatile ("mf.a" ::: "memory")
34463 +#ifndef SET_NETDEV_DEV
34464 +#define SET_NETDEV_DEV(net, pdev)
34467 +#ifndef HAVE_FREE_NETDEV
34468 +#define free_netdev(x) kfree(x)
34471 +#ifdef HAVE_POLL_CONTROLLER
34472 +#define CONFIG_NET_POLL_CONTROLLER
34475 +#ifndef NETDEV_TX_OK
34476 +#define NETDEV_TX_OK 0
34479 +#ifndef NETDEV_TX_BUSY
34480 +#define NETDEV_TX_BUSY 1
34483 +#ifndef NETDEV_TX_LOCKED
34484 +#define NETDEV_TX_LOCKED -1
34487 +#ifdef CONFIG_PCI_IOV
34488 +#define VMDQ_P(p) ((p) + adapter->num_vfs)
34490 +#define VMDQ_P(p) (p)
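+/* e.g. with num_vfs == 4, VMDQ_P(0) maps to pool 4, the first pool left to
+ * the PF once the VFs have claimed pools 0-3 */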
34493 +#ifndef SKB_DATAREF_SHIFT
34494 +/* if we do not have the infrastructure to detect if skb_header is cloned,
34495 + just return false in all cases */
34496 +#define skb_header_cloned(x) 0
34499 +#ifndef NETIF_F_GSO
34500 +#define gso_size tso_size
34501 +#define gso_segs tso_segs
34504 +#ifndef NETIF_F_GRO
34505 +#define vlan_gro_receive(_napi, _vlgrp, _vlan, _skb) \
34506 + vlan_hwaccel_receive_skb(_skb, _vlgrp, _vlan)
34507 +#define napi_gro_receive(_napi, _skb) netif_receive_skb(_skb)
34510 +#ifndef NETIF_F_SCTP_CSUM
34511 +#define NETIF_F_SCTP_CSUM 0
34514 +#ifndef NETIF_F_LRO
34515 +#define NETIF_F_LRO (1 << 15)
34518 +#ifndef ETH_FLAG_LRO
34519 +#define ETH_FLAG_LRO (1 << 15)
34522 +#ifndef ETH_FLAG_NTUPLE
34523 +#define ETH_FLAG_NTUPLE 0
34526 +#ifndef IPPROTO_SCTP
34527 +#define IPPROTO_SCTP 132
34530 +#ifndef CHECKSUM_PARTIAL
34531 +#define CHECKSUM_PARTIAL CHECKSUM_HW
34532 +#define CHECKSUM_COMPLETE CHECKSUM_HW
34535 +#ifndef __read_mostly
34536 +#define __read_mostly
34539 +#ifndef HAVE_NETIF_MSG
34540 +#define HAVE_NETIF_MSG 1
34542 + NETIF_MSG_DRV = 0x0001,
34543 + NETIF_MSG_PROBE = 0x0002,
34544 + NETIF_MSG_LINK = 0x0004,
34545 + NETIF_MSG_TIMER = 0x0008,
34546 + NETIF_MSG_IFDOWN = 0x0010,
34547 + NETIF_MSG_IFUP = 0x0020,
34548 + NETIF_MSG_RX_ERR = 0x0040,
34549 + NETIF_MSG_TX_ERR = 0x0080,
34550 + NETIF_MSG_TX_QUEUED = 0x0100,
34551 + NETIF_MSG_INTR = 0x0200,
34552 + NETIF_MSG_TX_DONE = 0x0400,
34553 + NETIF_MSG_RX_STATUS = 0x0800,
34554 + NETIF_MSG_PKTDATA = 0x1000,
34555 + NETIF_MSG_HW = 0x2000,
34556 + NETIF_MSG_WOL = 0x4000,
34559 +#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
34560 +#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
34561 +#define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)
34562 +#define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER)
34563 +#define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN)
34564 +#define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP)
34565 +#define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)
34566 +#define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR)
34567 +#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
34568 +#define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR)
34569 +#define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
34570 +#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
34571 +#define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
34572 +#else /* HAVE_NETIF_MSG */
34573 +#define NETIF_MSG_HW 0x2000
34574 +#define NETIF_MSG_WOL 0x4000
34575 +#endif /* HAVE_NETIF_MSG */
34576 +#ifndef netif_msg_hw
34577 +#define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)
34579 +#ifndef netif_msg_wol
34580 +#define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL)
34584 +#define MII_RESV1 0x17 /* Reserved... */
34588 +#define unlikely(_x) _x
34589 +#define likely(_x) _x
34593 +#define WARN_ON(x)
34596 +#ifndef PCI_DEVICE
34597 +#define PCI_DEVICE(vend,dev) \
34598 + .vendor = (vend), .device = (dev), \
34599 + .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
34602 +#ifndef node_online
34603 +#define node_online(node) ((node) == 0)
34606 +#ifndef num_online_cpus
34607 +#define num_online_cpus() smp_num_cpus
34610 +#ifndef numa_node_id
34611 +#define numa_node_id() 0
34615 +#ifndef _LINUX_RANDOM_H
34616 +#include <linux/random.h>
34619 +#ifndef DECLARE_BITMAP
34620 +#ifndef BITS_TO_LONGS
34621 +#define BITS_TO_LONGS(bits) (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG)
34623 +#define DECLARE_BITMAP(name,bits) long name[BITS_TO_LONGS(bits)]
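+/* e.g. DECLARE_BITMAP(vlans, 4096) yields "long vlans[64]" on a 64-bit host */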
34627 +#define VLAN_HLEN 4
34630 +#ifndef VLAN_ETH_HLEN
34631 +#define VLAN_ETH_HLEN 18
34634 +#ifndef VLAN_ETH_FRAME_LEN
34635 +#define VLAN_ETH_FRAME_LEN 1518
34638 +#if !defined(IXGBE_DCA) && !defined(IGB_DCA)
34639 +#define dca_get_tag(b) 0
34640 +#define dca_add_requester(a) -1
34641 +#define dca_remove_requester(b) do { } while(0)
34642 +#define DCA_PROVIDER_ADD 0x0001
34643 +#define DCA_PROVIDER_REMOVE 0x0002
34646 +#ifndef DCA_GET_TAG_TWO_ARGS
34647 +#define dca3_get_tag(a,b) dca_get_tag(b)
34650 +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
34651 +#if defined(__i386__) || defined(__x86_64__)
34652 +#define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
34656 +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
34657 +#ifdef NET_IP_ALIGN
34658 +#undef NET_IP_ALIGN
34660 +#ifdef NET_SKB_PAD
34661 +#undef NET_SKB_PAD
34663 +#ifdef netdev_alloc_skb_ip_align
34664 +#undef netdev_alloc_skb_ip_align
34666 +extern struct sk_buff *_kc_netdev_alloc_skb_ip_align(struct net_device *dev,
34667 + unsigned int length);
34668 +#define NET_IP_ALIGN 0
34669 +#define NET_SKB_PAD L1_CACHE_BYTES
34670 +#define netdev_alloc_skb_ip_align(n, l) _kc_netdev_alloc_skb_ip_align(n, l)
34671 +#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
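+/*
+ * Rationale: on platforms with cheap unaligned loads it is better to keep
+ * receive buffers DMA-aligned than to shift the IP header, so NET_IP_ALIGN
+ * collapses to 0 and the headroom moves into NET_SKB_PAD instead.
+ */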
34673 +/* taken from 2.6.24 definition in linux/kernel.h */
34674 +#ifndef IS_ALIGNED
34675 +#define IS_ALIGNED(x,a) (((x) % ((typeof(x))(a))) == 0)
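+/* e.g. IS_ALIGNED(0x1000, 16) evaluates to 1; IS_ALIGNED(0x1001, 16) to 0 */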
34678 +/*****************************************************************************/
34679 +/* Installations with ethtool version without eeprom, adapter id, or statistics
34680 + * support */
34682 +#ifndef ETH_GSTRING_LEN
34683 +#define ETH_GSTRING_LEN 32
34686 +#ifndef ETHTOOL_GSTATS
34687 +#define ETHTOOL_GSTATS 0x1d
34688 +#undef ethtool_drvinfo
34689 +#define ethtool_drvinfo k_ethtool_drvinfo
34690 +struct k_ethtool_drvinfo {
34693 + char version[32];
34694 + char fw_version[32];
34695 + char bus_info[32];
34696 + char reserved1[32];
34697 + char reserved2[16];
34699 + u32 testinfo_len;
34704 +struct ethtool_stats {
34709 +#endif /* ETHTOOL_GSTATS */
34711 +#ifndef ETHTOOL_PHYS_ID
34712 +#define ETHTOOL_PHYS_ID 0x1c
34713 +#endif /* ETHTOOL_PHYS_ID */
34715 +#ifndef ETHTOOL_GSTRINGS
34716 +#define ETHTOOL_GSTRINGS 0x1b
34717 +enum ethtool_stringset {
34721 +struct ethtool_gstrings {
34722 + u32 cmd; /* ETHTOOL_GSTRINGS */
34723 + u32 string_set; /* string set id, e.g. ETH_SS_TEST, etc. */
34724 + u32 len; /* number of strings in the string set */
34727 +#endif /* ETHTOOL_GSTRINGS */
34729 +#ifndef ETHTOOL_TEST
34730 +#define ETHTOOL_TEST 0x1a
34731 +enum ethtool_test_flags {
34732 + ETH_TEST_FL_OFFLINE = (1 << 0),
34733 + ETH_TEST_FL_FAILED = (1 << 1),
34735 +struct ethtool_test {
34742 +#endif /* ETHTOOL_TEST */
34744 +#ifndef ETHTOOL_GEEPROM
34745 +#define ETHTOOL_GEEPROM 0xb
34746 +#undef ETHTOOL_GREGS
34747 +struct ethtool_eeprom {
34755 +struct ethtool_value {
34759 +#endif /* ETHTOOL_GEEPROM */
34761 +#ifndef ETHTOOL_GLINK
34762 +#define ETHTOOL_GLINK 0xa
34763 +#endif /* ETHTOOL_GLINK */
34765 +#ifndef ETHTOOL_GWOL
34766 +#define ETHTOOL_GWOL 0x5
34767 +#define ETHTOOL_SWOL 0x6
34768 +#define SOPASS_MAX 6
34769 +struct ethtool_wolinfo {
34773 + u8 sopass[SOPASS_MAX]; /* SecureOn(tm) password */
34775 +#endif /* ETHTOOL_GWOL */
34777 +#ifndef ETHTOOL_GREGS
34778 +#define ETHTOOL_GREGS 0x00000004 /* Get NIC registers */
34779 +#define ethtool_regs _kc_ethtool_regs
34780 +/* for passing big chunks of data */
34781 +struct _kc_ethtool_regs {
34783 + u32 version; /* driver-specific, indicates different chips/revs */
34784 + u32 len; /* bytes */
34787 +#endif /* ETHTOOL_GREGS */
34789 +#ifndef ETHTOOL_GMSGLVL
34790 +#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */
34792 +#ifndef ETHTOOL_SMSGLVL
34793 +#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level, priv. */
34795 +#ifndef ETHTOOL_NWAY_RST
34796 +#define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation, priv */
34798 +#ifndef ETHTOOL_GLINK
34799 +#define ETHTOOL_GLINK 0x0000000a /* Get link status */
34801 +#ifndef ETHTOOL_GEEPROM
34802 +#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */
34804 +#ifndef ETHTOOL_SEEPROM
34805 +#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data */
34807 +#ifndef ETHTOOL_GCOALESCE
34808 +#define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */
34809 +/* for configuring coalescing parameters of chip */
34810 +#define ethtool_coalesce _kc_ethtool_coalesce
34811 +struct _kc_ethtool_coalesce {
34812 + u32 cmd; /* ETHTOOL_{G,S}COALESCE */
34814 + /* How many usecs to delay an RX interrupt after
34815 + * a packet arrives. If 0, only rx_max_coalesced_frames
34818 + u32 rx_coalesce_usecs;
34820 + /* How many packets to delay an RX interrupt after
34821 + * a packet arrives. If 0, only rx_coalesce_usecs is
34822 + * used. It is illegal to set both usecs and max frames
34823 + * to zero as this would cause RX interrupts to never be
34826 + u32 rx_max_coalesced_frames;
34828 + /* Same as above two parameters, except that these values
34829 + * apply while an IRQ is being serviced by the host. Not
34830 + * all cards support this feature and the values are ignored
34833 + u32 rx_coalesce_usecs_irq;
34834 + u32 rx_max_coalesced_frames_irq;
34836 + /* How many usecs to delay a TX interrupt after
34837 + * a packet is sent. If 0, only tx_max_coalesced_frames
34840 + u32 tx_coalesce_usecs;
34842 + /* How many packets to delay a TX interrupt after
34843 + * a packet is sent. If 0, only tx_coalesce_usecs is
34844 + * used. It is illegal to set both usecs and max frames
34845 + * to zero as this would cause TX interrupts to never be
34848 + u32 tx_max_coalesced_frames;
34850 + /* Same as above two parameters, except that these values
34851 + * apply while an IRQ is being serviced by the host. Not
34852 + * all cards support this feature and the values are ignored
34855 + u32 tx_coalesce_usecs_irq;
34856 + u32 tx_max_coalesced_frames_irq;
34858 + /* How many usecs to delay in-memory statistics
34859 + * block updates. Some drivers do not have an in-memory
34860 + * statistic block, and in such cases this value is ignored.
34861 + * This value must not be zero.
34863 + u32 stats_block_coalesce_usecs;
34865 + /* Adaptive RX/TX coalescing is an algorithm implemented by
34866 + * some drivers to improve latency under low packet rates and
34867 + * improve throughput under high packet rates. Some drivers
34868 + * only implement one of RX or TX adaptive coalescing. Anything
34869 + * not implemented by the driver causes these values to be
34870 + * silently ignored.
34872 + u32 use_adaptive_rx_coalesce;
34873 + u32 use_adaptive_tx_coalesce;
34875 + /* When the packet rate (measured in packets per second)
34876 + * is below pkt_rate_low, the {rx,tx}_*_low parameters are
34879 + u32 pkt_rate_low;
34880 + u32 rx_coalesce_usecs_low;
34881 + u32 rx_max_coalesced_frames_low;
34882 + u32 tx_coalesce_usecs_low;
34883 + u32 tx_max_coalesced_frames_low;
34885 + /* When the packet rate is below pkt_rate_high but above
34886 + * pkt_rate_low (both measured in packets per second) the
34887 + * normal {rx,tx}_* coalescing parameters are used.
34890 + /* When the packet rate (measured in packets per second)
34891 + * is above pkt_rate_high, the {rx,tx}_*_high parameters are
34894 + u32 pkt_rate_high;
34895 + u32 rx_coalesce_usecs_high;
34896 + u32 rx_max_coalesced_frames_high;
34897 + u32 tx_coalesce_usecs_high;
34898 + u32 tx_max_coalesced_frames_high;
34900 + /* How often to do adaptive coalescing packet rate sampling,
34901 + * measured in seconds. Must not be zero.
34903 + u32 rate_sample_interval;
34905 +#endif /* ETHTOOL_GCOALESCE */
34907 +#ifndef ETHTOOL_SCOALESCE
34908 +#define ETHTOOL_SCOALESCE 0x0000000f /* Set coalesce config. */
34910 +#ifndef ETHTOOL_GRINGPARAM
34911 +#define ETHTOOL_GRINGPARAM 0x00000010 /* Get ring parameters */
34912 +/* for configuring RX/TX ring parameters */
34913 +#define ethtool_ringparam _kc_ethtool_ringparam
34914 +struct _kc_ethtool_ringparam {
34915 + u32 cmd; /* ETHTOOL_{G,S}RINGPARAM */
34917 + /* Read only attributes. These indicate the maximum number
34918 + * of pending RX/TX ring entries the driver will allow the
34921 + u32 rx_max_pending;
34922 + u32 rx_mini_max_pending;
34923 + u32 rx_jumbo_max_pending;
34924 + u32 tx_max_pending;
34926 + /* Values changeable by the user. The valid values are
34927 + * in the range 1 to the "*_max_pending" counterpart above.
34930 + u32 rx_mini_pending;
34931 + u32 rx_jumbo_pending;
34934 +#endif /* ETHTOOL_GRINGPARAM */
34936 +#ifndef ETHTOOL_SRINGPARAM
34937 +#define ETHTOOL_SRINGPARAM 0x00000011 /* Set ring parameters, priv. */
34939 +#ifndef ETHTOOL_GPAUSEPARAM
34940 +#define ETHTOOL_GPAUSEPARAM 0x00000012 /* Get pause parameters */
34941 +/* for configuring link flow control parameters */
34942 +#define ethtool_pauseparam _kc_ethtool_pauseparam
34943 +struct _kc_ethtool_pauseparam {
34944 + u32 cmd; /* ETHTOOL_{G,S}PAUSEPARAM */
34946 + /* If the link is being auto-negotiated (via ethtool_cmd.autoneg
34947 + * being true) the user may set 'autoneg' here non-zero to have the
34948 + * pause parameters be auto-negotiated too. In such a case, the
34949 + * {rx,tx}_pause values below determine what capabilities are
34952 + * If 'autoneg' is zero or the link is not being auto-negotiated,
34953 + * then {rx,tx}_pause force the driver to use/not-use pause
34960 +#endif /* ETHTOOL_GPAUSEPARAM */
34962 +#ifndef ETHTOOL_SPAUSEPARAM
34963 +#define ETHTOOL_SPAUSEPARAM 0x00000013 /* Set pause parameters. */
34965 +#ifndef ETHTOOL_GRXCSUM
34966 +#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */
34968 +#ifndef ETHTOOL_SRXCSUM
34969 +#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */
34971 +#ifndef ETHTOOL_GTXCSUM
34972 +#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */
34974 +#ifndef ETHTOOL_STXCSUM
34975 +#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */
34977 +#ifndef ETHTOOL_GSG
34978 +#define ETHTOOL_GSG 0x00000018 /* Get scatter-gather enable
34979 + * (ethtool_value) */
34981 +#ifndef ETHTOOL_SSG
34982 +#define ETHTOOL_SSG 0x00000019 /* Set scatter-gather enable
34983 + * (ethtool_value). */
34985 +#ifndef ETHTOOL_TEST
34986 +#define ETHTOOL_TEST 0x0000001a /* execute NIC self-test, priv. */
34988 +#ifndef ETHTOOL_GSTRINGS
34989 +#define ETHTOOL_GSTRINGS 0x0000001b /* get specified string set */
34991 +#ifndef ETHTOOL_PHYS_ID
34992 +#define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */
34994 +#ifndef ETHTOOL_GSTATS
34995 +#define ETHTOOL_GSTATS 0x0000001d /* get NIC-specific statistics */
34997 +#ifndef ETHTOOL_GTSO
34998 +#define ETHTOOL_GTSO 0x0000001e /* Get TSO enable (ethtool_value) */
35000 +#ifndef ETHTOOL_STSO
35001 +#define ETHTOOL_STSO 0x0000001f /* Set TSO enable (ethtool_value) */
35004 +#ifndef ETHTOOL_BUSINFO_LEN
35005 +#define ETHTOOL_BUSINFO_LEN 32
35008 +#ifndef RHEL_RELEASE_CODE
35009 +#define RHEL_RELEASE_CODE 0
35011 +#ifndef RHEL_RELEASE_VERSION
35012 +#define RHEL_RELEASE_VERSION(a,b) 0
35014 +#ifndef AX_RELEASE_CODE
35015 +#define AX_RELEASE_CODE 0
35017 +#ifndef AX_RELEASE_VERSION
35018 +#define AX_RELEASE_VERSION(a,b) 0
35021 +/*****************************************************************************/
35022 +/* 2.4.3 => 2.4.0 */
35023 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) )
35025 +/**************************************/
35026 +/* PCI DRIVER API */
35028 +#ifndef pci_set_dma_mask
35029 +#define pci_set_dma_mask _kc_pci_set_dma_mask
35030 +extern int _kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask);
35033 +#ifndef pci_request_regions
35034 +#define pci_request_regions _kc_pci_request_regions
35035 +extern int _kc_pci_request_regions(struct pci_dev *pdev, char *res_name);
35038 +#ifndef pci_release_regions
35039 +#define pci_release_regions _kc_pci_release_regions
35040 +extern void _kc_pci_release_regions(struct pci_dev *pdev);
35043 +/**************************************/
35044 +/* NETWORK DRIVER API */
35046 +#ifndef alloc_etherdev
35047 +#define alloc_etherdev _kc_alloc_etherdev
35048 +extern struct net_device * _kc_alloc_etherdev(int sizeof_priv);
35051 +#ifndef is_valid_ether_addr
35052 +#define is_valid_ether_addr _kc_is_valid_ether_addr
35053 +extern int _kc_is_valid_ether_addr(u8 *addr);
35056 +/**************************************/
35057 +/* MISCELLANEOUS */
35059 +#ifndef INIT_TQUEUE
35060 +#define INIT_TQUEUE(_tq, _routine, _data) \
35062 + INIT_LIST_HEAD(&(_tq)->list); \
35063 + (_tq)->sync = 0; \
35064 + (_tq)->routine = _routine; \
35065 + (_tq)->data = _data; \
35069 +#endif /* 2.4.3 => 2.4.0 */
35071 +/*****************************************************************************/
35072 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,5) )
35073 +/* Generic MII registers. */
35074 +#define MII_BMCR 0x00 /* Basic mode control register */
35075 +#define MII_BMSR 0x01 /* Basic mode status register */
35076 +#define MII_PHYSID1 0x02 /* PHYS ID 1 */
35077 +#define MII_PHYSID2 0x03 /* PHYS ID 2 */
35078 +#define MII_ADVERTISE 0x04 /* Advertisement control reg */
35079 +#define MII_LPA 0x05 /* Link partner ability reg */
35080 +#define MII_EXPANSION 0x06 /* Expansion register */
35081 +/* Basic mode control register. */
35082 +#define BMCR_FULLDPLX 0x0100 /* Full duplex */
35083 +#define BMCR_ANENABLE 0x1000 /* Enable auto negotiation */
35084 +/* Basic mode status register. */
35085 +#define BMSR_ERCAP 0x0001 /* Ext-reg capability */
35086 +#define BMSR_ANEGCAPABLE 0x0008 /* Able to do auto-negotiation */
35087 +#define BMSR_10HALF 0x0800 /* Can do 10mbps, half-duplex */
35088 +#define BMSR_10FULL 0x1000 /* Can do 10mbps, full-duplex */
35089 +#define BMSR_100HALF 0x2000 /* Can do 100mbps, half-duplex */
35090 +#define BMSR_100FULL 0x4000 /* Can do 100mbps, full-duplex */
35091 +/* Advertisement control register. */
35092 +#define ADVERTISE_CSMA 0x0001 /* Only selector supported */
35093 +#define ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */
35094 +#define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */
35095 +#define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */
35096 +#define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */
35097 +#define ADVERTISE_ALL (ADVERTISE_10HALF | ADVERTISE_10FULL | \
35098 + ADVERTISE_100HALF | ADVERTISE_100FULL)
35099 +/* Expansion register for auto-negotiation. */
35100 +#define EXPANSION_ENABLENPAGE 0x0004 /* This enables npage words */
35103 +/*****************************************************************************/
35104 +/* 2.4.6 => 2.4.3 */
35105 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) )
35107 +#ifndef pci_set_power_state
35108 +#define pci_set_power_state _kc_pci_set_power_state
35109 +extern int _kc_pci_set_power_state(struct pci_dev *dev, int state);
35112 +#ifndef pci_enable_wake
35113 +#define pci_enable_wake _kc_pci_enable_wake
35114 +extern int _kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable);
35117 +#ifndef pci_disable_device
35118 +#define pci_disable_device _kc_pci_disable_device
35119 +extern void _kc_pci_disable_device(struct pci_dev *pdev);
35122 +/* PCI PM entry point syntax changed, so don't support suspend/resume */
35125 +#endif /* 2.4.6 => 2.4.3 */
35127 +#ifndef HAVE_PCI_SET_MWI
35128 +#define pci_set_mwi(X) pci_write_config_word(X, \
35129 + PCI_COMMAND, adapter->hw.bus.pci_cmd_word | \
35130 + PCI_COMMAND_INVALIDATE);
35131 +#define pci_clear_mwi(X) pci_write_config_word(X, \
35132 + PCI_COMMAND, adapter->hw.bus.pci_cmd_word & \
35133 + ~PCI_COMMAND_INVALIDATE);
35136 +/*****************************************************************************/
35137 +/* 2.4.10 => 2.4.9 */
35138 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,10) )
35140 +/**************************************/
35143 +#ifndef MODULE_LICENSE
35144 + #define MODULE_LICENSE(X)
35147 +/**************************************/
35151 +#define min(x,y) ({ \
35152 + const typeof(x) _x = (x); \
35153 + const typeof(y) _y = (y); \
35154 + (void) (&_x == &_y); \
35155 + _x < _y ? _x : _y; })
35158 +#define max(x,y) ({ \
35159 + const typeof(x) _x = (x); \
35160 + const typeof(y) _y = (y); \
35161 + (void) (&_x == &_y); \
35162 + _x > _y ? _x : _y; })
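+/*
+ * The (void) (&_x == &_y) expression above is a compile-time type check:
+ * comparing pointers to distinct types draws a compiler warning, so min()
+ * and max() complain when their arguments have mismatched types.
+ */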
35164 +#define min_t(type,x,y) ({ \
35165 + type _x = (x); \
35166 + type _y = (y); \
35167 + _x < _y ? _x : _y; })
35169 +#define max_t(type,x,y) ({ \
35170 + type _x = (x); \
35171 + type _y = (y); \
35172 + _x > _y ? _x : _y; })
35174 +#ifndef list_for_each_safe
35175 +#define list_for_each_safe(pos, n, head) \
35176 + for (pos = (head)->next, n = pos->next; pos != (head); \
35177 + pos = n, n = pos->next)
35180 +#ifndef ____cacheline_aligned_in_smp
35182 +#define ____cacheline_aligned_in_smp ____cacheline_aligned
35184 +#define ____cacheline_aligned_in_smp
35185 +#endif /* CONFIG_SMP */
35188 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) )
35189 +extern int _kc_snprintf(char * buf, size_t size, const char *fmt, ...);
35190 +#define snprintf(buf, size, fmt, args...) _kc_snprintf(buf, size, fmt, ##args)
35191 +extern int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
35192 +#define vsnprintf(buf, size, fmt, args) _kc_vsnprintf(buf, size, fmt, args)
35193 +#else /* 2.4.8 => 2.4.9 */
35194 +extern int snprintf(char * buf, size_t size, const char *fmt, ...);
35195 +extern int vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
35197 +#endif /* 2.4.10 -> 2.4.6 */
35200 +/*****************************************************************************/
35201 +/* 2.4.13 => 2.4.10 */
35202 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) )
35204 +/**************************************/
35205 +/* PCI DMA MAPPING */
35207 +#ifndef virt_to_page
35208 + #define virt_to_page(v) (mem_map + (virt_to_phys(v) >> PAGE_SHIFT))
35211 +#ifndef pci_map_page
35212 +#define pci_map_page _kc_pci_map_page
35213 +extern u64 _kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, size_t size, int direction);
35216 +#ifndef pci_unmap_page
35217 +#define pci_unmap_page _kc_pci_unmap_page
35218 +extern void _kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, int direction);
35221 +/* pci_set_dma_mask takes dma_addr_t, which is only 32-bits prior to 2.4.13 */
35223 +#undef DMA_32BIT_MASK
35224 +#define DMA_32BIT_MASK 0xffffffff
35225 +#undef DMA_64BIT_MASK
35226 +#define DMA_64BIT_MASK 0xffffffff
35228 +/**************************************/
35232 +#define cpu_relax() rep_nop()
35235 +struct vlan_ethhdr {
35236 + unsigned char h_dest[ETH_ALEN];
35237 + unsigned char h_source[ETH_ALEN];
35238 + unsigned short h_vlan_proto;
35239 + unsigned short h_vlan_TCI;
35240 + unsigned short h_vlan_encapsulated_proto;
35242 +#endif /* 2.4.13 => 2.4.10 */
35244 +/*****************************************************************************/
35245 +/* 2.4.17 => 2.4.12 */
35246 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,17) )
35248 +#ifndef __devexit_p
35249 + #define __devexit_p(x) &(x)
35252 +#endif /* 2.4.17 => 2.4.13 */
35254 +/*****************************************************************************/
35255 +/* 2.4.20 => 2.4.19 */
35256 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,20) )
35258 +/* we won't support NAPI on less than 2.4.20 */
35261 +#undef CONFIG_IXGBE_NAPI
35264 +#endif /* 2.4.20 => 2.4.19 */
35266 +/*****************************************************************************/
35268 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,21) )
35269 +#define skb_pad(x,y) _kc_skb_pad(x, y)
35270 +struct sk_buff * _kc_skb_pad(struct sk_buff *skb, int pad);
35271 +#endif /* < 2.4.21 */
35273 +/*****************************************************************************/
35274 +/* 2.4.22 => 2.4.17 */
35275 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) )
35276 +#define pci_name(x) ((x)->slot_name)
35279 +/*****************************************************************************/
35280 +/* 2.4.22 => 2.4.17 */
35282 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) )
35283 +#ifndef IXGBE_NO_LRO
35284 +/* Don't enable LRO for these legacy kernels */
35285 +#define IXGBE_NO_LRO
35289 +/*****************************************************************************/
35290 +/*****************************************************************************/
35291 +/* 2.4.23 => 2.4.22 */
35292 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23) )
35293 +/*****************************************************************************/
35295 +#ifndef netif_poll_disable
35296 +#define netif_poll_disable(x) _kc_netif_poll_disable(x)
35297 +static inline void _kc_netif_poll_disable(struct net_device *netdev)
35299 + while (test_and_set_bit(__LINK_STATE_RX_SCHED, &netdev->state)) {
35301 + current->state = TASK_INTERRUPTIBLE;
35302 + schedule_timeout(1);
35306 +#ifndef netif_poll_enable
35307 +#define netif_poll_enable(x) _kc_netif_poll_enable(x)
35308 +static inline void _kc_netif_poll_enable(struct net_device *netdev)
35310 + clear_bit(__LINK_STATE_RX_SCHED, &netdev->state);
35314 +#ifndef netif_tx_disable
35315 +#define netif_tx_disable(x) _kc_netif_tx_disable(x)
35316 +static inline void _kc_netif_tx_disable(struct net_device *dev)
35318 + spin_lock_bh(&dev->xmit_lock);
35319 + netif_stop_queue(dev);
35320 + spin_unlock_bh(&dev->xmit_lock);
35323 +#endif /* 2.4.23 => 2.4.22 */
35325 +/*****************************************************************************/
35326 +/* 2.6.4 => 2.6.0 */
35327 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,25) || \
35328 + ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \
35329 + LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) )
35330 +#define ETHTOOL_OPS_COMPAT
35331 +#endif /* 2.6.4 => 2.6.0 */
35333 +/*****************************************************************************/
35334 +/* 2.5.71 => 2.4.x */
35335 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,71) )
35336 +#define sk_protocol protocol
35337 +#define pci_get_device pci_find_device
35338 +#endif /* 2.5.71 => 2.4.x */
35340 +/*****************************************************************************/
35341 +/* < 2.4.27 or 2.6.0 <= 2.6.5 */
35342 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) || \
35343 + ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \
35344 + LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) )
35346 +#ifndef netif_msg_init
35347 +#define netif_msg_init _kc_netif_msg_init
35348 +static inline u32 _kc_netif_msg_init(int debug_value, int default_msg_enable_bits)
35350 + /* use default */
35351 + if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
35352 + return default_msg_enable_bits;
35353 + if (debug_value == 0) /* no output */
35354 + return 0;
35355 + /* set low N bits */
35356 + return (1 << debug_value) - 1;
35357 +}
35360 +#endif /* < 2.4.27 or 2.6.0 <= 2.6.5 */
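netif_msg_init() folds a module "debug" parameter into a message-enable bitmap: -1 keeps the driver defaults, 0 silences output, and N sets the low N bits. A hypothetical probe-time use (names are illustrative, not from the patch):

    static int debug = -1;  /* would normally be a module parameter */

    #define EXAMPLE_DEFAULT_MSG \
            (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

    struct example_adapter {
            u32 msg_enable;
    };

    static void example_init_msg_level(struct example_adapter *adapter)
    {
            adapter->msg_enable = netif_msg_init(debug, EXAMPLE_DEFAULT_MSG);
    }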
35361 +/*****************************************************************************/
35362 +#if (( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) || \
35363 + (( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) && \
35364 + ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) )))
35365 +#define netdev_priv(x) x->priv
35368 +/*****************************************************************************/
35370 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) )
35371 +#undef pci_register_driver
35372 +#define pci_register_driver pci_module_init
35375 + * Most of the DMA compat code is copied/modified from the 2.4.37
35376 + * /include/linux/libata-compat.h header file
35378 +/* These definitions mirror those in pci.h, so they can be used
35379 + * interchangeably with their PCI_ counterparts */
35380 +enum dma_data_direction {
35381 + DMA_BIDIRECTIONAL = 0,
35382 + DMA_TO_DEVICE = 1,
35383 + DMA_FROM_DEVICE = 2,
35384 + DMA_NONE = 3,
35385 +};
35387 +struct device {
35388 + struct pci_dev pdev;
35389 +};
35391 +static inline struct pci_dev *to_pci_dev (struct device *dev)
35393 + return (struct pci_dev *) dev;
35395 +static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
35397 + return (struct device *) pdev;
35400 +#define pdev_printk(lvl, pdev, fmt, args...) \
35401 + printk("%s %s: " fmt, lvl, pci_name(pdev), ## args)
35402 +#define dev_err(dev, fmt, args...) \
35403 + pdev_printk(KERN_ERR, to_pci_dev(dev), fmt, ## args)
35404 +#define dev_info(dev, fmt, args...) \
35405 + pdev_printk(KERN_INFO, to_pci_dev(dev), fmt, ## args)
35406 +#define dev_warn(dev, fmt, args...) \
35407 + pdev_printk(KERN_WARNING, to_pci_dev(dev), fmt, ## args)
35409 +/* NOTE: dangerous! we ignore the 'gfp' argument */
35410 +#define dma_alloc_coherent(dev,sz,dma,gfp) \
35411 + pci_alloc_consistent(to_pci_dev(dev),(sz),(dma))
35412 +#define dma_free_coherent(dev,sz,addr,dma_addr) \
35413 + pci_free_consistent(to_pci_dev(dev),(sz),(addr),(dma_addr))
35415 +#define dma_map_page(dev,a,b,c,d) \
35416 + pci_map_page(to_pci_dev(dev),(a),(b),(c),(d))
35417 +#define dma_unmap_page(dev,a,b,c) \
35418 + pci_unmap_page(to_pci_dev(dev),(a),(b),(c))
35420 +#define dma_map_single(dev,a,b,c) \
35421 + pci_map_single(to_pci_dev(dev),(a),(b),(c))
35422 +#define dma_unmap_single(dev,a,b,c) \
35423 + pci_unmap_single(to_pci_dev(dev),(a),(b),(c))
35425 +#define dma_sync_single(dev,a,b,c) \
35426 + pci_dma_sync_single(to_pci_dev(dev),(a),(b),(c))
35428 +#define dma_set_mask(dev,mask) \
35429 + pci_set_dma_mask(to_pci_dev(dev),(mask))
35431 +/* hlist_* code - double linked lists */
35432 +struct hlist_head {
35433 + struct hlist_node *first;
35434 +};
35436 +struct hlist_node {
35437 + struct hlist_node *next, **pprev;
35438 +};
35440 +static inline void __hlist_del(struct hlist_node *n)
35442 + struct hlist_node *next = n->next;
35443 + struct hlist_node **pprev = n->pprev;
35444 + *pprev = next;
35445 + if (next)
35446 + next->pprev = pprev;
35449 +static inline void hlist_del(struct hlist_node *n)
35456 +static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
35458 + struct hlist_node *first = h->first;
35459 + n->next = first;
35460 + if (first)
35461 + first->pprev = &n->next;
35462 + h->first = n;
35463 + n->pprev = &h->first;
35466 +static inline int hlist_empty(const struct hlist_head *h)
35468 + return !h->first;
35470 +#define HLIST_HEAD_INIT { .first = NULL }
35471 +#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
35472 +#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
35473 +static inline void INIT_HLIST_NODE(struct hlist_node *h)
35474 +{
35475 + h->next = NULL;
35476 + h->pprev = NULL;
35477 +}
35478 +#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
35480 +#define hlist_for_each_entry(tpos, pos, head, member) \
35481 + for (pos = (head)->first; \
35482 + pos && ({ prefetch(pos->next); 1;}) && \
35483 + ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
35484 + pos = pos->next)
35486 +#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \
35487 + for (pos = (head)->first; \
35488 + pos && ({ n = pos->next; 1; }) && \
35489 + ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
35490 + pos = n)
35492 +#ifndef might_sleep
35493 +#define might_sleep()
35496 +static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
35498 + return &pdev->dev;
35500 +#endif /* <= 2.5.0 */
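The hlist backport above is the standard 2.6 open-hashing primitive: pprev points at the previous node's next pointer (or at the head's first), which makes deletion O(1) without walking from the head. A hypothetical usage sketch (not from the patch):

    struct example_filter {
            u32 addr;
            struct hlist_node node;
    };

    static HLIST_HEAD(example_bucket);

    static void example_add_and_walk(struct example_filter *f)
    {
            struct example_filter *tpos;
            struct hlist_node *pos;

            hlist_add_head(&f->node, &example_bucket);
            hlist_for_each_entry(tpos, pos, &example_bucket, node)
                    printk(KERN_DEBUG "filter %08x\n", tpos->addr);
    }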
35502 +/*****************************************************************************/
35503 +/* 2.5.28 => 2.4.23 */
35504 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) )
35506 +static inline void _kc_synchronize_irq(void)
35507 +{
35508 + synchronize_irq();
35509 +}
35510 +#undef synchronize_irq
35511 +#define synchronize_irq(X) _kc_synchronize_irq()
35513 +#include <linux/tqueue.h>
35514 +#define work_struct tq_struct
35516 +#define INIT_WORK(a,b) INIT_TQUEUE(a,(void (*)(void *))b,a)
35517 +#undef container_of
35518 +#define container_of list_entry
35519 +#define schedule_work schedule_task
35520 +#define flush_scheduled_work flush_scheduled_tasks
35521 +#define cancel_work_sync(x) flush_scheduled_work()
35523 +#endif /* 2.5.28 => 2.4.23 */
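Under these mappings a 2.6-style work item compiles on 2.4, where it becomes a task-queue entry; note that the compat INIT_WORK passes the work struct itself as the handler's argument. A hypothetical sketch (not from the patch):

    struct example_task {
            struct work_struct reset_work;
    };

    static void example_reset_handler(struct work_struct *work)
    {
            struct example_task *t =
                    container_of(work, struct example_task, reset_work);
            /* ... perform the deferred reset for t ... */
    }

    static void example_schedule_reset(struct example_task *t)
    {
            INIT_WORK(&t->reset_work, example_reset_handler);
            schedule_work(&t->reset_work);  /* schedule_task() on 2.4 */
    }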
35525 +/*****************************************************************************/
35526 +/* 2.6.0 => 2.5.28 */
35527 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
35529 +#define get_cpu() smp_processor_id()
35531 +#define put_cpu() do { } while(0)
35532 +#define MODULE_INFO(version, _version)
35533 +#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
35534 +#define CONFIG_E1000_DISABLE_PACKET_SPLIT 1
35535 +#endif
35536 +#define CONFIG_IGB_DISABLE_PACKET_SPLIT 1
35538 +#define dma_set_coherent_mask(dev,mask) 1
35541 +#define dev_put(dev) __dev_put(dev)
35543 +#ifndef skb_fill_page_desc
35544 +#define skb_fill_page_desc _kc_skb_fill_page_desc
35545 +extern void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size);
35549 +#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1))
35551 +#ifndef page_count
35552 +#define page_count(p) atomic_read(&(p)->count)
35555 +#ifdef MAX_NUMNODES
35556 +#undef MAX_NUMNODES
35558 +#define MAX_NUMNODES 1
35560 +/* find_first_bit and find_next_bit are not defined for most
35561 + * 2.4 kernels (except for the redhat 2.4.21 kernels)
35562 + */
35563 +#include <linux/bitops.h>
35564 +#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
35565 +#undef find_next_bit
35566 +#define find_next_bit _kc_find_next_bit
35567 +extern unsigned long _kc_find_next_bit(const unsigned long *addr,
35568 + unsigned long size,
35569 + unsigned long offset);
35570 +#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
35573 +#ifndef netdev_name
35574 +static inline const char *_kc_netdev_name(const struct net_device *dev)
35576 + if (strchr(dev->name, '%'))
35577 + return "(unregistered net_device)";
35578 + return dev->name;
35580 +#define netdev_name(netdev) _kc_netdev_name(netdev)
35581 +#endif /* netdev_name */
35582 +#endif /* 2.6.0 => 2.5.28 */
35584 +/*****************************************************************************/
35585 +/* 2.6.4 => 2.6.0 */
35586 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
35587 +#define MODULE_VERSION(_version) MODULE_INFO(version, _version)
35588 +#endif /* 2.6.4 => 2.6.0 */
35590 +/*****************************************************************************/
35591 +/* 2.6.5 => 2.6.0 */
35592 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) )
35593 +#define dma_sync_single_for_cpu dma_sync_single
35594 +#define dma_sync_single_for_device dma_sync_single
35595 +#ifndef pci_dma_mapping_error
35596 +#define pci_dma_mapping_error _kc_pci_dma_mapping_error
35597 +static inline int _kc_pci_dma_mapping_error(dma_addr_t dma_addr)
35599 + return dma_addr == 0;
35602 +#endif /* 2.6.5 => 2.6.0 */
35604 +/*****************************************************************************/
35605 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
35606 +extern int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...);
35607 +#define scnprintf(buf, size, fmt, args...) _kc_scnprintf(buf, size, fmt, ##args)
35608 +#endif /* < 2.6.4 */
35610 +/*****************************************************************************/
35611 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,6) )
35612 +/* taken from 2.6 include/linux/bitmap.h */
35613 +#undef bitmap_zero
35614 +#define bitmap_zero _kc_bitmap_zero
35615 +static inline void _kc_bitmap_zero(unsigned long *dst, int nbits)
35616 +{
35617 + if (nbits <= BITS_PER_LONG)
35618 + *dst = 0UL;
35619 + else {
35620 + int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
35621 + memset(dst, 0, len);
35622 + }
35623 +}
35624 +#define random_ether_addr _kc_random_ether_addr
35625 +static inline void _kc_random_ether_addr(u8 *addr)
35626 +{
35627 + get_random_bytes(addr, ETH_ALEN);
35628 + addr[0] &= 0xfe; /* clear multicast */
35629 + addr[0] |= 0x02; /* set local assignment */
35630 +}
35631 +#define page_to_nid(x) 0
35633 +#endif /* < 2.6.6 */
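The two bit operations guarantee a locally administered (bit 1 set), non-multicast (bit 0 clear) address: a random first byte of 0x01, for example, becomes 0x02. A hypothetical fallback for when the EEPROM MAC is invalid (not from the patch):

    static void example_assign_random_mac(struct net_device *netdev)
    {
            u8 addr[ETH_ALEN];

            random_ether_addr(addr);  /* never multicast, always local */
            memcpy(netdev->dev_addr, addr, ETH_ALEN);
    }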
35635 +/*****************************************************************************/
35636 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) )
35638 +#define if_mii _kc_if_mii
35639 +static inline struct mii_ioctl_data *_kc_if_mii(struct ifreq *rq)
35641 + return (struct mii_ioctl_data *) &rq->ifr_ifru;
35643 +#endif /* < 2.6.7 */
35645 +/*****************************************************************************/
35646 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) )
35647 +#ifndef PCI_EXP_DEVCTL
35648 +#define PCI_EXP_DEVCTL 8
35650 +#ifndef PCI_EXP_DEVCTL_CERE
35651 +#define PCI_EXP_DEVCTL_CERE 0x0001
35653 +#define msleep(x) do { set_current_state(TASK_UNINTERRUPTIBLE); \
35654 + schedule_timeout((x * HZ)/1000 + 2); \
35655 + } while (0)
35657 +#endif /* < 2.6.8 */
35659 +/*****************************************************************************/
35660 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9))
35661 +#include <net/dsfield.h>
35665 +#define kcalloc(n, size, flags) _kc_kzalloc(((n) * (size)), flags)
35666 +extern void *_kc_kzalloc(size_t size, int flags);
35668 +#define MSEC_PER_SEC 1000L
35669 +static inline unsigned int _kc_jiffies_to_msecs(const unsigned long j)
35671 +#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
35672 + return (MSEC_PER_SEC / HZ) * j;
35673 +#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
35674 + return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
35675 +#else
35676 + return (j * MSEC_PER_SEC) / HZ;
35677 +#endif
35678 +}
35679 +static inline unsigned long _kc_msecs_to_jiffies(const unsigned int m)
35681 + if (m > _kc_jiffies_to_msecs(MAX_JIFFY_OFFSET))
35682 + return MAX_JIFFY_OFFSET;
35683 +#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
35684 + return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
35685 +#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
35686 + return m * (HZ / MSEC_PER_SEC);
35687 +#else
35688 + return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC;
35689 +#endif
35690 +}
35692 +#define msleep_interruptible _kc_msleep_interruptible
35693 +static inline unsigned long _kc_msleep_interruptible(unsigned int msecs)
35695 + unsigned long timeout = _kc_msecs_to_jiffies(msecs) + 1;
35697 + while (timeout && !signal_pending(current)) {
35698 + __set_current_state(TASK_INTERRUPTIBLE);
35699 + timeout = schedule_timeout(timeout);
35701 + return _kc_jiffies_to_msecs(timeout);
35704 +/* Basic mode control register. */
35705 +#define BMCR_SPEED1000 0x0040 /* MSB of Speed (1000) */
35708 +#define __le16 u16
35711 +#define __le32 u32
35714 +#define __le64 u64
35717 +#define __be16 u16
35720 +static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
35722 + return (struct vlan_ethhdr *)skb->mac.raw;
35725 +/* Wake-On-Lan options. */
35726 +#define WAKE_PHY (1 << 0)
35727 +#define WAKE_UCAST (1 << 1)
35728 +#define WAKE_MCAST (1 << 2)
35729 +#define WAKE_BCAST (1 << 3)
35730 +#define WAKE_ARP (1 << 4)
35731 +#define WAKE_MAGIC (1 << 5)
35732 +#define WAKE_MAGICSECURE (1 << 6) /* only meaningful if WAKE_MAGIC */
35734 +#endif /* < 2.6.9 */
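Worked example for the conversion helpers (assuming HZ = 100, so MSEC_PER_SEC % HZ == 0 and the first branch is taken): _kc_msecs_to_jiffies(250) = (250 + 10 - 1) / 10 = 25 jiffies, and _kc_jiffies_to_msecs(25) = 10 * 25 = 250 ms, so the round trip is lossless. A hypothetical wait using the backported msleep_interruptible() (not from the patch):

    static void example_wait_for_phy(void)
    {
            unsigned long remaining = msleep_interruptible(250);

            if (remaining)  /* a signal cut the sleep short */
                    printk(KERN_INFO "aborted with ~%lu ms left\n",
                           remaining);
    }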
35736 +/*****************************************************************************/
35737 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) )
35738 +#ifdef module_param_array_named
35739 +#undef module_param_array_named
35740 +#define module_param_array_named(name, array, type, nump, perm) \
35741 + static struct kparam_array __param_arr_##name \
35742 + = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type, \
35743 + sizeof(array[0]), array }; \
35744 + module_param_call(name, param_array_set, param_array_get, \
35745 + &__param_arr_##name, perm)
35746 +#endif /* module_param_array_named */
35747 +/*
35748 + * num_online_nodes() is broken for all < 2.6.10 kernels. This is needed
35749 + * to support the Node module parameter of ixgbe.
35750 + */
35751 +#undef num_online_nodes
35752 +#define num_online_nodes(n) 1
35753 +extern DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES);
35754 +#undef node_online_map
35755 +#define node_online_map _kcompat_node_online_map
35756 +#endif /* < 2.6.10 */
35758 +/*****************************************************************************/
35759 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) )
35763 +#define PCI_D3hot 3
35764 +#define PCI_D3cold 4
35765 +typedef int pci_power_t;
35766 +#define pci_choose_state(pdev,state) state
35767 +#define PMSG_SUSPEND 3
35768 +#define PCI_EXP_LNKCTL 16
35770 +#undef NETIF_F_LLTX
35772 +#ifndef ARCH_HAS_PREFETCH
35773 +#define prefetch(X)
35776 +#ifndef NET_IP_ALIGN
35777 +#define NET_IP_ALIGN 2
35780 +#define KC_USEC_PER_SEC 1000000L
35781 +#define usecs_to_jiffies _kc_usecs_to_jiffies
35782 +static inline unsigned int _kc_jiffies_to_usecs(const unsigned long j)
35784 +#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ)
35785 + return (KC_USEC_PER_SEC / HZ) * j;
35786 +#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC)
35787 + return (j + (HZ / KC_USEC_PER_SEC) - 1)/(HZ / KC_USEC_PER_SEC);
35788 +#else
35789 + return (j * KC_USEC_PER_SEC) / HZ;
35790 +#endif
35791 +}
35792 +static inline unsigned long _kc_usecs_to_jiffies(const unsigned int m)
35794 + if (m > _kc_jiffies_to_usecs(MAX_JIFFY_OFFSET))
35795 + return MAX_JIFFY_OFFSET;
35796 +#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ)
35797 + return (m + (KC_USEC_PER_SEC / HZ) - 1) / (KC_USEC_PER_SEC / HZ);
35798 +#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC)
35799 + return m * (HZ / KC_USEC_PER_SEC);
35800 +#else
35801 + return (m * HZ + KC_USEC_PER_SEC - 1) / KC_USEC_PER_SEC;
35802 +#endif
35803 +}
35804 +#endif /* < 2.6.11 */
35806 +/*****************************************************************************/
35807 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) )
35808 +#include <linux/reboot.h>
35809 +#define USE_REBOOT_NOTIFIER
35811 +/* Generic MII registers. */
35812 +#define MII_CTRL1000 0x09 /* 1000BASE-T control */
35813 +#define MII_STAT1000 0x0a /* 1000BASE-T status */
35814 +/* Advertisement control register. */
35815 +#define ADVERTISE_PAUSE_CAP 0x0400 /* Try for pause */
35816 +#define ADVERTISE_PAUSE_ASYM 0x0800 /* Try for asymmetric pause */
35817 +/* 1000BASE-T Control register */
35818 +#define ADVERTISE_1000FULL 0x0200 /* Advertise 1000BASE-T full duplex */
35819 +#endif /* < 2.6.12 */
35821 +/*****************************************************************************/
35822 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) )
35823 +#define pm_message_t u32
35825 +#define kzalloc _kc_kzalloc
35826 +extern void *_kc_kzalloc(size_t size, int flags);
35829 +/* Generic MII registers. */
35830 +#define MII_ESTATUS 0x0f /* Extended Status */
35831 +/* Basic mode status register. */
35832 +#define BMSR_ESTATEN 0x0100 /* Extended Status in R15 */
35833 +/* Extended status register. */
35834 +#define ESTATUS_1000_TFULL 0x2000 /* Can do 1000BT Full */
35835 +#define ESTATUS_1000_THALF 0x1000 /* Can do 1000BT Half */
35836 +#endif /* < 2.6.14 */
35838 +/*****************************************************************************/
35839 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15) )
35840 +#ifndef vmalloc_node
35841 +#define vmalloc_node(a,b) vmalloc(a)
35842 +#endif /* vmalloc_node*/
35844 +#define setup_timer(_timer, _function, _data) \
35845 +do { \
35846 + (_timer)->function = _function; \
35847 + (_timer)->data = _data; \
35848 + init_timer(_timer); \
35849 +} while (0)
35850 +#ifndef device_can_wakeup
35851 +#define device_can_wakeup(dev) (1)
35853 +#ifndef device_set_wakeup_enable
35854 +#define device_set_wakeup_enable(dev, val) do{}while(0)
35856 +#ifndef device_init_wakeup
35857 +#define device_init_wakeup(dev,val) do {} while (0)
35859 +#endif /* < 2.6.15 */
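setup_timer() expands to the two field assignments plus init_timer(), so 2.6.15-style timer code carries back unchanged. A hypothetical watchdog setup (not from the patch; names are illustrative):

    struct example_wd_adapter {
            struct timer_list watchdog_timer;
    };

    static void example_watchdog(unsigned long data)
    {
            struct example_wd_adapter *adapter =
                    (struct example_wd_adapter *)data;
            /* ... check link state, then re-arm ... */
            mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
    }

    static void example_start_watchdog(struct example_wd_adapter *adapter)
    {
            setup_timer(&adapter->watchdog_timer, &example_watchdog,
                        (unsigned long)adapter);
            mod_timer(&adapter->watchdog_timer, jiffies + HZ);
    }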
35861 +/*****************************************************************************/
35862 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) )
35863 +#undef DEFINE_MUTEX
35864 +#define DEFINE_MUTEX(x) DECLARE_MUTEX(x)
35865 +#define mutex_lock(x) down_interruptible(x)
35866 +#define mutex_unlock(x) up(x)
35868 +#ifndef ____cacheline_internodealigned_in_smp
35870 +#define ____cacheline_internodealigned_in_smp ____cacheline_aligned_in_smp
35872 +#define ____cacheline_internodealigned_in_smp
35873 +#endif /* CONFIG_SMP */
35874 +#endif /* ____cacheline_internodealigned_in_smp */
35875 +#undef HAVE_PCI_ERS
35876 +#else /* 2.6.16 and above */
35877 +#undef HAVE_PCI_ERS
35878 +#define HAVE_PCI_ERS
35879 +#endif /* < 2.6.16 */
35881 +/*****************************************************************************/
35882 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) )
35883 +#ifndef first_online_node
35884 +#define first_online_node 0
35886 +#ifndef NET_SKB_PAD
35887 +#define NET_SKB_PAD 16
35889 +#endif /* < 2.6.17 */
35891 +/*****************************************************************************/
35892 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) )
35894 +#ifndef IRQ_HANDLED
35895 +#define irqreturn_t void
35896 +#define IRQ_HANDLED
35900 +#ifndef IRQF_PROBE_SHARED
35901 +#ifdef SA_PROBEIRQ
35902 +#define IRQF_PROBE_SHARED SA_PROBEIRQ
35904 +#define IRQF_PROBE_SHARED 0
35908 +#ifndef IRQF_SHARED
35909 +#define IRQF_SHARED SA_SHIRQ
35912 +#ifndef ARRAY_SIZE
35913 +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
35916 +#ifndef FIELD_SIZEOF
35917 +#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
35920 +#ifndef skb_is_gso
35921 +#ifdef NETIF_F_TSO
35922 +#define skb_is_gso _kc_skb_is_gso
35923 +static inline int _kc_skb_is_gso(const struct sk_buff *skb)
35925 + return skb_shinfo(skb)->gso_size;
35928 +#define skb_is_gso(a) 0
35932 +#ifndef resource_size_t
35933 +#define resource_size_t unsigned long
35936 +#endif /* < 2.6.18 */
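With the fallbacks above, the same TSO test compiles everywhere; where NETIF_F_TSO does not exist, skb_is_gso() is the constant 0. A hypothetical transmit-path check (not from the patch):

    static int example_xmit_needs_tso(struct sk_buff *skb)
    {
    #ifdef NETIF_F_TSO
            if (skb_is_gso(skb)) {
                    unsigned int mss = skb_shinfo(skb)->gso_size;

                    /* ... program a hardware TSO context using mss ... */
                    return mss != 0;
            }
    #endif
            return 0;
    }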
35938 +/*****************************************************************************/
35939 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) )
35941 +#ifndef DIV_ROUND_UP
35942 +#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
35944 +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) )
35945 +#if (!(( RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,4) ) && ( RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0) ) || ( RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,0) ) || (AX_RELEASE_CODE > AX_RELEASE_VERSION(3,0))))
35946 +typedef irqreturn_t (*irq_handler_t)(int, void*, struct pt_regs *);
35947 +#endif
35948 +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))
35949 +#undef CONFIG_INET_LRO
35950 +#undef CONFIG_INET_LRO_MODULE
35951 +#undef CONFIG_FCOE
35952 +#undef CONFIG_FCOE_MODULE
35953 +#endif
35954 +typedef irqreturn_t (*new_handler_t)(int, void*);
35955 +static inline irqreturn_t _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id)
35956 +#else
35957 +typedef void (*irq_handler_t)(int, void*, struct pt_regs *);
35958 +typedef void (*new_handler_t)(int, void*);
35959 +static inline int _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id)
35960 +#endif /* >= 2.5.x */
35961 +{
35962 + irq_handler_t new_handler = (irq_handler_t) handler;
35963 + return request_irq(irq, new_handler, flags, devname, dev_id);
35964 +}
35966 +#undef request_irq
35967 +#define request_irq(irq, handler, flags, devname, dev_id) _kc_request_irq((irq), (handler), (flags), (devname), (dev_id))
35969 +#define irq_handler_t new_handler_t
35970 +/* pci_restore_state and pci_save_state handle MSI/PCIe from 2.6.19 */
35971 +#define PCIE_CONFIG_SPACE_LEN 256
35972 +#define PCI_CONFIG_SPACE_LEN 64
35973 +#define PCIE_LINK_STATUS 0x12
35974 +#define pci_config_space_ich8lan() do {} while(0)
35975 +#undef pci_save_state
35976 +extern int _kc_pci_save_state(struct pci_dev *);
35977 +#define pci_save_state(pdev) _kc_pci_save_state(pdev)
35978 +#undef pci_restore_state
35979 +extern void _kc_pci_restore_state(struct pci_dev *);
35980 +#define pci_restore_state(pdev) _kc_pci_restore_state(pdev)
35981 +#ifdef HAVE_PCI_ERS
35982 +#undef free_netdev
35983 +extern void _kc_free_netdev(struct net_device *);
35984 +#define free_netdev(netdev) _kc_free_netdev(netdev)
35986 +static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev)
35987 +{
35988 + return 0;
35989 +}
35990 +#define pci_disable_pcie_error_reporting(dev) do {} while (0)
35991 +#define pci_cleanup_aer_uncorrect_error_status(dev) do {} while (0)
35992 +#else /* 2.6.19 */
35993 +#include <linux/aer.h>
35994 +#endif /* < 2.6.19 */
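The shim lets a 2.6.19-style two-argument handler be registered on older kernels; the macro re-casts it to the three-argument type the old request_irq() expects (the pt_regs argument is simply never read). A hypothetical registration (not from the patch):

    static irqreturn_t example_intr(int irq, void *data)
    {
            struct net_device *netdev = data;
            /* ... read and ack the interrupt cause ... */
            return IRQ_HANDLED;
    }

    static int example_request_irq(struct net_device *netdev,
                                   unsigned int irq)
    {
            return request_irq(irq, &example_intr, IRQF_SHARED,
                               netdev->name, netdev);
    }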
35996 +/*****************************************************************************/
35997 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) )
35998 +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,28) )
36000 +#define INIT_WORK(_work, _func) \
36001 +do { \
36002 + INIT_LIST_HEAD(&(_work)->entry); \
36003 + (_work)->pending = 0; \
36004 + (_work)->func = (void (*)(void *))_func; \
36005 + (_work)->data = _work; \
36006 + init_timer(&(_work)->timer); \
36007 +} while (0)
36010 +#ifndef PCI_VDEVICE
36011 +#define PCI_VDEVICE(ven, dev) \
36012 + PCI_VENDOR_ID_##ven, (dev), \
36013 + PCI_ANY_ID, PCI_ANY_ID, 0, 0
36016 +#ifndef round_jiffies
36017 +#define round_jiffies(x) x
36020 +#define csum_offset csum
36022 +#define HAVE_EARLY_VMALLOC_NODE
36023 +#define dev_to_node(dev) -1
36024 +#undef set_dev_node
36025 +/* assign b = b to silence the compiler warning for the unused variable */
36026 +#define set_dev_node(a, b) do { (b) = (b); } while(0)
36027 +#else /* < 2.6.20 */
36028 +#define HAVE_DEVICE_NUMA_NODE
36029 +#endif /* < 2.6.20 */
36031 +/*****************************************************************************/
36032 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) )
36033 +#define to_net_dev(class) container_of(class, struct net_device, class_dev)
36034 +#define NETDEV_CLASS_DEV
36035 +#define vlan_group_get_device(vg, id) (vg->vlan_devices[id])
36036 +#define vlan_group_set_device(vg, id, dev) if (vg) vg->vlan_devices[id] = dev;
36037 +#define pci_channel_offline(pdev) (pdev->error_state && \
36038 + pdev->error_state != pci_channel_io_normal)
36039 +#define pci_request_selected_regions(pdev, bars, name) \
36040 + pci_request_regions(pdev, name)
36041 +#define pci_release_selected_regions(pdev, bars) pci_release_regions(pdev);
36042 +#endif /* < 2.6.21 */
36044 +/*****************************************************************************/
36045 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) )
36046 +#define tcp_hdr(skb) (skb->h.th)
36047 +#define tcp_hdrlen(skb) (skb->h.th->doff << 2)
36048 +#define skb_transport_offset(skb) (skb->h.raw - skb->data)
36049 +#define skb_transport_header(skb) (skb->h.raw)
36050 +#define ipv6_hdr(skb) (skb->nh.ipv6h)
36051 +#define ip_hdr(skb) (skb->nh.iph)
36052 +#define skb_network_offset(skb) (skb->nh.raw - skb->data)
36053 +#define skb_network_header(skb) (skb->nh.raw)
36054 +#define skb_tail_pointer(skb) skb->tail
36055 +#define skb_reset_tail_pointer(skb) \
36056 + do { \
36057 + skb->tail = skb->data; \
36058 + } while (0)
36059 +#define skb_copy_to_linear_data_offset(skb, offset, from, len) \
36060 + memcpy(skb->data + offset, from, len)
36061 +#define skb_network_header_len(skb) (skb->h.raw - skb->nh.raw)
36062 +#define pci_register_driver pci_module_init
36063 +#define skb_mac_header(skb) skb->mac.raw
36065 +#ifdef NETIF_F_MULTI_QUEUE
36066 +#ifndef alloc_etherdev_mq
36067 +#define alloc_etherdev_mq(_a, _b) alloc_etherdev(_a)
36069 +#endif /* NETIF_F_MULTI_QUEUE */
36071 +#ifndef ETH_FCS_LEN
36072 +#define ETH_FCS_LEN 4
36074 +#define cancel_work_sync(x) flush_scheduled_work()
36076 +#define udp_hdr _udp_hdr
36077 +static inline struct udphdr *_udp_hdr(const struct sk_buff *skb)
36079 + return (struct udphdr *)skb_transport_header(skb);
36083 +#ifdef cpu_to_be16
36084 +#undef cpu_to_be16
36085 +#endif
36086 +#define cpu_to_be16(x) __constant_htons(x)
36088 +#if (!(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1)))
36089 +enum {
36090 + DUMP_PREFIX_NONE,
36091 + DUMP_PREFIX_ADDRESS,
36092 + DUMP_PREFIX_OFFSET
36093 +};
36094 +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1)) */
36096 +#define hex_asc(x) "0123456789abcdef"[x]
36098 +#include <linux/ctype.h>
36099 +extern void _kc_print_hex_dump(const char *level, const char *prefix_str,
36100 + int prefix_type, int rowsize, int groupsize,
36101 + const void *buf, size_t len, bool ascii);
36102 +#define print_hex_dump(lvl, s, t, r, g, b, l, a) \
36103 + _kc_print_hex_dump(lvl, s, t, r, g, b, l, a)
36104 +#else /* 2.6.22 */
36105 +#define ETH_TYPE_TRANS_SETS_DEV
36106 +#define HAVE_NETDEV_STATS_IN_NETDEV
36107 +#endif /* < 2.6.22 */
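The accessor macros map the 2.6.22 skb API onto the old skb->nh/skb->h unions, so header arithmetic stays source-compatible across the range. A hypothetical checksum-offload helper (not from the patch):

    static unsigned int example_l4_offset(const struct sk_buff *skb)
    {
            /* IHL counts 32-bit words, hence the shift */
            unsigned int ip_hlen = ip_hdr(skb)->ihl << 2;

            return skb_network_offset(skb) + ip_hlen;
    }

    static unsigned int example_tcp_payload_offset(const struct sk_buff *skb)
    {
            return skb_transport_offset(skb) + tcp_hdrlen(skb);
    }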
36109 +/*****************************************************************************/
36110 +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22) )
36111 +#undef ETHTOOL_GPERMADDR
36112 +#endif /* > 2.6.22 */
36114 +/*****************************************************************************/
36115 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) )
36116 +#define netif_subqueue_stopped(_a, _b) 0
36118 +#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
36121 +#ifndef CONFIG_PM_SLEEP
36122 +#define CONFIG_PM_SLEEP CONFIG_PM
36124 +#endif /* < 2.6.23 */
36126 +/*****************************************************************************/
36127 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
36128 +/* if GRO is supported then the napi struct must already exist */
36129 +#ifndef NETIF_F_GRO
36130 +/* NAPI API changes in 2.6.24 break everything */
36131 +struct napi_struct {
36132 + /* used to look up the real NAPI polling routine */
36133 + int (*poll)(struct napi_struct *, int);
36134 + struct net_device *dev;
36135 + int weight;
36136 +};
36138 +#endif /* NETIF_F_GRO */
36140 +extern int __kc_adapter_clean(struct net_device *, int *);
36141 +extern struct net_device *napi_to_poll_dev(struct napi_struct *napi);
36142 +#define netif_napi_add(_netdev, _napi, _poll, _weight) \
36143 + do { \
36144 + struct napi_struct *__napi = (_napi); \
36145 + struct net_device *poll_dev = napi_to_poll_dev(__napi); \
36146 + poll_dev->poll = &(__kc_adapter_clean); \
36147 + poll_dev->priv = (_napi); \
36148 + poll_dev->weight = (_weight); \
36149 + set_bit(__LINK_STATE_RX_SCHED, &poll_dev->state); \
36150 + set_bit(__LINK_STATE_START, &poll_dev->state);\
36151 + dev_hold(poll_dev); \
36152 + __napi->poll = &(_poll); \
36153 + __napi->weight = (_weight); \
36154 + __napi->dev = (_netdev); \
36155 + } while (0)
36156 +#define netif_napi_del(_napi) \
36157 + do { \
36158 + struct net_device *poll_dev = napi_to_poll_dev(_napi); \
36159 + WARN_ON(!test_bit(__LINK_STATE_RX_SCHED, &poll_dev->state)); \
36160 + dev_put(poll_dev); \
36161 + memset(poll_dev, 0, sizeof(struct net_device));\
36162 + } while (0)
36163 +#define napi_schedule_prep(_napi) \
36164 + (netif_running((_napi)->dev) && netif_rx_schedule_prep(napi_to_poll_dev(_napi)))
36165 +#define napi_schedule(_napi) \
36166 + do { \
36167 + if (napi_schedule_prep(_napi)) \
36168 + __netif_rx_schedule(napi_to_poll_dev(_napi)); \
36169 + } while (0)
36170 +#define napi_enable(_napi) netif_poll_enable(napi_to_poll_dev(_napi))
36171 +#define napi_disable(_napi) netif_poll_disable(napi_to_poll_dev(_napi))
36172 +#define __napi_schedule(_napi) __netif_rx_schedule(napi_to_poll_dev(_napi))
36173 +#ifndef NETIF_F_GRO
36174 +#define napi_complete(_napi) netif_rx_complete(napi_to_poll_dev(_napi))
36175 +#else
36176 +#define napi_complete(_napi) \
36177 + do { \
36178 + napi_gro_flush(_napi); \
36179 + netif_rx_complete(napi_to_poll_dev(_napi)); \
36180 + } while (0)
36181 +#endif /* NETIF_F_GRO */
36182 +#else
36183 +#define netif_napi_add(_netdev, _napi, _poll, _weight) \
36184 + do { \
36185 + struct napi_struct *__napi = _napi; \
36186 + _netdev->poll = &(_poll); \
36187 + _netdev->weight = (_weight); \
36188 + __napi->poll = &(_poll); \
36189 + __napi->weight = (_weight); \
36190 + __napi->dev = (_netdev); \
36191 + } while (0)
36192 +#define netif_napi_del(_a) do {} while (0)
36195 +#undef dev_get_by_name
36196 +#define dev_get_by_name(_a, _b) dev_get_by_name(_b)
36197 +#define __netif_subqueue_stopped(_a, _b) netif_subqueue_stopped(_a, _b)
36198 +#ifndef DMA_BIT_MASK
36199 +#define DMA_BIT_MASK(n) (((n) == 64) ? DMA_64BIT_MASK : ((1ULL<<(n))-1))
36202 +#ifdef NETIF_F_TSO6
36203 +#define skb_is_gso_v6 _kc_skb_is_gso_v6
36204 +static inline int _kc_skb_is_gso_v6(const struct sk_buff *skb)
36206 + return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
36208 +#endif /* NETIF_F_TSO6 */
36211 +#define KERN_CONT ""
36213 +#else /* < 2.6.24 */
36214 +#define HAVE_ETHTOOL_GET_SSET_COUNT
36215 +#define HAVE_NETDEV_NAPI_LIST
36216 +#endif /* < 2.6.24 */
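A 2.6.24-style poll routine registered through the macros above; on pre-2.6.24 kernels polling is routed through a stand-in net_device whose ->poll is __kc_adapter_clean. A hypothetical queue-vector setup (not from the patch):

    struct example_q_vector {
            struct net_device *netdev;
            struct napi_struct napi;
    };

    static int example_poll(struct napi_struct *napi, int budget)
    {
            int work_done = 0;

            /* ... clean up to 'budget' rx descriptors into work_done ... */
            if (work_done < budget)
                    napi_complete(napi);
            return work_done;
    }

    static void example_setup_napi(struct example_q_vector *q)
    {
            netif_napi_add(q->netdev, &q->napi, example_poll, 64);
            napi_enable(&q->napi);
    }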
36218 +/*****************************************************************************/
36219 +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,24) )
36220 +#include <linux/pm_qos_params.h>
36221 +#endif /* > 2.6.24 */
36223 +/*****************************************************************************/
36224 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) )
36225 +#define PM_QOS_CPU_DMA_LATENCY 1
36227 +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) )
36228 +#include <linux/latency.h>
36229 +#define PM_QOS_DEFAULT_VALUE INFINITE_LATENCY
36230 +#define pm_qos_add_requirement(pm_qos_class, name, value) \
36231 + set_acceptable_latency(name, value)
36232 +#define pm_qos_remove_requirement(pm_qos_class, name) \
36233 + remove_acceptable_latency(name)
36234 +#define pm_qos_update_requirement(pm_qos_class, name, value) \
36235 + modify_acceptable_latency(name, value)
36237 +#define PM_QOS_DEFAULT_VALUE -1
36238 +#define pm_qos_add_requirement(pm_qos_class, name, value)
36239 +#define pm_qos_remove_requirement(pm_qos_class, name)
36240 +#define pm_qos_update_requirement(pm_qos_class, name, value) { \
36241 + if (value != PM_QOS_DEFAULT_VALUE) { \
36242 + printk(KERN_WARNING "%s: unable to set PM QoS requirement\n", \
36243 + pci_name(adapter->pdev)); \
36244 + } \
36245 +}
36246 +#endif /* > 2.6.18 */
36248 +#define pci_enable_device_mem(pdev) pci_enable_device(pdev)
36250 +#ifndef DEFINE_PCI_DEVICE_TABLE
36251 +#define DEFINE_PCI_DEVICE_TABLE(_table) struct pci_device_id _table[]
36252 +#endif /* DEFINE_PCI_DEVICE_TABLE */
36254 +#endif /* < 2.6.25 */
36256 +/*****************************************************************************/
36257 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) )
36258 +#ifdef NETIF_F_TSO
36259 +#ifdef NETIF_F_TSO6
36260 +#define netif_set_gso_max_size(_netdev, size) \
36261 + do { \
36262 + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { \
36263 + _netdev->features &= ~NETIF_F_TSO; \
36264 + _netdev->features &= ~NETIF_F_TSO6; \
36265 + } else { \
36266 + _netdev->features |= NETIF_F_TSO; \
36267 + _netdev->features |= NETIF_F_TSO6; \
36268 + } \
36269 + } while (0)
36270 +#else /* NETIF_F_TSO6 */
36271 +#define netif_set_gso_max_size(_netdev, size) \
36272 + do { \
36273 + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
36274 + _netdev->features &= ~NETIF_F_TSO; \
36275 + else \
36276 + _netdev->features |= NETIF_F_TSO; \
36277 + } while (0)
36278 +#endif /* NETIF_F_TSO6 */
36280 +#define netif_set_gso_max_size(_netdev, size) do {} while (0)
36281 +#endif /* NETIF_F_TSO */
36282 +#undef kzalloc_node
36283 +#define kzalloc_node(_size, _flags, _node) kzalloc(_size, _flags)
36285 +extern void _kc_pci_disable_link_state(struct pci_dev *dev, int state);
36286 +#define pci_disable_link_state(p, s) _kc_pci_disable_link_state(p, s)
36287 +#else /* < 2.6.26 */
36288 +#include <linux/pci-aspm.h>
36289 +#define HAVE_NETDEV_VLAN_FEATURES
36290 +#endif /* < 2.6.26 */
36291 +/*****************************************************************************/
36292 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) )
36293 +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15) )
36294 +#if (((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) && defined(CONFIG_PM)) || ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)) && defined(CONFIG_PM_SLEEP)))
36295 +#undef device_set_wakeup_enable
36296 +#define device_set_wakeup_enable(dev, val) \
36299 + int pm = pci_find_capability(adapter->pdev, PCI_CAP_ID_PM); \
36301 + pci_read_config_word(adapter->pdev, pm + PCI_PM_PMC, \
36304 + (dev)->power.can_wakeup = !!(pmc >> 11); \
36305 + (dev)->power.should_wakeup = (val && (pmc >> 11)); \
36307 +#endif /* 2.6.15-2.6.22 and CONFIG_PM or 2.6.23-2.6.25 and CONFIG_PM_SLEEP */
36308 +#endif /* 2.6.15 through 2.6.27 */
36309 +#ifndef netif_napi_del
36310 +#define netif_napi_del(_a) do {} while (0)
36312 +#ifdef CONFIG_NETPOLL
36313 +#undef netif_napi_del
36314 +#define netif_napi_del(_a) list_del(&(_a)->dev_list);
36317 +#endif /* netif_napi_del */
36318 +#ifdef dma_mapping_error
36319 +#undef dma_mapping_error
36320 +#endif
36321 +#define dma_mapping_error(dev, dma_addr) pci_dma_mapping_error(dma_addr)
36323 +#ifdef CONFIG_NETDEVICES_MULTIQUEUE
36324 +#define HAVE_TX_MQ
36325 +#endif
36327 +#ifdef HAVE_TX_MQ
36328 +extern void _kc_netif_tx_stop_all_queues(struct net_device *);
36329 +extern void _kc_netif_tx_wake_all_queues(struct net_device *);
36330 +extern void _kc_netif_tx_start_all_queues(struct net_device *);
36331 +#define netif_tx_stop_all_queues(a) _kc_netif_tx_stop_all_queues(a)
36332 +#define netif_tx_wake_all_queues(a) _kc_netif_tx_wake_all_queues(a)
36333 +#define netif_tx_start_all_queues(a) _kc_netif_tx_start_all_queues(a)
36334 +#undef netif_stop_subqueue
36335 +#define netif_stop_subqueue(_ndev,_qi) do { \
36336 + if (netif_is_multiqueue((_ndev))) \
36337 + netif_stop_subqueue((_ndev), (_qi)); \
36338 + else \
36339 + netif_stop_queue((_ndev)); \
36340 +} while (0)
36341 +#undef netif_start_subqueue
36342 +#define netif_start_subqueue(_ndev,_qi) do { \
36343 + if (netif_is_multiqueue((_ndev))) \
36344 + netif_start_subqueue((_ndev), (_qi)); \
36345 + else \
36346 + netif_start_queue((_ndev)); \
36347 +} while (0)
36348 +#else /* HAVE_TX_MQ */
36349 +#define netif_tx_stop_all_queues(a) netif_stop_queue(a)
36350 +#define netif_tx_wake_all_queues(a) netif_wake_queue(a)
36351 +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) )
36352 +#define netif_tx_start_all_queues(a) netif_start_queue(a)
36353 +#else
36354 +#define netif_tx_start_all_queues(a) do {} while (0)
36355 +#endif
36356 +#define netif_stop_subqueue(_ndev,_qi) netif_stop_queue((_ndev))
36357 +#define netif_start_subqueue(_ndev,_qi) netif_start_queue((_ndev))
36358 +#endif /* HAVE_TX_MQ */
36359 +#ifndef NETIF_F_MULTI_QUEUE
36360 +#define NETIF_F_MULTI_QUEUE 0
36361 +#define netif_is_multiqueue(a) 0
36362 +#define netif_wake_subqueue(a, b)
36363 +#endif /* NETIF_F_MULTI_QUEUE */
36364 +#else /* < 2.6.27 */
36365 +#define HAVE_TX_MQ
36366 +#define HAVE_NETDEV_SELECT_QUEUE
36367 +#endif /* < 2.6.27 */
36369 +/*****************************************************************************/
36370 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) )
36371 +#define pci_ioremap_bar(pdev, bar) ioremap(pci_resource_start(pdev, bar), \
36372 + pci_resource_len(pdev, bar))
36373 +#define pci_wake_from_d3 _kc_pci_wake_from_d3
36374 +#define pci_prepare_to_sleep _kc_pci_prepare_to_sleep
36375 +extern int _kc_pci_wake_from_d3(struct pci_dev *dev, bool enable);
36376 +extern int _kc_pci_prepare_to_sleep(struct pci_dev *dev);
36377 +#define netdev_alloc_page(a) alloc_page(GFP_ATOMIC)
36378 +#endif /* < 2.6.28 */
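The fallback expands to a plain ioremap() over the BAR's start and length, so BAR 0 mapping reads identically on both sides of 2.6.28. A hypothetical probe fragment (not from the patch):

    static void __iomem *example_map_bar0(struct pci_dev *pdev)
    {
            void __iomem *hw_addr = pci_ioremap_bar(pdev, 0);

            if (!hw_addr)
                    dev_err(pci_dev_to_dev(pdev), "BAR 0 ioremap failed\n");
            return hw_addr;
    }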
36380 +/*****************************************************************************/
36381 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) )
36382 +#define pci_request_selected_regions_exclusive(pdev, bars, name) \
36383 + pci_request_selected_regions(pdev, bars, name)
36384 +#ifndef CONFIG_NR_CPUS
36385 +#define CONFIG_NR_CPUS 1
36386 +#endif /* CONFIG_NR_CPUS */
36387 +#ifndef pcie_aspm_enabled
36388 +#define pcie_aspm_enabled() (1)
36389 +#endif /* pcie_aspm_enabled */
36390 +#else /* < 2.6.29 */
36391 +#ifdef CONFIG_DCB
36392 +#define HAVE_PFC_MODE_ENABLE
36393 +#endif /* CONFIG_DCB */
36394 +#endif /* < 2.6.29 */
36396 +/*****************************************************************************/
36397 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) )
36398 +#undef CONFIG_FCOE
36399 +#undef CONFIG_FCOE_MODULE
36400 +extern u16 _kc_skb_tx_hash(struct net_device *dev, struct sk_buff *skb);
36401 +#define skb_tx_hash(n, s) _kc_skb_tx_hash(n, s)
36402 +#define skb_record_rx_queue(a, b) do {} while (0)
36403 +#ifndef CONFIG_PCI_IOV
36404 +#undef pci_enable_sriov
36405 +#define pci_enable_sriov(a, b) -ENOTSUPP
36406 +#undef pci_disable_sriov
36407 +#define pci_disable_sriov(a) do {} while (0)
36408 +#endif /* CONFIG_PCI_IOV */
36409 +#else /* < 2.6.30 */
36410 +#define HAVE_ASPM_QUIRKS
36411 +#endif /* < 2.6.30 */
36413 +/*****************************************************************************/
36414 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31) )
36415 +#define ETH_P_1588 0x88F7
36416 +#define ETH_P_FIP 0x8914
36417 +#ifndef netdev_uc_count
36418 +#define netdev_uc_count(dev) ((dev)->uc_count)
36420 +#ifndef netdev_for_each_uc_addr
36421 +#define netdev_for_each_uc_addr(uclist, dev) \
36422 + for (uclist = dev->uc_list; uclist; uclist = uclist->next)
36425 +#ifndef HAVE_NETDEV_STORAGE_ADDRESS
36426 +#define HAVE_NETDEV_STORAGE_ADDRESS
36428 +#ifndef HAVE_NETDEV_HW_ADDR
36429 +#define HAVE_NETDEV_HW_ADDR
36431 +#ifndef HAVE_TRANS_START_IN_QUEUE
36432 +#define HAVE_TRANS_START_IN_QUEUE
36434 +#endif /* < 2.6.31 */
36436 +/*****************************************************************************/
36437 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32) )
36438 +#undef netdev_tx_t
36439 +#define netdev_tx_t int
36440 +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
36441 +#ifndef NETIF_F_FCOE_MTU
36442 +#define NETIF_F_FCOE_MTU (1 << 26)
36444 +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
36446 +#ifndef pm_runtime_get_sync
36447 +#define pm_runtime_get_sync(dev) do {} while (0)
36449 +#ifndef pm_runtime_put
36450 +#define pm_runtime_put(dev) do {} while (0)
36452 +#ifndef pm_runtime_put_sync
36453 +#define pm_runtime_put_sync(dev) do {} while (0)
36455 +#ifndef pm_runtime_resume
36456 +#define pm_runtime_resume(dev) do {} while (0)
36458 +#ifndef pm_schedule_suspend
36459 +#define pm_schedule_suspend(dev, t) do {} while (0)
36461 +#ifndef pm_runtime_set_suspended
36462 +#define pm_runtime_set_suspended(dev) do {} while (0)
36464 +#ifndef pm_runtime_disable
36465 +#define pm_runtime_disable(dev) do {} while (0)
36467 +#ifndef pm_runtime_put_noidle
36468 +#define pm_runtime_put_noidle(dev) do {} while (0)
36470 +#ifndef pm_runtime_set_active
36471 +#define pm_runtime_set_active(dev) do {} while (0)
36473 +#ifndef pm_runtime_enable
36474 +#define pm_runtime_enable(dev) do {} while (0)
36476 +#ifndef pm_runtime_get_noresume
36477 +#define pm_runtime_get_noresume(dev) do {} while (0)
36479 +#else /* < 2.6.32 */
36480 +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
36481 +#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE
36482 +#define HAVE_NETDEV_OPS_FCOE_ENABLE
36484 +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
36485 +#ifdef CONFIG_DCB
36486 +#ifndef HAVE_DCBNL_OPS_GETAPP
36487 +#define HAVE_DCBNL_OPS_GETAPP
36488 +#endif
36489 +#endif /* CONFIG_DCB */
36490 +#include <linux/pm_runtime.h>
36491 +#endif /* < 2.6.32 */
36493 +/*****************************************************************************/
36494 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) )
36495 +#ifndef netdev_alloc_skb_ip_align
36496 +extern struct sk_buff *_kc_netdev_alloc_skb_ip_align(struct net_device *dev,
36497 + unsigned int length);
36498 +#define netdev_alloc_skb_ip_align(n, l) _kc_netdev_alloc_skb_ip_align(n, l)
36500 +#ifndef pci_pcie_cap
36501 +#define pci_pcie_cap(pdev) pci_find_capability(pdev, PCI_CAP_ID_EXP)
36503 +#else /* < 2.6.33 */
36504 +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
36505 +#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN
36506 +#define HAVE_NETDEV_OPS_FCOE_GETWWN
36508 +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
36509 +#define HAVE_ETHTOOL_SFP_DISPLAY_PORT
36510 +#endif /* < 2.6.33 */
36512 +/*****************************************************************************/
36513 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) )
36514 +#ifndef netdev_mc_count
36515 +#define netdev_mc_count(dev) ((dev)->mc_count)
36517 +#ifndef netdev_mc_empty
36518 +#define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0)
36520 +#ifndef netdev_for_each_mc_addr
36521 +#define netdev_for_each_mc_addr(mclist, dev) \
36522 + for (mclist = dev->mc_list; mclist; mclist = mclist->next)
36524 +#ifndef netdev_uc_count
36525 +#define netdev_uc_count(dev) ((dev)->uc.count)
36527 +#ifndef netdev_uc_empty
36528 +#define netdev_uc_empty(dev) (netdev_uc_count(dev) == 0)
36530 +#ifndef netdev_for_each_uc_addr
36531 +#define netdev_for_each_uc_addr(ha, dev) \
36532 + list_for_each_entry(ha, &dev->uc.list, list)
36534 +#ifndef dma_set_coherent_mask
36535 +#define dma_set_coherent_mask(dev,mask) \
36536 + pci_set_consistent_dma_mask(to_pci_dev(dev),(mask))
36538 +#ifndef pci_dev_run_wake
36539 +#define pci_dev_run_wake(pdev) (0)
36542 +/* netdev logging taken from include/linux/netdevice.h */
36543 +#ifndef netdev_name
36544 +static inline const char *_kc_netdev_name(const struct net_device *dev)
36546 + if (dev->reg_state != NETREG_REGISTERED)
36547 + return "(unregistered net_device)";
36548 + return dev->name;
36550 +#define netdev_name(netdev) _kc_netdev_name(netdev)
36551 +#endif /* netdev_name */
36553 +#undef netdev_printk
36554 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
36555 +#define netdev_printk(level, netdev, format, args...) \
36556 +do { \
36557 + struct adapter_struct *kc_adapter = netdev_priv(netdev);\
36558 + struct pci_dev *pdev = kc_adapter->pdev; \
36559 + printk("%s %s: " format, level, pci_name(pdev), \
36560 + ##args); \
36561 +} while (0)
36562 +#elif ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) )
36563 +#define netdev_printk(level, netdev, format, args...) \
36564 +do { \
36565 + struct adapter_struct *kc_adapter = netdev_priv(netdev);\
36566 + struct pci_dev *pdev = kc_adapter->pdev; \
36567 + struct device *dev = pci_dev_to_dev(pdev); \
36568 + dev_printk(level, dev->parent, "%s: " format, \
36569 + netdev_name(netdev), ##args); \
36570 +} while (0)
36571 +#else /* 2.6.21 => 2.6.34 */
36572 +#define netdev_printk(level, netdev, format, args...) \
36573 + dev_printk(level, (netdev)->dev.parent, \
36574 + "%s: " format, \
36575 + netdev_name(netdev), ##args)
36576 +#endif /* <2.6.0 <2.6.21 <2.6.34 */
36577 +#undef netdev_emerg
36578 +#define netdev_emerg(dev, format, args...) \
36579 + netdev_printk(KERN_EMERG, dev, format, ##args)
36580 +#undef netdev_alert
36581 +#define netdev_alert(dev, format, args...) \
36582 + netdev_printk(KERN_ALERT, dev, format, ##args)
36583 +#undef netdev_crit
36584 +#define netdev_crit(dev, format, args...) \
36585 + netdev_printk(KERN_CRIT, dev, format, ##args)
36586 +#undef netdev_err
36587 +#define netdev_err(dev, format, args...) \
36588 + netdev_printk(KERN_ERR, dev, format, ##args)
36589 +#undef netdev_warn
36590 +#define netdev_warn(dev, format, args...) \
36591 + netdev_printk(KERN_WARNING, dev, format, ##args)
36592 +#undef netdev_notice
36593 +#define netdev_notice(dev, format, args...) \
36594 + netdev_printk(KERN_NOTICE, dev, format, ##args)
36595 +#undef netdev_info
36596 +#define netdev_info(dev, format, args...) \
36597 + netdev_printk(KERN_INFO, dev, format, ##args)
36599 +#if defined(DEBUG)
36600 +#define netdev_dbg(__dev, format, args...) \
36601 + netdev_printk(KERN_DEBUG, __dev, format, ##args)
36602 +#elif defined(CONFIG_DYNAMIC_DEBUG)
36603 +#define netdev_dbg(__dev, format, args...) \
36604 +do { \
36605 + dynamic_dev_dbg((__dev)->dev.parent, "%s: " format, \
36606 + netdev_name(__dev), ##args); \
36607 +} while (0)
36608 +#else
36609 +#define netdev_dbg(__dev, format, args...) \
36610 +({ \
36611 + if (0) \
36612 + netdev_printk(KERN_DEBUG, __dev, format, ##args); \
36613 + 0; \
36614 +})
36615 +#endif /* DEBUG */
36617 +#if !defined(CONFIG_PM_OPS) && defined(CONFIG_PM_SLEEP)
36618 +#define CONFIG_PM_OPS
36620 +#ifdef SET_SYSTEM_SLEEP_PM_OPS
36621 +#define HAVE_SYSTEM_SLEEP_PM_OPS
36623 +#else /* < 2.6.34 */
36624 +#define HAVE_SYSTEM_SLEEP_PM_OPS
36625 +#ifndef HAVE_SET_RX_MODE
36626 +#define HAVE_SET_RX_MODE
36628 +#define HAVE_IPLINK_VF_CONFIG
36629 +#endif /* < 2.6.34 */
36631 +/*****************************************************************************/
36632 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) )
36633 +#else /* < 2.6.35 */
36634 +#define HAVE_PM_QOS_REQUEST_LIST
36635 +#endif /* < 2.6.35 */
36637 +/*****************************************************************************/
36638 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) )
36639 +extern int _kc_ethtool_op_set_flags(struct net_device *, u32, u32);
36640 +#define ethtool_op_set_flags _kc_ethtool_op_set_flags
36641 +#else /* < 2.6.36 */
36642 +#define HAVE_PM_QOS_REQUEST_ACTIVE
36643 +#endif /* < 2.6.36 */
36644 +#endif /* _KCOMPAT_H_ */
36645 diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/Makefile linux-2.6.22-50/drivers/net/ixgbe/Makefile
36646 --- linux-2.6.22-40/drivers/net/ixgbe/Makefile 1969-12-31 19:00:00.000000000 -0500
36647 +++ linux-2.6.22-50/drivers/net/ixgbe/Makefile 2010-09-18 07:50:14.000000000 -0400
36649 +obj-$(CONFIG_IXGBE) += ixgbe.o
36651 +ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_api.o ixgbe_param.o \
36652 + ixgbe_ethtool.o kcompat.o ixgbe_82598.o ixgbe_82599.o \
36653 + ixgbe_sriov.o ixgbe_mbx.o ixgbe_dcb.o ixgbe_dcb_82598.o \
36654 + ixgbe_dcb_82599.o ixgbe_phy.o
36655 +ifeq ($(CONFIG_FCOE),y)
36656 +ixgbe-objs += ixgbe_fcoe.o
36657 +endif
36658 +ifeq ($(CONFIG_FCOE),m)
36659 +ixgbe-objs += ixgbe_fcoe.o
36660 +endif
36661 diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/Module.supported linux-2.6.22-50/drivers/net/ixgbe/Module.supported
36662 --- linux-2.6.22-40/drivers/net/ixgbe/Module.supported 1969-12-31 19:00:00.000000000 -0500
36663 +++ linux-2.6.22-50/drivers/net/ixgbe/Module.supported 2010-08-25 17:56:31.000000000 -0400
36666 diff -Nurp linux-2.6.22-40/drivers/net/Kconfig linux-2.6.22-50/drivers/net/Kconfig
36667 --- linux-2.6.22-40/drivers/net/Kconfig 2010-09-18 07:37:31.000000000 -0400
36668 +++ linux-2.6.22-50/drivers/net/Kconfig 2010-09-18 07:52:50.000000000 -0400
36669 @@ -2490,6 +2490,25 @@ config IXGB_NAPI
36671 If in doubt, say N.
36673 +config IXGBE
36674 + tristate "Intel(R) PRO/10GbE support"
36676 + ---help---
36677 + This driver supports the Intel(R) PRO/10GbE family of
36678 + adapters. For more information on how to identify your adapter, go
36679 + to the Adapter & Driver ID Guide at:
36681 + <http://support.intel.com/support/network/adapter/pro100/21397.htm>
36683 + For general information and support, go to the Intel support
36686 + <http://support.intel.com>
36688 + To compile this driver as a module, choose M here and read
36689 + <file:Documentation/networking/net-modules.txt>. The module
36690 + will be called ixgbe.
36693 tristate "S2IO 10Gbe XFrame NIC"
36695 diff -Nurp linux-2.6.22-40/drivers/net/Makefile linux-2.6.22-50/drivers/net/Makefile
36696 --- linux-2.6.22-40/drivers/net/Makefile 2010-09-18 07:37:31.000000000 -0400
36697 +++ linux-2.6.22-50/drivers/net/Makefile 2010-09-18 07:39:51.000000000 -0400
36698 @@ -6,6 +6,7 @@ obj-$(CONFIG_E1000) += e1000/
36699 obj-$(CONFIG_E1000E) += e1000e/
36700 obj-$(CONFIG_IBM_EMAC) += ibm_emac/
36701 obj-$(CONFIG_IXGB) += ixgb/
36702 +obj-$(CONFIG_IXGBE) += ixgbe/
36703 obj-$(CONFIG_CHELSIO_T1) += chelsio/
36704 obj-$(CONFIG_CHELSIO_T3) += cxgb3/
36705 obj-$(CONFIG_EHEA) += ehea/