From 237d20e12c17c4265ce573b966726aa24dcec35e Mon Sep 17 00:00:00 2001 From: Daniel Hokka Zakrisson Date: Sat, 18 Sep 2010 11:42:19 -0400 Subject: [PATCH] Add ixgbe driver. --- coblitz-2.6.22-i686.config | 1 + coblitz-2.6.22-x86_64.config | 1 + kernel-2.6.spec | 2 + linux-2.6-050-ixgbe.patch | 36705 +++++++++++++++++++++++++++++++++ 4 files changed, 36709 insertions(+) create mode 100644 linux-2.6-050-ixgbe.patch diff --git a/coblitz-2.6.22-i686.config b/coblitz-2.6.22-i686.config index ab7067c64..2ff2dba83 100644 --- a/coblitz-2.6.22-i686.config +++ b/coblitz-2.6.22-i686.config @@ -1169,6 +1169,7 @@ CONFIG_CHELSIO_T1_NAPI=y CONFIG_CHELSIO_T3=m CONFIG_IXGB=m CONFIG_IXGB_NAPI=y +CONFIG_IXGBE=m CONFIG_S2IO=m CONFIG_S2IO_NAPI=y CONFIG_MYRI10GE=m diff --git a/coblitz-2.6.22-x86_64.config b/coblitz-2.6.22-x86_64.config index 4c296d27f..8a75457c7 100644 --- a/coblitz-2.6.22-x86_64.config +++ b/coblitz-2.6.22-x86_64.config @@ -1043,6 +1043,7 @@ CONFIG_CHELSIO_T1_NAPI=y CONFIG_CHELSIO_T3=m CONFIG_IXGB=m CONFIG_IXGB_NAPI=y +CONFIG_IXGBE=m CONFIG_S2IO=m CONFIG_S2IO_NAPI=y CONFIG_MYRI10GE=m diff --git a/kernel-2.6.spec b/kernel-2.6.spec index fac30a67b..ae35ccc18 100644 --- a/kernel-2.6.spec +++ b/kernel-2.6.spec @@ -150,6 +150,7 @@ Patch015: linux-2.6-015-igb.patch Patch020: linux-2.6-020-build-id.patch Patch030: linux-2.6-030-netns.patch Patch040: linux-2.6-040-i_mutex-check.patch +Patch050: linux-2.6-050-ixgbe.patch # These are patches picked up from Fedora/RHEL Patch100: linux-2.6-100-build-nonintconfig.patch @@ -376,6 +377,7 @@ KERNEL_PREVIOUS=vanilla %ApplyPatch 30 %endif %ApplyPatch 40 +%ApplyPatch 50 %ApplyPatch 100 diff --git a/linux-2.6-050-ixgbe.patch b/linux-2.6-050-ixgbe.patch new file mode 100644 index 000000000..4c079f290 --- /dev/null +++ b/linux-2.6-050-ixgbe.patch @@ -0,0 +1,36705 @@ +diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_82598.c linux-2.6.22-50/drivers/net/ixgbe/ixgbe_82598.c +--- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_82598.c 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_82598.c 2010-08-25 17:56:26.000000000 -0400 +@@ -0,0 +1,1316 @@ ++/******************************************************************************* ++ ++ Intel 10 Gigabit PCI Express Linux driver ++ Copyright(c) 1999 - 2010 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ You should have received a copy of the GNU General Public License along with ++ this program; if not, write to the Free Software Foundation, Inc., ++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#include "ixgbe_type.h" ++#include "ixgbe_api.h" ++#include "ixgbe_common.h" ++#include "ixgbe_phy.h" ++ ++u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw); ++s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw); ++static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw, ++ ixgbe_link_speed *speed, ++ bool *autoneg); ++static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw); ++s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num); ++static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw, ++ bool autoneg_wait_to_complete); ++static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, ++ ixgbe_link_speed *speed, bool *link_up, ++ bool link_up_wait_to_complete); ++static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw, ++ ixgbe_link_speed speed, ++ bool autoneg, ++ bool autoneg_wait_to_complete); ++static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, ++ ixgbe_link_speed speed, ++ bool autoneg, ++ bool autoneg_wait_to_complete); ++static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw); ++s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw); ++void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw); ++s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq); ++static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq); ++s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, ++ u32 vind, bool vlan_on); ++static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw); ++s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val); ++s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val); ++s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, ++ u8 *eeprom_data); ++u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw); ++s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw); ++void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw); ++void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw); ++static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw); ++ ++/** ++ * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout ++ * @hw: pointer to the HW structure ++ * ++ * The defaults for 82598 should be in the range of 50us to 50ms, ++ * however the hardware default for these parts is 500us to 1ms which is less ++ * than the 10ms recommended by the pci-e spec. To address this we need to ++ * increase the value to either 10ms to 250ms for capability version 1 config, ++ * or 16ms to 55ms for version 2. 
++ **/ ++void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw) ++{ ++ u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR); ++ u16 pcie_devctl2; ++ ++ /* only take action if timeout value is defaulted to 0 */ ++ if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK) ++ goto out; ++ ++ /* ++ * if capababilities version is type 1 we can write the ++ * timeout of 10ms to 250ms through the GCR register ++ */ ++ if (!(gcr & IXGBE_GCR_CAP_VER2)) { ++ gcr |= IXGBE_GCR_CMPL_TMOUT_10ms; ++ goto out; ++ } ++ ++ /* ++ * for version 2 capabilities we need to write the config space ++ * directly in order to set the completion timeout value for ++ * 16ms to 55ms ++ */ ++ pcie_devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2); ++ pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms; ++ IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2); ++out: ++ /* disable completion timeout resend */ ++ gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND; ++ IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr); ++} ++ ++/** ++ * ixgbe_get_pcie_msix_count_82598 - Gets MSI-X vector count ++ * @hw: pointer to hardware structure ++ * ++ * Read PCIe configuration space, and get the MSI-X vector count from ++ * the capabilities table. ++ **/ ++u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw) ++{ ++ u32 msix_count = 18; ++ ++ if (hw->mac.msix_vectors_from_pcie) { ++ msix_count = IXGBE_READ_PCIE_WORD(hw, ++ IXGBE_PCIE_MSIX_82598_CAPS); ++ msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK; ++ ++ /* MSI-X count is zero-based in HW, so increment to give ++ * proper value */ ++ msix_count++; ++ } ++ return msix_count; ++} ++ ++/** ++ * ixgbe_init_ops_82598 - Inits func ptrs and MAC type ++ * @hw: pointer to hardware structure ++ * ++ * Initialize the function pointers and assign the MAC type for 82598. ++ * Does not touch the hardware. 
++ **/ ++s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw) ++{ ++ struct ixgbe_mac_info *mac = &hw->mac; ++ struct ixgbe_phy_info *phy = &hw->phy; ++ s32 ret_val; ++ ++ ret_val = ixgbe_init_phy_ops_generic(hw); ++ ret_val = ixgbe_init_ops_generic(hw); ++ ++ /* PHY */ ++ phy->ops.init = &ixgbe_init_phy_ops_82598; ++ ++ /* MAC */ ++ mac->ops.start_hw = &ixgbe_start_hw_82598; ++ mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_82598; ++ mac->ops.reset_hw = &ixgbe_reset_hw_82598; ++ mac->ops.get_media_type = &ixgbe_get_media_type_82598; ++ mac->ops.get_supported_physical_layer = ++ &ixgbe_get_supported_physical_layer_82598; ++ mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82598; ++ mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82598; ++ mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598; ++ ++ /* RAR, Multicast, VLAN */ ++ mac->ops.set_vmdq = &ixgbe_set_vmdq_82598; ++ mac->ops.clear_vmdq = &ixgbe_clear_vmdq_82598; ++ mac->ops.set_vfta = &ixgbe_set_vfta_82598; ++ mac->ops.clear_vfta = &ixgbe_clear_vfta_82598; ++ ++ /* Flow Control */ ++ mac->ops.fc_enable = &ixgbe_fc_enable_82598; ++ ++ mac->mcft_size = 128; ++ mac->vft_size = 128; ++ mac->num_rar_entries = 16; ++ mac->rx_pb_size = 512; ++ mac->max_tx_queues = 32; ++ mac->max_rx_queues = 64; ++ mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82598(hw); ++ ++ /* SFP+ Module */ ++ phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598; ++ ++ /* Link */ ++ mac->ops.check_link = &ixgbe_check_mac_link_82598; ++ mac->ops.setup_link = &ixgbe_setup_mac_link_82598; ++ mac->ops.flap_tx_laser = NULL; ++ mac->ops.get_link_capabilities = ++ &ixgbe_get_link_capabilities_82598; ++ ++ return ret_val; ++} ++ ++/** ++ * ixgbe_init_phy_ops_82598 - PHY/SFP specific init ++ * @hw: pointer to hardware structure ++ * ++ * Initialize any function pointers that were not able to be ++ * set during init_shared_code because the PHY/SFP type was ++ * not known. Perform the SFP init if necessary. 
++ * ++ **/ ++s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw) ++{ ++ struct ixgbe_mac_info *mac = &hw->mac; ++ struct ixgbe_phy_info *phy = &hw->phy; ++ s32 ret_val = 0; ++ u16 list_offset, data_offset; ++ ++ /* Identify the PHY */ ++ phy->ops.identify(hw); ++ ++ /* Overwrite the link function pointers if copper PHY */ ++ if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { ++ mac->ops.setup_link = &ixgbe_setup_copper_link_82598; ++ mac->ops.get_link_capabilities = ++ &ixgbe_get_copper_link_capabilities_generic; ++ } ++ ++ switch (hw->phy.type) { ++ case ixgbe_phy_tn: ++ phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; ++ phy->ops.check_link = &ixgbe_check_phy_link_tnx; ++ phy->ops.get_firmware_version = ++ &ixgbe_get_phy_firmware_version_tnx; ++ break; ++ case ixgbe_phy_aq: ++ phy->ops.get_firmware_version = ++ &ixgbe_get_phy_firmware_version_generic; ++ break; ++ case ixgbe_phy_nl: ++ phy->ops.reset = &ixgbe_reset_phy_nl; ++ ++ /* Call SFP+ identify routine to get the SFP+ module type */ ++ ret_val = phy->ops.identify_sfp(hw); ++ if (ret_val != 0) ++ goto out; ++ else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) { ++ ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED; ++ goto out; ++ } ++ ++ /* Check to see if SFP+ module is supported */ ++ ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, ++ &list_offset, ++ &data_offset); ++ if (ret_val != 0) { ++ ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED; ++ goto out; ++ } ++ break; ++ default: ++ break; ++ } ++ ++out: ++ return ret_val; ++} ++ ++/** ++ * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx ++ * @hw: pointer to hardware structure ++ * ++ * Starts the hardware using the generic start_hw function. ++ * Disables relaxed ordering Then set pcie completion timeout ++ * ++ **/ ++s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw) ++{ ++ u32 regval; ++ u32 i; ++ s32 ret_val = 0; ++ ++ ret_val = ixgbe_start_hw_generic(hw); ++ ++ /* Disable relaxed ordering */ ++ for (i = 0; ((i < hw->mac.max_tx_queues) && ++ (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { ++ regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i)); ++ regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; ++ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval); ++ } ++ ++ for (i = 0; ((i < hw->mac.max_rx_queues) && ++ (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { ++ regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); ++ regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN | ++ IXGBE_DCA_RXCTRL_DESC_HSRO_EN); ++ IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); ++ } ++ ++ /* set the completion timeout for interface */ ++ if (ret_val == 0) ++ ixgbe_set_pcie_completion_timeout(hw); ++ ++ return ret_val; ++} ++ ++/** ++ * ixgbe_get_link_capabilities_82598 - Determines link capabilities ++ * @hw: pointer to hardware structure ++ * @speed: pointer to link speed ++ * @autoneg: boolean auto-negotiation value ++ * ++ * Determines the link capabilities by reading the AUTOC register. ++ **/ ++static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw, ++ ixgbe_link_speed *speed, ++ bool *autoneg) ++{ ++ s32 status = 0; ++ u32 autoc = 0; ++ ++ /* ++ * Determine link capabilities based on the stored value of AUTOC, ++ * which represents EEPROM defaults. If AUTOC value has not been ++ * stored, use the current register value. 
++ */ ++ if (hw->mac.orig_link_settings_stored) ++ autoc = hw->mac.orig_autoc; ++ else ++ autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); ++ ++ switch (autoc & IXGBE_AUTOC_LMS_MASK) { ++ case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: ++ *speed = IXGBE_LINK_SPEED_1GB_FULL; ++ *autoneg = false; ++ break; ++ ++ case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: ++ *speed = IXGBE_LINK_SPEED_10GB_FULL; ++ *autoneg = false; ++ break; ++ ++ case IXGBE_AUTOC_LMS_1G_AN: ++ *speed = IXGBE_LINK_SPEED_1GB_FULL; ++ *autoneg = true; ++ break; ++ ++ case IXGBE_AUTOC_LMS_KX4_AN: ++ case IXGBE_AUTOC_LMS_KX4_AN_1G_AN: ++ *speed = IXGBE_LINK_SPEED_UNKNOWN; ++ if (autoc & IXGBE_AUTOC_KX4_SUPP) ++ *speed |= IXGBE_LINK_SPEED_10GB_FULL; ++ if (autoc & IXGBE_AUTOC_KX_SUPP) ++ *speed |= IXGBE_LINK_SPEED_1GB_FULL; ++ *autoneg = true; ++ break; ++ ++ default: ++ status = IXGBE_ERR_LINK_SETUP; ++ break; ++ } ++ ++ return status; ++} ++ ++/** ++ * ixgbe_get_media_type_82598 - Determines media type ++ * @hw: pointer to hardware structure ++ * ++ * Returns the media type (fiber, copper, backplane) ++ **/ ++static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw) ++{ ++ enum ixgbe_media_type media_type; ++ ++ /* Detect if there is a copper PHY attached. */ ++ if (hw->phy.type == ixgbe_phy_cu_unknown || ++ hw->phy.type == ixgbe_phy_tn || ++ hw->phy.type == ixgbe_phy_aq) { ++ media_type = ixgbe_media_type_copper; ++ goto out; ++ } ++ ++ /* Media type for I82598 is based on device ID */ ++ switch (hw->device_id) { ++ case IXGBE_DEV_ID_82598: ++ case IXGBE_DEV_ID_82598_BX: ++ /* Default device ID is mezzanine card KX/KX4 */ ++ media_type = ixgbe_media_type_backplane; ++ break; ++ case IXGBE_DEV_ID_82598AF_DUAL_PORT: ++ case IXGBE_DEV_ID_82598AF_SINGLE_PORT: ++ case IXGBE_DEV_ID_82598_DA_DUAL_PORT: ++ case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM: ++ case IXGBE_DEV_ID_82598EB_XF_LR: ++ case IXGBE_DEV_ID_82598EB_SFP_LOM: ++ media_type = ixgbe_media_type_fiber; ++ break; ++ case IXGBE_DEV_ID_82598EB_CX4: ++ case IXGBE_DEV_ID_82598_CX4_DUAL_PORT: ++ media_type = ixgbe_media_type_cx4; ++ break; ++ case IXGBE_DEV_ID_82598AT: ++ case IXGBE_DEV_ID_82598AT2: ++ media_type = ixgbe_media_type_copper; ++ break; ++ default: ++ media_type = ixgbe_media_type_unknown; ++ break; ++ } ++out: ++ return media_type; ++} ++ ++/** ++ * ixgbe_fc_enable_82598 - Enable flow control ++ * @hw: pointer to hardware structure ++ * @packetbuf_num: packet buffer number (0-7) ++ * ++ * Enable flow control according to the current settings. ++ **/ ++s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num) ++{ ++ s32 ret_val = 0; ++ u32 fctrl_reg; ++ u32 rmcs_reg; ++ u32 reg; ++ u32 link_speed = 0; ++ bool link_up; ++ ++#ifdef CONFIG_DCB ++ if (hw->fc.requested_mode == ixgbe_fc_pfc) ++ goto out; ++ ++#endif /* CONFIG_DCB */ ++ /* ++ * On 82598 having Rx FC on causes resets while doing 1G ++ * so if it's on turn it off once we know link_speed. For ++ * more details see 82598 Specification update. 
++ */ ++ hw->mac.ops.check_link(hw, &link_speed, &link_up, false); ++ if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) { ++ switch (hw->fc.requested_mode) { ++ case ixgbe_fc_full: ++ hw->fc.requested_mode = ixgbe_fc_tx_pause; ++ break; ++ case ixgbe_fc_rx_pause: ++ hw->fc.requested_mode = ixgbe_fc_none; ++ break; ++ default: ++ /* no change */ ++ break; ++ } ++ } ++ ++ /* Negotiate the fc mode to use */ ++ ret_val = ixgbe_fc_autoneg(hw); ++ if (ret_val == IXGBE_ERR_FLOW_CONTROL) ++ goto out; ++ ++ /* Disable any previous flow control settings */ ++ fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); ++ fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE); ++ ++ rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS); ++ rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X); ++ ++ /* ++ * The possible values of fc.current_mode are: ++ * 0: Flow control is completely disabled ++ * 1: Rx flow control is enabled (we can receive pause frames, ++ * but not send pause frames). ++ * 2: Tx flow control is enabled (we can send pause frames but ++ * we do not support receiving pause frames). ++ * 3: Both Rx and Tx flow control (symmetric) are enabled. ++#ifdef CONFIG_DCB ++ * 4: Priority Flow Control is enabled. ++#endif ++ * other: Invalid. ++ */ ++ switch (hw->fc.current_mode) { ++ case ixgbe_fc_none: ++ /* Flow control is disabled by software override or autoneg. ++ * The code below will actually disable it in the HW. ++ */ ++ break; ++ case ixgbe_fc_rx_pause: ++ /* ++ * Rx Flow control is enabled and Tx Flow control is ++ * disabled by software override. Since there really ++ * isn't a way to advertise that we are capable of RX ++ * Pause ONLY, we will advertise that we support both ++ * symmetric and asymmetric Rx PAUSE. Later, we will ++ * disable the adapter's ability to send PAUSE frames. ++ */ ++ fctrl_reg |= IXGBE_FCTRL_RFCE; ++ break; ++ case ixgbe_fc_tx_pause: ++ /* ++ * Tx Flow control is enabled, and Rx Flow control is ++ * disabled by software override. ++ */ ++ rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; ++ break; ++ case ixgbe_fc_full: ++ /* Flow control (both Rx and Tx) is enabled by SW override. */ ++ fctrl_reg |= IXGBE_FCTRL_RFCE; ++ rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; ++ break; ++#ifdef CONFIG_DCB ++ case ixgbe_fc_pfc: ++ goto out; ++ break; ++#endif /* CONFIG_DCB */ ++ default: ++ hw_dbg(hw, "Flow control param set incorrectly\n"); ++ ret_val = IXGBE_ERR_CONFIG; ++ goto out; ++ break; ++ } ++ ++ /* Set 802.3x based flow control settings. */ ++ fctrl_reg |= IXGBE_FCTRL_DPF; ++ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg); ++ IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg); ++ ++ /* Set up and enable Rx high/low water mark thresholds, enable XON. 
*/ ++ if (hw->fc.current_mode & ixgbe_fc_tx_pause) { ++ if (hw->fc.send_xon) { ++ IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), ++ (hw->fc.low_water | IXGBE_FCRTL_XONE)); ++ } else { ++ IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), ++ hw->fc.low_water); ++ } ++ ++ IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), ++ (hw->fc.high_water | IXGBE_FCRTH_FCEN)); ++ } ++ ++ /* Configure pause time (2 TCs per register) */ ++ reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2)); ++ if ((packetbuf_num & 1) == 0) ++ reg = (reg & 0xFFFF0000) | hw->fc.pause_time; ++ else ++ reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16); ++ IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg); ++ ++ IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1)); ++ ++out: ++ return ret_val; ++} ++ ++/** ++ * ixgbe_start_mac_link_82598 - Configures MAC link settings ++ * @hw: pointer to hardware structure ++ * ++ * Configures link settings based on values in the ixgbe_hw struct. ++ * Restarts the link. Performs autonegotiation if needed. ++ **/ ++static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw, ++ bool autoneg_wait_to_complete) ++{ ++ u32 autoc_reg; ++ u32 links_reg; ++ u32 i; ++ s32 status = 0; ++ ++ /* Restart link */ ++ autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); ++ autoc_reg |= IXGBE_AUTOC_AN_RESTART; ++ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); ++ ++ /* Only poll for autoneg to complete if specified to do so */ ++ if (autoneg_wait_to_complete) { ++ if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) == ++ IXGBE_AUTOC_LMS_KX4_AN || ++ (autoc_reg & IXGBE_AUTOC_LMS_MASK) == ++ IXGBE_AUTOC_LMS_KX4_AN_1G_AN) { ++ links_reg = 0; /* Just in case Autoneg time = 0 */ ++ for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { ++ links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); ++ if (links_reg & IXGBE_LINKS_KX_AN_COMP) ++ break; ++ msleep(100); ++ } ++ if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { ++ status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; ++ hw_dbg(hw, "Autonegotiation did not complete.\n"); ++ } ++ } ++ } ++ ++ /* Add delay to filter out noises during initial link setup */ ++ msleep(50); ++ ++ return status; ++} ++ ++/** ++ * ixgbe_check_mac_link_82598 - Get link/speed status ++ * @hw: pointer to hardware structure ++ * @speed: pointer to link speed ++ * @link_up: true is link is up, false otherwise ++ * @link_up_wait_to_complete: bool used to wait for link up or not ++ * ++ * Reads the links register to determine if link is up and the current speed ++ **/ ++static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, ++ ixgbe_link_speed *speed, bool *link_up, ++ bool link_up_wait_to_complete) ++{ ++ u32 links_reg; ++ u32 i; ++ u16 link_reg, adapt_comp_reg; ++ ++ /* ++ * SERDES PHY requires us to read link status from undocumented ++ * register 0xC79F. Bit 0 set indicates link is up/ready; clear ++ * indicates link down. OxC00C is read to check that the XAUI lanes ++ * are active. Bit 0 clear indicates active; set indicates inactive. 
++ */ ++ if (hw->phy.type == ixgbe_phy_nl) { ++ hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg); ++ hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg); ++ hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV, ++ &adapt_comp_reg); ++ if (link_up_wait_to_complete) { ++ for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { ++ if ((link_reg & 1) && ++ ((adapt_comp_reg & 1) == 0)) { ++ *link_up = true; ++ break; ++ } else { ++ *link_up = false; ++ } ++ msleep(100); ++ hw->phy.ops.read_reg(hw, 0xC79F, ++ IXGBE_TWINAX_DEV, ++ &link_reg); ++ hw->phy.ops.read_reg(hw, 0xC00C, ++ IXGBE_TWINAX_DEV, ++ &adapt_comp_reg); ++ } ++ } else { ++ if ((link_reg & 1) && ++ ((adapt_comp_reg & 1) == 0)) ++ *link_up = true; ++ else ++ *link_up = false; ++ } ++ ++ if (*link_up == false) ++ goto out; ++ } ++ ++ links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); ++ if (link_up_wait_to_complete) { ++ for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { ++ if (links_reg & IXGBE_LINKS_UP) { ++ *link_up = true; ++ break; ++ } else { ++ *link_up = false; ++ } ++ msleep(100); ++ links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); ++ } ++ } else { ++ if (links_reg & IXGBE_LINKS_UP) ++ *link_up = true; ++ else ++ *link_up = false; ++ } ++ ++ if (links_reg & IXGBE_LINKS_SPEED) ++ *speed = IXGBE_LINK_SPEED_10GB_FULL; ++ else ++ *speed = IXGBE_LINK_SPEED_1GB_FULL; ++ ++ if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == true) && ++ (ixgbe_validate_link_ready(hw) != 0)) ++ *link_up = false; ++ ++ /* if link is down, zero out the current_mode */ ++ if (*link_up == false) { ++ hw->fc.current_mode = ixgbe_fc_none; ++ hw->fc.fc_was_autonegged = false; ++ } ++ ++out: ++ return 0; ++} ++ ++/** ++ * ixgbe_setup_mac_link_82598 - Set MAC link speed ++ * @hw: pointer to hardware structure ++ * @speed: new link speed ++ * @autoneg: true if autonegotiation enabled ++ * @autoneg_wait_to_complete: true when waiting for completion is needed ++ * ++ * Set the link speed in the AUTOC register and restarts link. ++ **/ ++static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw, ++ ixgbe_link_speed speed, bool autoneg, ++ bool autoneg_wait_to_complete) ++{ ++ s32 status = 0; ++ ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN; ++ u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); ++ u32 autoc = curr_autoc; ++ u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK; ++ ++ /* Check to see if speed passed in is supported. 
*/ ++ ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg); ++ speed &= link_capabilities; ++ ++ if (speed == IXGBE_LINK_SPEED_UNKNOWN) ++ status = IXGBE_ERR_LINK_SETUP; ++ ++ /* Set KX4/KX support according to speed requested */ ++ else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN || ++ link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) { ++ autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK; ++ if (speed & IXGBE_LINK_SPEED_10GB_FULL) ++ autoc |= IXGBE_AUTOC_KX4_SUPP; ++ if (speed & IXGBE_LINK_SPEED_1GB_FULL) ++ autoc |= IXGBE_AUTOC_KX_SUPP; ++ if (autoc != curr_autoc) ++ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); ++ } ++ ++ if (status == 0) { ++ /* ++ * Setup and restart the link based on the new values in ++ * ixgbe_hw This will write the AUTOC register based on the new ++ * stored values ++ */ ++ status = ixgbe_start_mac_link_82598(hw, ++ autoneg_wait_to_complete); ++ } ++ ++ return status; ++} ++ ++ ++/** ++ * ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field ++ * @hw: pointer to hardware structure ++ * @speed: new link speed ++ * @autoneg: true if autonegotiation enabled ++ * @autoneg_wait_to_complete: true if waiting is needed to complete ++ * ++ * Sets the link speed in the AUTOC register in the MAC and restarts link. ++ **/ ++static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, ++ ixgbe_link_speed speed, ++ bool autoneg, ++ bool autoneg_wait_to_complete) ++{ ++ s32 status; ++ ++ /* Setup the PHY according to input speed */ ++ status = hw->phy.ops.setup_link_speed(hw, speed, autoneg, ++ autoneg_wait_to_complete); ++ /* Set up MAC */ ++ ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete); ++ ++ return status; ++} ++ ++/** ++ * ixgbe_reset_hw_82598 - Performs hardware reset ++ * @hw: pointer to hardware structure ++ * ++ * Resets the hardware by resetting the transmit and receive units, masks and ++ * clears all interrupts, performing a PHY reset, and performing a link (MAC) ++ * reset. ++ **/ ++static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw) ++{ ++ s32 status = 0; ++ s32 phy_status = 0; ++ u32 ctrl; ++ u32 gheccr; ++ u32 i; ++ u32 autoc; ++ u8 analog_val; ++ ++ /* Call adapter stop to disable tx/rx and clear interrupts */ ++ hw->mac.ops.stop_adapter(hw); ++ ++ /* ++ * Power up the Atlas Tx lanes if they are currently powered down. ++ * Atlas Tx lanes are powered down for MAC loopback tests, but ++ * they are not automatically restored on reset. 
++ */ ++ hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val); ++ if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) { ++ /* Enable Tx Atlas so packets can be transmitted again */ ++ hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, ++ &analog_val); ++ analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN; ++ hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, ++ analog_val); ++ ++ hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, ++ &analog_val); ++ analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL; ++ hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, ++ analog_val); ++ ++ hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, ++ &analog_val); ++ analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL; ++ hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, ++ analog_val); ++ ++ hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, ++ &analog_val); ++ analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL; ++ hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, ++ analog_val); ++ } ++ ++ /* Reset PHY */ ++ if (hw->phy.reset_disable == false) { ++ /* PHY ops must be identified and initialized prior to reset */ ++ ++ /* Init PHY and function pointers, perform SFP setup */ ++ phy_status = hw->phy.ops.init(hw); ++ if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED) ++ goto reset_hw_out; ++ else if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT) ++ goto no_phy_reset; ++ ++ hw->phy.ops.reset(hw); ++ } ++ ++no_phy_reset: ++ /* ++ * Prevent the PCI-E bus from from hanging by disabling PCI-E master ++ * access and verify no pending requests before reset ++ */ ++ ixgbe_disable_pcie_master(hw); ++ ++mac_reset_top: ++ /* ++ * Issue global reset to the MAC. This needs to be a SW reset. ++ * If link reset is used, it might reset the MAC when mng is using it ++ */ ++ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); ++ IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST)); ++ IXGBE_WRITE_FLUSH(hw); ++ ++ /* Poll for reset bit to self-clear indicating reset is complete */ ++ for (i = 0; i < 10; i++) { ++ udelay(1); ++ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); ++ if (!(ctrl & IXGBE_CTRL_RST)) ++ break; ++ } ++ if (ctrl & IXGBE_CTRL_RST) { ++ status = IXGBE_ERR_RESET_FAILED; ++ hw_dbg(hw, "Reset polling failed to complete.\n"); ++ } ++ ++ /* ++ * Double resets are required for recovery from certain error ++ * conditions. Between resets, it is necessary to stall to allow time ++ * for any pending HW events to complete. We use 1usec since that is ++ * what is needed for ixgbe_disable_pcie_master(). The second reset ++ * then clears out any effects of those events. ++ */ ++ if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { ++ hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; ++ udelay(1); ++ goto mac_reset_top; ++ } ++ ++ msleep(50); ++ ++ gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR); ++ gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6)); ++ IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr); ++ ++ /* ++ * Store the original AUTOC value if it has not been ++ * stored off yet. Otherwise restore the stored original ++ * AUTOC value since the reset operation sets back to deaults. 
++ */ ++ autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); ++ if (hw->mac.orig_link_settings_stored == false) { ++ hw->mac.orig_autoc = autoc; ++ hw->mac.orig_link_settings_stored = true; ++ } else if (autoc != hw->mac.orig_autoc) ++ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc); ++ ++ /* Store the permanent mac address */ ++ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); ++ ++ /* ++ * Store MAC address from RAR0, clear receive address registers, and ++ * clear the multicast table ++ */ ++ hw->mac.ops.init_rx_addrs(hw); ++ ++ ++ ++reset_hw_out: ++ if (phy_status != 0) ++ status = phy_status; ++ return status; ++} ++ ++/** ++ * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address ++ * @hw: pointer to hardware struct ++ * @rar: receive address register index to associate with a VMDq index ++ * @vmdq: VMDq set index ++ **/ ++s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) ++{ ++ u32 rar_high; ++ ++ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); ++ rar_high &= ~IXGBE_RAH_VIND_MASK; ++ rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK); ++ IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high); ++ return 0; ++} ++ ++/** ++ * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address ++ * @hw: pointer to hardware struct ++ * @rar: receive address register index to associate with a VMDq index ++ * @vmdq: VMDq clear index (not used in 82598, but elsewhere) ++ **/ ++static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) ++{ ++ u32 rar_high; ++ u32 rar_entries = hw->mac.num_rar_entries; ++ ++ ++ if (rar < rar_entries) { ++ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); ++ if (rar_high & IXGBE_RAH_VIND_MASK) { ++ rar_high &= ~IXGBE_RAH_VIND_MASK; ++ IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high); ++ } ++ } else { ++ hw_dbg(hw, "RAR index %d is out of range.\n", rar); ++ } ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_set_vfta_82598 - Set VLAN filter table ++ * @hw: pointer to hardware structure ++ * @vlan: VLAN id to write to VLAN filter ++ * @vind: VMDq output index that maps queue to VLAN id in VFTA ++ * @vlan_on: boolean flag to turn on/off VLAN in VFTA ++ * ++ * Turn on/off specified VLAN in the VLAN filter table. 
++ **/ ++s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, ++ bool vlan_on) ++{ ++ u32 regindex; ++ u32 bitindex; ++ u32 bits; ++ u32 vftabyte; ++ ++ if (vlan > 4095) ++ return IXGBE_ERR_PARAM; ++ ++ /* Determine 32-bit word position in array */ ++ regindex = (vlan >> 5) & 0x7F; /* upper seven bits */ ++ ++ /* Determine the location of the (VMD) queue index */ ++ vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */ ++ bitindex = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */ ++ ++ /* Set the nibble for VMD queue index */ ++ bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex)); ++ bits &= (~(0x0F << bitindex)); ++ bits |= (vind << bitindex); ++ IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits); ++ ++ /* Determine the location of the bit for this VLAN id */ ++ bitindex = vlan & 0x1F; /* lower five bits */ ++ ++ bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex)); ++ if (vlan_on) ++ /* Turn on this VLAN id */ ++ bits |= (1 << bitindex); ++ else ++ /* Turn off this VLAN id */ ++ bits &= ~(1 << bitindex); ++ IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits); ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_clear_vfta_82598 - Clear VLAN filter table ++ * @hw: pointer to hardware structure ++ * ++ * Clears the VLAN filer table, and the VMDq index associated with the filter ++ **/ ++static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw) ++{ ++ u32 offset; ++ u32 vlanbyte; ++ ++ for (offset = 0; offset < hw->mac.vft_size; offset++) ++ IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); ++ ++ for (vlanbyte = 0; vlanbyte < 4; vlanbyte++) ++ for (offset = 0; offset < hw->mac.vft_size; offset++) ++ IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset), ++ 0); ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register ++ * @hw: pointer to hardware structure ++ * @reg: analog register to read ++ * @val: read value ++ * ++ * Performs read operation to Atlas analog register specified. ++ **/ ++s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val) ++{ ++ u32 atlas_ctl; ++ ++ IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, ++ IXGBE_ATLASCTL_WRITE_CMD | (reg << 8)); ++ IXGBE_WRITE_FLUSH(hw); ++ udelay(10); ++ atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL); ++ *val = (u8)atlas_ctl; ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register ++ * @hw: pointer to hardware structure ++ * @reg: atlas register to write ++ * @val: value to write ++ * ++ * Performs write operation to Atlas analog register specified. ++ **/ ++s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val) ++{ ++ u32 atlas_ctl; ++ ++ atlas_ctl = (reg << 8) | val; ++ IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl); ++ IXGBE_WRITE_FLUSH(hw); ++ udelay(10); ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface. ++ * @hw: pointer to hardware structure ++ * @byte_offset: EEPROM byte offset to read ++ * @eeprom_data: value read ++ * ++ * Performs 8 byte read operation to SFP module's EEPROM over I2C interface. ++ **/ ++s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, ++ u8 *eeprom_data) ++{ ++ s32 status = 0; ++ u16 sfp_addr = 0; ++ u16 sfp_data = 0; ++ u16 sfp_stat = 0; ++ u32 i; ++ ++ if (hw->phy.type == ixgbe_phy_nl) { ++ /* ++ * NetLogic phy SDA/SCL registers are at addresses 0xC30A to ++ * 0xC30D. These registers are used to talk to the SFP+ ++ * module's EEPROM through the SDA/SCL (I2C) interface. 
++ */ ++ sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset; ++ sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK); ++ hw->phy.ops.write_reg(hw, ++ IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR, ++ IXGBE_MDIO_PMA_PMD_DEV_TYPE, ++ sfp_addr); ++ ++ /* Poll status */ ++ for (i = 0; i < 100; i++) { ++ hw->phy.ops.read_reg(hw, ++ IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT, ++ IXGBE_MDIO_PMA_PMD_DEV_TYPE, ++ &sfp_stat); ++ sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK; ++ if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS) ++ break; ++ msleep(10); ++ } ++ ++ if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) { ++ hw_dbg(hw, "EEPROM read did not pass.\n"); ++ status = IXGBE_ERR_SFP_NOT_PRESENT; ++ goto out; ++ } ++ ++ /* Read data */ ++ hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA, ++ IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data); ++ ++ *eeprom_data = (u8)(sfp_data >> 8); ++ } else { ++ status = IXGBE_ERR_PHY; ++ goto out; ++ } ++ ++out: ++ return status; ++} ++ ++/** ++ * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type ++ * @hw: pointer to hardware structure ++ * ++ * Determines physical layer capabilities of the current configuration. ++ **/ ++u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw) ++{ ++ u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; ++ u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); ++ u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK; ++ u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; ++ u16 ext_ability = 0; ++ ++ hw->phy.ops.identify(hw); ++ ++ /* Copper PHY must be checked before AUTOC LMS to determine correct ++ * physical layer because 10GBase-T PHYs use LMS = KX4/KX */ ++ if (hw->phy.type == ixgbe_phy_tn || ++ hw->phy.type == ixgbe_phy_cu_unknown) { ++ hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY, ++ IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability); ++ if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY) ++ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; ++ if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY) ++ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; ++ if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY) ++ physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; ++ goto out; ++ } ++ ++ switch (autoc & IXGBE_AUTOC_LMS_MASK) { ++ case IXGBE_AUTOC_LMS_1G_AN: ++ case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: ++ if (pma_pmd_1g == IXGBE_AUTOC_1G_KX) ++ physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX; ++ else ++ physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX; ++ break; ++ case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: ++ if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4) ++ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4; ++ else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4) ++ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4; ++ else /* XAUI */ ++ physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; ++ break; ++ case IXGBE_AUTOC_LMS_KX4_AN: ++ case IXGBE_AUTOC_LMS_KX4_AN_1G_AN: ++ if (autoc & IXGBE_AUTOC_KX_SUPP) ++ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX; ++ if (autoc & IXGBE_AUTOC_KX4_SUPP) ++ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4; ++ break; ++ default: ++ break; ++ } ++ ++ if (hw->phy.type == ixgbe_phy_nl) { ++ hw->phy.ops.identify_sfp(hw); ++ ++ switch (hw->phy.sfp_type) { ++ case ixgbe_sfp_type_da_cu: ++ physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; ++ break; ++ case ixgbe_sfp_type_sr: ++ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; ++ break; ++ case ixgbe_sfp_type_lr: ++ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; ++ break; ++ default: ++ physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; ++ break; ++ } ++ } ++ ++ switch 
(hw->device_id) { ++ case IXGBE_DEV_ID_82598_DA_DUAL_PORT: ++ physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; ++ break; ++ case IXGBE_DEV_ID_82598AF_DUAL_PORT: ++ case IXGBE_DEV_ID_82598AF_SINGLE_PORT: ++ case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM: ++ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; ++ break; ++ case IXGBE_DEV_ID_82598EB_XF_LR: ++ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; ++ break; ++ default: ++ break; ++ } ++ ++out: ++ return physical_layer; ++} ++ ++/** ++ * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple ++ * port devices. ++ * @hw: pointer to the HW structure ++ * ++ * Calls common function and corrects issue with some single port devices ++ * that enable LAN1 but not LAN0. ++ **/ ++void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw) ++{ ++ struct ixgbe_bus_info *bus = &hw->bus; ++ u16 pci_gen, pci_ctrl2; ++ ++ ixgbe_set_lan_id_multi_port_pcie(hw); ++ ++ /* check if LAN0 is disabled */ ++ hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen); ++ if ((pci_gen != 0) && (pci_gen != 0xFFFF)) { ++ ++ hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2); ++ ++ /* if LAN0 is completely disabled force function to 0 */ ++ if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) && ++ !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) && ++ !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) { ++ ++ bus->func = 0; ++ } ++ } ++} ++ ++/** ++ * ixgbe_validate_link_ready - Function looks for phy link ++ * @hw: pointer to hardware structure ++ * ++ * Function indicates success when phy link is available. If phy is not ready ++ * within 5 seconds of MAC indicating link, the function returns error. ++ **/ ++static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw) ++{ ++ u32 timeout; ++ u16 an_reg; ++ ++ if (hw->device_id != IXGBE_DEV_ID_82598AT2) ++ return 0; ++ ++ for (timeout = 0; ++ timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) { ++ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg); ++ ++ if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) && ++ (an_reg & IXGBE_MII_AUTONEG_LINK_UP)) ++ break; ++ ++ msleep(100); ++ } ++ ++ if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) { ++ hw_dbg(hw, "Link was indicated but link is down\n"); ++ return IXGBE_ERR_LINK_SETUP; ++ } ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_enable_relaxed_ordering_82598 - enable relaxed ordering ++ * @hw: pointer to hardware structure ++ * ++ **/ ++void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw) ++{ ++ u32 regval; ++ u32 i; ++ ++ /* Enable relaxed ordering */ ++ for (i = 0; ((i < hw->mac.max_tx_queues) && ++ (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { ++ regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i)); ++ regval |= IXGBE_DCA_TXCTRL_TX_WB_RO_EN; ++ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval); ++ } ++ ++ for (i = 0; ((i < hw->mac.max_rx_queues) && ++ (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { ++ regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); ++ regval |= (IXGBE_DCA_RXCTRL_DESC_WRO_EN | ++ IXGBE_DCA_RXCTRL_DESC_HSRO_EN); ++ IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); ++ } ++ ++} +diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_82599.c linux-2.6.22-50/drivers/net/ixgbe/ixgbe_82599.c +--- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_82599.c 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_82599.c 2010-08-25 17:56:26.000000000 -0400 +@@ -0,0 +1,2425 @@ ++/******************************************************************************* ++ ++ Intel 10 Gigabit PCI Express Linux driver ++ 
Copyright(c) 1999 - 2010 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ You should have received a copy of the GNU General Public License along with ++ this program; if not, write to the Free Software Foundation, Inc., ++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#include "ixgbe_type.h" ++#include "ixgbe_api.h" ++#include "ixgbe_common.h" ++#include "ixgbe_phy.h" ++ ++s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw); ++s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, ++ ixgbe_link_speed *speed, ++ bool *autoneg); ++enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw); ++void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); ++void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); ++void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); ++s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, ++ ixgbe_link_speed speed, bool autoneg, ++ bool autoneg_wait_to_complete); ++s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, ++ ixgbe_link_speed speed, bool autoneg, ++ bool autoneg_wait_to_complete); ++s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, ++ bool autoneg_wait_to_complete); ++s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, ++ ixgbe_link_speed speed, ++ bool autoneg, ++ bool autoneg_wait_to_complete); ++static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, ++ ixgbe_link_speed speed, ++ bool autoneg, ++ bool autoneg_wait_to_complete); ++s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw); ++void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw); ++s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw); ++s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val); ++s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val); ++s32 ixgbe_start_hw_rev_1_82599(struct ixgbe_hw *hw); ++void ixgbe_enable_relaxed_ordering_82599(struct ixgbe_hw *hw); ++s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw); ++s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw); ++u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw); ++s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval); ++s32 ixgbe_get_device_caps_82599(struct ixgbe_hw *hw, u16 *device_caps); ++static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw); ++ ++void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) ++{ ++ struct ixgbe_mac_info *mac = &hw->mac; ++ ++ if (hw->phy.multispeed_fiber) { ++ /* Set up dual speed SFP+ support */ ++ mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber; ++ mac->ops.disable_tx_laser = ++ &ixgbe_disable_tx_laser_multispeed_fiber; ++ mac->ops.enable_tx_laser = ++ &ixgbe_enable_tx_laser_multispeed_fiber; ++ mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber; ++ } else { ++ mac->ops.disable_tx_laser = NULL; ++ 
mac->ops.enable_tx_laser = NULL; ++ mac->ops.flap_tx_laser = NULL; ++ if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) && ++ (hw->phy.smart_speed == ixgbe_smart_speed_auto || ++ hw->phy.smart_speed == ixgbe_smart_speed_on)) ++ mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed; ++ else ++ mac->ops.setup_link = &ixgbe_setup_mac_link_82599; ++ } ++} ++ ++/** ++ * ixgbe_init_phy_ops_82599 - PHY/SFP specific init ++ * @hw: pointer to hardware structure ++ * ++ * Initialize any function pointers that were not able to be ++ * set during init_shared_code because the PHY/SFP type was ++ * not known. Perform the SFP init if necessary. ++ * ++ **/ ++s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw) ++{ ++ struct ixgbe_mac_info *mac = &hw->mac; ++ struct ixgbe_phy_info *phy = &hw->phy; ++ s32 ret_val = 0; ++ ++ /* Identify the PHY or SFP module */ ++ ret_val = phy->ops.identify(hw); ++ if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED) ++ goto init_phy_ops_out; ++ ++ /* Setup function pointers based on detected SFP module and speeds */ ++ ixgbe_init_mac_link_ops_82599(hw); ++ if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) ++ hw->phy.ops.reset = NULL; ++ ++ /* If copper media, overwrite with copper function pointers */ ++ if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { ++ mac->ops.setup_link = &ixgbe_setup_copper_link_82599; ++ mac->ops.get_link_capabilities = ++ &ixgbe_get_copper_link_capabilities_generic; ++ } ++ ++ /* Set necessary function pointers based on phy type */ ++ switch (hw->phy.type) { ++ case ixgbe_phy_tn: ++ phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; ++ phy->ops.check_link = &ixgbe_check_phy_link_tnx; ++ phy->ops.get_firmware_version = ++ &ixgbe_get_phy_firmware_version_tnx; ++ break; ++ case ixgbe_phy_aq: ++ phy->ops.get_firmware_version = ++ &ixgbe_get_phy_firmware_version_generic; ++ break; ++ default: ++ break; ++ } ++init_phy_ops_out: ++ return ret_val; ++} ++ ++s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) ++{ ++ s32 ret_val = 0; ++ u32 reg_anlp1 = 0; ++ u32 i = 0; ++ u16 list_offset, data_offset, data_value; ++ ++ if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) { ++ ixgbe_init_mac_link_ops_82599(hw); ++ ++ hw->phy.ops.reset = NULL; ++ ++ ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, ++ &data_offset); ++ if (ret_val != 0) ++ goto setup_sfp_out; ++ ++ /* PHY config will finish before releasing the semaphore */ ++ ret_val = ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); ++ if (ret_val != 0) { ++ ret_val = IXGBE_ERR_SWFW_SYNC; ++ goto setup_sfp_out; ++ } ++ ++ hw->eeprom.ops.read(hw, ++data_offset, &data_value); ++ while (data_value != 0xffff) { ++ IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value); ++ IXGBE_WRITE_FLUSH(hw); ++ hw->eeprom.ops.read(hw, ++data_offset, &data_value); ++ } ++ ++ /* Release the semaphore */ ++ ixgbe_release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); ++ /* Delay obtaining semaphore again to allow FW access */ ++ msleep(hw->eeprom.semaphore_delay); ++ ++ /* Now restart DSP by setting Restart_AN and clearing LMS */ ++ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw, ++ IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) | ++ IXGBE_AUTOC_AN_RESTART)); ++ ++ /* Wait for AN to leave state 0 */ ++ for (i = 0; i < 10; i++) { ++ msleep(4); ++ reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1); ++ if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK) ++ break; ++ } ++ if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) { ++ hw_dbg(hw, "sfp module setup not complete\n"); ++ ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE; ++ goto setup_sfp_out; ++ } 
++ ++ /* Restart DSP by setting Restart_AN and return to SFI mode */ ++ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw, ++ IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL | ++ IXGBE_AUTOC_AN_RESTART)); ++ } ++ ++setup_sfp_out: ++ return ret_val; ++} ++ ++/** ++ * ixgbe_init_ops_82599 - Inits func ptrs and MAC type ++ * @hw: pointer to hardware structure ++ * ++ * Initialize the function pointers and assign the MAC type for 82599. ++ * Does not touch the hardware. ++ **/ ++ ++s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw) ++{ ++ struct ixgbe_mac_info *mac = &hw->mac; ++ struct ixgbe_phy_info *phy = &hw->phy; ++ s32 ret_val; ++ ++ ret_val = ixgbe_init_phy_ops_generic(hw); ++ ret_val = ixgbe_init_ops_generic(hw); ++ ++ /* PHY */ ++ phy->ops.identify = &ixgbe_identify_phy_82599; ++ phy->ops.init = &ixgbe_init_phy_ops_82599; ++ ++ /* MAC */ ++ mac->ops.reset_hw = &ixgbe_reset_hw_82599; ++ mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_82599; ++ mac->ops.get_media_type = &ixgbe_get_media_type_82599; ++ mac->ops.get_supported_physical_layer = ++ &ixgbe_get_supported_physical_layer_82599; ++ mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_82599; ++ mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82599; ++ mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82599; ++ mac->ops.start_hw = &ixgbe_start_hw_rev_1_82599; ++ mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic; ++ mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic; ++ mac->ops.get_device_caps = &ixgbe_get_device_caps_82599; ++ mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic; ++ mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic; ++ ++ /* RAR, Multicast, VLAN */ ++ mac->ops.set_vmdq = &ixgbe_set_vmdq_generic; ++ mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic; ++ mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic; ++ mac->rar_highwater = 1; ++ mac->ops.set_vfta = &ixgbe_set_vfta_generic; ++ mac->ops.clear_vfta = &ixgbe_clear_vfta_generic; ++ mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic; ++ mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_82599; ++ ++ /* Link */ ++ mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82599; ++ mac->ops.check_link = &ixgbe_check_mac_link_generic; ++ ixgbe_init_mac_link_ops_82599(hw); ++ ++ mac->mcft_size = 128; ++ mac->vft_size = 128; ++ mac->num_rar_entries = 128; ++ mac->rx_pb_size = 512; ++ mac->max_tx_queues = 128; ++ mac->max_rx_queues = 128; ++ mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); ++ ++ hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf; ++ ++ return ret_val; ++} ++ ++/** ++ * ixgbe_get_link_capabilities_82599 - Determines link capabilities ++ * @hw: pointer to hardware structure ++ * @speed: pointer to link speed ++ * @negotiation: true when autoneg or autotry is enabled ++ * ++ * Determines the link capabilities by reading the AUTOC register. ++ **/ ++s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, ++ ixgbe_link_speed *speed, ++ bool *negotiation) ++{ ++ s32 status = 0; ++ u32 autoc = 0; ++ ++ /* Check if 1G SFP module. */ ++ if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || ++ hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) { ++ *speed = IXGBE_LINK_SPEED_1GB_FULL; ++ *negotiation = true; ++ goto out; ++ } ++ ++ /* ++ * Determine link capabilities based on the stored value of AUTOC, ++ * which represents EEPROM defaults. If AUTOC value has not ++ * been stored, use the current register values. 
++ */ ++ if (hw->mac.orig_link_settings_stored) ++ autoc = hw->mac.orig_autoc; ++ else ++ autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); ++ ++ switch (autoc & IXGBE_AUTOC_LMS_MASK) { ++ case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: ++ *speed = IXGBE_LINK_SPEED_1GB_FULL; ++ *negotiation = false; ++ break; ++ ++ case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: ++ *speed = IXGBE_LINK_SPEED_10GB_FULL; ++ *negotiation = false; ++ break; ++ ++ case IXGBE_AUTOC_LMS_1G_AN: ++ *speed = IXGBE_LINK_SPEED_1GB_FULL; ++ *negotiation = true; ++ break; ++ ++ case IXGBE_AUTOC_LMS_10G_SERIAL: ++ *speed = IXGBE_LINK_SPEED_10GB_FULL; ++ *negotiation = false; ++ break; ++ ++ case IXGBE_AUTOC_LMS_KX4_KX_KR: ++ case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN: ++ *speed = IXGBE_LINK_SPEED_UNKNOWN; ++ if (autoc & IXGBE_AUTOC_KR_SUPP) ++ *speed |= IXGBE_LINK_SPEED_10GB_FULL; ++ if (autoc & IXGBE_AUTOC_KX4_SUPP) ++ *speed |= IXGBE_LINK_SPEED_10GB_FULL; ++ if (autoc & IXGBE_AUTOC_KX_SUPP) ++ *speed |= IXGBE_LINK_SPEED_1GB_FULL; ++ *negotiation = true; ++ break; ++ ++ case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII: ++ *speed = IXGBE_LINK_SPEED_100_FULL; ++ if (autoc & IXGBE_AUTOC_KR_SUPP) ++ *speed |= IXGBE_LINK_SPEED_10GB_FULL; ++ if (autoc & IXGBE_AUTOC_KX4_SUPP) ++ *speed |= IXGBE_LINK_SPEED_10GB_FULL; ++ if (autoc & IXGBE_AUTOC_KX_SUPP) ++ *speed |= IXGBE_LINK_SPEED_1GB_FULL; ++ *negotiation = true; ++ break; ++ ++ case IXGBE_AUTOC_LMS_SGMII_1G_100M: ++ *speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL; ++ *negotiation = false; ++ break; ++ ++ default: ++ status = IXGBE_ERR_LINK_SETUP; ++ goto out; ++ break; ++ } ++ ++ if (hw->phy.multispeed_fiber) { ++ *speed |= IXGBE_LINK_SPEED_10GB_FULL | ++ IXGBE_LINK_SPEED_1GB_FULL; ++ *negotiation = true; ++ } ++ ++out: ++ return status; ++} ++ ++/** ++ * ixgbe_get_media_type_82599 - Get media type ++ * @hw: pointer to hardware structure ++ * ++ * Returns the media type (fiber, copper, backplane) ++ **/ ++enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw) ++{ ++ enum ixgbe_media_type media_type; ++ ++ /* Detect if there is a copper PHY attached. */ ++ if (hw->phy.type == ixgbe_phy_cu_unknown || ++ hw->phy.type == ixgbe_phy_tn || ++ hw->phy.type == ixgbe_phy_aq) { ++ media_type = ixgbe_media_type_copper; ++ goto out; ++ } ++ ++ switch (hw->device_id) { ++ case IXGBE_DEV_ID_82599_KX4: ++ case IXGBE_DEV_ID_82599_KX4_MEZZ: ++ case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: ++ case IXGBE_DEV_ID_82599_KR: ++ case IXGBE_DEV_ID_82599_XAUI_LOM: ++ /* Default device ID is mezzanine card KX/KX4 */ ++ media_type = ixgbe_media_type_backplane; ++ break; ++ case IXGBE_DEV_ID_82599_SFP: ++ case IXGBE_DEV_ID_82599_SFP_EM: ++ media_type = ixgbe_media_type_fiber; ++ break; ++ case IXGBE_DEV_ID_82599_CX4: ++ media_type = ixgbe_media_type_cx4; ++ break; ++ case IXGBE_DEV_ID_82599_T3_LOM: ++ media_type = ixgbe_media_type_copper; ++ break; ++ default: ++ media_type = ixgbe_media_type_unknown; ++ break; ++ } ++out: ++ return media_type; ++} ++ ++/** ++ * ixgbe_start_mac_link_82599 - Setup MAC link settings ++ * @hw: pointer to hardware structure ++ * ++ * Configures link settings based on values in the ixgbe_hw struct. ++ * Restarts the link. Performs autonegotiation if needed. 
++ **/ ++s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, ++ bool autoneg_wait_to_complete) ++{ ++ u32 autoc_reg; ++ u32 links_reg; ++ u32 i; ++ s32 status = 0; ++ ++ /* Restart link */ ++ autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); ++ autoc_reg |= IXGBE_AUTOC_AN_RESTART; ++ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); ++ ++ /* Only poll for autoneg to complete if specified to do so */ ++ if (autoneg_wait_to_complete) { ++ if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) == ++ IXGBE_AUTOC_LMS_KX4_KX_KR || ++ (autoc_reg & IXGBE_AUTOC_LMS_MASK) == ++ IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ++ || (autoc_reg & IXGBE_AUTOC_LMS_MASK) == ++ IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { ++ links_reg = 0; /* Just in case Autoneg time = 0 */ ++ for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { ++ links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); ++ if (links_reg & IXGBE_LINKS_KX_AN_COMP) ++ break; ++ msleep(100); ++ } ++ if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { ++ status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; ++ hw_dbg(hw, "Autoneg did not complete.\n"); ++ } ++ } ++ } ++ ++ /* Add delay to filter out noises during initial link setup */ ++ msleep(50); ++ ++ return status; ++} ++ ++/** ++ * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser ++ * @hw: pointer to hardware structure ++ * ++ * The base drivers may require better control over SFP+ module ++ * PHY states. This includes selectively shutting down the Tx ++ * laser on the PHY, effectively halting physical link. ++ **/ ++void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) ++{ ++ u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); ++ ++ /* Disable tx laser; allow 100us to go dark per spec */ ++ esdp_reg |= IXGBE_ESDP_SDP3; ++ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); ++ IXGBE_WRITE_FLUSH(hw); ++ udelay(100); ++} ++ ++/** ++ * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser ++ * @hw: pointer to hardware structure ++ * ++ * The base drivers may require better control over SFP+ module ++ * PHY states. This includes selectively turning on the Tx ++ * laser on the PHY, effectively starting physical link. ++ **/ ++void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) ++{ ++ u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); ++ ++ /* Enable tx laser; allow 100ms to light up */ ++ esdp_reg &= ~IXGBE_ESDP_SDP3; ++ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); ++ IXGBE_WRITE_FLUSH(hw); ++ msleep(100); ++} ++ ++/** ++ * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser ++ * @hw: pointer to hardware structure ++ * ++ * When the driver changes the link speeds that it can support, ++ * it sets autotry_restart to true to indicate that we need to ++ * initiate a new autotry session with the link partner. To do ++ * so, we set the speed then disable and re-enable the tx laser, to ++ * alert the link partner that it also needs to restart autotry on its ++ * end. This is consistent with true clause 37 autoneg, which also ++ * involves a loss of signal. ++ **/ ++void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) ++{ ++ if (hw->mac.autotry_restart) { ++ ixgbe_disable_tx_laser_multispeed_fiber(hw); ++ ixgbe_enable_tx_laser_multispeed_fiber(hw); ++ hw->mac.autotry_restart = false; ++ } ++} ++ ++/** ++ * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed ++ * @hw: pointer to hardware structure ++ * @speed: new link speed ++ * @autoneg: true if autonegotiation enabled ++ * @autoneg_wait_to_complete: true when waiting for completion is needed ++ * ++ * Set the link speed in the AUTOC register and restarts link. 
++ **/ ++s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, ++ ixgbe_link_speed speed, bool autoneg, ++ bool autoneg_wait_to_complete) ++{ ++ s32 status = 0; ++ ixgbe_link_speed link_speed; ++ ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN; ++ u32 speedcnt = 0; ++ u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); ++ u32 i = 0; ++ bool link_up = false; ++ bool negotiation; ++ ++ /* Mask off requested but non-supported speeds */ ++ status = ixgbe_get_link_capabilities(hw, &link_speed, &negotiation); ++ if (status != 0) ++ return status; ++ ++ speed &= link_speed; ++ ++ /* ++ * Try each speed one by one, highest priority first. We do this in ++ * software because 10gb fiber doesn't support speed autonegotiation. ++ */ ++ if (speed & IXGBE_LINK_SPEED_10GB_FULL) { ++ speedcnt++; ++ highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL; ++ ++ /* If we already have link at this speed, just jump out */ ++ status = ixgbe_check_link(hw, &link_speed, &link_up, false); ++ if (status != 0) ++ return status; ++ ++ if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up) ++ goto out; ++ ++ /* Set the module link speed */ ++ esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5); ++ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); ++ IXGBE_WRITE_FLUSH(hw); ++ ++ /* Allow module to change analog characteristics (1G->10G) */ ++ msleep(40); ++ ++ status = ixgbe_setup_mac_link_82599( ++ hw, IXGBE_LINK_SPEED_10GB_FULL, autoneg, ++ autoneg_wait_to_complete); ++ if (status != 0) ++ return status; ++ ++ /* Flap the tx laser if it has not already been done */ ++ ixgbe_flap_tx_laser(hw); ++ ++ /* ++ * Wait for the controller to acquire link. Per IEEE 802.3ap, ++ * Section 73.10.2, we may have to wait up to 500ms if KR is ++ * attempted. 82599 uses the same timing for 10g SFI. ++ */ ++ for (i = 0; i < 5; i++) { ++ /* Wait for the link partner to also set speed */ ++ msleep(100); ++ ++ /* If we have link, just jump out */ ++ status = ixgbe_check_link(hw, &link_speed, ++ &link_up, false); ++ if (status != 0) ++ return status; ++ ++ if (link_up) ++ goto out; ++ } ++ } ++ ++ if (speed & IXGBE_LINK_SPEED_1GB_FULL) { ++ speedcnt++; ++ if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN) ++ highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL; ++ ++ /* If we already have link at this speed, just jump out */ ++ status = ixgbe_check_link(hw, &link_speed, &link_up, false); ++ if (status != 0) ++ return status; ++ ++ if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up) ++ goto out; ++ ++ /* Set the module link speed */ ++ esdp_reg &= ~IXGBE_ESDP_SDP5; ++ esdp_reg |= IXGBE_ESDP_SDP5_DIR; ++ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); ++ IXGBE_WRITE_FLUSH(hw); ++ ++ /* Allow module to change analog characteristics (10G->1G) */ ++ msleep(40); ++ ++ status = ixgbe_setup_mac_link_82599( ++ hw, IXGBE_LINK_SPEED_1GB_FULL, autoneg, ++ autoneg_wait_to_complete); ++ if (status != 0) ++ return status; ++ ++ /* Flap the tx laser if it has not already been done */ ++ ixgbe_flap_tx_laser(hw); ++ ++ /* Wait for the link partner to also set speed */ ++ msleep(100); ++ ++ /* If we have link, just jump out */ ++ status = ixgbe_check_link(hw, &link_speed, &link_up, false); ++ if (status != 0) ++ return status; ++ ++ if (link_up) ++ goto out; ++ } ++ ++ /* ++ * We didn't get link. Configure back to the highest speed we tried, ++ * (if there was more than one). We call ourselves back with just the ++ * single highest speed that the user requested. 
++ */ ++ if (speedcnt > 1) ++ status = ixgbe_setup_mac_link_multispeed_fiber(hw, ++ highest_link_speed, autoneg, autoneg_wait_to_complete); ++ ++out: ++ /* Set autoneg_advertised value based on input link speed */ ++ hw->phy.autoneg_advertised = 0; ++ ++ if (speed & IXGBE_LINK_SPEED_10GB_FULL) ++ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; ++ ++ if (speed & IXGBE_LINK_SPEED_1GB_FULL) ++ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; ++ ++ return status; ++} ++ ++/** ++ * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed ++ * @hw: pointer to hardware structure ++ * @speed: new link speed ++ * @autoneg: true if autonegotiation enabled ++ * @autoneg_wait_to_complete: true when waiting for completion is needed ++ * ++ * Implements the Intel SmartSpeed algorithm. ++ **/ ++s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, ++ ixgbe_link_speed speed, bool autoneg, ++ bool autoneg_wait_to_complete) ++{ ++ s32 status = 0; ++ ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; ++ s32 i, j; ++ bool link_up = false; ++ u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); ++ ++ /* Set autoneg_advertised value based on input link speed */ ++ hw->phy.autoneg_advertised = 0; ++ ++ if (speed & IXGBE_LINK_SPEED_10GB_FULL) ++ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; ++ ++ if (speed & IXGBE_LINK_SPEED_1GB_FULL) ++ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; ++ ++ if (speed & IXGBE_LINK_SPEED_100_FULL) ++ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL; ++ ++ /* ++ * Implement Intel SmartSpeed algorithm. SmartSpeed will reduce the ++ * autoneg advertisement if link is unable to be established at the ++ * highest negotiated rate. This can sometimes happen due to integrity ++ * issues with the physical media connection. ++ */ ++ ++ /* First, try to get link with full advertisement */ ++ hw->phy.smart_speed_active = false; ++ for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) { ++ status = ixgbe_setup_mac_link_82599(hw, speed, autoneg, ++ autoneg_wait_to_complete); ++ if (status != 0) ++ goto out; ++ ++ /* ++ * Wait for the controller to acquire link. Per IEEE 802.3ap, ++ * Section 73.10.2, we may have to wait up to 500ms if KR is ++ * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per ++ * Table 9 in the AN MAS. ++ */ ++ for (i = 0; i < 5; i++) { ++ msleep(100); ++ ++ /* If we have link, just jump out */ ++ status = ixgbe_check_link(hw, &link_speed, &link_up, ++ false); ++ if (status != 0) ++ goto out; ++ ++ if (link_up) ++ goto out; ++ } ++ } ++ ++ /* ++ * We didn't get link. If we advertised KR plus one of KX4/KX ++ * (or BX4/BX), then disable KR and try again. ++ */ ++ if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) || ++ ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0)) ++ goto out; ++ ++ /* Turn SmartSpeed on to disable KR support */ ++ hw->phy.smart_speed_active = true; ++ status = ixgbe_setup_mac_link_82599(hw, speed, autoneg, ++ autoneg_wait_to_complete); ++ if (status != 0) ++ goto out; ++ ++ /* ++ * Wait for the controller to acquire link. 600ms will allow for ++ * the AN link_fail_inhibit_timer as well for multiple cycles of ++ * parallel detect, both 10g and 1g. This allows for the maximum ++ * connect attempts as defined in the AN MAS table 73-7. ++ */ ++ for (i = 0; i < 6; i++) { ++ msleep(100); ++ ++ /* If we have link, just jump out */ ++ status = ixgbe_check_link(hw, &link_speed, &link_up, false); ++ if (status != 0) ++ goto out; ++ ++ if (link_up) ++ goto out; ++ } ++ ++ /* We didn't get link. 
Turn SmartSpeed back off. */ ++ hw->phy.smart_speed_active = false; ++ status = ixgbe_setup_mac_link_82599(hw, speed, autoneg, ++ autoneg_wait_to_complete); ++ ++out: ++ if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL)) ++ hw_dbg(hw, "Smartspeed has downgraded the link speed " ++ "from the maximum advertised\n"); ++ return status; ++} ++ ++/** ++ * ixgbe_setup_mac_link_82599 - Set MAC link speed ++ * @hw: pointer to hardware structure ++ * @speed: new link speed ++ * @autoneg: true if autonegotiation enabled ++ * @autoneg_wait_to_complete: true when waiting for completion is needed ++ * ++ * Set the link speed in the AUTOC register and restarts link. ++ **/ ++s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, ++ ixgbe_link_speed speed, bool autoneg, ++ bool autoneg_wait_to_complete) ++{ ++ s32 status = 0; ++ u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); ++ u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); ++ u32 start_autoc = autoc; ++ u32 orig_autoc = 0; ++ u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK; ++ u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; ++ u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK; ++ u32 links_reg; ++ u32 i; ++ ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN; ++ ++ /* Check to see if speed passed in is supported. */ ++ status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg); ++ if (status != 0) ++ goto out; ++ ++ speed &= link_capabilities; ++ ++ if (speed == IXGBE_LINK_SPEED_UNKNOWN) { ++ status = IXGBE_ERR_LINK_SETUP; ++ goto out; ++ } ++ ++ /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/ ++ if (hw->mac.orig_link_settings_stored) ++ orig_autoc = hw->mac.orig_autoc; ++ else ++ orig_autoc = autoc; ++ ++ if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR || ++ link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || ++ link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { ++ /* Set KX4/KX/KR support according to speed requested */ ++ autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP); ++ if (speed & IXGBE_LINK_SPEED_10GB_FULL) ++ if (orig_autoc & IXGBE_AUTOC_KX4_SUPP) ++ autoc |= IXGBE_AUTOC_KX4_SUPP; ++ if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) && ++ (hw->phy.smart_speed_active == false)) ++ autoc |= IXGBE_AUTOC_KR_SUPP; ++ if (speed & IXGBE_LINK_SPEED_1GB_FULL) ++ autoc |= IXGBE_AUTOC_KX_SUPP; ++ } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) && ++ (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN || ++ link_mode == IXGBE_AUTOC_LMS_1G_AN)) { ++ /* Switch from 1G SFI to 10G SFI if requested */ ++ if ((speed == IXGBE_LINK_SPEED_10GB_FULL) && ++ (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) { ++ autoc &= ~IXGBE_AUTOC_LMS_MASK; ++ autoc |= IXGBE_AUTOC_LMS_10G_SERIAL; ++ } ++ } else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) && ++ (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) { ++ /* Switch from 10G SFI to 1G SFI if requested */ ++ if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && ++ (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) { ++ autoc &= ~IXGBE_AUTOC_LMS_MASK; ++ if (autoneg) ++ autoc |= IXGBE_AUTOC_LMS_1G_AN; ++ else ++ autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN; ++ } ++ } ++ ++ if (autoc != start_autoc) { ++ ++ /* Restart link */ ++ autoc |= IXGBE_AUTOC_AN_RESTART; ++ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); ++ ++ /* Only poll for autoneg to complete if specified to do so */ ++ if (autoneg_wait_to_complete) { ++ if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR || ++ link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || ++ link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { ++ links_reg = 0; /*Just in case Autoneg time=0*/ ++ for 
(i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { ++ links_reg = ++ IXGBE_READ_REG(hw, IXGBE_LINKS); ++ if (links_reg & IXGBE_LINKS_KX_AN_COMP) ++ break; ++ msleep(100); ++ } ++ if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { ++ status = ++ IXGBE_ERR_AUTONEG_NOT_COMPLETE; ++ hw_dbg(hw, "Autoneg did not complete.\n"); ++ } ++ } ++ } ++ ++ /* Add delay to filter out noises during initial link setup */ ++ msleep(50); ++ } ++ ++out: ++ return status; ++} ++ ++/** ++ * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field ++ * @hw: pointer to hardware structure ++ * @speed: new link speed ++ * @autoneg: true if autonegotiation enabled ++ * @autoneg_wait_to_complete: true if waiting is needed to complete ++ * ++ * Restarts link on PHY and MAC based on settings passed in. ++ **/ ++static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, ++ ixgbe_link_speed speed, ++ bool autoneg, ++ bool autoneg_wait_to_complete) ++{ ++ s32 status; ++ ++ /* Setup the PHY according to input speed */ ++ status = hw->phy.ops.setup_link_speed(hw, speed, autoneg, ++ autoneg_wait_to_complete); ++ /* Set up MAC */ ++ ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete); ++ ++ return status; ++} ++/** ++ * ixgbe_reset_hw_82599 - Perform hardware reset ++ * @hw: pointer to hardware structure ++ * ++ * Resets the hardware by resetting the transmit and receive units, masks ++ * and clears all interrupts, perform a PHY reset, and perform a link (MAC) ++ * reset. ++ **/ ++s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) ++{ ++ s32 status = 0; ++ u32 ctrl; ++ u32 i; ++ u32 autoc; ++ u32 autoc2; ++ ++ /* Call adapter stop to disable tx/rx and clear interrupts */ ++ hw->mac.ops.stop_adapter(hw); ++ ++ /* PHY ops must be identified and initialized prior to reset */ ++ ++ /* Identify PHY and related function pointers */ ++ status = hw->phy.ops.init(hw); ++ ++ if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) ++ goto reset_hw_out; ++ ++ /* Setup SFP module if there is one present. */ ++ if (hw->phy.sfp_setup_needed) { ++ status = hw->mac.ops.setup_sfp(hw); ++ hw->phy.sfp_setup_needed = false; ++ } ++ ++ if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) ++ goto reset_hw_out; ++ ++ /* Reset PHY */ ++ if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL) ++ hw->phy.ops.reset(hw); ++ ++ /* ++ * Prevent the PCI-E bus from from hanging by disabling PCI-E master ++ * access and verify no pending requests before reset ++ */ ++ ixgbe_disable_pcie_master(hw); ++ ++mac_reset_top: ++ /* ++ * Issue global reset to the MAC. This needs to be a SW reset. ++ * If link reset is used, it might reset the MAC when mng is using it ++ */ ++ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); ++ IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST)); ++ IXGBE_WRITE_FLUSH(hw); ++ ++ /* Poll for reset bit to self-clear indicating reset is complete */ ++ for (i = 0; i < 10; i++) { ++ udelay(1); ++ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); ++ if (!(ctrl & IXGBE_CTRL_RST)) ++ break; ++ } ++ if (ctrl & IXGBE_CTRL_RST) { ++ status = IXGBE_ERR_RESET_FAILED; ++ hw_dbg(hw, "Reset polling failed to complete.\n"); ++ } ++ ++ /* ++ * Double resets are required for recovery from certain error ++ * conditions. Between resets, it is necessary to stall to allow time ++ * for any pending HW events to complete. We use 1usec since that is ++ * what is needed for ixgbe_disable_pcie_master(). The second reset ++ * then clears out any effects of those events. 
++ */ ++ if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { ++ hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; ++ udelay(1); ++ goto mac_reset_top; ++ } ++ ++ msleep(50); ++ ++ /* ++ * Store the original AUTOC/AUTOC2 values if they have not been ++ * stored off yet. Otherwise restore the stored original ++ * values since the reset operation sets back to defaults. ++ */ ++ autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); ++ autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); ++ if (hw->mac.orig_link_settings_stored == false) { ++ hw->mac.orig_autoc = autoc; ++ hw->mac.orig_autoc2 = autoc2; ++ hw->mac.orig_link_settings_stored = true; ++ } else { ++ if (autoc != hw->mac.orig_autoc) ++ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc | ++ IXGBE_AUTOC_AN_RESTART)); ++ ++ if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) != ++ (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) { ++ autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK; ++ autoc2 |= (hw->mac.orig_autoc2 & ++ IXGBE_AUTOC2_UPPER_MASK); ++ IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2); ++ } ++ } ++ ++ /* Store the permanent mac address */ ++ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); ++ ++ /* ++ * Store MAC address from RAR0, clear receive address registers, and ++ * clear the multicast table. Also reset num_rar_entries to 128, ++ * since we modify this value when programming the SAN MAC address. ++ */ ++ hw->mac.num_rar_entries = 128; ++ hw->mac.ops.init_rx_addrs(hw); ++ ++ /* Store the permanent SAN mac address */ ++ hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr); ++ ++ /* Add the SAN MAC address to the RAR only if it's a valid address */ ++ if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) { ++ hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1, ++ hw->mac.san_addr, 0, IXGBE_RAH_AV); ++ ++ /* Reserve the last RAR for the SAN MAC address */ ++ hw->mac.num_rar_entries--; ++ } ++ ++ /* Store the alternative WWNN/WWPN prefix */ ++ hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix, ++ &hw->mac.wwpn_prefix); ++ ++reset_hw_out: ++ return status; ++} ++ ++/** ++ * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables. ++ * @hw: pointer to hardware structure ++ **/ ++s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw) ++{ ++ int i; ++ u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL); ++ fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE; ++ ++ /* ++ * Before starting reinitialization process, ++ * FDIRCMD.CMD must be zero. ++ */ ++ for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) { ++ if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) & ++ IXGBE_FDIRCMD_CMD_MASK)) ++ break; ++ udelay(10); ++ } ++ if (i >= IXGBE_FDIRCMD_CMD_POLL) { ++ hw_dbg(hw, "Flow Director previous command isn't complete, " ++ "aborting table re-initialization. \n"); ++ return IXGBE_ERR_FDIR_REINIT_FAILED; ++ } ++ ++ IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0); ++ IXGBE_WRITE_FLUSH(hw); ++ /* ++ * 82599 adapters flow director init flow cannot be restarted, ++ * Workaround 82599 silicon errata by performing the following steps ++ * before re-writing the FDIRCTRL control register with the same value. ++ * - write 1 to bit 8 of FDIRCMD register & ++ * - write 0 to bit 8 of FDIRCMD register ++ */ ++ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, ++ (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) | ++ IXGBE_FDIRCMD_CLEARHT)); ++ IXGBE_WRITE_FLUSH(hw); ++ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, ++ (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) & ++ ~IXGBE_FDIRCMD_CLEARHT)); ++ IXGBE_WRITE_FLUSH(hw); ++ /* ++ * Clear FDIR Hash register to clear any leftover hashes ++ * waiting to be programmed. 
++ */ ++ IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00); ++ IXGBE_WRITE_FLUSH(hw); ++ ++ IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); ++ IXGBE_WRITE_FLUSH(hw); ++ ++ /* Poll init-done after we write FDIRCTRL register */ ++ for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { ++ if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & ++ IXGBE_FDIRCTRL_INIT_DONE) ++ break; ++ udelay(10); ++ } ++ if (i >= IXGBE_FDIR_INIT_DONE_POLL) { ++ hw_dbg(hw, "Flow Director Signature poll time exceeded!\n"); ++ return IXGBE_ERR_FDIR_REINIT_FAILED; ++ } ++ ++ /* Clear FDIR statistics registers (read to clear) */ ++ IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT); ++ IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT); ++ IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); ++ IXGBE_READ_REG(hw, IXGBE_FDIRMISS); ++ IXGBE_READ_REG(hw, IXGBE_FDIRLEN); ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters ++ * @hw: pointer to hardware structure ++ * @pballoc: which mode to allocate filters with ++ **/ ++s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc) ++{ ++ u32 fdirctrl = 0; ++ u32 pbsize; ++ int i; ++ ++ /* ++ * Before enabling Flow Director, the Rx Packet Buffer size ++ * must be reduced. The new value is the current size minus ++ * flow director memory usage size. ++ */ ++ pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc)); ++ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), ++ (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize)); ++ ++ /* ++ * The defaults in the HW for RX PB 1-7 are not zero and so should be ++ * intialized to zero for non DCB mode otherwise actual total RX PB ++ * would be bigger than programmed and filter space would run into ++ * the PB 0 region. ++ */ ++ for (i = 1; i < 8; i++) ++ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0); ++ ++ /* Send interrupt when 64 filters are left */ ++ fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT; ++ ++ /* Set the maximum length per hash bucket to 0xA filters */ ++ fdirctrl |= 0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT; ++ ++ switch (pballoc) { ++ case IXGBE_FDIR_PBALLOC_64K: ++ /* 8k - 1 signature filters */ ++ fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K; ++ break; ++ case IXGBE_FDIR_PBALLOC_128K: ++ /* 16k - 1 signature filters */ ++ fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K; ++ break; ++ case IXGBE_FDIR_PBALLOC_256K: ++ /* 32k - 1 signature filters */ ++ fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K; ++ break; ++ default: ++ /* bad value */ ++ return IXGBE_ERR_CONFIG; ++ }; ++ ++ /* Move the flexible bytes to use the ethertype - shift 6 words */ ++ fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT); ++ ++ ++ /* Prime the keys for hashing */ ++ IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, ++ IXGBE_HTONL(IXGBE_ATR_BUCKET_HASH_KEY)); ++ IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, ++ IXGBE_HTONL(IXGBE_ATR_SIGNATURE_HASH_KEY)); ++ ++ /* ++ * Poll init-done after we write the register. Estimated times: ++ * 10G: PBALLOC = 11b, timing is 60us ++ * 1G: PBALLOC = 11b, timing is 600us ++ * 100M: PBALLOC = 11b, timing is 6ms ++ * ++ * Multiple these timings by 4 if under full Rx load ++ * ++ * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for ++ * 1 msec per poll time. If we're at line rate and drop to 100M, then ++ * this might not finish in our poll time, but we can live with that ++ * for now. 
++ */ ++ IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); ++ IXGBE_WRITE_FLUSH(hw); ++ for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { ++ if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & ++ IXGBE_FDIRCTRL_INIT_DONE) ++ break; ++ msleep(1); ++ } ++ if (i >= IXGBE_FDIR_INIT_DONE_POLL) ++ hw_dbg(hw, "Flow Director Signature poll time exceeded!\n"); ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters ++ * @hw: pointer to hardware structure ++ * @pballoc: which mode to allocate filters with ++ **/ ++s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc) ++{ ++ u32 fdirctrl = 0; ++ u32 pbsize; ++ int i; ++ ++ /* ++ * Before enabling Flow Director, the Rx Packet Buffer size ++ * must be reduced. The new value is the current size minus ++ * flow director memory usage size. ++ */ ++ ++ pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc)); ++ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), ++ (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize)); ++ ++ /* ++ * The defaults in the HW for RX PB 1-7 are not zero and so should be ++ * intialized to zero for non DCB mode otherwise actual total RX PB ++ * would be bigger than programmed and filter space would run into ++ * the PB 0 region. ++ */ ++ for (i = 1; i < 8; i++) ++ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0); ++ ++ /* Send interrupt when 64 filters are left */ ++ fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT; ++ ++ /* Initialize the drop queue to Rx queue 127 */ ++ fdirctrl |= (127 << IXGBE_FDIRCTRL_DROP_Q_SHIFT); ++ ++ switch (pballoc) { ++ case IXGBE_FDIR_PBALLOC_64K: ++ /* 2k - 1 perfect filters */ ++ fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K; ++ break; ++ case IXGBE_FDIR_PBALLOC_128K: ++ /* 4k - 1 perfect filters */ ++ fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K; ++ break; ++ case IXGBE_FDIR_PBALLOC_256K: ++ /* 8k - 1 perfect filters */ ++ fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K; ++ break; ++ default: ++ /* bad value */ ++ return IXGBE_ERR_CONFIG; ++ }; ++ ++ /* Turn perfect match filtering on */ ++ fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH; ++ fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS; ++ ++ /* Move the flexible bytes to use the ethertype - shift 6 words */ ++ fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT); ++ ++ /* Prime the keys for hashing */ ++ IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, ++ IXGBE_HTONL(IXGBE_ATR_BUCKET_HASH_KEY)); ++ IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, ++ IXGBE_HTONL(IXGBE_ATR_SIGNATURE_HASH_KEY)); ++ ++ /* ++ * Poll init-done after we write the register. Estimated times: ++ * 10G: PBALLOC = 11b, timing is 60us ++ * 1G: PBALLOC = 11b, timing is 600us ++ * 100M: PBALLOC = 11b, timing is 6ms ++ * ++ * Multiple these timings by 4 if under full Rx load ++ * ++ * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for ++ * 1 msec per poll time. If we're at line rate and drop to 100M, then ++ * this might not finish in our poll time, but we can live with that ++ * for now. 
++ */ ++ ++ /* Set the maximum length per hash bucket to 0xA filters */ ++ fdirctrl |= (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT); ++ ++ IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); ++ IXGBE_WRITE_FLUSH(hw); ++ for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { ++ if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & ++ IXGBE_FDIRCTRL_INIT_DONE) ++ break; ++ msleep(1); ++ } ++ if (i >= IXGBE_FDIR_INIT_DONE_POLL) ++ hw_dbg(hw, "Flow Director Perfect poll time exceeded!\n"); ++ ++ return 0; ++} ++ ++ ++/** ++ * ixgbe_atr_compute_hash_82599 - Compute the hashes for SW ATR ++ * @stream: input bitstream to compute the hash on ++ * @key: 32-bit hash key ++ **/ ++u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input, u32 key) ++{ ++ /* ++ * The algorithm is as follows: ++ * Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350 ++ * where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n] ++ * and A[n] x B[n] is bitwise AND between same length strings ++ * ++ * K[n] is 16 bits, defined as: ++ * for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15] ++ * for n modulo 32 < 15, K[n] = ++ * K[(n % 32:0) | (31:31 - (14 - (n % 32)))] ++ * ++ * S[n] is 16 bits, defined as: ++ * for n >= 15, S[n] = S[n:n - 15] ++ * for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))] ++ * ++ * To simplify for programming, the algorithm is implemented ++ * in software this way: ++ * ++ * Key[31:0], Stream[335:0] ++ * ++ * tmp_key[11 * 32 - 1:0] = 11{Key[31:0] = key concatenated 11 times ++ * int_key[350:0] = tmp_key[351:1] ++ * int_stream[365:0] = Stream[14:0] | Stream[335:0] | Stream[335:321] ++ * ++ * hash[15:0] = 0; ++ * for (i = 0; i < 351; i++) { ++ * if (int_key[i]) ++ * hash ^= int_stream[(i + 15):i]; ++ * } ++ */ ++ ++ union { ++ u64 fill[6]; ++ u32 key[11]; ++ u8 key_stream[44]; ++ } tmp_key; ++ ++ u8 *stream = (u8 *)atr_input; ++ u8 int_key[44]; /* upper-most bit unused */ ++ u8 hash_str[46]; /* upper-most 2 bits unused */ ++ u16 hash_result = 0; ++ int i, j, k, h; ++ ++ /* ++ * Initialize the fill member to prevent warnings ++ * on some compilers ++ */ ++ tmp_key.fill[0] = 0; ++ ++ /* First load the temporary key stream */ ++ for (i = 0; i < 6; i++) { ++ u64 fillkey = ((u64)key << 32) | key; ++ tmp_key.fill[i] = fillkey; ++ } ++ ++ /* ++ * Set the interim key for the hashing. Bit 352 is unused, so we must ++ * shift and compensate when building the key. ++ */ ++ ++ int_key[0] = tmp_key.key_stream[0] >> 1; ++ for (i = 1, j = 0; i < 44; i++) { ++ unsigned int this_key = tmp_key.key_stream[j] << 7; ++ j++; ++ int_key[i] = (u8)(this_key | (tmp_key.key_stream[j] >> 1)); ++ } ++ ++ /* ++ * Set the interim bit string for the hashing. Bits 368 and 367 are ++ * unused, so shift and compensate when building the string. ++ */ ++ hash_str[0] = (stream[40] & 0x7f) >> 1; ++ for (i = 1, j = 40; i < 46; i++) { ++ unsigned int this_str = stream[j] << 7; ++ j++; ++ if (j > 41) ++ j = 0; ++ hash_str[i] = (u8)(this_str | (stream[j] >> 1)); ++ } ++ ++ /* ++ * Now compute the hash. i is the index into hash_str, j is into our ++ * key stream, k is counting the number of bits, and h interates within ++ * each byte. ++ */ ++ for (i = 45, j = 43, k = 0; k < 351 && i >= 2 && j >= 0; i--, j--) { ++ for (h = 0; h < 8 && k < 351; h++, k++) { ++ if (int_key[j] & (1 << h)) { ++ /* ++ * Key bit is set, XOR in the current 16-bit ++ * string. 
Example of processing: ++ * h = 0, ++ * tmp = (hash_str[i - 2] & 0 << 16) | ++ * (hash_str[i - 1] & 0xff << 8) | ++ * (hash_str[i] & 0xff >> 0) ++ * So tmp = hash_str[15 + k:k], since the ++ * i + 2 clause rolls off the 16-bit value ++ * h = 7, ++ * tmp = (hash_str[i - 2] & 0x7f << 9) | ++ * (hash_str[i - 1] & 0xff << 1) | ++ * (hash_str[i] & 0x80 >> 7) ++ */ ++ int tmp = (hash_str[i] >> h); ++ tmp |= (hash_str[i - 1] << (8 - h)); ++ tmp |= (int)(hash_str[i - 2] & ((1 << h) - 1)) ++ << (16 - h); ++ hash_result ^= (u16)tmp; ++ } ++ } ++ } ++ ++ return hash_result; ++} ++ ++/** ++ * ixgbe_atr_set_vlan_id_82599 - Sets the VLAN id in the ATR input stream ++ * @input: input stream to modify ++ * @vlan: the VLAN id to load ++ **/ ++s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input, u16 vlan) ++{ ++ input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] = vlan >> 8; ++ input->byte_stream[IXGBE_ATR_VLAN_OFFSET] = vlan & 0xff; ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_atr_set_src_ipv4_82599 - Sets the source IPv4 address ++ * @input: input stream to modify ++ * @src_addr: the IP address to load ++ **/ ++s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input, u32 src_addr) ++{ ++ input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] = src_addr >> 24; ++ input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] = ++ (src_addr >> 16) & 0xff; ++ input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] = ++ (src_addr >> 8) & 0xff; ++ input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET] = src_addr & 0xff; ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_atr_set_dst_ipv4_82599 - Sets the destination IPv4 address ++ * @input: input stream to modify ++ * @dst_addr: the IP address to load ++ **/ ++s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 dst_addr) ++{ ++ input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] = dst_addr >> 24; ++ input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] = ++ (dst_addr >> 16) & 0xff; ++ input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] = ++ (dst_addr >> 8) & 0xff; ++ input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET] = dst_addr & 0xff; ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_atr_set_src_ipv6_82599 - Sets the source IPv6 address ++ * @input: input stream to modify ++ * @src_addr_1: the first 4 bytes of the IP address to load ++ * @src_addr_2: the second 4 bytes of the IP address to load ++ * @src_addr_3: the third 4 bytes of the IP address to load ++ * @src_addr_4: the fourth 4 bytes of the IP address to load ++ **/ ++s32 ixgbe_atr_set_src_ipv6_82599(struct ixgbe_atr_input *input, ++ u32 src_addr_1, u32 src_addr_2, ++ u32 src_addr_3, u32 src_addr_4) ++{ ++ input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET] = src_addr_4 & 0xff; ++ input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] = ++ (src_addr_4 >> 8) & 0xff; ++ input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] = ++ (src_addr_4 >> 16) & 0xff; ++ input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] = src_addr_4 >> 24; ++ ++ input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4] = src_addr_3 & 0xff; ++ input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] = ++ (src_addr_3 >> 8) & 0xff; ++ input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] = ++ (src_addr_3 >> 16) & 0xff; ++ input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] = src_addr_3 >> 24; ++ ++ input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8] = src_addr_2 & 0xff; ++ input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] = ++ (src_addr_2 >> 8) & 0xff; ++ input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] = ++ (src_addr_2 >> 16) & 0xff; ++ input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] = src_addr_2 >> 24; ++ ++ 
input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12] = src_addr_1 & 0xff; ++ input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] = ++ (src_addr_1 >> 8) & 0xff; ++ input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] = ++ (src_addr_1 >> 16) & 0xff; ++ input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] = src_addr_1 >> 24; ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_atr_set_dst_ipv6_82599 - Sets the destination IPv6 address ++ * @input: input stream to modify ++ * @dst_addr_1: the first 4 bytes of the IP address to load ++ * @dst_addr_2: the second 4 bytes of the IP address to load ++ * @dst_addr_3: the third 4 bytes of the IP address to load ++ * @dst_addr_4: the fourth 4 bytes of the IP address to load ++ **/ ++s32 ixgbe_atr_set_dst_ipv6_82599(struct ixgbe_atr_input *input, ++ u32 dst_addr_1, u32 dst_addr_2, ++ u32 dst_addr_3, u32 dst_addr_4) ++{ ++ input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET] = dst_addr_4 & 0xff; ++ input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] = ++ (dst_addr_4 >> 8) & 0xff; ++ input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 2] = ++ (dst_addr_4 >> 16) & 0xff; ++ input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 3] = dst_addr_4 >> 24; ++ ++ input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 4] = dst_addr_3 & 0xff; ++ input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 5] = ++ (dst_addr_3 >> 8) & 0xff; ++ input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 6] = ++ (dst_addr_3 >> 16) & 0xff; ++ input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 7] = dst_addr_3 >> 24; ++ ++ input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 8] = dst_addr_2 & 0xff; ++ input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 9] = ++ (dst_addr_2 >> 8) & 0xff; ++ input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 10] = ++ (dst_addr_2 >> 16) & 0xff; ++ input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 11] = dst_addr_2 >> 24; ++ ++ input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 12] = dst_addr_1 & 0xff; ++ input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 13] = ++ (dst_addr_1 >> 8) & 0xff; ++ input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 14] = ++ (dst_addr_1 >> 16) & 0xff; ++ input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 15] = dst_addr_1 >> 24; ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_atr_set_src_port_82599 - Sets the source port ++ * @input: input stream to modify ++ * @src_port: the source port to load ++ **/ ++s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input, u16 src_port) ++{ ++ input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1] = src_port >> 8; ++ input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] = src_port & 0xff; ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_atr_set_dst_port_82599 - Sets the destination port ++ * @input: input stream to modify ++ * @dst_port: the destination port to load ++ **/ ++s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input, u16 dst_port) ++{ ++ input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1] = dst_port >> 8; ++ input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] = dst_port & 0xff; ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_atr_set_flex_byte_82599 - Sets the flexible bytes ++ * @input: input stream to modify ++ * @flex_bytes: the flexible bytes to load ++ **/ ++s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, u16 flex_byte) ++{ ++ input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] = flex_byte >> 8; ++ input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET] = flex_byte & 0xff; ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_atr_set_vm_pool_82599 - Sets the Virtual Machine pool ++ * @input: input stream to modify ++ * @vm_pool: the Virtual Machine pool to load ++ **/ ++s32 ixgbe_atr_set_vm_pool_82599(struct 
ixgbe_atr_input *input, u8 vm_pool) ++{ ++ input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET] = vm_pool; ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_atr_set_l4type_82599 - Sets the layer 4 packet type ++ * @input: input stream to modify ++ * @l4type: the layer 4 type value to load ++ **/ ++s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, u8 l4type) ++{ ++ input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET] = l4type; ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_atr_get_vlan_id_82599 - Gets the VLAN id from the ATR input stream ++ * @input: input stream to search ++ * @vlan: the VLAN id to load ++ **/ ++s32 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input, u16 *vlan) ++{ ++ *vlan = input->byte_stream[IXGBE_ATR_VLAN_OFFSET]; ++ *vlan |= input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] << 8; ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_atr_get_src_ipv4_82599 - Gets the source IPv4 address ++ * @input: input stream to search ++ * @src_addr: the IP address to load ++ **/ ++s32 ixgbe_atr_get_src_ipv4_82599(struct ixgbe_atr_input *input, u32 *src_addr) ++{ ++ *src_addr = input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET]; ++ *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] << 8; ++ *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] << 16; ++ *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] << 24; ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_atr_get_dst_ipv4_82599 - Gets the destination IPv4 address ++ * @input: input stream to search ++ * @dst_addr: the IP address to load ++ **/ ++s32 ixgbe_atr_get_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 *dst_addr) ++{ ++ *dst_addr = input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET]; ++ *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] << 8; ++ *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] << 16; ++ *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] << 24; ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_atr_get_src_ipv6_82599 - Gets the source IPv6 address ++ * @input: input stream to search ++ * @src_addr_1: the first 4 bytes of the IP address to load ++ * @src_addr_2: the second 4 bytes of the IP address to load ++ * @src_addr_3: the third 4 bytes of the IP address to load ++ * @src_addr_4: the fourth 4 bytes of the IP address to load ++ **/ ++s32 ixgbe_atr_get_src_ipv6_82599(struct ixgbe_atr_input *input, ++ u32 *src_addr_1, u32 *src_addr_2, ++ u32 *src_addr_3, u32 *src_addr_4) ++{ ++ *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12]; ++ *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] << 8; ++ *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] << 16; ++ *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] << 24; ++ ++ *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8]; ++ *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] << 8; ++ *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] << 16; ++ *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] << 24; ++ ++ *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4]; ++ *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] << 8; ++ *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] << 16; ++ *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] << 24; ++ ++ *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET]; ++ *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] << 8; ++ *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] << 16; ++ *src_addr_4 = 
input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] << 24; ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_atr_get_dst_ipv6_82599 - Gets the destination IPv6 address ++ * @input: input stream to search ++ * @dst_addr_1: the first 4 bytes of the IP address to load ++ * @dst_addr_2: the second 4 bytes of the IP address to load ++ * @dst_addr_3: the third 4 bytes of the IP address to load ++ * @dst_addr_4: the fourth 4 bytes of the IP address to load ++ **/ ++s32 ixgbe_atr_get_dst_ipv6_82599(struct ixgbe_atr_input *input, ++ u32 *dst_addr_1, u32 *dst_addr_2, ++ u32 *dst_addr_3, u32 *dst_addr_4) ++{ ++ *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 12]; ++ *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 13] << 8; ++ *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 14] << 16; ++ *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 15] << 24; ++ ++ *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 8]; ++ *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 9] << 8; ++ *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 10] << 16; ++ *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 11] << 24; ++ ++ *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 4]; ++ *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 5] << 8; ++ *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 6] << 16; ++ *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 7] << 24; ++ ++ *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET]; ++ *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] << 8; ++ *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 2] << 16; ++ *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 3] << 24; ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_atr_get_src_port_82599 - Gets the source port ++ * @input: input stream to modify ++ * @src_port: the source port to load ++ * ++ * Even though the input is given in big-endian, the FDIRPORT registers ++ * expect the ports to be programmed in little-endian. Hence the need to swap ++ * endianness when retrieving the data. This can be confusing since the ++ * internal hash engine expects it to be big-endian. ++ **/ ++s32 ixgbe_atr_get_src_port_82599(struct ixgbe_atr_input *input, u16 *src_port) ++{ ++ *src_port = input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] << 8; ++ *src_port |= input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1]; ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_atr_get_dst_port_82599 - Gets the destination port ++ * @input: input stream to modify ++ * @dst_port: the destination port to load ++ * ++ * Even though the input is given in big-endian, the FDIRPORT registers ++ * expect the ports to be programmed in little-endian. Hence the need to swap ++ * endianness when retrieving the data. This can be confusing since the ++ * internal hash engine expects it to be big-endian. 
++ **/ ++s32 ixgbe_atr_get_dst_port_82599(struct ixgbe_atr_input *input, u16 *dst_port) ++{ ++ *dst_port = input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] << 8; ++ *dst_port |= input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1]; ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_atr_get_flex_byte_82599 - Gets the flexible bytes ++ * @input: input stream to modify ++ * @flex_bytes: the flexible bytes to load ++ **/ ++s32 ixgbe_atr_get_flex_byte_82599(struct ixgbe_atr_input *input, u16 *flex_byte) ++{ ++ *flex_byte = input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET]; ++ *flex_byte |= input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] << 8; ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_atr_get_vm_pool_82599 - Gets the Virtual Machine pool ++ * @input: input stream to modify ++ * @vm_pool: the Virtual Machine pool to load ++ **/ ++s32 ixgbe_atr_get_vm_pool_82599(struct ixgbe_atr_input *input, u8 *vm_pool) ++{ ++ *vm_pool = input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET]; ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_atr_get_l4type_82599 - Gets the layer 4 packet type ++ * @input: input stream to modify ++ * @l4type: the layer 4 type value to load ++ **/ ++s32 ixgbe_atr_get_l4type_82599(struct ixgbe_atr_input *input, u8 *l4type) ++{ ++ *l4type = input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET]; ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter ++ * @hw: pointer to hardware structure ++ * @stream: input bitstream ++ * @queue: queue index to direct traffic to ++ **/ ++s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, ++ struct ixgbe_atr_input *input, ++ u8 queue) ++{ ++ u64 fdirhashcmd; ++ u64 fdircmd; ++ u32 fdirhash; ++ u16 bucket_hash, sig_hash; ++ u8 l4type; ++ ++ bucket_hash = ixgbe_atr_compute_hash_82599(input, ++ IXGBE_ATR_BUCKET_HASH_KEY); ++ ++ /* bucket_hash is only 15 bits */ ++ bucket_hash &= IXGBE_ATR_HASH_MASK; ++ ++ sig_hash = ixgbe_atr_compute_hash_82599(input, ++ IXGBE_ATR_SIGNATURE_HASH_KEY); ++ ++ /* Get the l4type in order to program FDIRCMD properly */ ++ /* lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 */ ++ ixgbe_atr_get_l4type_82599(input, &l4type); ++ ++ /* ++ * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits ++ * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH. 
++ */ ++ fdirhash = sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash; ++ ++ fdircmd = (IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | ++ IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN); ++ ++ switch (l4type & IXGBE_ATR_L4TYPE_MASK) { ++ case IXGBE_ATR_L4TYPE_TCP: ++ fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP; ++ break; ++ case IXGBE_ATR_L4TYPE_UDP: ++ fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP; ++ break; ++ case IXGBE_ATR_L4TYPE_SCTP: ++ fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP; ++ break; ++ default: ++ hw_dbg(hw, " Error on l4type input\n"); ++ return IXGBE_ERR_CONFIG; ++ } ++ ++ if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK) ++ fdircmd |= IXGBE_FDIRCMD_IPV6; ++ ++ fdircmd |= ((u64)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT); ++ fdirhashcmd = ((fdircmd << 32) | fdirhash); ++ ++ hw_dbg(hw, "Tx Queue=%x hash=%x\n", queue, fdirhash & 0x7FFF7FFF); ++ IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd); ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter ++ * @hw: pointer to hardware structure ++ * @input: input bitstream ++ * @input_masks: masks for the input bitstream ++ * @soft_id: software index for the filters ++ * @queue: queue index to direct traffic to ++ * ++ * Note that the caller to this function must lock before calling, since the ++ * hardware writes must be protected from one another. ++ **/ ++s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw, ++ struct ixgbe_atr_input *input, ++ struct ixgbe_atr_input_masks *input_masks, ++ u16 soft_id, u8 queue) ++{ ++ u32 fdircmd = 0; ++ u32 fdirhash; ++ u32 src_ipv4 = 0, dst_ipv4 = 0; ++ u32 src_ipv6_1, src_ipv6_2, src_ipv6_3, src_ipv6_4; ++ u16 src_port, dst_port, vlan_id, flex_bytes; ++ u16 bucket_hash; ++ u8 l4type; ++ u8 fdirm = 0; ++ ++ /* Get our input values */ ++ ixgbe_atr_get_l4type_82599(input, &l4type); ++ ++ /* ++ * Check l4type formatting, and bail out before we touch the hardware ++ * if there's a configuration issue ++ */ ++ switch (l4type & IXGBE_ATR_L4TYPE_MASK) { ++ case IXGBE_ATR_L4TYPE_TCP: ++ fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP; ++ break; ++ case IXGBE_ATR_L4TYPE_UDP: ++ fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP; ++ break; ++ case IXGBE_ATR_L4TYPE_SCTP: ++ fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP; ++ break; ++ default: ++ hw_dbg(hw, " Error on l4type input\n"); ++ return IXGBE_ERR_CONFIG; ++ } ++ ++ bucket_hash = ixgbe_atr_compute_hash_82599(input, ++ IXGBE_ATR_BUCKET_HASH_KEY); ++ ++ /* bucket_hash is only 15 bits */ ++ bucket_hash &= IXGBE_ATR_HASH_MASK; ++ ++ ixgbe_atr_get_vlan_id_82599(input, &vlan_id); ++ ixgbe_atr_get_src_port_82599(input, &src_port); ++ ixgbe_atr_get_dst_port_82599(input, &dst_port); ++ ixgbe_atr_get_flex_byte_82599(input, &flex_bytes); ++ ++ fdirhash = soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash; ++ ++ /* Now figure out if we're IPv4 or IPv6 */ ++ if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK) { ++ /* IPv6 */ ++ ixgbe_atr_get_src_ipv6_82599(input, &src_ipv6_1, &src_ipv6_2, ++ &src_ipv6_3, &src_ipv6_4); ++ ++ IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), src_ipv6_1); ++ IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), src_ipv6_2); ++ IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), src_ipv6_3); ++ /* The last 4 bytes is the same register as IPv4 */ ++ IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv6_4); ++ ++ fdircmd |= IXGBE_FDIRCMD_IPV6; ++ fdircmd |= IXGBE_FDIRCMD_IPv6DMATCH; ++ } else { ++ /* IPv4 */ ++ ixgbe_atr_get_src_ipv4_82599(input, &src_ipv4); ++ IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv4); ++ } ++ ++ ixgbe_atr_get_dst_ipv4_82599(input, &dst_ipv4); ++ IXGBE_WRITE_REG(hw, 
IXGBE_FDIRIPDA, dst_ipv4); ++ ++ IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, (vlan_id | ++ (flex_bytes << IXGBE_FDIRVLAN_FLEX_SHIFT))); ++ IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, (src_port | ++ (dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT))); ++ ++ /* ++ * Program the relevant mask registers. If src/dst_port or src/dst_addr ++ * are zero, then assume a full mask for that field. Also assume that ++ * a VLAN of 0 is unspecified, so mask that out as well. L4type ++ * cannot be masked out in this implementation. ++ * ++ * This also assumes IPv4 only. IPv6 masking isn't supported at this ++ * point in time. ++ */ ++ if (src_ipv4 == 0) ++ IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, 0xffffffff); ++ else ++ IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, input_masks->src_ip_mask); ++ ++ if (dst_ipv4 == 0) ++ IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, 0xffffffff); ++ else ++ IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, input_masks->dst_ip_mask); ++ ++ switch (l4type & IXGBE_ATR_L4TYPE_MASK) { ++ case IXGBE_ATR_L4TYPE_TCP: ++ if (src_port == 0) ++ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xffff); ++ else ++ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ++ input_masks->src_port_mask); ++ ++ if (dst_port == 0) ++ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ++ (IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) | ++ (0xffff << 16))); ++ else ++ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ++ (IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) | ++ (input_masks->dst_port_mask << 16))); ++ break; ++ case IXGBE_ATR_L4TYPE_UDP: ++ if (src_port == 0) ++ IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xffff); ++ else ++ IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ++ input_masks->src_port_mask); ++ ++ if (dst_port == 0) ++ IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ++ (IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) | ++ (0xffff << 16))); ++ else ++ IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ++ (IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) | ++ (input_masks->src_port_mask << 16))); ++ break; ++ default: ++ /* this already would have failed above */ ++ break; ++ } ++ ++ /* Program the last mask register, FDIRM */ ++ if (input_masks->vlan_id_mask || !vlan_id) ++ /* Mask both VLAN and VLANP - bits 0 and 1 */ ++ fdirm |= (IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP); ++ ++ if (input_masks->data_mask || !flex_bytes) ++ /* Flex bytes need masking, so mask the whole thing - bit 4 */ ++ fdirm |= IXGBE_FDIRM_FLEX; ++ ++ /* Now mask VM pool and destination IPv6 - bits 5 and 2 */ ++ fdirm |= (IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6); ++ ++ IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm); ++ ++ fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW; ++ fdircmd |= IXGBE_FDIRCMD_FILTER_UPDATE; ++ fdircmd |= IXGBE_FDIRCMD_LAST; ++ fdircmd |= IXGBE_FDIRCMD_QUEUE_EN; ++ fdircmd |= queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; ++ ++ IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); ++ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd); ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register ++ * @hw: pointer to hardware structure ++ * @reg: analog register to read ++ * @val: read value ++ * ++ * Performs read operation to Omer analog register specified. 
++ **/ ++s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val) ++{ ++ u32 core_ctl; ++ ++ IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD | ++ (reg << 8)); ++ IXGBE_WRITE_FLUSH(hw); ++ udelay(10); ++ core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL); ++ *val = (u8)core_ctl; ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register ++ * @hw: pointer to hardware structure ++ * @reg: atlas register to write ++ * @val: value to write ++ * ++ * Performs write operation to Omer analog register specified. ++ **/ ++s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val) ++{ ++ u32 core_ctl; ++ ++ core_ctl = (reg << 8) | val; ++ IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl); ++ IXGBE_WRITE_FLUSH(hw); ++ udelay(10); ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_start_hw_rev_1_82599 - Prepare hardware for Tx/Rx ++ * @hw: pointer to hardware structure ++ * ++ * Starts the hardware using the generic start_hw function. ++ * Then performs revision-specific operations: ++ * Clears the rate limiter registers. ++ **/ ++s32 ixgbe_start_hw_rev_1_82599(struct ixgbe_hw *hw) ++{ ++ u32 i; ++ u32 regval; ++ s32 ret_val = 0; ++ ++ ret_val = ixgbe_start_hw_generic(hw); ++ ++ /* Clear the rate limiters */ ++ for (i = 0; i < hw->mac.max_tx_queues; i++) { ++ IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i); ++ IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0); ++ } ++ IXGBE_WRITE_FLUSH(hw); ++ ++ /* Disable relaxed ordering */ ++ for (i = 0; i < hw->mac.max_tx_queues; i++) { ++ regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i)); ++ regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; ++ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval); ++ } ++ ++ for (i = 0; i < hw->mac.max_rx_queues; i++) { ++ regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); ++ regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN | ++ IXGBE_DCA_RXCTRL_DESC_HSRO_EN); ++ IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); ++ } ++ ++ /* We need to run link autotry after the driver loads */ ++ hw->mac.autotry_restart = true; ++ ++ if (ret_val == 0) ++ ret_val = ixgbe_verify_fw_version_82599(hw); ++ return ret_val; ++} ++ ++/** ++ * ixgbe_identify_phy_82599 - Get physical layer module ++ * @hw: pointer to hardware structure ++ * ++ * Determines the physical layer module found on the current adapter. ++ * If PHY already detected, maintains current PHY type in hw struct, ++ * otherwise executes the PHY detection routine. ++ **/ ++s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw) ++{ ++ s32 status = IXGBE_ERR_PHY_ADDR_INVALID; ++ ++ /* Detect PHY if not unknown - returns success if already detected. */ ++ status = ixgbe_identify_phy_generic(hw); ++ if (status != 0) ++ status = ixgbe_identify_sfp_module_generic(hw); ++ /* Set PHY type none if no PHY detected */ ++ if (hw->phy.type == ixgbe_phy_unknown) { ++ hw->phy.type = ixgbe_phy_none; ++ status = 0; ++ } ++ ++ /* Return error if SFP module has been detected but is not supported */ ++ if (hw->phy.type == ixgbe_phy_sfp_unsupported) ++ status = IXGBE_ERR_SFP_NOT_SUPPORTED; ++ ++ return status; ++} ++ ++/** ++ * ixgbe_get_supported_physical_layer_82599 - Returns physical layer type ++ * @hw: pointer to hardware structure ++ * ++ * Determines physical layer capabilities of the current configuration. 
++ **/ ++u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw) ++{ ++ u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; ++ u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); ++ u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); ++ u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK; ++ u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK; ++ u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; ++ u16 ext_ability = 0; ++ u8 comp_codes_10g = 0; ++ u8 comp_codes_1g = 0; ++ ++ hw->phy.ops.identify(hw); ++ ++ if (hw->phy.type == ixgbe_phy_tn || ++ hw->phy.type == ixgbe_phy_aq || ++ hw->phy.type == ixgbe_phy_cu_unknown) { ++ hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY, ++ IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability); ++ if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY) ++ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; ++ if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY) ++ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; ++ if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY) ++ physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; ++ goto out; ++ } ++ ++ switch (autoc & IXGBE_AUTOC_LMS_MASK) { ++ case IXGBE_AUTOC_LMS_1G_AN: ++ case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: ++ if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) { ++ physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX | ++ IXGBE_PHYSICAL_LAYER_1000BASE_BX; ++ goto out; ++ } else ++ /* SFI mode so read SFP module */ ++ goto sfp_check; ++ break; ++ case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: ++ if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4) ++ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4; ++ else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4) ++ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4; ++ else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI) ++ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI; ++ goto out; ++ break; ++ case IXGBE_AUTOC_LMS_10G_SERIAL: ++ if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) { ++ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR; ++ goto out; ++ } else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) ++ goto sfp_check; ++ break; ++ case IXGBE_AUTOC_LMS_KX4_KX_KR: ++ case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN: ++ if (autoc & IXGBE_AUTOC_KX_SUPP) ++ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX; ++ if (autoc & IXGBE_AUTOC_KX4_SUPP) ++ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4; ++ if (autoc & IXGBE_AUTOC_KR_SUPP) ++ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR; ++ goto out; ++ break; ++ default: ++ goto out; ++ break; ++ } ++ ++sfp_check: ++ /* SFP check must be done last since DA modules are sometimes used to ++ * test KR mode - we need to id KR mode correctly before SFP module. 
++ * Call identify_sfp because the pluggable module may have changed */ ++ hw->phy.ops.identify_sfp(hw); ++ if (hw->phy.sfp_type == ixgbe_sfp_type_not_present) ++ goto out; ++ ++ switch (hw->phy.type) { ++ case ixgbe_phy_sfp_passive_tyco: ++ case ixgbe_phy_sfp_passive_unknown: ++ physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; ++ break; ++ case ixgbe_phy_sfp_ftl_active: ++ case ixgbe_phy_sfp_active_unknown: ++ physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA; ++ break; ++ case ixgbe_phy_sfp_avago: ++ case ixgbe_phy_sfp_ftl: ++ case ixgbe_phy_sfp_intel: ++ case ixgbe_phy_sfp_unknown: ++ hw->phy.ops.read_i2c_eeprom(hw, ++ IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g); ++ hw->phy.ops.read_i2c_eeprom(hw, ++ IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g); ++ if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) ++ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; ++ else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) ++ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; ++ else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) ++ physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T; ++ break; ++ default: ++ break; ++ } ++ ++out: ++ return physical_layer; ++} ++ ++/** ++ * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599 ++ * @hw: pointer to hardware structure ++ * @regval: register value to write to RXCTRL ++ * ++ * Enables the Rx DMA unit for 82599 ++ **/ ++s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval) ++{ ++#define IXGBE_MAX_SECRX_POLL 30 ++ int i; ++ int secrxreg; ++ ++ /* ++ * Workaround for 82599 silicon errata when enabling the Rx datapath. ++ * If traffic is incoming before we enable the Rx unit, it could hang ++ * the Rx DMA unit. Therefore, make sure the security engine is ++ * completely disabled prior to enabling the Rx unit. ++ */ ++ secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); ++ secrxreg |= IXGBE_SECRXCTRL_RX_DIS; ++ IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); ++ for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) { ++ secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT); ++ if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY) ++ break; ++ else ++ /* Use interrupt-safe sleep just in case */ ++ udelay(10); ++ } ++ ++ /* For informational purposes only */ ++ if (i >= IXGBE_MAX_SECRX_POLL) ++ hw_dbg(hw, "Rx unit being enabled before security " ++ "path fully disabled. Continuing with init.\n"); ++ ++ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval); ++ secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); ++ secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS; ++ IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); ++ IXGBE_WRITE_FLUSH(hw); ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_get_device_caps_82599 - Get additional device capabilities ++ * @hw: pointer to hardware structure ++ * @device_caps: the EEPROM word with the extra device capabilities ++ * ++ * This function will read the EEPROM location for the device capabilities, ++ * and return the word through device_caps. ++ **/ ++s32 ixgbe_get_device_caps_82599(struct ixgbe_hw *hw, u16 *device_caps) ++{ ++ hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps); ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_verify_fw_version_82599 - verify fw version for 82599 ++ * @hw: pointer to hardware structure ++ * ++ * Verifies that installed the firmware version is 0.6 or higher ++ * for SFI devices. All 82599 SFI devices should have version 0.6 or higher. ++ * ++ * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or ++ * if the FW version is not supported. 
++ **/ ++static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw) ++{ ++ s32 status = IXGBE_ERR_EEPROM_VERSION; ++ u16 fw_offset, fw_ptp_cfg_offset; ++ u16 fw_version = 0; ++ ++ /* firmware check is only necessary for SFI devices */ ++ if (hw->phy.media_type != ixgbe_media_type_fiber) { ++ status = 0; ++ goto fw_version_out; ++ } ++ ++ /* get the offset to the Firmware Module block */ ++ hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset); ++ ++ if ((fw_offset == 0) || (fw_offset == 0xFFFF)) ++ goto fw_version_out; ++ ++ /* get the offset to the Pass Through Patch Configuration block */ ++ hw->eeprom.ops.read(hw, (fw_offset + ++ IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR), ++ &fw_ptp_cfg_offset); ++ ++ if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF)) ++ goto fw_version_out; ++ ++ /* get the firmware version */ ++ hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset + ++ IXGBE_FW_PATCH_VERSION_4), ++ &fw_version); ++ ++ if (fw_version > 0x5) ++ status = 0; ++ ++fw_version_out: ++ return status; ++} ++/** ++ * ixgbe_enable_relaxed_ordering_82599 - Enable relaxed ordering ++ * @hw: pointer to hardware structure ++ * ++ **/ ++void ixgbe_enable_relaxed_ordering_82599(struct ixgbe_hw *hw) ++{ ++ u32 regval; ++ u32 i; ++ ++ /* Enable relaxed ordering */ ++ for (i = 0; i < hw->mac.max_tx_queues; i++) { ++ regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i)); ++ regval |= IXGBE_DCA_TXCTRL_TX_WB_RO_EN; ++ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval); ++ } ++ ++ for (i = 0; i < hw->mac.max_rx_queues; i++) { ++ regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); ++ regval |= (IXGBE_DCA_RXCTRL_DESC_WRO_EN | ++ IXGBE_DCA_RXCTRL_DESC_HSRO_EN); ++ IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); ++ } ++ ++} +diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_api.c linux-2.6.22-50/drivers/net/ixgbe/ixgbe_api.c +--- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_api.c 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_api.c 2010-08-25 17:56:26.000000000 -0400 +@@ -0,0 +1,1050 @@ ++/******************************************************************************* ++ ++ Intel 10 Gigabit PCI Express Linux driver ++ Copyright(c) 1999 - 2010 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ You should have received a copy of the GNU General Public License along with ++ this program; if not, write to the Free Software Foundation, Inc., ++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#include "ixgbe_api.h" ++#include "ixgbe_common.h" ++ ++extern s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw); ++extern s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw); ++ ++/** ++ * ixgbe_init_shared_code - Initialize the shared code ++ * @hw: pointer to hardware structure ++ * ++ * This will assign function pointers and assign the MAC type and PHY code. ++ * Does not touch the hardware. This function must be called prior to any ++ * other function in the shared code. The ixgbe_hw structure should be ++ * memset to 0 prior to calling this function. The following fields in ++ * hw structure should be filled in prior to calling this function: ++ * hw_addr, back, device_id, vendor_id, subsystem_device_id, ++ * subsystem_vendor_id, and revision_id ++ **/ ++s32 ixgbe_init_shared_code(struct ixgbe_hw *hw) ++{ ++ s32 status; ++ ++ /* ++ * Set the mac type ++ */ ++ ixgbe_set_mac_type(hw); ++ ++ switch (hw->mac.type) { ++ case ixgbe_mac_82598EB: ++ status = ixgbe_init_ops_82598(hw); ++ break; ++ case ixgbe_mac_82599EB: ++ status = ixgbe_init_ops_82599(hw); ++ break; ++ default: ++ status = IXGBE_ERR_DEVICE_NOT_SUPPORTED; ++ break; ++ } ++ ++ return status; ++} ++ ++/** ++ * ixgbe_set_mac_type - Sets MAC type ++ * @hw: pointer to the HW structure ++ * ++ * This function sets the mac type of the adapter based on the ++ * vendor ID and device ID stored in the hw structure. ++ **/ ++s32 ixgbe_set_mac_type(struct ixgbe_hw *hw) ++{ ++ s32 ret_val = 0; ++ ++ if (hw->vendor_id == IXGBE_INTEL_VENDOR_ID) { ++ switch (hw->device_id) { ++ case IXGBE_DEV_ID_82598: ++ case IXGBE_DEV_ID_82598_BX: ++ case IXGBE_DEV_ID_82598AF_SINGLE_PORT: ++ case IXGBE_DEV_ID_82598AF_DUAL_PORT: ++ case IXGBE_DEV_ID_82598AT: ++ case IXGBE_DEV_ID_82598AT2: ++ case IXGBE_DEV_ID_82598EB_CX4: ++ case IXGBE_DEV_ID_82598_CX4_DUAL_PORT: ++ case IXGBE_DEV_ID_82598_DA_DUAL_PORT: ++ case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM: ++ case IXGBE_DEV_ID_82598EB_XF_LR: ++ case IXGBE_DEV_ID_82598EB_SFP_LOM: ++ hw->mac.type = ixgbe_mac_82598EB; ++ break; ++ case IXGBE_DEV_ID_82599_KX4: ++ case IXGBE_DEV_ID_82599_KX4_MEZZ: ++ case IXGBE_DEV_ID_82599_XAUI_LOM: ++ case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: ++ case IXGBE_DEV_ID_82599_KR: ++ case IXGBE_DEV_ID_82599_SFP: ++ case IXGBE_DEV_ID_82599_SFP_EM: ++ case IXGBE_DEV_ID_82599_CX4: ++ case IXGBE_DEV_ID_82599_T3_LOM: ++ hw->mac.type = ixgbe_mac_82599EB; ++ break; ++ default: ++ ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED; ++ break; ++ } ++ } else { ++ ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED; ++ } ++ ++ hw_dbg(hw, "ixgbe_set_mac_type found mac: %d, returns: %d\n", ++ hw->mac.type, ret_val); ++ return ret_val; ++} ++ ++/** ++ * ixgbe_init_hw - Initialize the hardware ++ * @hw: pointer to hardware structure ++ * ++ * Initialize the hardware by resetting and then starting the hardware ++ **/ ++s32 ixgbe_init_hw(struct ixgbe_hw *hw) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.init_hw, (hw), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_reset_hw - Performs a hardware reset ++ * @hw: pointer to hardware structure ++ * ++ * Resets the hardware by resetting the transmit and receive units, masks and ++ * clears all interrupts, performs a PHY reset, and performs a MAC reset ++ **/ ++s32 ixgbe_reset_hw(struct ixgbe_hw *hw) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.reset_hw, (hw), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_start_hw - Prepares hardware for Rx/Tx ++ * @hw: 
pointer to hardware structure ++ * ++ * Starts the hardware by filling the bus info structure and media type, ++ * clears all on chip counters, initializes receive address registers, ++ * multicast table, VLAN filter table, calls routine to setup link and ++ * flow control settings, and leaves transmit and receive units disabled ++ * and uninitialized. ++ **/ ++s32 ixgbe_start_hw(struct ixgbe_hw *hw) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.start_hw, (hw), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_enable_relaxed_ordering - Enables tx relaxed ordering, ++ * which is disabled by default in ixgbe_start_hw(); ++ * ++ * @hw: pointer to hardware structure ++ * ++ * Enable relaxed ordering; ++ **/ ++void ixgbe_enable_relaxed_ordering(struct ixgbe_hw *hw) ++{ ++ if (hw->mac.ops.enable_relaxed_ordering) ++ hw->mac.ops.enable_relaxed_ordering(hw); ++} ++ ++/** ++ * ixgbe_clear_hw_cntrs - Clear hardware counters ++ * @hw: pointer to hardware structure ++ * ++ * Clears all hardware statistics counters by reading them from the hardware ++ * Statistics counters are clear on read. ++ **/ ++s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.clear_hw_cntrs, (hw), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_get_media_type - Get media type ++ * @hw: pointer to hardware structure ++ * ++ * Returns the media type (fiber, copper, backplane) ++ **/ ++enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.get_media_type, (hw), ++ ixgbe_media_type_unknown); ++} ++ ++/** ++ * ixgbe_get_mac_addr - Get MAC address ++ * @hw: pointer to hardware structure ++ * @mac_addr: Adapter MAC address ++ * ++ * Reads the adapter's MAC address from the first Receive Address Register ++ * (RAR0) A reset of the adapter must have been performed prior to calling ++ * this function in order for the MAC address to have been loaded from the ++ * EEPROM into RAR0 ++ **/ ++s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.get_mac_addr, ++ (hw, mac_addr), IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_get_san_mac_addr - Get SAN MAC address ++ * @hw: pointer to hardware structure ++ * @san_mac_addr: SAN MAC address ++ * ++ * Reads the SAN MAC address from the EEPROM, if it's available. This is ++ * per-port, so set_lan_id() must be called before reading the addresses. ++ **/ ++s32 ixgbe_get_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.get_san_mac_addr, ++ (hw, san_mac_addr), IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_set_san_mac_addr - Write a SAN MAC address ++ * @hw: pointer to hardware structure ++ * @san_mac_addr: SAN MAC address ++ * ++ * Writes A SAN MAC address to the EEPROM. 
++ **/ ++s32 ixgbe_set_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.set_san_mac_addr, ++ (hw, san_mac_addr), IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_get_device_caps - Get additional device capabilities ++ * @hw: pointer to hardware structure ++ * @device_caps: the EEPROM word for device capabilities ++ * ++ * Reads the extra device capabilities from the EEPROM ++ **/ ++s32 ixgbe_get_device_caps(struct ixgbe_hw *hw, u16 *device_caps) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.get_device_caps, ++ (hw, device_caps), IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_get_wwn_prefix - Get alternative WWNN/WWPN prefix from the EEPROM ++ * @hw: pointer to hardware structure ++ * @wwnn_prefix: the alternative WWNN prefix ++ * @wwpn_prefix: the alternative WWPN prefix ++ * ++ * This function will read the EEPROM from the alternative SAN MAC address ++ * block to check the support for the alternative WWNN/WWPN prefix support. ++ **/ ++s32 ixgbe_get_wwn_prefix(struct ixgbe_hw *hw, u16 *wwnn_prefix, ++ u16 *wwpn_prefix) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.get_wwn_prefix, ++ (hw, wwnn_prefix, wwpn_prefix), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_get_fcoe_boot_status - Get FCOE boot status from EEPROM ++ * @hw: pointer to hardware structure ++ * @bs: the fcoe boot status ++ * ++ * This function will read the FCOE boot status from the iSCSI FCOE block ++ **/ ++s32 ixgbe_get_fcoe_boot_status(struct ixgbe_hw *hw, u16 *bs) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.get_fcoe_boot_status, ++ (hw, bs), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_get_bus_info - Set PCI bus info ++ * @hw: pointer to hardware structure ++ * ++ * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure ++ **/ ++s32 ixgbe_get_bus_info(struct ixgbe_hw *hw) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.get_bus_info, (hw), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_get_num_of_tx_queues - Get Tx queues ++ * @hw: pointer to hardware structure ++ * ++ * Returns the number of transmit queues for the given adapter. ++ **/ ++u32 ixgbe_get_num_of_tx_queues(struct ixgbe_hw *hw) ++{ ++ return hw->mac.max_tx_queues; ++} ++ ++/** ++ * ixgbe_get_num_of_rx_queues - Get Rx queues ++ * @hw: pointer to hardware structure ++ * ++ * Returns the number of receive queues for the given adapter. ++ **/ ++u32 ixgbe_get_num_of_rx_queues(struct ixgbe_hw *hw) ++{ ++ return hw->mac.max_rx_queues; ++} ++ ++/** ++ * ixgbe_stop_adapter - Disable Rx/Tx units ++ * @hw: pointer to hardware structure ++ * ++ * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts, ++ * disables transmit and receive units. The adapter_stopped flag is used by ++ * the shared code and drivers to determine if the adapter is in a stopped ++ * state and should not touch the hardware. ++ **/ ++s32 ixgbe_stop_adapter(struct ixgbe_hw *hw) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.stop_adapter, (hw), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_read_pba_string - Reads part number string from EEPROM ++ * @hw: pointer to hardware structure ++ * @pba_num: stores the part number string from the EEPROM ++ * @pba_num_size: part number string buffer length ++ * ++ * Reads the part number string from the EEPROM. ++ * Returns expected buffer size in pba_num_size if passed in buffer was too ++ * small. 
++ **/ ++s32 ixgbe_read_pba_string(struct ixgbe_hw *hw, u8 *pba_num, u32 *pba_num_size) ++{ ++ return ixgbe_read_pba_string_generic(hw, pba_num, pba_num_size); ++} ++ ++/** ++ * ixgbe_read_pba_num - Reads part number from EEPROM ++ * @hw: pointer to hardware structure ++ * @pba_num: stores the part number from the EEPROM ++ * ++ * Reads the part number from the EEPROM. ++ **/ ++s32 ixgbe_read_pba_num(struct ixgbe_hw *hw, u32 *pba_num) ++{ ++ return ixgbe_read_pba_num_generic(hw, pba_num); ++} ++ ++/** ++ * ixgbe_identify_phy - Get PHY type ++ * @hw: pointer to hardware structure ++ * ++ * Determines the physical layer module found on the current adapter. ++ **/ ++s32 ixgbe_identify_phy(struct ixgbe_hw *hw) ++{ ++ s32 status = 0; ++ ++ if (hw->phy.type == ixgbe_phy_unknown) { ++ status = ixgbe_call_func(hw, ++ hw->phy.ops.identify, ++ (hw), ++ IXGBE_NOT_IMPLEMENTED); ++ } ++ ++ return status; ++} ++ ++/** ++ * ixgbe_reset_phy - Perform a PHY reset ++ * @hw: pointer to hardware structure ++ **/ ++s32 ixgbe_reset_phy(struct ixgbe_hw *hw) ++{ ++ s32 status = 0; ++ ++ if (hw->phy.type == ixgbe_phy_unknown) { ++ if (ixgbe_identify_phy(hw) != 0) ++ status = IXGBE_ERR_PHY; ++ } ++ ++ if (status == 0) { ++ status = ixgbe_call_func(hw, hw->phy.ops.reset, (hw), ++ IXGBE_NOT_IMPLEMENTED); ++ } ++ return status; ++} ++ ++/** ++ * ixgbe_get_phy_firmware_version - ++ * @hw: pointer to hardware structure ++ * @firmware_version: pointer to firmware version ++ **/ ++s32 ixgbe_get_phy_firmware_version(struct ixgbe_hw *hw, u16 *firmware_version) ++{ ++ s32 status = 0; ++ ++ status = ixgbe_call_func(hw, hw->phy.ops.get_firmware_version, ++ (hw, firmware_version), ++ IXGBE_NOT_IMPLEMENTED); ++ return status; ++} ++ ++/** ++ * ixgbe_read_phy_reg - Read PHY register ++ * @hw: pointer to hardware structure ++ * @reg_addr: 32 bit address of PHY register to read ++ * @phy_data: Pointer to read data from PHY register ++ * ++ * Reads a value from a specified PHY register ++ **/ ++s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, ++ u16 *phy_data) ++{ ++ if (hw->phy.id == 0) ++ ixgbe_identify_phy(hw); ++ ++ return ixgbe_call_func(hw, hw->phy.ops.read_reg, (hw, reg_addr, ++ device_type, phy_data), IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_write_phy_reg - Write PHY register ++ * @hw: pointer to hardware structure ++ * @reg_addr: 32 bit PHY register to write ++ * @phy_data: Data to write to the PHY register ++ * ++ * Writes a value to specified PHY register ++ **/ ++s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, ++ u16 phy_data) ++{ ++ if (hw->phy.id == 0) ++ ixgbe_identify_phy(hw); ++ ++ return ixgbe_call_func(hw, hw->phy.ops.write_reg, (hw, reg_addr, ++ device_type, phy_data), IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_setup_phy_link - Restart PHY autoneg ++ * @hw: pointer to hardware structure ++ * ++ * Restart autonegotiation and PHY and waits for completion. ++ **/ ++s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw) ++{ ++ return ixgbe_call_func(hw, hw->phy.ops.setup_link, (hw), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_check_phy_link - Determine link and speed status ++ * @hw: pointer to hardware structure ++ * ++ * Reads a PHY register to determine if link is up and the current speed for ++ * the PHY. 
++ **/ ++s32 ixgbe_check_phy_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, ++ bool *link_up) ++{ ++ return ixgbe_call_func(hw, hw->phy.ops.check_link, (hw, speed, ++ link_up), IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_setup_phy_link_speed - Set auto advertise ++ * @hw: pointer to hardware structure ++ * @speed: new link speed ++ * @autoneg: true if autonegotiation enabled ++ * ++ * Sets the auto advertised capabilities ++ **/ ++s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed, ++ bool autoneg, ++ bool autoneg_wait_to_complete) ++{ ++ return ixgbe_call_func(hw, hw->phy.ops.setup_link_speed, (hw, speed, ++ autoneg, autoneg_wait_to_complete), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_check_link - Get link and speed status ++ * @hw: pointer to hardware structure ++ * ++ * Reads the links register to determine if link is up and the current speed ++ **/ ++s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, ++ bool *link_up, bool link_up_wait_to_complete) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.check_link, (hw, speed, ++ link_up, link_up_wait_to_complete), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_disable_tx_laser - Disable Tx laser ++ * @hw: pointer to hardware structure ++ * ++ * If the driver needs to disable the laser on SFI optics. ++ **/ ++void ixgbe_disable_tx_laser(struct ixgbe_hw *hw) ++{ ++ if (hw->mac.ops.disable_tx_laser) ++ hw->mac.ops.disable_tx_laser(hw); ++} ++ ++/** ++ * ixgbe_enable_tx_laser - Enable Tx laser ++ * @hw: pointer to hardware structure ++ * ++ * If the driver needs to enable the laser on SFI optics. ++ **/ ++void ixgbe_enable_tx_laser(struct ixgbe_hw *hw) ++{ ++ if (hw->mac.ops.enable_tx_laser) ++ hw->mac.ops.enable_tx_laser(hw); ++} ++ ++/** ++ * ixgbe_flap_tx_laser - flap Tx laser to start autotry process ++ * @hw: pointer to hardware structure ++ * ++ * When the driver changes the link speeds that it can support then ++ * flap the tx laser to alert the link partner to start autotry ++ * process on its end. ++ **/ ++void ixgbe_flap_tx_laser(struct ixgbe_hw *hw) ++{ ++ if (hw->mac.ops.flap_tx_laser) ++ hw->mac.ops.flap_tx_laser(hw); ++} ++ ++/** ++ * ixgbe_setup_link - Set link speed ++ * @hw: pointer to hardware structure ++ * @speed: new link speed ++ * @autoneg: true if autonegotiation enabled ++ * ++ * Configures link settings. Restarts the link. ++ * Performs autonegotiation if needed. ++ **/ ++s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed, ++ bool autoneg, ++ bool autoneg_wait_to_complete) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.setup_link, (hw, speed, ++ autoneg, autoneg_wait_to_complete), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_get_link_capabilities - Returns link capabilities ++ * @hw: pointer to hardware structure ++ * ++ * Determines the link capabilities of the current configuration. ++ **/ ++s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed, ++ bool *autoneg) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.get_link_capabilities, (hw, ++ speed, autoneg), IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_led_on - Turn on LEDs ++ * @hw: pointer to hardware structure ++ * @index: led number to turn on ++ * ++ * Turns on the software controllable LEDs. 
++ **/ ++s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.led_on, (hw, index), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_led_off - Turn off LEDs ++ * @hw: pointer to hardware structure ++ * @index: led number to turn off ++ * ++ * Turns off the software controllable LEDs. ++ **/ ++s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.led_off, (hw, index), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_blink_led_start - Blink LEDs ++ * @hw: pointer to hardware structure ++ * @index: led number to blink ++ * ++ * Blink LED based on index. ++ **/ ++s32 ixgbe_blink_led_start(struct ixgbe_hw *hw, u32 index) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.blink_led_start, (hw, index), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_blink_led_stop - Stop blinking LEDs ++ * @hw: pointer to hardware structure ++ * ++ * Stop blinking LED based on index. ++ **/ ++s32 ixgbe_blink_led_stop(struct ixgbe_hw *hw, u32 index) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.blink_led_stop, (hw, index), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_init_eeprom_params - Initialize EEPROM parameters ++ * @hw: pointer to hardware structure ++ * ++ * Initializes the EEPROM parameters ixgbe_eeprom_info within the ++ * ixgbe_hw struct in order to set up EEPROM access. ++ **/ ++s32 ixgbe_init_eeprom_params(struct ixgbe_hw *hw) ++{ ++ return ixgbe_call_func(hw, hw->eeprom.ops.init_params, (hw), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++ ++/** ++ * ixgbe_write_eeprom - Write word to EEPROM ++ * @hw: pointer to hardware structure ++ * @offset: offset within the EEPROM to be written to ++ * @data: 16 bit word to be written to the EEPROM ++ * ++ * Writes 16 bit value to EEPROM. If ixgbe_eeprom_update_checksum is not ++ * called after this function, the EEPROM will most likely contain an ++ * invalid checksum. 
++ **/ ++s32 ixgbe_write_eeprom(struct ixgbe_hw *hw, u16 offset, u16 data) ++{ ++ return ixgbe_call_func(hw, hw->eeprom.ops.write, (hw, offset, data), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_read_eeprom - Read word from EEPROM ++ * @hw: pointer to hardware structure ++ * @offset: offset within the EEPROM to be read ++ * @data: read 16 bit value from EEPROM ++ * ++ * Reads 16 bit value from EEPROM ++ **/ ++s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data) ++{ ++ return ixgbe_call_func(hw, hw->eeprom.ops.read, (hw, offset, data), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_validate_eeprom_checksum - Validate EEPROM checksum ++ * @hw: pointer to hardware structure ++ * @checksum_val: calculated checksum ++ * ++ * Performs checksum calculation and validates the EEPROM checksum ++ **/ ++s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val) ++{ ++ return ixgbe_call_func(hw, hw->eeprom.ops.validate_checksum, ++ (hw, checksum_val), IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_eeprom_update_checksum - Updates the EEPROM checksum ++ * @hw: pointer to hardware structure ++ **/ ++s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw) ++{ ++ return ixgbe_call_func(hw, hw->eeprom.ops.update_checksum, (hw), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_insert_mac_addr - Find a RAR for this mac address ++ * @hw: pointer to hardware structure ++ * @addr: Address to put into receive address register ++ * @vmdq: VMDq pool to assign ++ * ++ * Puts an ethernet address into a receive address register, or ++ * finds the rar that it is aleady in; adds to the pool list ++ **/ ++s32 ixgbe_insert_mac_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.insert_mac_addr, ++ (hw, addr, vmdq), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_set_rar - Set Rx address register ++ * @hw: pointer to hardware structure ++ * @index: Receive address register to write ++ * @addr: Address to put into receive address register ++ * @vmdq: VMDq "set" ++ * @enable_addr: set flag that address is active ++ * ++ * Puts an ethernet address into a receive address register. ++ **/ ++s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, ++ u32 enable_addr) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.set_rar, (hw, index, addr, vmdq, ++ enable_addr), IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_clear_rar - Clear Rx address register ++ * @hw: pointer to hardware structure ++ * @index: Receive address register to write ++ * ++ * Puts an ethernet address into a receive address register. 
++ **/ ++s32 ixgbe_clear_rar(struct ixgbe_hw *hw, u32 index) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.clear_rar, (hw, index), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_set_vmdq - Associate a VMDq index with a receive address ++ * @hw: pointer to hardware structure ++ * @rar: receive address register index to associate with VMDq index ++ * @vmdq: VMDq set or pool index ++ **/ ++s32 ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.set_vmdq, (hw, rar, vmdq), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_clear_vmdq - Disassociate a VMDq index from a receive address ++ * @hw: pointer to hardware structure ++ * @rar: receive address register index to disassociate with VMDq index ++ * @vmdq: VMDq set or pool index ++ **/ ++s32 ixgbe_clear_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.clear_vmdq, (hw, rar, vmdq), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_init_rx_addrs - Initializes receive address filters. ++ * @hw: pointer to hardware structure ++ * ++ * Places the MAC address in receive address register 0 and clears the rest ++ * of the receive address registers. Clears the multicast table. Assumes ++ * the receiver is in reset when the routine is called. ++ **/ ++s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.init_rx_addrs, (hw), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_get_num_rx_addrs - Returns the number of RAR entries. ++ * @hw: pointer to hardware structure ++ **/ ++u32 ixgbe_get_num_rx_addrs(struct ixgbe_hw *hw) ++{ ++ return hw->mac.num_rar_entries; ++} ++ ++/** ++ * ixgbe_update_uc_addr_list - Updates the MAC's list of secondary addresses ++ * @hw: pointer to hardware structure ++ * @addr_list: the list of new multicast addresses ++ * @addr_count: number of addresses ++ * @func: iterator function to walk the multicast address list ++ * ++ * The given list replaces any existing list. Clears the secondary addrs from ++ * receive address registers. Uses unused receive address registers for the ++ * first secondary addresses, and falls back to promiscuous mode as needed. ++ **/ ++s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list, ++ u32 addr_count, ixgbe_mc_addr_itr func) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.update_uc_addr_list, (hw, ++ addr_list, addr_count, func), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_update_mc_addr_list - Updates the MAC's list of multicast addresses ++ * @hw: pointer to hardware structure ++ * @mc_addr_list: the list of new multicast addresses ++ * @mc_addr_count: number of addresses ++ * @func: iterator function to walk the multicast address list ++ * ++ * The given list replaces any existing list. Clears the MC addrs from receive ++ * address registers and the multicast table. Uses unused receive address ++ * registers for the first multicast addresses, and hashes the rest into the ++ * multicast table. ++ **/ ++s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list, ++ u32 mc_addr_count, ixgbe_mc_addr_itr func) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.update_mc_addr_list, (hw, ++ mc_addr_list, mc_addr_count, func), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_enable_mc - Enable multicast address in RAR ++ * @hw: pointer to hardware structure ++ * ++ * Enables multicast address in RAR and the use of the multicast hash table. 
++ **/ ++s32 ixgbe_enable_mc(struct ixgbe_hw *hw) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.enable_mc, (hw), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_disable_mc - Disable multicast address in RAR ++ * @hw: pointer to hardware structure ++ * ++ * Disables multicast address in RAR and the use of the multicast hash table. ++ **/ ++s32 ixgbe_disable_mc(struct ixgbe_hw *hw) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.disable_mc, (hw), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_clear_vfta - Clear VLAN filter table ++ * @hw: pointer to hardware structure ++ * ++ * Clears the VLAN filer table, and the VMDq index associated with the filter ++ **/ ++s32 ixgbe_clear_vfta(struct ixgbe_hw *hw) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.clear_vfta, (hw), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_set_vfta - Set VLAN filter table ++ * @hw: pointer to hardware structure ++ * @vlan: VLAN id to write to VLAN filter ++ * @vind: VMDq output index that maps queue to VLAN id in VFTA ++ * @vlan_on: boolean flag to turn on/off VLAN in VFTA ++ * ++ * Turn on/off specified VLAN in the VLAN filter table. ++ **/ ++s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.set_vfta, (hw, vlan, vind, ++ vlan_on), IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_fc_enable - Enable flow control ++ * @hw: pointer to hardware structure ++ * @packetbuf_num: packet buffer number (0-7) ++ * ++ * Configures the flow control settings based on SW configuration. ++ **/ ++s32 ixgbe_fc_enable(struct ixgbe_hw *hw, s32 packetbuf_num) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.fc_enable, (hw, packetbuf_num), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_read_analog_reg8 - Reads 8 bit analog register ++ * @hw: pointer to hardware structure ++ * @reg: analog register to read ++ * @val: read value ++ * ++ * Performs write operation to analog register specified. ++ **/ ++s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.read_analog_reg8, (hw, reg, ++ val), IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_write_analog_reg8 - Writes 8 bit analog register ++ * @hw: pointer to hardware structure ++ * @reg: analog register to write ++ * @val: value to write ++ * ++ * Performs write operation to Atlas analog register specified. ++ **/ ++s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.write_analog_reg8, (hw, reg, ++ val), IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_init_uta_tables - Initializes Unicast Table Arrays. ++ * @hw: pointer to hardware structure ++ * ++ * Initializes the Unicast Table Arrays to zero on device load. This ++ * is part of the Rx init addr execution path. ++ **/ ++s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.init_uta_tables, (hw), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_read_i2c_byte - Reads 8 bit word over I2C at specified device address ++ * @hw: pointer to hardware structure ++ * @byte_offset: byte offset to read ++ * @data: value read ++ * ++ * Performs byte read operation to SFP module's EEPROM over I2C interface. 
++ **/ ++s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, ++ u8 *data) ++{ ++ return ixgbe_call_func(hw, hw->phy.ops.read_i2c_byte, (hw, byte_offset, ++ dev_addr, data), IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_write_i2c_byte - Writes 8 bit word over I2C ++ * @hw: pointer to hardware structure ++ * @byte_offset: byte offset to write ++ * @data: value to write ++ * ++ * Performs byte write operation to SFP module's EEPROM over I2C interface ++ * at a specified device address. ++ **/ ++s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, ++ u8 data) ++{ ++ return ixgbe_call_func(hw, hw->phy.ops.write_i2c_byte, (hw, byte_offset, ++ dev_addr, data), IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_write_i2c_eeprom - Writes 8 bit EEPROM word over I2C interface ++ * @hw: pointer to hardware structure ++ * @byte_offset: EEPROM byte offset to write ++ * @eeprom_data: value to write ++ * ++ * Performs byte write operation to SFP module's EEPROM over I2C interface. ++ **/ ++s32 ixgbe_write_i2c_eeprom(struct ixgbe_hw *hw, ++ u8 byte_offset, u8 eeprom_data) ++{ ++ return ixgbe_call_func(hw, hw->phy.ops.write_i2c_eeprom, ++ (hw, byte_offset, eeprom_data), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_read_i2c_eeprom - Reads 8 bit EEPROM word over I2C interface ++ * @hw: pointer to hardware structure ++ * @byte_offset: EEPROM byte offset to read ++ * @eeprom_data: value read ++ * ++ * Performs byte read operation to SFP module's EEPROM over I2C interface. ++ **/ ++s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data) ++{ ++ return ixgbe_call_func(hw, hw->phy.ops.read_i2c_eeprom, ++ (hw, byte_offset, eeprom_data), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_get_supported_physical_layer - Returns physical layer type ++ * @hw: pointer to hardware structure ++ * ++ * Determines physical layer capabilities of the current configuration. ++ **/ ++u32 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.get_supported_physical_layer, ++ (hw), IXGBE_PHYSICAL_LAYER_UNKNOWN); ++} ++ ++/** ++ * ixgbe_enable_rx_dma - Enables Rx DMA unit, dependant on device specifics ++ * @hw: pointer to hardware structure ++ * @regval: bitfield to write to the Rx DMA register ++ * ++ * Enables the Rx DMA unit of the device. 
++ **/ ++s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.enable_rx_dma, ++ (hw, regval), IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_acquire_swfw_semaphore - Acquire SWFW semaphore ++ * @hw: pointer to hardware structure ++ * @mask: Mask to specify which semaphore to acquire ++ * ++ * Acquires the SWFW semaphore through SW_FW_SYNC register for the specified ++ * function (CSR, PHY0, PHY1, EEPROM, Flash) ++ **/ ++s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u16 mask) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.acquire_swfw_sync, ++ (hw, mask), IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_release_swfw_semaphore - Release SWFW semaphore ++ * @hw: pointer to hardware structure ++ * @mask: Mask to specify which semaphore to release ++ * ++ * Releases the SWFW semaphore through SW_FW_SYNC register for the specified ++ * function (CSR, PHY0, PHY1, EEPROM, Flash) ++ **/ ++void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u16 mask) ++{ ++ if (hw->mac.ops.release_swfw_sync) ++ hw->mac.ops.release_swfw_sync(hw, mask); ++} ++ +diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_api.h linux-2.6.22-50/drivers/net/ixgbe/ixgbe_api.h +--- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_api.h 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_api.h 2010-08-25 17:56:26.000000000 -0400 +@@ -0,0 +1,171 @@ ++/******************************************************************************* ++ ++ Intel 10 Gigabit PCI Express Linux driver ++ Copyright(c) 1999 - 2010 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ You should have received a copy of the GNU General Public License along with ++ this program; if not, write to the Free Software Foundation, Inc., ++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#ifndef _IXGBE_API_H_ ++#define _IXGBE_API_H_ ++ ++#include "ixgbe_type.h" ++ ++s32 ixgbe_init_shared_code(struct ixgbe_hw *hw); ++ ++s32 ixgbe_set_mac_type(struct ixgbe_hw *hw); ++s32 ixgbe_init_hw(struct ixgbe_hw *hw); ++s32 ixgbe_reset_hw(struct ixgbe_hw *hw); ++s32 ixgbe_start_hw(struct ixgbe_hw *hw); ++void ixgbe_enable_relaxed_ordering(struct ixgbe_hw *hw); ++s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw); ++enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw); ++s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr); ++s32 ixgbe_get_bus_info(struct ixgbe_hw *hw); ++u32 ixgbe_get_num_of_tx_queues(struct ixgbe_hw *hw); ++u32 ixgbe_get_num_of_rx_queues(struct ixgbe_hw *hw); ++s32 ixgbe_stop_adapter(struct ixgbe_hw *hw); ++s32 ixgbe_read_pba_num(struct ixgbe_hw *hw, u32 *pba_num); ++s32 ixgbe_read_pba_string(struct ixgbe_hw *hw, u8 *pba_num, u32 *pba_num_size); ++ ++s32 ixgbe_identify_phy(struct ixgbe_hw *hw); ++s32 ixgbe_reset_phy(struct ixgbe_hw *hw); ++s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, ++ u16 *phy_data); ++s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, ++ u16 phy_data); ++ ++s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw); ++s32 ixgbe_check_phy_link(struct ixgbe_hw *hw, ++ ixgbe_link_speed *speed, ++ bool *link_up); ++s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, ++ ixgbe_link_speed speed, ++ bool autoneg, ++ bool autoneg_wait_to_complete); ++void ixgbe_disable_tx_laser(struct ixgbe_hw *hw); ++void ixgbe_enable_tx_laser(struct ixgbe_hw *hw); ++void ixgbe_flap_tx_laser(struct ixgbe_hw *hw); ++s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed, ++ bool autoneg, bool autoneg_wait_to_complete); ++s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, ++ bool *link_up, bool link_up_wait_to_complete); ++s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed, ++ bool *autoneg); ++s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index); ++s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index); ++s32 ixgbe_blink_led_start(struct ixgbe_hw *hw, u32 index); ++s32 ixgbe_blink_led_stop(struct ixgbe_hw *hw, u32 index); ++ ++s32 ixgbe_init_eeprom_params(struct ixgbe_hw *hw); ++s32 ixgbe_write_eeprom(struct ixgbe_hw *hw, u16 offset, u16 data); ++s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data); ++s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val); ++s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw); ++ ++s32 ixgbe_insert_mac_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq); ++s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, ++ u32 enable_addr); ++s32 ixgbe_clear_rar(struct ixgbe_hw *hw, u32 index); ++s32 ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq); ++s32 ixgbe_clear_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq); ++s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw); ++u32 ixgbe_get_num_rx_addrs(struct ixgbe_hw *hw); ++s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list, ++ u32 addr_count, ixgbe_mc_addr_itr func); ++s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list, ++ u32 mc_addr_count, ixgbe_mc_addr_itr func); ++void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr_list, u32 vmdq); ++s32 ixgbe_enable_mc(struct ixgbe_hw *hw); ++s32 ixgbe_disable_mc(struct ixgbe_hw *hw); ++s32 ixgbe_clear_vfta(struct ixgbe_hw *hw); ++s32 ixgbe_set_vfta(struct ixgbe_hw 
*hw, u32 vlan, ++ u32 vind, bool vlan_on); ++ ++s32 ixgbe_fc_enable(struct ixgbe_hw *hw, s32 packetbuf_num); ++ ++void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr); ++s32 ixgbe_get_phy_firmware_version(struct ixgbe_hw *hw, ++ u16 *firmware_version); ++s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val); ++s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val); ++s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw); ++s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data); ++u32 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw); ++s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval); ++s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw); ++s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc); ++s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc); ++s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, ++ struct ixgbe_atr_input *input, ++ u8 queue); ++s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw, ++ struct ixgbe_atr_input *input, ++ struct ixgbe_atr_input_masks *masks, ++ u16 soft_id, ++ u8 queue); ++u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *input, u32 key); ++s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input, u16 vlan_id); ++s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input, u32 src_addr); ++s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 dst_addr); ++s32 ixgbe_atr_set_src_ipv6_82599(struct ixgbe_atr_input *input, u32 src_addr_1, ++ u32 src_addr_2, u32 src_addr_3, ++ u32 src_addr_4); ++s32 ixgbe_atr_set_dst_ipv6_82599(struct ixgbe_atr_input *input, u32 dst_addr_1, ++ u32 dst_addr_2, u32 dst_addr_3, ++ u32 dst_addr_4); ++s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input, u16 src_port); ++s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input, u16 dst_port); ++s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, u16 flex_byte); ++s32 ixgbe_atr_set_vm_pool_82599(struct ixgbe_atr_input *input, u8 vm_pool); ++s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, u8 l4type); ++s32 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input, u16 *vlan_id); ++s32 ixgbe_atr_get_src_ipv4_82599(struct ixgbe_atr_input *input, u32 *src_addr); ++s32 ixgbe_atr_get_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 *dst_addr); ++s32 ixgbe_atr_get_src_ipv6_82599(struct ixgbe_atr_input *input, u32 *src_addr_1, ++ u32 *src_addr_2, u32 *src_addr_3, ++ u32 *src_addr_4); ++s32 ixgbe_atr_get_dst_ipv6_82599(struct ixgbe_atr_input *input, u32 *dst_addr_1, ++ u32 *dst_addr_2, u32 *dst_addr_3, ++ u32 *dst_addr_4); ++s32 ixgbe_atr_get_src_port_82599(struct ixgbe_atr_input *input, u16 *src_port); ++s32 ixgbe_atr_get_dst_port_82599(struct ixgbe_atr_input *input, u16 *dst_port); ++s32 ixgbe_atr_get_flex_byte_82599(struct ixgbe_atr_input *input, ++ u16 *flex_byte); ++s32 ixgbe_atr_get_vm_pool_82599(struct ixgbe_atr_input *input, u8 *vm_pool); ++s32 ixgbe_atr_get_l4type_82599(struct ixgbe_atr_input *input, u8 *l4type); ++s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, ++ u8 *data); ++s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, ++ u8 data); ++s32 ixgbe_write_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 eeprom_data); ++s32 ixgbe_get_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr); ++s32 ixgbe_set_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr); ++s32 ixgbe_get_device_caps(struct ixgbe_hw *hw, u16 *device_caps); ++s32 
ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u16 mask); ++void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u16 mask); ++s32 ixgbe_get_wwn_prefix(struct ixgbe_hw *hw, u16 *wwnn_prefix, ++ u16 *wwpn_prefix); ++s32 ixgbe_get_fcoe_boot_status(struct ixgbe_hw *hw, u16 *bs); ++ ++ ++#endif /* _IXGBE_API_H_ */ +diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_common.c linux-2.6.22-50/drivers/net/ixgbe/ixgbe_common.c +--- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_common.c 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_common.c 2010-08-25 17:56:26.000000000 -0400 +@@ -0,0 +1,3146 @@ ++/******************************************************************************* ++ ++ Intel 10 Gigabit PCI Express Linux driver ++ Copyright(c) 1999 - 2010 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ You should have received a copy of the GNU General Public License along with ++ this program; if not, write to the Free Software Foundation, Inc., ++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#include "ixgbe_common.h" ++#include "ixgbe_phy.h" ++#include "ixgbe_api.h" ++ ++static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw); ++static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw); ++static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw); ++static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw); ++static void ixgbe_standby_eeprom(struct ixgbe_hw *hw); ++static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, ++ u16 count); ++static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count); ++static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); ++static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); ++static void ixgbe_release_eeprom(struct ixgbe_hw *hw); ++ ++static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); ++static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw, ++ u16 *san_mac_offset); ++static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw); ++static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw); ++static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw); ++static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw); ++static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, ++ u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm); ++ ++s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan); ++ ++/** ++ * ixgbe_init_ops_generic - Inits function ptrs ++ * @hw: pointer to the hardware structure ++ * ++ * Initialize the function pointers. 
++ **/ ++s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw) ++{ ++ struct ixgbe_eeprom_info *eeprom = &hw->eeprom; ++ struct ixgbe_mac_info *mac = &hw->mac; ++ u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC); ++ ++ /* EEPROM */ ++ eeprom->ops.init_params = &ixgbe_init_eeprom_params_generic; ++ /* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */ ++ if (eec & (1 << 8)) ++ eeprom->ops.read = &ixgbe_read_eerd_generic; ++ else ++ eeprom->ops.read = &ixgbe_read_eeprom_bit_bang_generic; ++ eeprom->ops.write = &ixgbe_write_eeprom_generic; ++ eeprom->ops.validate_checksum = ++ &ixgbe_validate_eeprom_checksum_generic; ++ eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_generic; ++ eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_generic; ++ ++ /* MAC */ ++ mac->ops.init_hw = &ixgbe_init_hw_generic; ++ mac->ops.reset_hw = NULL; ++ mac->ops.start_hw = &ixgbe_start_hw_generic; ++ mac->ops.clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic; ++ mac->ops.get_media_type = NULL; ++ mac->ops.get_supported_physical_layer = NULL; ++ mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_generic; ++ mac->ops.get_mac_addr = &ixgbe_get_mac_addr_generic; ++ mac->ops.stop_adapter = &ixgbe_stop_adapter_generic; ++ mac->ops.get_bus_info = &ixgbe_get_bus_info_generic; ++ mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie; ++ mac->ops.acquire_swfw_sync = &ixgbe_acquire_swfw_sync; ++ mac->ops.release_swfw_sync = &ixgbe_release_swfw_sync; ++ ++ /* LEDs */ ++ mac->ops.led_on = &ixgbe_led_on_generic; ++ mac->ops.led_off = &ixgbe_led_off_generic; ++ mac->ops.blink_led_start = &ixgbe_blink_led_start_generic; ++ mac->ops.blink_led_stop = &ixgbe_blink_led_stop_generic; ++ ++ /* RAR, Multicast, VLAN */ ++ mac->ops.set_rar = &ixgbe_set_rar_generic; ++ mac->ops.clear_rar = &ixgbe_clear_rar_generic; ++ mac->ops.insert_mac_addr = NULL; ++ mac->ops.set_vmdq = NULL; ++ mac->ops.clear_vmdq = NULL; ++ mac->ops.init_rx_addrs = &ixgbe_init_rx_addrs_generic; ++ mac->ops.update_uc_addr_list = &ixgbe_update_uc_addr_list_generic; ++ mac->ops.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic; ++ mac->ops.enable_mc = &ixgbe_enable_mc_generic; ++ mac->ops.disable_mc = &ixgbe_disable_mc_generic; ++ mac->ops.clear_vfta = NULL; ++ mac->ops.set_vfta = NULL; ++ mac->ops.init_uta_tables = NULL; ++ ++ /* Flow Control */ ++ mac->ops.fc_enable = &ixgbe_fc_enable_generic; ++ ++ /* Link */ ++ mac->ops.get_link_capabilities = NULL; ++ mac->ops.setup_link = NULL; ++ mac->ops.check_link = NULL; ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx ++ * @hw: pointer to hardware structure ++ * ++ * Starts the hardware by filling the bus info structure and media type, clears ++ * all on chip counters, initializes receive address registers, multicast ++ * table, VLAN filter table, calls routine to set up link and flow control ++ * settings, and leaves transmit and receive units disabled and uninitialized ++ **/ ++s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) ++{ ++ u32 ctrl_ext; ++ s32 ret_val = 0; ++ ++ /* Set the media type */ ++ hw->phy.media_type = hw->mac.ops.get_media_type(hw); ++ ++ /* PHY ops initialization must be done in reset_hw() */ ++ ++ /* Clear the VLAN filter table */ ++ hw->mac.ops.clear_vfta(hw); ++ ++ /* Clear statistics registers */ ++ hw->mac.ops.clear_hw_cntrs(hw); ++ ++ /* Set No Snoop Disable */ ++ ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); ++ ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS; ++ IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); ++ IXGBE_WRITE_FLUSH(hw); ++ ++ /* Setup flow 
control */ ++ ixgbe_setup_fc(hw, 0); ++ ++ /* Clear adapter stopped flag */ ++ hw->adapter_stopped = false; ++ ++ return ret_val; ++} ++ ++/** ++ * ixgbe_init_hw_generic - Generic hardware initialization ++ * @hw: pointer to hardware structure ++ * ++ * Initialize the hardware by resetting the hardware, filling the bus info ++ * structure and media type, clears all on chip counters, initializes receive ++ * address registers, multicast table, VLAN filter table, calls routine to set ++ * up link and flow control settings, and leaves transmit and receive units ++ * disabled and uninitialized ++ **/ ++s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw) ++{ ++ s32 status = 0; ++ ++ /* Reset the hardware */ ++ status = hw->mac.ops.reset_hw(hw); ++ ++ if (status == 0) { ++ /* Start the HW */ ++ status = hw->mac.ops.start_hw(hw); ++ } ++ ++ return status; ++} ++ ++/** ++ * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters ++ * @hw: pointer to hardware structure ++ * ++ * Clears all hardware statistics counters by reading them from the hardware ++ * Statistics counters are clear on read. ++ **/ ++s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw) ++{ ++ u16 i = 0; ++ ++ IXGBE_READ_REG(hw, IXGBE_CRCERRS); ++ IXGBE_READ_REG(hw, IXGBE_ILLERRC); ++ IXGBE_READ_REG(hw, IXGBE_ERRBC); ++ IXGBE_READ_REG(hw, IXGBE_MSPDC); ++ for (i = 0; i < 8; i++) ++ IXGBE_READ_REG(hw, IXGBE_MPC(i)); ++ ++ IXGBE_READ_REG(hw, IXGBE_MLFC); ++ IXGBE_READ_REG(hw, IXGBE_MRFC); ++ IXGBE_READ_REG(hw, IXGBE_RLEC); ++ IXGBE_READ_REG(hw, IXGBE_LXONTXC); ++ IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); ++ if (hw->mac.type >= ixgbe_mac_82599EB) { ++ IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); ++ IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); ++ } else { ++ IXGBE_READ_REG(hw, IXGBE_LXONRXC); ++ IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); ++ } ++ ++ for (i = 0; i < 8; i++) { ++ IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); ++ IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); ++ if (hw->mac.type >= ixgbe_mac_82599EB) { ++ IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); ++ IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); ++ } else { ++ IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); ++ IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); ++ } ++ } ++ if (hw->mac.type >= ixgbe_mac_82599EB) ++ for (i = 0; i < 8; i++) ++ IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i)); ++ IXGBE_READ_REG(hw, IXGBE_PRC64); ++ IXGBE_READ_REG(hw, IXGBE_PRC127); ++ IXGBE_READ_REG(hw, IXGBE_PRC255); ++ IXGBE_READ_REG(hw, IXGBE_PRC511); ++ IXGBE_READ_REG(hw, IXGBE_PRC1023); ++ IXGBE_READ_REG(hw, IXGBE_PRC1522); ++ IXGBE_READ_REG(hw, IXGBE_GPRC); ++ IXGBE_READ_REG(hw, IXGBE_BPRC); ++ IXGBE_READ_REG(hw, IXGBE_MPRC); ++ IXGBE_READ_REG(hw, IXGBE_GPTC); ++ IXGBE_READ_REG(hw, IXGBE_GORCL); ++ IXGBE_READ_REG(hw, IXGBE_GORCH); ++ IXGBE_READ_REG(hw, IXGBE_GOTCL); ++ IXGBE_READ_REG(hw, IXGBE_GOTCH); ++ for (i = 0; i < 8; i++) ++ IXGBE_READ_REG(hw, IXGBE_RNBC(i)); ++ IXGBE_READ_REG(hw, IXGBE_RUC); ++ IXGBE_READ_REG(hw, IXGBE_RFC); ++ IXGBE_READ_REG(hw, IXGBE_ROC); ++ IXGBE_READ_REG(hw, IXGBE_RJC); ++ IXGBE_READ_REG(hw, IXGBE_MNGPRC); ++ IXGBE_READ_REG(hw, IXGBE_MNGPDC); ++ IXGBE_READ_REG(hw, IXGBE_MNGPTC); ++ IXGBE_READ_REG(hw, IXGBE_TORL); ++ IXGBE_READ_REG(hw, IXGBE_TORH); ++ IXGBE_READ_REG(hw, IXGBE_TPR); ++ IXGBE_READ_REG(hw, IXGBE_TPT); ++ IXGBE_READ_REG(hw, IXGBE_PTC64); ++ IXGBE_READ_REG(hw, IXGBE_PTC127); ++ IXGBE_READ_REG(hw, IXGBE_PTC255); ++ IXGBE_READ_REG(hw, IXGBE_PTC511); ++ IXGBE_READ_REG(hw, IXGBE_PTC1023); ++ IXGBE_READ_REG(hw, IXGBE_PTC1522); ++ IXGBE_READ_REG(hw, IXGBE_MPTC); ++ IXGBE_READ_REG(hw, IXGBE_BPTC); ++ for (i = 0; i < 16; i++) { ++ 
IXGBE_READ_REG(hw, IXGBE_QPRC(i)); ++ IXGBE_READ_REG(hw, IXGBE_QPTC(i)); ++ if (hw->mac.type >= ixgbe_mac_82599EB) { ++ IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)); ++ IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)); ++ IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); ++ IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); ++ IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); ++ } else { ++ IXGBE_READ_REG(hw, IXGBE_QBRC(i)); ++ IXGBE_READ_REG(hw, IXGBE_QBTC(i)); ++ } ++ } ++ ++ return 0; ++} ++ ++ ++/** ++ * ixgbe_read_pba_string_generic - Reads part number string from EEPROM ++ * @hw: pointer to hardware structure ++ * @pba_num: stores the part number string from the EEPROM ++ * @pba_num_size: part number string buffer length ++ * ++ * Reads the part number string from the EEPROM. ++ * Returns expected buffer size in pba_num_size if passed in buffer was too ++ * small. ++ **/ ++s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, ++ u32 *pba_num_size) ++{ ++ s32 ret_val; ++ u32 required_pba_num_size; ++ u16 data; ++ u16 pointer; ++ u16 offset; ++ u16 length; ++ ++ if (pba_num_size == NULL) { ++ hw_dbg(hw, "PBA string buffer size was null\n"); ++ return IXGBE_ERR_INVALID_ARGUMENT; ++ } ++ ++ ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data); ++ if (ret_val) { ++ hw_dbg(hw, "NVM Read Error\n"); ++ return ret_val; ++ } else if (data != IXGBE_PBANUM_PTR_GUARD) { ++ hw_dbg(hw, "NVM PBA number is not stored as string\n"); ++ return IXGBE_NOT_IMPLEMENTED; ++ } ++ ++ ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pointer); ++ if (ret_val) { ++ hw_dbg(hw, "NVM Read Error\n"); ++ return ret_val; ++ } ++ ++ ret_val = hw->eeprom.ops.read(hw, pointer, &length); ++ if (ret_val) { ++ hw_dbg(hw, "NVM Read Error\n"); ++ return ret_val; ++ } ++ ++ if (length == 0xFFFF || length == 0) { ++ hw_dbg(hw, "NVM PBA number section invalid length\n"); ++ return IXGBE_ERR_PBA_SECTION; ++ } ++ required_pba_num_size = (((u32)length - 1) * 2) + 1; ++ ++ /* check if pba_num buffer is big enough */ ++ if ((pba_num == NULL) || (*pba_num_size < required_pba_num_size)) { ++ hw_dbg(hw, "PBA string buffer too small\n"); ++ *pba_num_size = required_pba_num_size; ++ return IXGBE_ERR_NO_SPACE; ++ } ++ ++ for (offset = 1; offset < length; offset++) { ++ ret_val = hw->eeprom.ops.read(hw, pointer + offset, &data); ++ if (ret_val) { ++ hw_dbg(hw, "NVM Read Error\n"); ++ return ret_val; ++ } ++ pba_num[(offset - 1) * 2] = (u8)(data >> 8); ++ pba_num[((offset - 1) * 2) + 1] = (u8)(data & 0xFF); ++ } ++ pba_num[(length - 1) * 2] = '\0'; ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_read_pba_num_generic - Reads part number from EEPROM ++ * @hw: pointer to hardware structure ++ * @pba_num: stores the part number from the EEPROM ++ * ++ * Reads the part number from the EEPROM. 
++ **/ ++s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num) ++{ ++ s32 ret_val; ++ u16 data; ++ ++ ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data); ++ if (ret_val) { ++ hw_dbg(hw, "NVM Read Error\n"); ++ return ret_val; ++ } else if (data == IXGBE_PBANUM_PTR_GUARD) { ++ hw_dbg(hw, "NVM Not supported\n"); ++ return IXGBE_NOT_IMPLEMENTED; ++ } ++ *pba_num = (u32)(data << 16); ++ ++ ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data); ++ if (ret_val) { ++ hw_dbg(hw, "NVM Read Error\n"); ++ return ret_val; ++ } ++ *pba_num |= data; ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_get_mac_addr_generic - Generic get MAC address ++ * @hw: pointer to hardware structure ++ * @mac_addr: Adapter MAC address ++ * ++ * Reads the adapter's MAC address from first Receive Address Register (RAR0) ++ * A reset of the adapter must be performed prior to calling this function ++ * in order for the MAC address to have been loaded from the EEPROM into RAR0 ++ **/ ++s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr) ++{ ++ u32 rar_high; ++ u32 rar_low; ++ u16 i; ++ ++ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0)); ++ rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0)); ++ ++ for (i = 0; i < 4; i++) ++ mac_addr[i] = (u8)(rar_low >> (i*8)); ++ ++ for (i = 0; i < 2; i++) ++ mac_addr[i+4] = (u8)(rar_high >> (i*8)); ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_get_bus_info_generic - Generic set PCI bus info ++ * @hw: pointer to hardware structure ++ * ++ * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure ++ **/ ++s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw) ++{ ++ struct ixgbe_mac_info *mac = &hw->mac; ++ u16 link_status; ++ ++ hw->bus.type = ixgbe_bus_type_pci_express; ++ ++ /* Get the negotiated link width and speed from PCI config space */ ++ link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS); ++ ++ switch (link_status & IXGBE_PCI_LINK_WIDTH) { ++ case IXGBE_PCI_LINK_WIDTH_1: ++ hw->bus.width = ixgbe_bus_width_pcie_x1; ++ break; ++ case IXGBE_PCI_LINK_WIDTH_2: ++ hw->bus.width = ixgbe_bus_width_pcie_x2; ++ break; ++ case IXGBE_PCI_LINK_WIDTH_4: ++ hw->bus.width = ixgbe_bus_width_pcie_x4; ++ break; ++ case IXGBE_PCI_LINK_WIDTH_8: ++ hw->bus.width = ixgbe_bus_width_pcie_x8; ++ break; ++ default: ++ hw->bus.width = ixgbe_bus_width_unknown; ++ break; ++ } ++ ++ switch (link_status & IXGBE_PCI_LINK_SPEED) { ++ case IXGBE_PCI_LINK_SPEED_2500: ++ hw->bus.speed = ixgbe_bus_speed_2500; ++ break; ++ case IXGBE_PCI_LINK_SPEED_5000: ++ hw->bus.speed = ixgbe_bus_speed_5000; ++ break; ++ default: ++ hw->bus.speed = ixgbe_bus_speed_unknown; ++ break; ++ } ++ ++ mac->ops.set_lan_id(hw); ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices ++ * @hw: pointer to the HW structure ++ * ++ * Determines the LAN function id by reading memory-mapped registers ++ * and swaps the port value if requested. ++ **/ ++void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw) ++{ ++ struct ixgbe_bus_info *bus = &hw->bus; ++ u32 reg; ++ ++ reg = IXGBE_READ_REG(hw, IXGBE_STATUS); ++ bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT; ++ bus->lan_id = bus->func; ++ ++ /* check for a port swap */ ++ reg = IXGBE_READ_REG(hw, IXGBE_FACTPS); ++ if (reg & IXGBE_FACTPS_LFS) ++ bus->func ^= 0x1; ++} ++ ++/** ++ * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units ++ * @hw: pointer to hardware structure ++ * ++ * Sets the adapter_stopped flag within ixgbe_hw struct. 
Clears interrupts, ++ * disables transmit and receive units. The adapter_stopped flag is used by ++ * the shared code and drivers to determine if the adapter is in a stopped ++ * state and should not touch the hardware. ++ **/ ++s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw) ++{ ++ u32 number_of_queues; ++ u32 reg_val; ++ u16 i; ++ ++ /* ++ * Set the adapter_stopped flag so other driver functions stop touching ++ * the hardware ++ */ ++ hw->adapter_stopped = true; ++ ++ /* Disable the receive unit */ ++ reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL); ++ reg_val &= ~(IXGBE_RXCTRL_RXEN); ++ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val); ++ IXGBE_WRITE_FLUSH(hw); ++ msleep(2); ++ ++ /* Clear interrupt mask to stop from interrupts being generated */ ++ IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); ++ ++ /* Clear any pending interrupts */ ++ IXGBE_READ_REG(hw, IXGBE_EICR); ++ ++ /* Disable the transmit unit. Each queue must be disabled. */ ++ number_of_queues = hw->mac.max_tx_queues; ++ for (i = 0; i < number_of_queues; i++) { ++ reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); ++ if (reg_val & IXGBE_TXDCTL_ENABLE) { ++ reg_val &= ~IXGBE_TXDCTL_ENABLE; ++ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), reg_val); ++ } ++ } ++ ++ /* ++ * Prevent the PCI-E bus from from hanging by disabling PCI-E master ++ * access and verify no pending requests ++ */ ++ ixgbe_disable_pcie_master(hw); ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_led_on_generic - Turns on the software controllable LEDs. ++ * @hw: pointer to hardware structure ++ * @index: led number to turn on ++ **/ ++s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index) ++{ ++ u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); ++ ++ /* To turn on the LED, set mode to ON. */ ++ led_reg &= ~IXGBE_LED_MODE_MASK(index); ++ led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index); ++ IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); ++ IXGBE_WRITE_FLUSH(hw); ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_led_off_generic - Turns off the software controllable LEDs. ++ * @hw: pointer to hardware structure ++ * @index: led number to turn off ++ **/ ++s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index) ++{ ++ u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); ++ ++ /* To turn off the LED, set mode to OFF. */ ++ led_reg &= ~IXGBE_LED_MODE_MASK(index); ++ led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index); ++ IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); ++ IXGBE_WRITE_FLUSH(hw); ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_init_eeprom_params_generic - Initialize EEPROM params ++ * @hw: pointer to hardware structure ++ * ++ * Initializes the EEPROM parameters ixgbe_eeprom_info within the ++ * ixgbe_hw struct in order to set up EEPROM access. ++ **/ ++s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw) ++{ ++ struct ixgbe_eeprom_info *eeprom = &hw->eeprom; ++ u32 eec; ++ u16 eeprom_size; ++ ++ if (eeprom->type == ixgbe_eeprom_uninitialized) { ++ eeprom->type = ixgbe_eeprom_none; ++ /* Set default semaphore delay to 10ms which is a well ++ * tested value */ ++ eeprom->semaphore_delay = 10; ++ ++ /* ++ * Check for EEPROM present first. ++ * If not present leave as none ++ */ ++ eec = IXGBE_READ_REG(hw, IXGBE_EEC); ++ if (eec & IXGBE_EEC_PRES) { ++ eeprom->type = ixgbe_eeprom_spi; ++ ++ /* ++ * SPI EEPROM is assumed here. This code would need to ++ * change if a future EEPROM is not SPI. 
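++ * The word count is computed below as
++ * 1 << (EEC.SIZE + IXGBE_EEPROM_WORD_SIZE_BASE_SHIFT).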
++ */ ++ eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> ++ IXGBE_EEC_SIZE_SHIFT); ++ eeprom->word_size = 1 << (eeprom_size + ++ IXGBE_EEPROM_WORD_SIZE_BASE_SHIFT); ++ } ++ ++ if (eec & IXGBE_EEC_ADDR_SIZE) ++ eeprom->address_bits = 16; ++ else ++ eeprom->address_bits = 8; ++ hw_dbg(hw, "Eeprom params: type = %d, size = %d, address bits: " ++ "%d\n", eeprom->type, eeprom->word_size, ++ eeprom->address_bits); ++ } ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM ++ * @hw: pointer to hardware structure ++ * @offset: offset within the EEPROM to be written to ++ * @data: 16 bit word to be written to the EEPROM ++ * ++ * If ixgbe_eeprom_update_checksum is not called after this function, the ++ * EEPROM will most likely contain an invalid checksum. ++ **/ ++s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data) ++{ ++ s32 status; ++ u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI; ++ ++ hw->eeprom.ops.init_params(hw); ++ ++ if (offset >= hw->eeprom.word_size) { ++ status = IXGBE_ERR_EEPROM; ++ goto out; ++ } ++ ++ /* Prepare the EEPROM for writing */ ++ status = ixgbe_acquire_eeprom(hw); ++ ++ if (status == 0) { ++ if (ixgbe_ready_eeprom(hw) != 0) { ++ ixgbe_release_eeprom(hw); ++ status = IXGBE_ERR_EEPROM; ++ } ++ } ++ ++ if (status == 0) { ++ ixgbe_standby_eeprom(hw); ++ ++ /* Send the WRITE ENABLE command (8 bit opcode ) */ ++ ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_WREN_OPCODE_SPI, ++ IXGBE_EEPROM_OPCODE_BITS); ++ ++ ixgbe_standby_eeprom(hw); ++ ++ /* ++ * Some SPI eeproms use the 8th address bit embedded in the ++ * opcode ++ */ ++ if ((hw->eeprom.address_bits == 8) && (offset >= 128)) ++ write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI; ++ ++ /* Send the Write command (8-bit opcode + addr) */ ++ ixgbe_shift_out_eeprom_bits(hw, write_opcode, ++ IXGBE_EEPROM_OPCODE_BITS); ++ ixgbe_shift_out_eeprom_bits(hw, (u16)(offset*2), ++ hw->eeprom.address_bits); ++ ++ /* Send the data */ ++ data = (data >> 8) | (data << 8); ++ ixgbe_shift_out_eeprom_bits(hw, data, 16); ++ ixgbe_standby_eeprom(hw); ++ ++ /* Done with writing - release the EEPROM */ ++ ixgbe_release_eeprom(hw); ++ } ++ ++out: ++ return status; ++} ++ ++/** ++ * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang ++ * @hw: pointer to hardware structure ++ * @offset: offset within the EEPROM to be read ++ * @data: read 16 bit value from EEPROM ++ * ++ * Reads 16 bit value from EEPROM through bit-bang method ++ **/ ++s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, ++ u16 *data) ++{ ++ s32 status; ++ u16 word_in; ++ u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI; ++ ++ hw->eeprom.ops.init_params(hw); ++ ++ if (offset >= hw->eeprom.word_size) { ++ status = IXGBE_ERR_EEPROM; ++ goto out; ++ } ++ ++ /* Prepare the EEPROM for reading */ ++ status = ixgbe_acquire_eeprom(hw); ++ ++ if (status == 0) { ++ if (ixgbe_ready_eeprom(hw) != 0) { ++ ixgbe_release_eeprom(hw); ++ status = IXGBE_ERR_EEPROM; ++ } ++ } ++ ++ if (status == 0) { ++ ixgbe_standby_eeprom(hw); ++ ++ /* ++ * Some SPI eeproms use the 8th address bit embedded in the ++ * opcode ++ */ ++ if ((hw->eeprom.address_bits == 8) && (offset >= 128)) ++ read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI; ++ ++ /* Send the READ command (opcode + addr) */ ++ ixgbe_shift_out_eeprom_bits(hw, read_opcode, ++ IXGBE_EEPROM_OPCODE_BITS); ++ ixgbe_shift_out_eeprom_bits(hw, (u16)(offset*2), ++ hw->eeprom.address_bits); ++ ++ /* Read the data. 
*/ ++ word_in = ixgbe_shift_in_eeprom_bits(hw, 16); ++ *data = (word_in >> 8) | (word_in << 8); ++ ++ /* End this read operation */ ++ ixgbe_release_eeprom(hw); ++ } ++ ++out: ++ return status; ++} ++ ++/** ++ * ixgbe_read_eerd_generic - Read EEPROM word using EERD ++ * @hw: pointer to hardware structure ++ * @offset: offset of word in the EEPROM to read ++ * @data: word read from the EEPROM ++ * ++ * Reads a 16 bit word from the EEPROM using the EERD register. ++ **/ ++s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data) ++{ ++ u32 eerd; ++ s32 status; ++ ++ hw->eeprom.ops.init_params(hw); ++ ++ if (offset >= hw->eeprom.word_size) { ++ status = IXGBE_ERR_EEPROM; ++ goto out; ++ } ++ ++ eerd = (offset << IXGBE_EEPROM_RW_ADDR_SHIFT) + ++ IXGBE_EEPROM_RW_REG_START; ++ ++ IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd); ++ status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ); ++ ++ if (status == 0) ++ *data = (IXGBE_READ_REG(hw, IXGBE_EERD) >> ++ IXGBE_EEPROM_RW_REG_DATA); ++ else ++ hw_dbg(hw, "Eeprom read timed out\n"); ++ ++out: ++ return status; ++} ++ ++/** ++ * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status ++ * @hw: pointer to hardware structure ++ * @ee_reg: EEPROM flag for polling ++ * ++ * Polls the status bit (bit 1) of the EERD or EEWR to determine when the ++ * read or write is done respectively. ++ **/ ++s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg) ++{ ++ u32 i; ++ u32 reg; ++ s32 status = IXGBE_ERR_EEPROM; ++ ++ for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) { ++ if (ee_reg == IXGBE_NVM_POLL_READ) ++ reg = IXGBE_READ_REG(hw, IXGBE_EERD); ++ else ++ reg = IXGBE_READ_REG(hw, IXGBE_EEWR); ++ ++ if (reg & IXGBE_EEPROM_RW_REG_DONE) { ++ status = 0; ++ break; ++ } ++ udelay(5); ++ } ++ return status; ++} ++ ++/** ++ * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang ++ * @hw: pointer to hardware structure ++ * ++ * Prepares EEPROM for access using bit-bang method. This function should ++ * be called before issuing a command to the EEPROM. 
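++ * It takes the SWFW EEPROM semaphore, requests the EEC grant and then
++ * clears CS and SK so the bit-bang interface starts from a known state.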
++ **/ ++static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw) ++{ ++ s32 status = 0; ++ u32 eec; ++ u32 i; ++ ++ if (ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0) ++ status = IXGBE_ERR_SWFW_SYNC; ++ ++ if (status == 0) { ++ eec = IXGBE_READ_REG(hw, IXGBE_EEC); ++ ++ /* Request EEPROM Access */ ++ eec |= IXGBE_EEC_REQ; ++ IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); ++ ++ for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) { ++ eec = IXGBE_READ_REG(hw, IXGBE_EEC); ++ if (eec & IXGBE_EEC_GNT) ++ break; ++ udelay(5); ++ } ++ ++ /* Release if grant not acquired */ ++ if (!(eec & IXGBE_EEC_GNT)) { ++ eec &= ~IXGBE_EEC_REQ; ++ IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); ++ hw_dbg(hw, "Could not acquire EEPROM grant\n"); ++ ++ ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); ++ status = IXGBE_ERR_EEPROM; ++ } ++ } ++ ++ /* Setup EEPROM for Read/Write */ ++ if (status == 0) { ++ /* Clear CS and SK */ ++ eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK); ++ IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); ++ IXGBE_WRITE_FLUSH(hw); ++ udelay(1); ++ } ++ return status; ++} ++ ++/** ++ * ixgbe_get_eeprom_semaphore - Get hardware semaphore ++ * @hw: pointer to hardware structure ++ * ++ * Sets the hardware semaphores so EEPROM access can occur for bit-bang method ++ **/ ++static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw) ++{ ++ s32 status = IXGBE_ERR_EEPROM; ++ u32 timeout = 2000; ++ u32 i; ++ u32 swsm; ++ ++ /* Get SMBI software semaphore between device drivers first */ ++ for (i = 0; i < timeout; i++) { ++ /* ++ * If the SMBI bit is 0 when we read it, then the bit will be ++ * set and we have the semaphore ++ */ ++ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); ++ if (!(swsm & IXGBE_SWSM_SMBI)) { ++ status = 0; ++ break; ++ } ++ udelay(50); ++ } ++ ++ /* Now get the semaphore between SW/FW through the SWESMBI bit */ ++ if (status == 0) { ++ for (i = 0; i < timeout; i++) { ++ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); ++ ++ /* Set the SW EEPROM semaphore bit to request access */ ++ swsm |= IXGBE_SWSM_SWESMBI; ++ IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm); ++ ++ /* ++ * If we set the bit successfully then we got the ++ * semaphore. ++ */ ++ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); ++ if (swsm & IXGBE_SWSM_SWESMBI) ++ break; ++ ++ udelay(50); ++ } ++ ++ /* ++ * Release semaphores and return error if SW EEPROM semaphore ++ * was not granted because we don't have access to the EEPROM ++ */ ++ if (i >= timeout) { ++ hw_dbg(hw, "SWESMBI Software EEPROM semaphore " ++ "not granted.\n"); ++ ixgbe_release_eeprom_semaphore(hw); ++ status = IXGBE_ERR_EEPROM; ++ } ++ } else { ++ hw_dbg(hw, "Software semaphore SMBI between device drivers " ++ "not granted.\n"); ++ } ++ ++ return status; ++} ++ ++/** ++ * ixgbe_release_eeprom_semaphore - Release hardware semaphore ++ * @hw: pointer to hardware structure ++ * ++ * This function clears hardware semaphore bits. ++ **/ ++static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw) ++{ ++ u32 swsm; ++ ++ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); ++ ++ /* Release both semaphores by writing 0 to the bits SWESMBI ++ * and SMBI ++ */ ++ swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI); ++ IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm); ++ IXGBE_WRITE_FLUSH(hw); ++} ++ ++/** ++ * ixgbe_ready_eeprom - Polls for EEPROM ready ++ * @hw: pointer to hardware structure ++ **/ ++static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw) ++{ ++ s32 status = 0; ++ u16 i; ++ u8 spi_stat_reg; ++ ++ /* ++ * Read "Status Register" repeatedly until the LSB is cleared. 
The ++ * EEPROM will signal that the command has been completed by clearing ++ * bit 0 of the internal status register. If it's not cleared within ++ * 5 milliseconds, then error out. ++ */ ++ for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) { ++ ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI, ++ IXGBE_EEPROM_OPCODE_BITS); ++ spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8); ++ if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI)) ++ break; ++ ++ udelay(5); ++ ixgbe_standby_eeprom(hw); ++ }; ++ ++ /* ++ * On some parts, SPI write time could vary from 0-20mSec on 3.3V ++ * devices (and only 0-5mSec on 5V devices) ++ */ ++ if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) { ++ hw_dbg(hw, "SPI EEPROM Status error\n"); ++ status = IXGBE_ERR_EEPROM; ++ } ++ ++ return status; ++} ++ ++/** ++ * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state ++ * @hw: pointer to hardware structure ++ **/ ++static void ixgbe_standby_eeprom(struct ixgbe_hw *hw) ++{ ++ u32 eec; ++ ++ eec = IXGBE_READ_REG(hw, IXGBE_EEC); ++ ++ /* Toggle CS to flush commands */ ++ eec |= IXGBE_EEC_CS; ++ IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); ++ IXGBE_WRITE_FLUSH(hw); ++ udelay(1); ++ eec &= ~IXGBE_EEC_CS; ++ IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); ++ IXGBE_WRITE_FLUSH(hw); ++ udelay(1); ++} ++ ++/** ++ * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM. ++ * @hw: pointer to hardware structure ++ * @data: data to send to the EEPROM ++ * @count: number of bits to shift out ++ **/ ++static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, ++ u16 count) ++{ ++ u32 eec; ++ u32 mask; ++ u32 i; ++ ++ eec = IXGBE_READ_REG(hw, IXGBE_EEC); ++ ++ /* ++ * Mask is used to shift "count" bits of "data" out to the EEPROM ++ * one bit at a time. Determine the starting bit based on count ++ */ ++ mask = 0x01 << (count - 1); ++ ++ for (i = 0; i < count; i++) { ++ /* ++ * A "1" is shifted out to the EEPROM by setting bit "DI" to a ++ * "1", and then raising and then lowering the clock (the SK ++ * bit controls the clock input to the EEPROM). A "0" is ++ * shifted out to the EEPROM by setting "DI" to "0" and then ++ * raising and then lowering the clock. ++ */ ++ if (data & mask) ++ eec |= IXGBE_EEC_DI; ++ else ++ eec &= ~IXGBE_EEC_DI; ++ ++ IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); ++ IXGBE_WRITE_FLUSH(hw); ++ ++ udelay(1); ++ ++ ixgbe_raise_eeprom_clk(hw, &eec); ++ ixgbe_lower_eeprom_clk(hw, &eec); ++ ++ /* ++ * Shift mask to signify next bit of data to shift in to the ++ * EEPROM ++ */ ++ mask = mask >> 1; ++ }; ++ ++ /* We leave the "DI" bit set to "0" when we leave this routine. */ ++ eec &= ~IXGBE_EEC_DI; ++ IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); ++ IXGBE_WRITE_FLUSH(hw); ++} ++ ++/** ++ * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM ++ * @hw: pointer to hardware structure ++ **/ ++static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count) ++{ ++ u32 eec; ++ u32 i; ++ u16 data = 0; ++ ++ /* ++ * In order to read a register from the EEPROM, we need to shift ++ * 'count' bits in from the EEPROM. Bits are "shifted in" by raising ++ * the clock input to the EEPROM (setting the SK bit), and then reading ++ * the value of the "DO" bit. During this "shifting in" process the ++ * "DI" bit should always be clear. 
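++ * Bits are assembled MSB first: the result is shifted left and the DO
++ * bit is ORed in after each clock pulse.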
++ */ ++ eec = IXGBE_READ_REG(hw, IXGBE_EEC); ++ ++ eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI); ++ ++ for (i = 0; i < count; i++) { ++ data = data << 1; ++ ixgbe_raise_eeprom_clk(hw, &eec); ++ ++ eec = IXGBE_READ_REG(hw, IXGBE_EEC); ++ ++ eec &= ~(IXGBE_EEC_DI); ++ if (eec & IXGBE_EEC_DO) ++ data |= 1; ++ ++ ixgbe_lower_eeprom_clk(hw, &eec); ++ } ++ ++ return data; ++} ++ ++/** ++ * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input. ++ * @hw: pointer to hardware structure ++ * @eec: EEC register's current value ++ **/ ++static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) ++{ ++ /* ++ * Raise the clock input to the EEPROM ++ * (setting the SK bit), then delay ++ */ ++ *eec = *eec | IXGBE_EEC_SK; ++ IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec); ++ IXGBE_WRITE_FLUSH(hw); ++ udelay(1); ++} ++ ++/** ++ * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input. ++ * @hw: pointer to hardware structure ++ * @eecd: EECD's current value ++ **/ ++static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) ++{ ++ /* ++ * Lower the clock input to the EEPROM (clearing the SK bit), then ++ * delay ++ */ ++ *eec = *eec & ~IXGBE_EEC_SK; ++ IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec); ++ IXGBE_WRITE_FLUSH(hw); ++ udelay(1); ++} ++ ++/** ++ * ixgbe_release_eeprom - Release EEPROM, release semaphores ++ * @hw: pointer to hardware structure ++ **/ ++static void ixgbe_release_eeprom(struct ixgbe_hw *hw) ++{ ++ u32 eec; ++ ++ eec = IXGBE_READ_REG(hw, IXGBE_EEC); ++ ++ eec |= IXGBE_EEC_CS; /* Pull CS high */ ++ eec &= ~IXGBE_EEC_SK; /* Lower SCK */ ++ ++ IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); ++ IXGBE_WRITE_FLUSH(hw); ++ ++ udelay(1); ++ ++ /* Stop requesting EEPROM access */ ++ eec &= ~IXGBE_EEC_REQ; ++ IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); ++ ++ ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); ++ ++ /* Delay before attempt to obtain semaphore again to allow FW access */ ++ msleep(hw->eeprom.semaphore_delay); ++} ++ ++/** ++ * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum ++ * @hw: pointer to hardware structure ++ **/ ++u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) ++{ ++ u16 i; ++ u16 j; ++ u16 checksum = 0; ++ u16 length = 0; ++ u16 pointer = 0; ++ u16 word = 0; ++ ++ /* Include 0x0-0x3F in the checksum */ ++ for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) { ++ if (hw->eeprom.ops.read(hw, i, &word) != 0) { ++ hw_dbg(hw, "EEPROM read failed\n"); ++ break; ++ } ++ checksum += word; ++ } ++ ++ /* Include all data from pointers except for the fw pointer */ ++ for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) { ++ hw->eeprom.ops.read(hw, i, &pointer); ++ ++ /* Make sure the pointer seems valid */ ++ if (pointer != 0xFFFF && pointer != 0) { ++ hw->eeprom.ops.read(hw, pointer, &length); ++ ++ if (length != 0xFFFF && length != 0) { ++ for (j = pointer+1; j <= pointer+length; j++) { ++ hw->eeprom.ops.read(hw, j, &word); ++ checksum += word; ++ } ++ } ++ } ++ } ++ ++ checksum = (u16)IXGBE_EEPROM_SUM - checksum; ++ ++ return checksum; ++} ++ ++/** ++ * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum ++ * @hw: pointer to hardware structure ++ * @checksum_val: calculated checksum ++ * ++ * Performs checksum calculation and validates the EEPROM checksum. If the ++ * caller does not need checksum_val, the value can be NULL. ++ **/ ++s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, ++ u16 *checksum_val) ++{ ++ s32 status; ++ u16 checksum; ++ u16 read_checksum = 0; ++ ++ /* ++ * Read the first word from the EEPROM. 
If this times out or fails, do ++ * not continue or we could be in for a very long wait while every ++ * EEPROM read fails ++ */ ++ status = hw->eeprom.ops.read(hw, 0, &checksum); ++ ++ if (status == 0) { ++ checksum = hw->eeprom.ops.calc_checksum(hw); ++ ++ hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum); ++ ++ /* ++ * Verify read checksum from EEPROM is the same as ++ * calculated checksum ++ */ ++ if (read_checksum != checksum) ++ status = IXGBE_ERR_EEPROM_CHECKSUM; ++ ++ /* If the user cares, return the calculated checksum */ ++ if (checksum_val) ++ *checksum_val = checksum; ++ } else { ++ hw_dbg(hw, "EEPROM read failed\n"); ++ } ++ ++ return status; ++} ++ ++/** ++ * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum ++ * @hw: pointer to hardware structure ++ **/ ++s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw) ++{ ++ s32 status; ++ u16 checksum; ++ ++ /* ++ * Read the first word from the EEPROM. If this times out or fails, do ++ * not continue or we could be in for a very long wait while every ++ * EEPROM read fails ++ */ ++ status = hw->eeprom.ops.read(hw, 0, &checksum); ++ ++ if (status == 0) { ++ checksum = hw->eeprom.ops.calc_checksum(hw); ++ status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, ++ checksum); ++ } else { ++ hw_dbg(hw, "EEPROM read failed\n"); ++ } ++ ++ return status; ++} ++ ++/** ++ * ixgbe_validate_mac_addr - Validate MAC address ++ * @mac_addr: pointer to MAC address. ++ * ++ * Tests a MAC address to ensure it is a valid Individual Address ++ **/ ++s32 ixgbe_validate_mac_addr(u8 *mac_addr) ++{ ++ s32 status = 0; ++ ++ /* Make sure it is not a multicast address */ ++ if (IXGBE_IS_MULTICAST(mac_addr)) { ++ hw_dbg(hw, "MAC address is multicast\n"); ++ status = IXGBE_ERR_INVALID_MAC_ADDR; ++ /* Not a broadcast address */ ++ } else if (IXGBE_IS_BROADCAST(mac_addr)) { ++ hw_dbg(hw, "MAC address is broadcast\n"); ++ status = IXGBE_ERR_INVALID_MAC_ADDR; ++ /* Reject the zero address */ ++ } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 && ++ mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) { ++ hw_dbg(hw, "MAC address is all zeros\n"); ++ status = IXGBE_ERR_INVALID_MAC_ADDR; ++ } ++ return status; ++} ++ ++/** ++ * ixgbe_set_rar_generic - Set Rx address register ++ * @hw: pointer to hardware structure ++ * @index: Receive address register to write ++ * @addr: Address to put into receive address register ++ * @vmdq: VMDq "set" or "pool" index ++ * @enable_addr: set flag that address is active ++ * ++ * Puts an ethernet address into a receive address register. ++ **/ ++s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, ++ u32 enable_addr) ++{ ++ u32 rar_low, rar_high; ++ u32 rar_entries = hw->mac.num_rar_entries; ++ ++ /* setup VMDq pool selection before this RAR gets enabled */ ++ hw->mac.ops.set_vmdq(hw, index, vmdq); ++ ++ /* Make sure we are using a valid rar index range */ ++ if (index < rar_entries) { ++ /* ++ * HW expects these in little endian so we reverse the byte ++ * order from network order (big endian) to little endian ++ */ ++ rar_low = ((u32)addr[0] | ++ ((u32)addr[1] << 8) | ++ ((u32)addr[2] << 16) | ++ ((u32)addr[3] << 24)); ++ /* ++ * Some parts put the VMDq setting in the extra RAH bits, ++ * so save everything except the lower 16 bits that hold part ++ * of the address and the address valid bit. 
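++ * RAH[15:0] receives the upper two bytes of the address and RAH.AV
++ * marks the entry valid.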
++ */ ++ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); ++ rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV); ++ rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8)); ++ ++ if (enable_addr != 0) ++ rar_high |= IXGBE_RAH_AV; ++ ++ IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low); ++ IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); ++ } else { ++ hw_dbg(hw, "RAR index %d is out of range.\n", index); ++ } ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_clear_rar_generic - Remove Rx address register ++ * @hw: pointer to hardware structure ++ * @index: Receive address register to write ++ * ++ * Clears an ethernet address from a receive address register. ++ **/ ++s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index) ++{ ++ u32 rar_high; ++ u32 rar_entries = hw->mac.num_rar_entries; ++ ++ /* Make sure we are using a valid rar index range */ ++ if (index < rar_entries) { ++ /* ++ * Some parts put the VMDq setting in the extra RAH bits, ++ * so save everything except the lower 16 bits that hold part ++ * of the address and the address valid bit. ++ */ ++ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); ++ rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV); ++ ++ IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0); ++ IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); ++ } else { ++ hw_dbg(hw, "RAR index %d is out of range.\n", index); ++ } ++ ++ /* clear VMDq pool/queue selection for this RAR */ ++ hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL); ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_init_rx_addrs_generic - Initializes receive address filters. ++ * @hw: pointer to hardware structure ++ * ++ * Places the MAC address in receive address register 0 and clears the rest ++ * of the receive address registers. Clears the multicast table. Assumes ++ * the receiver is in reset when the routine is called. ++ **/ ++s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw) ++{ ++ u32 i; ++ u32 rar_entries = hw->mac.num_rar_entries; ++ ++ /* ++ * If the current mac address is valid, assume it is a software override ++ * to the permanent address. ++ * Otherwise, use the permanent address from the eeprom. ++ */ ++ if (ixgbe_validate_mac_addr(hw->mac.addr) == ++ IXGBE_ERR_INVALID_MAC_ADDR) { ++ /* Get the MAC address from the RAR0 for later reference */ ++ hw->mac.ops.get_mac_addr(hw, hw->mac.addr); ++ ++ hw_dbg(hw, " Keeping Current RAR0 Addr =%.2X %.2X %.2X ", ++ hw->mac.addr[0], hw->mac.addr[1], ++ hw->mac.addr[2]); ++ hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3], ++ hw->mac.addr[4], hw->mac.addr[5]); ++ } else { ++ /* Setup the receive address. */ ++ hw_dbg(hw, "Overriding MAC Address in RAR[0]\n"); ++ hw_dbg(hw, " New MAC Addr =%.2X %.2X %.2X ", ++ hw->mac.addr[0], hw->mac.addr[1], ++ hw->mac.addr[2]); ++ hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3], ++ hw->mac.addr[4], hw->mac.addr[5]); ++ ++ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); ++ } ++ hw->addr_ctrl.overflow_promisc = 0; ++ ++ hw->addr_ctrl.rar_used_count = 1; ++ ++ /* Zero out the other receive addresses. */ ++ hw_dbg(hw, "Clearing RAR[1-%d]\n", rar_entries - 1); ++ for (i = 1; i < rar_entries; i++) { ++ IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); ++ IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); ++ } ++ ++ /* Clear the MTA */ ++ hw->addr_ctrl.mta_in_use = 0; ++ IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); ++ ++ hw_dbg(hw, " Clearing MTA\n"); ++ for (i = 0; i < hw->mac.mcft_size; i++) ++ IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0); ++ ++ ixgbe_init_uta_tables(hw); ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_add_uc_addr - Adds a secondary unicast address. 
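++ * @vmdq: VMDq pool to associate with the address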
++ * @hw: pointer to hardware structure ++ * @addr: new address ++ * ++ * Adds it to unused receive address register or goes into promiscuous mode. ++ **/ ++void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq) ++{ ++ u32 rar_entries = hw->mac.num_rar_entries; ++ u32 rar; ++ ++ hw_dbg(hw, " UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n", ++ addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); ++ ++ /* ++ * Place this address in the RAR if there is room, ++ * else put the controller into promiscuous mode ++ */ ++ if (hw->addr_ctrl.rar_used_count < rar_entries) { ++ rar = hw->addr_ctrl.rar_used_count; ++ hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV); ++ hw_dbg(hw, "Added a secondary address to RAR[%d]\n", rar); ++ hw->addr_ctrl.rar_used_count++; ++ } else { ++ hw->addr_ctrl.overflow_promisc++; ++ } ++ ++ hw_dbg(hw, "ixgbe_add_uc_addr Complete\n"); ++} ++ ++/** ++ * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses ++ * @hw: pointer to hardware structure ++ * @addr_list: the list of new addresses ++ * @addr_count: number of addresses ++ * @next: iterator function to walk the address list ++ * ++ * The given list replaces any existing list. Clears the secondary addrs from ++ * receive address registers. Uses unused receive address registers for the ++ * first secondary addresses, and falls back to promiscuous mode as needed. ++ * ++ * Drivers using secondary unicast addresses must set user_set_promisc when ++ * manually putting the device into promiscuous mode. ++ **/ ++s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list, ++ u32 addr_count, ixgbe_mc_addr_itr next) ++{ ++ u8 *addr; ++ u32 i; ++ u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc; ++ u32 uc_addr_in_use; ++ u32 fctrl; ++ u32 vmdq; ++ ++ /* ++ * Clear accounting of old secondary address list, ++ * don't count RAR[0] ++ */ ++ uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1; ++ hw->addr_ctrl.rar_used_count -= uc_addr_in_use; ++ hw->addr_ctrl.overflow_promisc = 0; ++ ++ /* Zero out the other receive addresses */ ++ hw_dbg(hw, "Clearing RAR[1-%d]\n", uc_addr_in_use+1); ++ for (i = 0; i < uc_addr_in_use; i++) { ++ IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0); ++ IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0); ++ } ++ ++ /* Add the new addresses */ ++ for (i = 0; i < addr_count; i++) { ++ hw_dbg(hw, " Adding the secondary addresses:\n"); ++ addr = next(hw, &addr_list, &vmdq); ++ ixgbe_add_uc_addr(hw, addr, vmdq); ++ } ++ ++ if (hw->addr_ctrl.overflow_promisc) { ++ /* enable promisc if not already in overflow or set by user */ ++ if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) { ++ hw_dbg(hw, " Entering address overflow promisc mode\n"); ++ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); ++ fctrl |= IXGBE_FCTRL_UPE; ++ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); ++ } ++ } else { ++ /* only disable if set by overflow, not by user */ ++ if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) { ++ hw_dbg(hw, " Leaving address overflow promisc mode\n"); ++ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); ++ fctrl &= ~IXGBE_FCTRL_UPE; ++ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); ++ } ++ } ++ ++ hw_dbg(hw, "ixgbe_update_uc_addr_list_generic Complete\n"); ++ return 0; ++} ++ ++/** ++ * ixgbe_mta_vector - Determines bit-vector in multicast table to set ++ * @hw: pointer to hardware structure ++ * @mc_addr: the multicast address ++ * ++ * Extracts the 12 bits, from a multicast address, to determine which ++ * bit-vector to set in the multicast table. 
The hardware uses 12 bits, from ++ * incoming rx multicast addresses, to determine the bit-vector to check in ++ * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set ++ * by the MO field of the MCSTCTRL. The MO field is set during initialization ++ * to mc_filter_type. ++ **/ ++static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) ++{ ++ u32 vector = 0; ++ ++ switch (hw->mac.mc_filter_type) { ++ case 0: /* use bits [47:36] of the address */ ++ vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); ++ break; ++ case 1: /* use bits [46:35] of the address */ ++ vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); ++ break; ++ case 2: /* use bits [45:34] of the address */ ++ vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); ++ break; ++ case 3: /* use bits [43:32] of the address */ ++ vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); ++ break; ++ default: /* Invalid mc_filter_type */ ++ hw_dbg(hw, "MC filter type param set incorrectly\n"); ++ break; ++ } ++ ++ /* vector can only be 12-bits or boundary will be exceeded */ ++ vector &= 0xFFF; ++ return vector; ++} ++ ++/** ++ * ixgbe_set_mta - Set bit-vector in multicast table ++ * @hw: pointer to hardware structure ++ * @hash_value: Multicast address hash value ++ * ++ * Sets the bit-vector in the multicast table. ++ **/ ++void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr) ++{ ++ u32 vector; ++ u32 vector_bit; ++ u32 vector_reg; ++ ++ hw->addr_ctrl.mta_in_use++; ++ ++ vector = ixgbe_mta_vector(hw, mc_addr); ++ hw_dbg(hw, " bit-vector = 0x%03X\n", vector); ++ ++ /* ++ * The MTA is a register array of 128 32-bit registers. It is treated ++ * like an array of 4096 bits. We want to set bit ++ * BitArray[vector_value]. So we figure out what register the bit is ++ * in, read it, OR in the new bit, then write back the new value. The ++ * register is determined by the upper 7 bits of the vector value and ++ * the bit within that register are determined by the lower 5 bits of ++ * the value. ++ */ ++ vector_reg = (vector >> 5) & 0x7F; ++ vector_bit = vector & 0x1F; ++ hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit); ++} ++ ++/** ++ * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses ++ * @hw: pointer to hardware structure ++ * @mc_addr_list: the list of new multicast addresses ++ * @mc_addr_count: number of addresses ++ * @next: iterator function to walk the multicast address list ++ * ++ * The given list replaces any existing list. Clears the MC addrs from receive ++ * address registers and the multicast table. Uses unused receive address ++ * registers for the first multicast addresses, and hashes the rest into the ++ * multicast table. ++ **/ ++s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list, ++ u32 mc_addr_count, ixgbe_mc_addr_itr next) ++{ ++ u32 i; ++ u32 vmdq; ++ ++ /* ++ * Set the new number of MC addresses that we are being requested to ++ * use. 
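++ * The MTA is rebuilt in the mta_shadow array and written back to
++ * hardware in a single pass below.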
++ */ ++ hw->addr_ctrl.num_mc_addrs = mc_addr_count; ++ hw->addr_ctrl.mta_in_use = 0; ++ ++ /* Clear mta_shadow */ ++ hw_dbg(hw, " Clearing MTA\n"); ++ memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); ++ ++ /* Update mta_shadow */ ++ for (i = 0; i < mc_addr_count; i++) { ++ hw_dbg(hw, " Adding the multicast addresses:\n"); ++ ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq)); ++ } ++ ++ /* Enable mta */ ++ for (i = 0; i < hw->mac.mcft_size; i++) ++ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i, ++ hw->mac.mta_shadow[i]); ++ ++ if (hw->addr_ctrl.mta_in_use > 0) ++ IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, ++ IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); ++ ++ hw_dbg(hw, "ixgbe_update_mc_addr_list_generic Complete\n"); ++ return 0; ++} ++ ++/** ++ * ixgbe_enable_mc_generic - Enable multicast address in RAR ++ * @hw: pointer to hardware structure ++ * ++ * Enables multicast address in RAR and the use of the multicast hash table. ++ **/ ++s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw) ++{ ++ struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; ++ ++ if (a->mta_in_use > 0) ++ IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE | ++ hw->mac.mc_filter_type); ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_disable_mc_generic - Disable multicast address in RAR ++ * @hw: pointer to hardware structure ++ * ++ * Disables multicast address in RAR and the use of the multicast hash table. ++ **/ ++s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw) ++{ ++ struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; ++ ++ if (a->mta_in_use > 0) ++ IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_fc_enable_generic - Enable flow control ++ * @hw: pointer to hardware structure ++ * @packetbuf_num: packet buffer number (0-7) ++ * ++ * Enable flow control according to the current settings. ++ **/ ++s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num) ++{ ++ s32 ret_val = 0; ++ u32 mflcn_reg, fccfg_reg; ++ u32 reg; ++ u32 rx_pba_size; ++ ++#ifdef CONFIG_DCB ++ if (hw->fc.requested_mode == ixgbe_fc_pfc) ++ goto out; ++ ++#endif /* CONFIG_DCB */ ++ /* Negotiate the fc mode to use */ ++ ret_val = ixgbe_fc_autoneg(hw); ++ if (ret_val == IXGBE_ERR_FLOW_CONTROL) ++ goto out; ++ ++ /* Disable any previous flow control settings */ ++ mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); ++ mflcn_reg &= ~(IXGBE_MFLCN_RFCE | IXGBE_MFLCN_RPFCE); ++ ++ fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG); ++ fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY); ++ ++ /* ++ * The possible values of fc.current_mode are: ++ * 0: Flow control is completely disabled ++ * 1: Rx flow control is enabled (we can receive pause frames, ++ * but not send pause frames). ++ * 2: Tx flow control is enabled (we can send pause frames but ++ * we do not support receiving pause frames). ++ * 3: Both Rx and Tx flow control (symmetric) are enabled. ++#ifdef CONFIG_DCB ++ * 4: Priority Flow Control is enabled. ++#endif ++ * other: Invalid. ++ */ ++ switch (hw->fc.current_mode) { ++ case ixgbe_fc_none: ++ /* Flow control is disabled by software override or autoneg. ++ * The code below will actually disable it in the HW. ++ */ ++ break; ++ case ixgbe_fc_rx_pause: ++ /* ++ * Rx Flow control is enabled and Tx Flow control is ++ * disabled by software override. Since there really ++ * isn't a way to advertise that we are capable of RX ++ * Pause ONLY, we will advertise that we support both ++ * symmetric and asymmetric Rx PAUSE. Later, we will ++ * disable the adapter's ability to send PAUSE frames. 
++ */ ++ mflcn_reg |= IXGBE_MFLCN_RFCE; ++ break; ++ case ixgbe_fc_tx_pause: ++ /* ++ * Tx Flow control is enabled, and Rx Flow control is ++ * disabled by software override. ++ */ ++ fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X; ++ break; ++ case ixgbe_fc_full: ++ /* Flow control (both Rx and Tx) is enabled by SW override. */ ++ mflcn_reg |= IXGBE_MFLCN_RFCE; ++ fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X; ++ break; ++#ifdef CONFIG_DCB ++ case ixgbe_fc_pfc: ++ goto out; ++ break; ++#endif /* CONFIG_DCB */ ++ default: ++ hw_dbg(hw, "Flow control param set incorrectly\n"); ++ ret_val = IXGBE_ERR_CONFIG; ++ goto out; ++ break; ++ } ++ ++ /* Set 802.3x based flow control settings. */ ++ mflcn_reg |= IXGBE_MFLCN_DPF; ++ IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg); ++ IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg); ++ ++ reg = IXGBE_READ_REG(hw, IXGBE_MTQC); ++ /* Thresholds are different for link flow control when in DCB mode */ ++ if (reg & IXGBE_MTQC_RT_ENA) { ++ rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num)); ++ ++ /* Always disable XON for LFC when in DCB mode */ ++ reg = (rx_pba_size >> 5) & 0xFFE0; ++ IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), reg); ++ ++ reg = (rx_pba_size >> 2) & 0xFFE0; ++ if (hw->fc.current_mode & ixgbe_fc_tx_pause) ++ reg |= IXGBE_FCRTH_FCEN; ++ IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), reg); ++ } else { ++ /* Set up and enable Rx high/low water mark thresholds, ++ * enable XON. */ ++ if (hw->fc.current_mode & ixgbe_fc_tx_pause) { ++ if (hw->fc.send_xon) { ++ IXGBE_WRITE_REG(hw, ++ IXGBE_FCRTL_82599(packetbuf_num), ++ (hw->fc.low_water | ++ IXGBE_FCRTL_XONE)); ++ } else { ++ IXGBE_WRITE_REG(hw, ++ IXGBE_FCRTL_82599(packetbuf_num), ++ hw->fc.low_water); ++ } ++ ++ IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), ++ (hw->fc.high_water | IXGBE_FCRTH_FCEN)); ++ } ++ } ++ ++ /* Configure pause time (2 TCs per register) */ ++ reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2)); ++ if ((packetbuf_num & 1) == 0) ++ reg = (reg & 0xFFFF0000) | hw->fc.pause_time; ++ else ++ reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16); ++ IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg); ++ ++ IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1)); ++ ++out: ++ return ret_val; ++} ++ ++/** ++ * ixgbe_fc_autoneg - Configure flow control ++ * @hw: pointer to hardware structure ++ * ++ * Compares our advertised flow control capabilities to those advertised by ++ * our link partner, and determines the proper flow control mode to use. ++ **/ ++s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw) ++{ ++ s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; ++ ixgbe_link_speed speed; ++ bool link_up; ++ ++ if (hw->fc.disable_fc_autoneg) ++ goto out; ++ ++ /* ++ * AN should have completed when the cable was plugged in. ++ * Look for reasons to bail out. Bail out if: ++ * - FC autoneg is disabled, or if ++ * - link is not up. ++ * ++ * Since we're being called from an LSC, link is already known to be up. ++ * So use link_up_wait_to_complete=false. 
++ */ ++ hw->mac.ops.check_link(hw, &speed, &link_up, false); ++ if (!link_up) { ++ ret_val = IXGBE_ERR_FLOW_CONTROL; ++ goto out; ++ } ++ ++ switch (hw->phy.media_type) { ++ /* Autoneg flow control on fiber adapters */ ++ case ixgbe_media_type_fiber: ++ if (speed == IXGBE_LINK_SPEED_1GB_FULL) ++ ret_val = ixgbe_fc_autoneg_fiber(hw); ++ break; ++ ++ /* Autoneg flow control on backplane adapters */ ++ case ixgbe_media_type_backplane: ++ ret_val = ixgbe_fc_autoneg_backplane(hw); ++ break; ++ ++ /* Autoneg flow control on copper adapters */ ++ case ixgbe_media_type_copper: ++ if (ixgbe_device_supports_autoneg_fc(hw) == 0) ++ ret_val = ixgbe_fc_autoneg_copper(hw); ++ break; ++ ++ default: ++ break; ++ } ++ ++out: ++ if (ret_val == 0) { ++ hw->fc.fc_was_autonegged = true; ++ } else { ++ hw->fc.fc_was_autonegged = false; ++ hw->fc.current_mode = hw->fc.requested_mode; ++ } ++ return ret_val; ++} ++ ++/** ++ * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber ++ * @hw: pointer to hardware structure ++ * @speed: ++ * @link_up ++ * ++ * Enable flow control according on 1 gig fiber. ++ **/ ++static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw) ++{ ++ u32 pcs_anadv_reg, pcs_lpab_reg, linkstat; ++ s32 ret_val; ++ ++ /* ++ * On multispeed fiber at 1g, bail out if ++ * - link is up but AN did not complete, or if ++ * - link is up and AN completed but timed out ++ */ ++ ++ linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); ++ if (((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) || ++ ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) { ++ ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; ++ goto out; ++ } ++ ++ pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); ++ pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); ++ ++ ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg, ++ pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE, ++ IXGBE_PCS1GANA_ASM_PAUSE, ++ IXGBE_PCS1GANA_SYM_PAUSE, ++ IXGBE_PCS1GANA_ASM_PAUSE); ++ ++out: ++ return ret_val; ++} ++ ++/** ++ * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37 ++ * @hw: pointer to hardware structure ++ * ++ * Enable flow control according to IEEE clause 37. ++ **/ ++static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw) ++{ ++ u32 links2, anlp1_reg, autoc_reg, links; ++ s32 ret_val; ++ ++ /* ++ * On backplane, bail out if ++ * - backplane autoneg was not completed, or if ++ * - we are 82599 and link partner is not AN enabled ++ */ ++ links = IXGBE_READ_REG(hw, IXGBE_LINKS); ++ if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) { ++ hw->fc.fc_was_autonegged = false; ++ hw->fc.current_mode = hw->fc.requested_mode; ++ ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; ++ goto out; ++ } ++ ++ if (hw->mac.type == ixgbe_mac_82599EB) { ++ links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2); ++ if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) { ++ hw->fc.fc_was_autonegged = false; ++ hw->fc.current_mode = hw->fc.requested_mode; ++ ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; ++ goto out; ++ } ++ } ++ /* ++ * Read the 10g AN autoc and LP ability registers and resolve ++ * local flow control settings accordingly ++ */ ++ autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); ++ anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1); ++ ++ ret_val = ixgbe_negotiate_fc(hw, autoc_reg, ++ anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE, ++ IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE); ++ ++out: ++ return ret_val; ++} ++ ++/** ++ * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37 ++ * @hw: pointer to hardware structure ++ * ++ * Enable flow control according to IEEE clause 37. 
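++ * Resolves the flow control mode from the local and link partner
++ * advertisement registers read over MDIO.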
++ **/ ++static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw) ++{ ++ u16 technology_ability_reg = 0; ++ u16 lp_technology_ability_reg = 0; ++ ++ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ++ &technology_ability_reg); ++ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ++ &lp_technology_ability_reg); ++ ++ return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg, ++ (u32)lp_technology_ability_reg, ++ IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE, ++ IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE); ++} ++ ++/** ++ * ixgbe_negotiate_fc - Negotiate flow control ++ * @hw: pointer to hardware structure ++ * @adv_reg: flow control advertised settings ++ * @lp_reg: link partner's flow control settings ++ * @adv_sym: symmetric pause bit in advertisement ++ * @adv_asm: asymmetric pause bit in advertisement ++ * @lp_sym: symmetric pause bit in link partner advertisement ++ * @lp_asm: asymmetric pause bit in link partner advertisement ++ * ++ * Find the intersection between advertised settings and link partner's ++ * advertised settings ++ **/ ++static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, ++ u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm) ++{ ++ if ((!(adv_reg)) || (!(lp_reg))) ++ return IXGBE_ERR_FC_NOT_NEGOTIATED; ++ ++ if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) { ++ /* ++ * Now we need to check if the user selected Rx ONLY ++ * of pause frames. In this case, we had to advertise ++ * FULL flow control because we could not advertise RX ++ * ONLY. Hence, we must now check to see if we need to ++ * turn OFF the TRANSMISSION of PAUSE frames. ++ */ ++ if (hw->fc.requested_mode == ixgbe_fc_full) { ++ hw->fc.current_mode = ixgbe_fc_full; ++ hw_dbg(hw, "Flow Control = FULL.\n"); ++ } else { ++ hw->fc.current_mode = ixgbe_fc_rx_pause; ++ hw_dbg(hw, "Flow Control=RX PAUSE frames only\n"); ++ } ++ } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) && ++ (lp_reg & lp_sym) && (lp_reg & lp_asm)) { ++ hw->fc.current_mode = ixgbe_fc_tx_pause; ++ hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n"); ++ } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) && ++ !(lp_reg & lp_sym) && (lp_reg & lp_asm)) { ++ hw->fc.current_mode = ixgbe_fc_rx_pause; ++ hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n"); ++ } else { ++ hw->fc.current_mode = ixgbe_fc_none; ++ hw_dbg(hw, "Flow Control = NONE.\n"); ++ } ++ return 0; ++} ++ ++/** ++ * ixgbe_setup_fc - Set up flow control ++ * @hw: pointer to hardware structure ++ * ++ * Called at init time to set up flow control. ++ **/ ++s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num) ++{ ++ s32 ret_val = 0; ++ u32 reg = 0, reg_bp = 0; ++ u16 reg_cu = 0; ++ ++#ifdef CONFIG_DCB ++ if (hw->fc.requested_mode == ixgbe_fc_pfc) { ++ hw->fc.current_mode = hw->fc.requested_mode; ++ goto out; ++ } ++ ++#endif /* CONFIG_DCB */ ++ ++ /* Validate the packetbuf configuration */ ++ if (packetbuf_num < 0 || packetbuf_num > 7) { ++ hw_dbg(hw, "Invalid packet buffer number [%d], expected range is" ++ " 0-7\n", packetbuf_num); ++ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; ++ goto out; ++ } ++ ++ /* ++ * Validate the water mark configuration. Zero water marks are invalid ++ * because it causes the controller to just blast out fc packets. ++ */ ++ if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) { ++ hw_dbg(hw, "Invalid water mark configuration\n"); ++ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; ++ goto out; ++ } ++ ++ /* ++ * Validate the requested mode. 
Strict IEEE mode does not allow ++ * ixgbe_fc_rx_pause because it will cause us to fail at UNH. ++ */ ++ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { ++ hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); ++ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; ++ goto out; ++ } ++ ++ /* ++ * 10gig parts do not have a word in the EEPROM to determine the ++ * default flow control setting, so we explicitly set it to full. ++ */ ++ if (hw->fc.requested_mode == ixgbe_fc_default) ++ hw->fc.requested_mode = ixgbe_fc_full; ++ ++ /* ++ * Set up the 1G and 10G flow control advertisement registers so the ++ * HW will be able to do fc autoneg once the cable is plugged in. If ++ * we link at 10G, the 1G advertisement is harmless and vice versa. ++ */ ++ ++ switch (hw->phy.media_type) { ++ case ixgbe_media_type_fiber: ++ case ixgbe_media_type_backplane: ++ reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); ++ reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC); ++ break; ++ ++ case ixgbe_media_type_copper: ++ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®_cu); ++ break; ++ ++ default: ++ ; ++ } ++ ++ /* ++ * The possible values of fc.requested_mode are: ++ * 0: Flow control is completely disabled ++ * 1: Rx flow control is enabled (we can receive pause frames, ++ * but not send pause frames). ++ * 2: Tx flow control is enabled (we can send pause frames but ++ * we do not support receiving pause frames). ++ * 3: Both Rx and Tx flow control (symmetric) are enabled. ++#ifdef CONFIG_DCB ++ * 4: Priority Flow Control is enabled. ++#endif ++ * other: Invalid. ++ */ ++ switch (hw->fc.requested_mode) { ++ case ixgbe_fc_none: ++ /* Flow control completely disabled by software override. */ ++ reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); ++ if (hw->phy.media_type == ixgbe_media_type_backplane) ++ reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE | ++ IXGBE_AUTOC_ASM_PAUSE); ++ else if (hw->phy.media_type == ixgbe_media_type_copper) ++ reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE); ++ break; ++ case ixgbe_fc_rx_pause: ++ /* ++ * Rx Flow control is enabled and Tx Flow control is ++ * disabled by software override. Since there really ++ * isn't a way to advertise that we are capable of RX ++ * Pause ONLY, we will advertise that we support both ++ * symmetric and asymmetric Rx PAUSE. Later, we will ++ * disable the adapter's ability to send PAUSE frames. ++ */ ++ reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); ++ if (hw->phy.media_type == ixgbe_media_type_backplane) ++ reg_bp |= (IXGBE_AUTOC_SYM_PAUSE | ++ IXGBE_AUTOC_ASM_PAUSE); ++ else if (hw->phy.media_type == ixgbe_media_type_copper) ++ reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE); ++ break; ++ case ixgbe_fc_tx_pause: ++ /* ++ * Tx Flow control is enabled, and Rx Flow control is ++ * disabled by software override. ++ */ ++ reg |= (IXGBE_PCS1GANA_ASM_PAUSE); ++ reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE); ++ if (hw->phy.media_type == ixgbe_media_type_backplane) { ++ reg_bp |= (IXGBE_AUTOC_ASM_PAUSE); ++ reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE); ++ } else if (hw->phy.media_type == ixgbe_media_type_copper) { ++ reg_cu |= (IXGBE_TAF_ASM_PAUSE); ++ reg_cu &= ~(IXGBE_TAF_SYM_PAUSE); ++ } ++ break; ++ case ixgbe_fc_full: ++ /* Flow control (both Rx and Tx) is enabled by SW override. 
*/ ++ reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); ++ if (hw->phy.media_type == ixgbe_media_type_backplane) ++ reg_bp |= (IXGBE_AUTOC_SYM_PAUSE | ++ IXGBE_AUTOC_ASM_PAUSE); ++ else if (hw->phy.media_type == ixgbe_media_type_copper) ++ reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE); ++ break; ++#ifdef CONFIG_DCB ++ case ixgbe_fc_pfc: ++ goto out; ++ break; ++#endif /* CONFIG_DCB */ ++ default: ++ hw_dbg(hw, "Flow control param set incorrectly\n"); ++ ret_val = IXGBE_ERR_CONFIG; ++ goto out; ++ break; ++ } ++ ++ IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg); ++ reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); ++ ++ /* Disable AN timeout */ ++ if (hw->fc.strict_ieee) ++ reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN; ++ ++ IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg); ++ hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg); ++ ++ /* ++ * AUTOC restart handles negotiation of 1G and 10G. There is ++ * no need to set the PCS1GCTL register. ++ */ ++ if (hw->phy.media_type == ixgbe_media_type_backplane) { ++ reg_bp |= IXGBE_AUTOC_AN_RESTART; ++ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp); ++ } else if ((hw->phy.media_type == ixgbe_media_type_copper) && ++ (ixgbe_device_supports_autoneg_fc(hw) == 0)) { ++ hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu); ++ } ++ ++ hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg); ++out: ++ return ret_val; ++} ++ ++/** ++ * ixgbe_disable_pcie_master - Disable PCI-express master access ++ * @hw: pointer to hardware structure ++ * ++ * Disables PCI-Express master access and verifies there are no pending ++ * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable ++ * bit hasn't caused the master requests to be disabled, else 0 ++ * is returned signifying master requests disabled. ++ **/ ++s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) ++{ ++ u32 i; ++ u32 reg_val; ++ u32 number_of_queues; ++ s32 status = 0; ++ ++ /* Just jump out if bus mastering is already disabled */ ++ if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) ++ goto out; ++ ++ /* Disable the receive unit by stopping each queue */ ++ number_of_queues = hw->mac.max_rx_queues; ++ for (i = 0; i < number_of_queues; i++) { ++ reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); ++ if (reg_val & IXGBE_RXDCTL_ENABLE) { ++ reg_val &= ~IXGBE_RXDCTL_ENABLE; ++ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val); ++ } ++ } ++ ++ reg_val = IXGBE_READ_REG(hw, IXGBE_CTRL); ++ reg_val |= IXGBE_CTRL_GIO_DIS; ++ IXGBE_WRITE_REG(hw, IXGBE_CTRL, reg_val); ++ ++ for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { ++ if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) ++ goto out; ++ udelay(100); ++ } ++ ++ hw_dbg(hw, "GIO Master Disable bit didn't clear - requesting resets\n"); ++ status = IXGBE_ERR_MASTER_REQUESTS_PENDING; ++ ++ /* ++ * The GIO Master Disable bit didn't clear. There are multiple reasons ++ * for this listed in the datasheet 5.2.5.3.2 Master Disable, and they ++ * all require a double reset to recover from. Before proceeding, we ++ * first wait a little more to try to ensure that, at a minimum, the ++ * PCIe block has no transactions pending. 
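++ * The loop below polls the PCI device status register until the
++ * transaction pending bit clears or the timeout expires.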
++ */ ++ for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { ++ if (!(IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS) & ++ IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING)) ++ break; ++ udelay(100); ++ } ++ ++ if (i == IXGBE_PCI_MASTER_DISABLE_TIMEOUT) ++ hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n"); ++ ++ /* ++ * Two consecutive resets are required via CTRL.RST per datasheet ++ * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine ++ * of this need. The first reset prevents new master requests from ++ * being issued by our device. We then must wait 1usec for any ++ * remaining completions from the PCIe bus to trickle in, and then reset ++ * again to clear out any effects they may have had on our device. ++ */ ++ hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; ++ ++out: ++ return status; ++} ++ ++ ++/** ++ * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore ++ * @hw: pointer to hardware structure ++ * @mask: Mask to specify which semaphore to acquire ++ * ++ * Acquires the SWFW semaphore thought the GSSR register for the specified ++ * function (CSR, PHY0, PHY1, EEPROM, Flash) ++ **/ ++s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask) ++{ ++ u32 gssr; ++ u32 swmask = mask; ++ u32 fwmask = mask << 5; ++ s32 timeout = 200; ++ ++ while (timeout) { ++ /* ++ * SW EEPROM semaphore bit is used for access to all ++ * SW_FW_SYNC/GSSR bits (not just EEPROM) ++ */ ++ if (ixgbe_get_eeprom_semaphore(hw)) ++ return IXGBE_ERR_SWFW_SYNC; ++ ++ gssr = IXGBE_READ_REG(hw, IXGBE_GSSR); ++ if (!(gssr & (fwmask | swmask))) ++ break; ++ ++ /* ++ * Firmware currently using resource (fwmask) or other software ++ * thread currently using resource (swmask) ++ */ ++ ixgbe_release_eeprom_semaphore(hw); ++ msleep(5); ++ timeout--; ++ } ++ ++ if (!timeout) { ++ hw_dbg(hw, "Driver can't access resource, SW_FW_SYNC timeout.\n"); ++ return IXGBE_ERR_SWFW_SYNC; ++ } ++ ++ gssr |= swmask; ++ IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr); ++ ++ ixgbe_release_eeprom_semaphore(hw); ++ return 0; ++} ++ ++/** ++ * ixgbe_release_swfw_sync - Release SWFW semaphore ++ * @hw: pointer to hardware structure ++ * @mask: Mask to specify which semaphore to release ++ * ++ * Releases the SWFW semaphore thought the GSSR register for the specified ++ * function (CSR, PHY0, PHY1, EEPROM, Flash) ++ **/ ++void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask) ++{ ++ u32 gssr; ++ u32 swmask = mask; ++ ++ ixgbe_get_eeprom_semaphore(hw); ++ ++ gssr = IXGBE_READ_REG(hw, IXGBE_GSSR); ++ gssr &= ~swmask; ++ IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr); ++ ++ ixgbe_release_eeprom_semaphore(hw); ++} ++ ++/** ++ * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit ++ * @hw: pointer to hardware structure ++ * @regval: register value to write to RXCTRL ++ * ++ * Enables the Rx DMA unit ++ **/ ++s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval) ++{ ++ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval); ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_blink_led_start_generic - Blink LED based on index. ++ * @hw: pointer to hardware structure ++ * @index: led number to blink ++ **/ ++s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) ++{ ++ ixgbe_link_speed speed = 0; ++ bool link_up = 0; ++ u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); ++ u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); ++ ++ /* ++ * Link must be up to auto-blink the LEDs; ++ * Force it if link is down. 
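++ * Forcing is done by setting AUTOC.FLU and restarting autonegotiation.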
++ */ ++ hw->mac.ops.check_link(hw, &speed, &link_up, false); ++ ++ if (!link_up) { ++ ++ autoc_reg |= IXGBE_AUTOC_AN_RESTART; ++ autoc_reg |= IXGBE_AUTOC_FLU; ++ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); ++ msleep(10); ++ } ++ ++ led_reg &= ~IXGBE_LED_MODE_MASK(index); ++ led_reg |= IXGBE_LED_BLINK(index); ++ IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); ++ IXGBE_WRITE_FLUSH(hw); ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_blink_led_stop_generic - Stop blinking LED based on index. ++ * @hw: pointer to hardware structure ++ * @index: led number to stop blinking ++ **/ ++s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index) ++{ ++ u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); ++ u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); ++ ++ autoc_reg &= ~IXGBE_AUTOC_FLU; ++ autoc_reg |= IXGBE_AUTOC_AN_RESTART; ++ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); ++ ++ led_reg &= ~IXGBE_LED_MODE_MASK(index); ++ led_reg &= ~IXGBE_LED_BLINK(index); ++ led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index); ++ IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); ++ IXGBE_WRITE_FLUSH(hw); ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM ++ * @hw: pointer to hardware structure ++ * @san_mac_offset: SAN MAC address offset ++ * ++ * This function will read the EEPROM location for the SAN MAC address ++ * pointer, and returns the value at that location. This is used in both ++ * get and set mac_addr routines. ++ **/ ++static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw, ++ u16 *san_mac_offset) ++{ ++ /* ++ * First read the EEPROM pointer to see if the MAC addresses are ++ * available. ++ */ ++ hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset); ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM ++ * @hw: pointer to hardware structure ++ * @san_mac_addr: SAN MAC address ++ * ++ * Reads the SAN MAC address from the EEPROM, if it's available. This is ++ * per-port, so set_lan_id() must be called before reading the addresses. ++ * set_lan_id() is called by identify_sfp(), but this cannot be relied ++ * upon for non-SFP connections, so we must call it here. ++ **/ ++s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr) ++{ ++ u16 san_mac_data, san_mac_offset; ++ u8 i; ++ ++ /* ++ * First read the EEPROM pointer to see if the MAC addresses are ++ * available. If they're not, no point in calling set_lan_id() here. ++ */ ++ ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset); ++ ++ if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) { ++ /* ++ * No addresses available in this EEPROM. It's not an ++ * error though, so just wipe the local address and return. ++ */ ++ for (i = 0; i < 6; i++) ++ san_mac_addr[i] = 0xFF; ++ ++ goto san_mac_addr_out; ++ } ++ ++ /* make sure we know which port we need to program */ ++ hw->mac.ops.set_lan_id(hw); ++ /* apply the port offset to the address offset */ ++ (hw->bus.func) ? 
(san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) : ++ (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET); ++ for (i = 0; i < 3; i++) { ++ hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data); ++ san_mac_addr[i * 2] = (u8)(san_mac_data); ++ san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8); ++ san_mac_offset++; ++ } ++ ++san_mac_addr_out: ++ return 0; ++} ++ ++/** ++ * ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM ++ * @hw: pointer to hardware structure ++ * @san_mac_addr: SAN MAC address ++ * ++ * Write a SAN MAC address to the EEPROM. ++ **/ ++s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr) ++{ ++ s32 status = 0; ++ u16 san_mac_data, san_mac_offset; ++ u8 i; ++ ++ /* Look for SAN mac address pointer. If not defined, return */ ++ ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset); ++ ++ if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) { ++ status = IXGBE_ERR_NO_SAN_ADDR_PTR; ++ goto san_mac_addr_out; ++ } ++ ++ /* Make sure we know which port we need to write */ ++ hw->mac.ops.set_lan_id(hw); ++ /* Apply the port offset to the address offset */ ++ (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) : ++ (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET); ++ ++ for (i = 0; i < 3; i++) { ++ san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8); ++ san_mac_data |= (u16)(san_mac_addr[i * 2]); ++ hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data); ++ san_mac_offset++; ++ } ++ ++san_mac_addr_out: ++ return status; ++} ++ ++/** ++ * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count ++ * @hw: pointer to hardware structure ++ * ++ * Read PCIe configuration space, and get the MSI-X vector count from ++ * the capabilities table. ++ **/ ++u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) ++{ ++ u32 msix_count = 64; ++ ++ if (hw->mac.msix_vectors_from_pcie) { ++ msix_count = IXGBE_READ_PCIE_WORD(hw, ++ IXGBE_PCIE_MSIX_82599_CAPS); ++ msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK; ++ ++ /* MSI-X count is zero-based in HW, so increment to give ++ * proper value */ ++ msix_count++; ++ } ++ ++ return msix_count; ++} ++ ++/** ++ * ixgbe_insert_mac_addr_generic - Find a RAR for this mac address ++ * @hw: pointer to hardware structure ++ * @addr: Address to put into receive address register ++ * @vmdq: VMDq pool to assign ++ * ++ * Puts an ethernet address into a receive address register, or ++ * finds the rar that it is aleady in; adds to the pool list ++ **/ ++s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq) ++{ ++ static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF; ++ u32 first_empty_rar = NO_EMPTY_RAR_FOUND; ++ u32 rar; ++ u32 rar_low, rar_high; ++ u32 addr_low, addr_high; ++ ++ /* swap bytes for HW little endian */ ++ addr_low = addr[0] | (addr[1] << 8) ++ | (addr[2] << 16) ++ | (addr[3] << 24); ++ addr_high = addr[4] | (addr[5] << 8); ++ ++ /* ++ * Either find the mac_id in rar or find the first empty space. ++ * rar_highwater points to just after the highest currently used ++ * rar in order to shorten the search. It grows when we add a new ++ * rar to the top. 
++ */ ++ for (rar = 0; rar < hw->mac.rar_highwater; rar++) { ++ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); ++ ++ if (((IXGBE_RAH_AV & rar_high) == 0) ++ && first_empty_rar == NO_EMPTY_RAR_FOUND) { ++ first_empty_rar = rar; ++ } else if ((rar_high & 0xFFFF) == addr_high) { ++ rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar)); ++ if (rar_low == addr_low) ++ break; /* found it already in the rars */ ++ } ++ } ++ ++ if (rar < hw->mac.rar_highwater) { ++ /* already there so just add to the pool bits */ ++ ixgbe_set_vmdq(hw, rar, vmdq); ++ } else if (first_empty_rar != NO_EMPTY_RAR_FOUND) { ++ /* stick it into first empty RAR slot we found */ ++ rar = first_empty_rar; ++ ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV); ++ } else if (rar == hw->mac.rar_highwater) { ++ /* add it to the top of the list and inc the highwater mark */ ++ ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV); ++ hw->mac.rar_highwater++; ++ } else if (rar >= hw->mac.num_rar_entries) { ++ return IXGBE_ERR_INVALID_MAC_ADDR; ++ } ++ ++ /* ++ * If we found rar[0], make sure the default pool bit (we use pool 0) ++ * remains cleared to be sure default pool packets will get delivered ++ */ ++ if (rar == 0) ++ ixgbe_clear_vmdq(hw, rar, 0); ++ ++ return rar; ++} ++ ++/** ++ * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address ++ * @hw: pointer to hardware struct ++ * @rar: receive address register index to disassociate ++ * @vmdq: VMDq pool index to remove from the rar ++ **/ ++s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) ++{ ++ u32 mpsar_lo, mpsar_hi; ++ u32 rar_entries = hw->mac.num_rar_entries; ++ ++ if (rar < rar_entries) { ++ mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); ++ mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); ++ ++ if (!mpsar_lo && !mpsar_hi) ++ goto done; ++ ++ if (vmdq == IXGBE_CLEAR_VMDQ_ALL) { ++ if (mpsar_lo) { ++ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0); ++ mpsar_lo = 0; ++ } ++ if (mpsar_hi) { ++ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0); ++ mpsar_hi = 0; ++ } ++ } else if (vmdq < 32) { ++ mpsar_lo &= ~(1 << vmdq); ++ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo); ++ } else { ++ mpsar_hi &= ~(1 << (vmdq - 32)); ++ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi); ++ } ++ ++ /* was that the last pool using this rar? 
*/ ++ if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0) ++ hw->mac.ops.clear_rar(hw, rar); ++ } else { ++ hw_dbg(hw, "RAR index %d is out of range.\n", rar); ++ } ++ ++done: ++ return 0; ++} ++ ++/** ++ * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address ++ * @hw: pointer to hardware struct ++ * @rar: receive address register index to associate with a VMDq index ++ * @vmdq: VMDq pool index ++ **/ ++s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) ++{ ++ u32 mpsar; ++ u32 rar_entries = hw->mac.num_rar_entries; ++ ++ if (rar < rar_entries) { ++ if (vmdq < 32) { ++ mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); ++ mpsar |= 1 << vmdq; ++ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar); ++ } else { ++ mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); ++ mpsar |= 1 << (vmdq - 32); ++ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar); ++ } ++ } else { ++ hw_dbg(hw, "RAR index %d is out of range.\n", rar); ++ } ++ return 0; ++} ++ ++/** ++ * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array ++ * @hw: pointer to hardware structure ++ **/ ++s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw) ++{ ++ int i; ++ ++ hw_dbg(hw, " Clearing UTA\n"); ++ ++ for (i = 0; i < 128; i++) ++ IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0); ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot ++ * @hw: pointer to hardware structure ++ * @vlan: VLAN id to write to VLAN filter ++ * ++ * return the VLVF index where this VLAN id should be placed ++ * ++ **/ ++s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan) ++{ ++ u32 bits = 0; ++ u32 first_empty_slot = 0; ++ s32 regindex; ++ ++ /* short cut the special case */ ++ if (vlan == 0) ++ return 0; ++ ++ /* ++ * Search for the vlan id in the VLVF entries. Save off the first empty ++ * slot found along the way ++ */ ++ for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) { ++ bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex)); ++ if (!bits && !(first_empty_slot)) ++ first_empty_slot = regindex; ++ else if ((bits & 0x0FFF) == vlan) ++ break; ++ } ++ ++ /* ++ * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan ++ * in the VLVF. Else use the first empty VLVF register for this ++ * vlan id. ++ */ ++ if (regindex >= IXGBE_VLVF_ENTRIES) { ++ if (first_empty_slot) ++ regindex = first_empty_slot; ++ else { ++ hw_dbg(hw, "No space in VLVF.\n"); ++ regindex = IXGBE_ERR_NO_SPACE; ++ } ++ } ++ ++ return regindex; ++} ++ ++/** ++ * ixgbe_set_vfta_generic - Set VLAN filter table ++ * @hw: pointer to hardware structure ++ * @vlan: VLAN id to write to VLAN filter ++ * @vind: VMDq output index that maps queue to VLAN id in VFVFB ++ * @vlan_on: boolean flag to turn on/off VLAN in VFVF ++ * ++ * Turn on/off specified VLAN in the VLAN filter table. ++ **/ ++s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, ++ bool vlan_on) ++{ ++ s32 regindex; ++ u32 bitindex; ++ u32 vfta; ++ u32 bits; ++ u32 vt; ++ u32 targetbit; ++ bool vfta_changed = false; ++ ++ if (vlan > 4095) ++ return IXGBE_ERR_PARAM; ++ ++ /* ++ * this is a 2 part operation - first the VFTA, then the ++ * VLVF and VLVFB if VT Mode is set ++ * We don't write the VFTA until we know the VLVF part succeeded. 
++ */ ++ ++ /* Part 1 ++ * The VFTA is a bitstring made up of 128 32-bit registers ++ * that enable the particular VLAN id, much like the MTA: ++ * bits[11-5]: which register ++ * bits[4-0]: which bit in the register ++ */ ++ regindex = (vlan >> 5) & 0x7F; ++ bitindex = vlan & 0x1F; ++ targetbit = (1 << bitindex); ++ vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex)); ++ ++ if (vlan_on) { ++ if (!(vfta & targetbit)) { ++ vfta |= targetbit; ++ vfta_changed = true; ++ } ++ } else { ++ if ((vfta & targetbit)) { ++ vfta &= ~targetbit; ++ vfta_changed = true; ++ } ++ } ++ ++ /* Part 2 ++ * If VT Mode is set ++ * Either vlan_on ++ * make sure the vlan is in VLVF ++ * set the vind bit in the matching VLVFB ++ * Or !vlan_on ++ * clear the pool bit and possibly the vind ++ */ ++ vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL); ++ if (vt & IXGBE_VT_CTL_VT_ENABLE) { ++ s32 vlvf_index; ++ ++ vlvf_index = ixgbe_find_vlvf_slot(hw, vlan); ++ if (vlvf_index < 0) ++ return vlvf_index; ++ ++ if (vlan_on) { ++ /* set the pool bit */ ++ if (vind < 32) { ++ bits = IXGBE_READ_REG(hw, ++ IXGBE_VLVFB(vlvf_index*2)); ++ bits |= (1 << vind); ++ IXGBE_WRITE_REG(hw, ++ IXGBE_VLVFB(vlvf_index*2), ++ bits); ++ } else { ++ bits = IXGBE_READ_REG(hw, ++ IXGBE_VLVFB((vlvf_index*2)+1)); ++ bits |= (1 << (vind-32)); ++ IXGBE_WRITE_REG(hw, ++ IXGBE_VLVFB((vlvf_index*2)+1), ++ bits); ++ } ++ } else { ++ /* clear the pool bit */ ++ if (vind < 32) { ++ bits = IXGBE_READ_REG(hw, ++ IXGBE_VLVFB(vlvf_index*2)); ++ bits &= ~(1 << vind); ++ IXGBE_WRITE_REG(hw, ++ IXGBE_VLVFB(vlvf_index*2), ++ bits); ++ bits |= IXGBE_READ_REG(hw, ++ IXGBE_VLVFB((vlvf_index*2)+1)); ++ } else { ++ bits = IXGBE_READ_REG(hw, ++ IXGBE_VLVFB((vlvf_index*2)+1)); ++ bits &= ~(1 << (vind-32)); ++ IXGBE_WRITE_REG(hw, ++ IXGBE_VLVFB((vlvf_index*2)+1), ++ bits); ++ bits |= IXGBE_READ_REG(hw, ++ IXGBE_VLVFB(vlvf_index*2)); ++ } ++ } ++ ++ /* ++ * If there are still bits set in the VLVFB registers ++ * for the VLAN ID indicated we need to see if the ++ * caller is requesting that we clear the VFTA entry bit. ++ * If the caller has requested that we clear the VFTA ++ * entry bit but there are still pools/VFs using this VLAN ++ * ID entry then ignore the request. We're not worried ++ * about the case where we're turning the VFTA VLAN ID ++ * entry bit on, only when requested to turn it off as ++ * there may be multiple pools and/or VFs using the ++ * VLAN ID entry. In that case we cannot clear the ++ * VFTA bit until all pools/VFs using that VLAN ID have also ++ * been cleared. This will be indicated by "bits" being ++ * zero. ++ */ ++ if (bits) { ++ IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), ++ (IXGBE_VLVF_VIEN | vlan)); ++ if (!vlan_on) { ++ /* someone wants to clear the vfta entry ++ * but some pools/VFs are still using it. ++ * Ignore it. 
*/ ++ vfta_changed = false; ++ } ++ } ++ else ++ IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0); ++ } ++ ++ if (vfta_changed) ++ IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta); ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_clear_vfta_generic - Clear VLAN filter table ++ * @hw: pointer to hardware structure ++ * ++ * Clears the VLAN filer table, and the VMDq index associated with the filter ++ **/ ++s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw) ++{ ++ u32 offset; ++ ++ for (offset = 0; offset < hw->mac.vft_size; offset++) ++ IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); ++ ++ for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) { ++ IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0); ++ IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset*2), 0); ++ IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset*2)+1), 0); ++ } ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_check_mac_link_generic - Determine link and speed status ++ * @hw: pointer to hardware structure ++ * @speed: pointer to link speed ++ * @link_up: true when link is up ++ * @link_up_wait_to_complete: bool used to wait for link up or not ++ * ++ * Reads the links register to determine if link is up and the current speed ++ **/ ++s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, ++ bool *link_up, bool link_up_wait_to_complete) ++{ ++ u32 links_reg, links_orig; ++ u32 i; ++ ++ /* clear the old state */ ++ links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS); ++ ++ links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); ++ ++ if (links_orig != links_reg) { ++ hw_dbg(hw, "LINKS changed from %08X to %08X\n", ++ links_orig, links_reg); ++ } ++ ++ if (link_up_wait_to_complete) { ++ for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { ++ if (links_reg & IXGBE_LINKS_UP) { ++ *link_up = true; ++ break; ++ } else { ++ *link_up = false; ++ } ++ msleep(100); ++ links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); ++ } ++ } else { ++ if (links_reg & IXGBE_LINKS_UP) ++ *link_up = true; ++ else ++ *link_up = false; ++ } ++ ++ if ((links_reg & IXGBE_LINKS_SPEED_82599) == ++ IXGBE_LINKS_SPEED_10G_82599) ++ *speed = IXGBE_LINK_SPEED_10GB_FULL; ++ else if ((links_reg & IXGBE_LINKS_SPEED_82599) == ++ IXGBE_LINKS_SPEED_1G_82599) ++ *speed = IXGBE_LINK_SPEED_1GB_FULL; ++ else if ((links_reg & IXGBE_LINKS_SPEED_82599) == ++ IXGBE_LINKS_SPEED_100_82599) ++ *speed = IXGBE_LINK_SPEED_100_FULL; ++ else ++ *speed = IXGBE_LINK_SPEED_UNKNOWN; ++ ++ /* if link is down, zero out the current_mode */ ++ if (*link_up == false) { ++ hw->fc.current_mode = ixgbe_fc_none; ++ hw->fc.fc_was_autonegged = false; ++ } ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from ++ * the EEPROM ++ * @hw: pointer to hardware structure ++ * @wwnn_prefix: the alternative WWNN prefix ++ * @wwpn_prefix: the alternative WWPN prefix ++ * ++ * This function will read the EEPROM from the alternative SAN MAC address ++ * block to check the support for the alternative WWNN/WWPN prefix support. 
++ **/ ++s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, ++ u16 *wwpn_prefix) ++{ ++ u16 offset, caps; ++ u16 alt_san_mac_blk_offset; ++ ++ /* clear output first */ ++ *wwnn_prefix = 0xFFFF; ++ *wwpn_prefix = 0xFFFF; ++ ++ /* check if alternative SAN MAC is supported */ ++ hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR, ++ &alt_san_mac_blk_offset); ++ ++ if ((alt_san_mac_blk_offset == 0) || ++ (alt_san_mac_blk_offset == 0xFFFF)) ++ goto wwn_prefix_out; ++ ++ /* check capability in alternative san mac address block */ ++ offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET; ++ hw->eeprom.ops.read(hw, offset, &caps); ++ if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN)) ++ goto wwn_prefix_out; ++ ++ /* get the corresponding prefix for WWNN/WWPN */ ++ offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET; ++ hw->eeprom.ops.read(hw, offset, wwnn_prefix); ++ ++ offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET; ++ hw->eeprom.ops.read(hw, offset, wwpn_prefix); ++ ++wwn_prefix_out: ++ return 0; ++} ++ ++/** ++ * ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM ++ * @hw: pointer to hardware structure ++ * @bs: the fcoe boot status ++ * ++ * This function will read the FCOE boot status from the iSCSI FCOE block ++ **/ ++s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs) ++{ ++ u16 offset, caps, flags; ++ s32 status; ++ ++ /* clear output first */ ++ *bs = ixgbe_fcoe_bootstatus_unavailable; ++ ++ /* check if FCOE IBA block is present */ ++ offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR; ++ status = hw->eeprom.ops.read(hw, offset, &caps); ++ if (status != 0) ++ goto out; ++ ++ if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE)) ++ goto out; ++ ++ /* check if iSCSI FCOE block is populated */ ++ status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset); ++ if (status != 0) ++ goto out; ++ ++ if ((offset == 0) || (offset == 0xFFFF)) ++ goto out; ++ ++ /* read fcoe flags in iSCSI FCOE block */ ++ offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET; ++ status = hw->eeprom.ops.read(hw, offset, &flags); ++ if (status != 0) ++ goto out; ++ ++ if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE) ++ *bs = ixgbe_fcoe_bootstatus_enabled; ++ else ++ *bs = ixgbe_fcoe_bootstatus_disabled; ++ ++out: ++ return status; ++} ++ ++/** ++ * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow ++ * control ++ * @hw: pointer to hardware structure ++ * ++ * There are several phys that do not support autoneg flow control. This ++ * function check the device id to see if the associated phy supports ++ * autoneg flow control. ++ **/ ++static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) ++{ ++ ++ switch (hw->device_id) { ++ case IXGBE_DEV_ID_82599_T3_LOM: ++ return 0; ++ default: ++ return IXGBE_ERR_FC_NOT_SUPPORTED; ++ } ++} +diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_common.h linux-2.6.22-50/drivers/net/ixgbe/ixgbe_common.h +--- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_common.h 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_common.h 2010-08-25 17:56:26.000000000 -0400 +@@ -0,0 +1,105 @@ ++/******************************************************************************* ++ ++ Intel 10 Gigabit PCI Express Linux driver ++ Copyright(c) 1999 - 2010 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. 
++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ You should have received a copy of the GNU General Public License along with ++ this program; if not, write to the Free Software Foundation, Inc., ++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#ifndef _IXGBE_COMMON_H_ ++#define _IXGBE_COMMON_H_ ++ ++#include "ixgbe_type.h" ++ ++u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw); ++ ++s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw); ++s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw); ++s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw); ++s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw); ++s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num); ++s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, ++ u32 *pba_num_size); ++s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr); ++s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw); ++void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw); ++s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw); ++ ++s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index); ++s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index); ++ ++s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw); ++s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data); ++s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data); ++s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, ++ u16 *data); ++u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw); ++s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, ++ u16 *checksum_val); ++s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw); ++s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg); ++ ++s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, ++ u32 enable_addr); ++s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index); ++s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw); ++s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list, ++ u32 mc_addr_count, ++ ixgbe_mc_addr_itr func); ++s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list, ++ u32 addr_count, ixgbe_mc_addr_itr func); ++s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw); ++s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw); ++s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval); ++ ++s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num); ++s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packtetbuf_num); ++s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw); ++ ++s32 ixgbe_validate_mac_addr(u8 *mac_addr); ++s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask); ++void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask); ++s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw); ++ ++s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index); ++s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index); ++ ++s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr); ++s32 
ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr); ++ ++s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq); ++s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq); ++s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq); ++s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw); ++s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, ++ u32 vind, bool vlan_on); ++s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw); ++ ++s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ++ ixgbe_link_speed *speed, ++ bool *link_up, bool link_up_wait_to_complete); ++ ++s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, ++ u16 *wwpn_prefix); ++ ++s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs); ++#endif /* IXGBE_COMMON */ +diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_dcb_82598.c linux-2.6.22-50/drivers/net/ixgbe/ixgbe_dcb_82598.c +--- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_dcb_82598.c 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_dcb_82598.c 2010-08-25 17:56:26.000000000 -0400 +@@ -0,0 +1,408 @@ ++/******************************************************************************* ++ ++ Intel 10 Gigabit PCI Express Linux driver ++ Copyright(c) 1999 - 2010 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ You should have received a copy of the GNU General Public License along with ++ this program; if not, write to the Free Software Foundation, Inc., ++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++ ++#include "ixgbe_type.h" ++#include "ixgbe_dcb.h" ++#include "ixgbe_dcb_82598.h" ++ ++/** ++ * ixgbe_dcb_get_tc_stats_82598 - Return status data for each traffic class ++ * @hw: pointer to hardware structure ++ * @stats: pointer to statistics structure ++ * @tc_count: Number of elements in bwg_array. ++ * ++ * This function returns the status data for each of the Traffic Classes in use. ++ */ ++s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *hw, ++ struct ixgbe_hw_stats *stats, ++ u8 tc_count) ++{ ++ int tc; ++ ++ if (tc_count > MAX_TRAFFIC_CLASS) ++ return DCB_ERR_PARAM; ++ /* Statistics pertaining to each traffic class */ ++ for (tc = 0; tc < tc_count; tc++) { ++ /* Transmitted Packets */ ++ stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc)); ++ /* Transmitted Bytes */ ++ stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC(tc)); ++ /* Received Packets */ ++ stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc)); ++ /* Received Bytes */ ++ stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC(tc)); ++ ++#if 0 ++ /* Can we get rid of these?? Consequently, getting rid ++ * of the tc_stats structure. 
++ */ ++ tc_stats_array[up]->in_overflow_discards = 0; ++ tc_stats_array[up]->out_overflow_discards = 0; ++#endif ++ } ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_dcb_get_pfc_stats_82598 - Returns CBFC status data ++ * @hw: pointer to hardware structure ++ * @stats: pointer to statistics structure ++ * @tc_count: Number of elements in bwg_array. ++ * ++ * This function returns the CBFC status data for each of the Traffic Classes. ++ */ ++s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *hw, ++ struct ixgbe_hw_stats *stats, ++ u8 tc_count) ++{ ++ int tc; ++ ++ if (tc_count > MAX_TRAFFIC_CLASS) ++ return DCB_ERR_PARAM; ++ for (tc = 0; tc < tc_count; tc++) { ++ /* Priority XOFF Transmitted */ ++ stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc)); ++ /* Priority XOFF Received */ ++ stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(tc)); ++ } ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_dcb_config_packet_buffers_82598 - Configure packet buffers ++ * @hw: pointer to hardware structure ++ * @dcb_config: pointer to ixgbe_dcb_config structure ++ * ++ * Configure packet buffers for DCB mode. ++ */ ++s32 ixgbe_dcb_config_packet_buffers_82598(struct ixgbe_hw *hw, ++ struct ixgbe_dcb_config *dcb_config) ++{ ++ s32 ret_val = 0; ++ u32 value = IXGBE_RXPBSIZE_64KB; ++ u8 i = 0; ++ ++ /* Setup Rx packet buffer sizes */ ++ switch (dcb_config->rx_pba_cfg) { ++ case pba_80_48: ++ /* Setup the first four at 80KB */ ++ value = IXGBE_RXPBSIZE_80KB; ++ for (; i < 4; i++) ++ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), value); ++ /* Setup the last four at 48KB...don't re-init i */ ++ value = IXGBE_RXPBSIZE_48KB; ++ /* Fall Through */ ++ case pba_equal: ++ default: ++ for (; i < IXGBE_MAX_PACKET_BUFFERS; i++) ++ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), value); ++ ++ /* Setup Tx packet buffer sizes */ ++ for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) { ++ IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), ++ IXGBE_TXPBSIZE_40KB); ++ } ++ break; ++ } ++ ++ return ret_val; ++} ++ ++/** ++ * ixgbe_dcb_config_rx_arbiter_82598 - Config Rx data arbiter ++ * @hw: pointer to hardware structure ++ * @dcb_config: pointer to ixgbe_dcb_config structure ++ * ++ * Configure Rx Data Arbiter and credits for each traffic class. 
++ */ ++s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, ++ struct ixgbe_dcb_config *dcb_config) ++{ ++ struct tc_bw_alloc *p; ++ u32 reg = 0; ++ u32 credit_refill = 0; ++ u32 credit_max = 0; ++ u8 i = 0; ++ ++ reg = IXGBE_READ_REG(hw, IXGBE_RUPPBMR) | IXGBE_RUPPBMR_MQA; ++ IXGBE_WRITE_REG(hw, IXGBE_RUPPBMR, reg); ++ ++ reg = IXGBE_READ_REG(hw, IXGBE_RMCS); ++ /* Enable Arbiter */ ++ reg &= ~IXGBE_RMCS_ARBDIS; ++ /* Enable Receive Recycle within the BWG */ ++ reg |= IXGBE_RMCS_RRM; ++ /* Enable Deficit Fixed Priority arbitration*/ ++ reg |= IXGBE_RMCS_DFP; ++ ++ IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg); ++ ++ /* Configure traffic class credits and priority */ ++ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { ++ p = &dcb_config->tc_config[i].path[DCB_RX_CONFIG]; ++ credit_refill = p->data_credits_refill; ++ credit_max = p->data_credits_max; ++ ++ reg = credit_refill | (credit_max << IXGBE_RT2CR_MCL_SHIFT); ++ ++ if (p->prio_type == prio_link) ++ reg |= IXGBE_RT2CR_LSP; ++ ++ IXGBE_WRITE_REG(hw, IXGBE_RT2CR(i), reg); ++ } ++ ++ reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); ++ reg |= IXGBE_RDRXCTL_RDMTS_1_2; ++ reg |= IXGBE_RDRXCTL_MPBEN; ++ reg |= IXGBE_RDRXCTL_MCEN; ++ IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg); ++ ++ reg = IXGBE_READ_REG(hw, IXGBE_RXCTRL); ++ /* Make sure there is enough descriptors before arbitration */ ++ reg &= ~IXGBE_RXCTRL_DMBYPS; ++ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg); ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_dcb_config_tx_desc_arbiter_82598 - Config Tx Desc. arbiter ++ * @hw: pointer to hardware structure ++ * @dcb_config: pointer to ixgbe_dcb_config structure ++ * ++ * Configure Tx Descriptor Arbiter and credits for each traffic class. ++ */ ++s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, ++ struct ixgbe_dcb_config *dcb_config) ++{ ++ struct tc_bw_alloc *p; ++ u32 reg, max_credits; ++ u8 i; ++ ++ reg = IXGBE_READ_REG(hw, IXGBE_DPMCS); ++ ++ /* Enable arbiter */ ++ reg &= ~IXGBE_DPMCS_ARBDIS; ++ if (!(dcb_config->round_robin_enable)) { ++ /* Enable DFP and Recycle mode */ ++ reg |= (IXGBE_DPMCS_TDPAC | IXGBE_DPMCS_TRM); ++ } ++ reg |= IXGBE_DPMCS_TSOEF; ++ /* Configure Max TSO packet size 34KB including payload and headers */ ++ reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT); ++ ++ IXGBE_WRITE_REG(hw, IXGBE_DPMCS, reg); ++ ++ /* Configure traffic class credits and priority */ ++ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { ++ p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG]; ++ max_credits = dcb_config->tc_config[i].desc_credits_max; ++ reg = max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT; ++ reg |= p->data_credits_refill; ++ reg |= (u32)(p->bwg_id) << IXGBE_TDTQ2TCCR_BWG_SHIFT; ++ ++ if (p->prio_type == prio_group) ++ reg |= IXGBE_TDTQ2TCCR_GSP; ++ ++ if (p->prio_type == prio_link) ++ reg |= IXGBE_TDTQ2TCCR_LSP; ++ ++ IXGBE_WRITE_REG(hw, IXGBE_TDTQ2TCCR(i), reg); ++ } ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_dcb_config_tx_data_arbiter_82598 - Config Tx data arbiter ++ * @hw: pointer to hardware structure ++ * @dcb_config: pointer to ixgbe_dcb_config structure ++ * ++ * Configure Tx Data Arbiter and credits for each traffic class. 
++ */ ++s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, ++ struct ixgbe_dcb_config *dcb_config) ++{ ++ struct tc_bw_alloc *p; ++ u32 reg; ++ u8 i; ++ ++ reg = IXGBE_READ_REG(hw, IXGBE_PDPMCS); ++ /* Enable Data Plane Arbiter */ ++ reg &= ~IXGBE_PDPMCS_ARBDIS; ++ /* Enable DFP and Transmit Recycle Mode */ ++ reg |= (IXGBE_PDPMCS_TPPAC | IXGBE_PDPMCS_TRM); ++ ++ IXGBE_WRITE_REG(hw, IXGBE_PDPMCS, reg); ++ ++ /* Configure traffic class credits and priority */ ++ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { ++ p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG]; ++ reg = p->data_credits_refill; ++ reg |= (u32)(p->data_credits_max) << IXGBE_TDPT2TCCR_MCL_SHIFT; ++ reg |= (u32)(p->bwg_id) << IXGBE_TDPT2TCCR_BWG_SHIFT; ++ ++ if (p->prio_type == prio_group) ++ reg |= IXGBE_TDPT2TCCR_GSP; ++ ++ if (p->prio_type == prio_link) ++ reg |= IXGBE_TDPT2TCCR_LSP; ++ ++ IXGBE_WRITE_REG(hw, IXGBE_TDPT2TCCR(i), reg); ++ } ++ ++ /* Enable Tx packet buffer division */ ++ reg = IXGBE_READ_REG(hw, IXGBE_DTXCTL); ++ reg |= IXGBE_DTXCTL_ENDBUBD; ++ IXGBE_WRITE_REG(hw, IXGBE_DTXCTL, reg); ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_dcb_config_pfc_82598 - Config priority flow control ++ * @hw: pointer to hardware structure ++ * @dcb_config: pointer to ixgbe_dcb_config structure ++ * ++ * Configure Priority Flow Control for each traffic class. ++ */ ++s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, ++ struct ixgbe_dcb_config *dcb_config) ++{ ++ u32 reg, rx_pba_size; ++ u8 i; ++ ++ if (!dcb_config->pfc_mode_enable) ++ goto out; ++ ++ /* Enable Transmit Priority Flow Control */ ++ reg = IXGBE_READ_REG(hw, IXGBE_RMCS); ++ reg &= ~IXGBE_RMCS_TFCE_802_3X; ++ /* correct the reporting of our flow control status */ ++ reg |= IXGBE_RMCS_TFCE_PRIORITY; ++ IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg); ++ ++ /* Enable Receive Priority Flow Control */ ++ reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); ++ reg &= ~IXGBE_FCTRL_RFCE; ++ reg |= IXGBE_FCTRL_RPFCE; ++ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg); ++ ++ /* ++ * Configure flow control thresholds and enable priority flow control ++ * for each traffic class. ++ */ ++ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { ++ if (dcb_config->rx_pba_cfg == pba_equal) { ++ rx_pba_size = IXGBE_RXPBSIZE_64KB; ++ } else { ++ rx_pba_size = (i < 4) ? IXGBE_RXPBSIZE_80KB ++ : IXGBE_RXPBSIZE_48KB; ++ } ++ ++ reg = ((rx_pba_size >> 5) & 0xFFF0); ++ if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx || ++ dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full) ++ reg |= IXGBE_FCRTL_XONE; ++ ++ IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), reg); ++ ++ reg = ((rx_pba_size >> 2) & 0xFFF0); ++ if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx || ++ dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full) ++ reg |= IXGBE_FCRTH_FCEN; ++ ++ IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg); ++ } ++ ++ /* Configure pause time */ ++ for (i = 0; i < (MAX_TRAFFIC_CLASS >> 1); i++) ++ IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), 0x68006800); ++ ++ /* Configure flow control refresh threshold value */ ++ IXGBE_WRITE_REG(hw, IXGBE_FCRTV, 0x3400); ++ ++out: ++ return 0; ++} ++ ++/** ++ * ixgbe_dcb_config_tc_stats_82598 - Configure traffic class statistics ++ * @hw: pointer to hardware structure ++ * ++ * Configure queue statistics registers, all queues belonging to same traffic ++ * class uses a single set of queue statistics counters. 
++ */ ++s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw) ++{ ++ u32 reg = 0; ++ u8 i = 0; ++ u8 j = 0; ++ ++ /* Receive Queues stats setting - 8 queues per statistics reg */ ++ for (i = 0, j = 0; i < 15 && j < 8; i = i + 2, j++) { ++ reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i)); ++ reg |= ((0x1010101) * j); ++ IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg); ++ reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i + 1)); ++ reg |= ((0x1010101) * j); ++ IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i + 1), reg); ++ } ++ /* Transmit Queues stats setting - 4 queues per statistics reg*/ ++ for (i = 0; i < 8; i++) { ++ reg = IXGBE_READ_REG(hw, IXGBE_TQSMR(i)); ++ reg |= ((0x1010101) * i); ++ IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i), reg); ++ } ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_dcb_hw_config_82598 - Config and enable DCB ++ * @hw: pointer to hardware structure ++ * @dcb_config: pointer to ixgbe_dcb_config structure ++ * ++ * Configure dcb settings and enable dcb mode. ++ */ ++s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, ++ struct ixgbe_dcb_config *dcb_config) ++{ ++ ++ ixgbe_dcb_config_packet_buffers_82598(hw, dcb_config); ++ ixgbe_dcb_config_rx_arbiter_82598(hw, dcb_config); ++ ixgbe_dcb_config_tx_desc_arbiter_82598(hw, dcb_config); ++ ixgbe_dcb_config_tx_data_arbiter_82598(hw, dcb_config); ++ ixgbe_dcb_config_pfc_82598(hw, dcb_config); ++ ixgbe_dcb_config_tc_stats_82598(hw); ++ ++ ++ return 0; ++} +diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_dcb_82598.h linux-2.6.22-50/drivers/net/ixgbe/ixgbe_dcb_82598.h +--- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_dcb_82598.h 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_dcb_82598.h 2010-08-25 17:56:26.000000000 -0400 +@@ -0,0 +1,99 @@ ++/******************************************************************************* ++ ++ Intel 10 Gigabit PCI Express Linux driver ++ Copyright(c) 1999 - 2010 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ You should have received a copy of the GNU General Public License along with ++ this program; if not, write to the Free Software Foundation, Inc., ++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#ifndef _DCB_82598_CONFIG_H_ ++#define _DCB_82598_CONFIG_H_ ++ ++/* DCB register definitions */ ++ ++#define IXGBE_DPMCS_MTSOS_SHIFT 16 ++#define IXGBE_DPMCS_TDPAC 0x00000001 /* 0 Round Robin, ++ * 1 DFP - Deficit Fixed Priority */ ++#define IXGBE_DPMCS_TRM 0x00000010 /* Transmit Recycle Mode */ ++#define IXGBE_DPMCS_ARBDIS 0x00000040 /* DCB arbiter disable */ ++#define IXGBE_DPMCS_TSOEF 0x00080000 /* TSO Expand Factor: 0=x4, 1=x2 */ ++ ++#define IXGBE_RUPPBMR_MQA 0x80000000 /* Enable UP to queue mapping */ ++ ++#define IXGBE_RT2CR_MCL_SHIFT 12 /* Offset to Max Credit Limit setting */ ++#define IXGBE_RT2CR_LSP 0x80000000 /* LSP enable bit */ ++ ++#define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet ++ * buffers enable */ ++#define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores ++ * (RSS) enable */ ++ ++#define IXGBE_TDTQ2TCCR_MCL_SHIFT 12 ++#define IXGBE_TDTQ2TCCR_BWG_SHIFT 9 ++#define IXGBE_TDTQ2TCCR_GSP 0x40000000 ++#define IXGBE_TDTQ2TCCR_LSP 0x80000000 ++ ++#define IXGBE_TDPT2TCCR_MCL_SHIFT 12 ++#define IXGBE_TDPT2TCCR_BWG_SHIFT 9 ++#define IXGBE_TDPT2TCCR_GSP 0x40000000 ++#define IXGBE_TDPT2TCCR_LSP 0x80000000 ++ ++#define IXGBE_PDPMCS_TPPAC 0x00000020 /* 0 Round Robin, ++ * 1 DFP - Deficit Fixed Priority */ ++#define IXGBE_PDPMCS_ARBDIS 0x00000040 /* Arbiter disable */ ++#define IXGBE_PDPMCS_TRM 0x00000100 /* Transmit Recycle Mode enable */ ++ ++#define IXGBE_DTXCTL_ENDBUBD 0x00000004 /* Enable DBU buffer division */ ++ ++#define IXGBE_TXPBSIZE_40KB 0x0000A000 /* 40KB Packet Buffer */ ++#define IXGBE_RXPBSIZE_48KB 0x0000C000 /* 48KB Packet Buffer */ ++#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */ ++#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */ ++ ++/* DCB hardware-specific driver APIs */ ++ ++/* DCB PFC functions */ ++s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, ++ struct ixgbe_dcb_config *dcb_config); ++s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *hw, ++ struct ixgbe_hw_stats *stats, ++ u8 tc_count); ++ ++/* DCB traffic class stats */ ++s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw); ++s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *hw, ++ struct ixgbe_hw_stats *stats, ++ u8 tc_count); ++ ++/* DCB config arbiters */ ++s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, ++ struct ixgbe_dcb_config *dcb_config); ++s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, ++ struct ixgbe_dcb_config *dcb_config); ++s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, ++ struct ixgbe_dcb_config *dcb_config); ++ ++/* DCB hw initialization */ ++s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, ++ struct ixgbe_dcb_config *config); ++ ++#endif /* _DCB_82598_CONFIG_H */ +diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_dcb_82599.c linux-2.6.22-50/drivers/net/ixgbe/ixgbe_dcb_82599.c +--- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_dcb_82599.c 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_dcb_82599.c 2010-08-25 17:56:26.000000000 -0400 +@@ -0,0 +1,541 @@ ++/******************************************************************************* ++ ++ Intel 10 Gigabit PCI Express Linux driver ++ Copyright(c) 1999 - 2010 Intel Corporation. 
++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ You should have received a copy of the GNU General Public License along with ++ this program; if not, write to the Free Software Foundation, Inc., ++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++ ++#include "ixgbe_type.h" ++#include "ixgbe_dcb.h" ++#include "ixgbe_dcb_82599.h" ++ ++/** ++ * ixgbe_dcb_get_tc_stats_82599 - Returns status for each traffic class ++ * @hw: pointer to hardware structure ++ * @stats: pointer to statistics structure ++ * @tc_count: Number of elements in bwg_array. ++ * ++ * This function returns the status data for each of the Traffic Classes in use. ++ */ ++s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *hw, ++ struct ixgbe_hw_stats *stats, ++ u8 tc_count) ++{ ++ int tc; ++ ++ if (tc_count > MAX_TRAFFIC_CLASS) ++ return DCB_ERR_PARAM; ++ /* Statistics pertaining to each traffic class */ ++ for (tc = 0; tc < tc_count; tc++) { ++ /* Transmitted Packets */ ++ stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc)); ++ /* Transmitted Bytes (read low first to prevent missed carry) */ ++ stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(tc)); ++ stats->qbtc[tc] += ++ (((u64)(IXGBE_READ_REG(hw, IXGBE_QBTC_H(tc)))) << 32); ++ /* Received Packets */ ++ stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc)); ++ /* Received Bytes (read low first to prevent missed carry) */ ++ stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(tc)); ++ stats->qbrc[tc] += ++ (((u64)(IXGBE_READ_REG(hw, IXGBE_QBRC_H(tc)))) << 32); ++ ++ /* Received Dropped Packet */ ++ stats->qprdc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRDC(tc)); ++ } ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_dcb_get_pfc_stats_82599 - Return CBFC status data ++ * @hw: pointer to hardware structure ++ * @stats: pointer to statistics structure ++ * @tc_count: Number of elements in bwg_array. ++ * ++ * This function returns the CBFC status data for each of the Traffic Classes. ++ */ ++s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *hw, ++ struct ixgbe_hw_stats *stats, ++ u8 tc_count) ++{ ++ int tc; ++ ++ if (tc_count > MAX_TRAFFIC_CLASS) ++ return DCB_ERR_PARAM; ++ for (tc = 0; tc < tc_count; tc++) { ++ /* Priority XOFF Transmitted */ ++ stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc)); ++ /* Priority XOFF Received */ ++ stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(tc)); ++ } ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_dcb_config_packet_buffers_82599 - Configure DCB packet buffers ++ * @hw: pointer to hardware structure ++ * @dcb_config: pointer to ixgbe_dcb_config structure ++ * ++ * Configure packet buffers for DCB mode. 
++ */
++s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw,
++ struct ixgbe_dcb_config *dcb_config)
++{
++ s32 ret_val = 0;
++ u32 rxpktsize;
++ u32 maxtxpktsize = IXGBE_TXPBSIZE_MAX;
++ u32 txpktsize;
++ int num_tcs;
++ u8 i = 0;
++
++ num_tcs = dcb_config->num_tcs.pg_tcs;
++ /* Setup Rx packet buffer sizes */
++ if (dcb_config->rx_pba_cfg == pba_80_48) {
++ /*
++ * This really means configure the first half of the TCs
++ * (Traffic Classes) to use 5/8 of the Rx packet buffer
++ * space. To determine the size of the buffer for each TC,
++ * multiply the size of the entire packet buffer by 5/8
++ * then divide by half of the number of TCs.
++ */
++ rxpktsize = (hw->mac.rx_pb_size * 5 / 8) / (num_tcs / 2);
++ for (i = 0; i < (num_tcs / 2); i++)
++ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i),
++ rxpktsize << IXGBE_RXPBSIZE_SHIFT);
++
++ /*
++ * The second half of the TCs use the remaining 3/8
++ * of the Rx packet buffer space.
++ */
++ rxpktsize = (hw->mac.rx_pb_size * 3 / 8) / (num_tcs / 2);
++ for (; i < num_tcs; i++)
++ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i),
++ rxpktsize << IXGBE_RXPBSIZE_SHIFT);
++ } else {
++ /* Divide the Rx packet buffer evenly among the TCs */
++ rxpktsize = hw->mac.rx_pb_size / num_tcs;
++ for (i = 0; i < num_tcs; i++)
++ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i),
++ rxpktsize << IXGBE_RXPBSIZE_SHIFT);
++ }
++ /* Setup remaining TCs, if any, to zero buffer size */
++ for (; i < MAX_TRAFFIC_CLASS; i++)
++ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
++
++ /* Setup Tx packet buffer and threshold equally for all TCs */
++ txpktsize = maxtxpktsize/num_tcs;
++ for (i = 0; i < num_tcs; i++) {
++ IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
++ IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i),
++ ((txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX));
++ }
++
++ /* Setup remaining TCs, if any, to zero buffer size */
++ for (; i < MAX_TRAFFIC_CLASS; i++) {
++ IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
++ IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
++ }
++
++ return ret_val;
++}
++
++/**
++ * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter
++ * @hw: pointer to hardware structure
++ * @dcb_config: pointer to ixgbe_dcb_config structure
++ *
++ * Configure Rx Packet Arbiter and credits for each traffic class.
++ */
++s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
++ struct ixgbe_dcb_config *dcb_config)
++{
++ struct tc_bw_alloc *p;
++ u32 reg = 0;
++ u32 credit_refill = 0;
++ u32 credit_max = 0;
++ u8 i = 0;
++ u8 j;
++
++ /*
++ * Disable the arbiter before changing parameters
++ * (always enable recycle mode; WSP)
++ */
++ reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
++ IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
++
++ /*
++ * map all UPs to TCs. up_to_tc_bitmap for each TC has corresponding
++ * bits set for the UPs that need to be mapped to that TC.
++ * e.g. if priorities 6 and 7 are to be mapped to a TC then the
++ * up_to_tc_bitmap value for that TC will be 11000000 in binary.
++ */ ++ reg = 0; ++ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { ++ p = &dcb_config->tc_config[i].path[DCB_RX_CONFIG]; ++ for (j = 0; j < MAX_USER_PRIORITY; j++) { ++ if (p->up_to_tc_bitmap & (1 << j)) ++ reg |= (i << (j * IXGBE_RTRUP2TC_UP_SHIFT)); ++ } ++ } ++ IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg); ++ ++ /* Configure traffic class credits and priority */ ++ for (i = 0; i < dcb_config->num_tcs.pg_tcs; i++) { ++ p = &dcb_config->tc_config[i].path[DCB_RX_CONFIG]; ++ ++ credit_refill = p->data_credits_refill; ++ credit_max = p->data_credits_max; ++ reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT); ++ ++ reg |= (u32)(p->bwg_id) << IXGBE_RTRPT4C_BWG_SHIFT; ++ ++ if (p->prio_type == prio_link) ++ reg |= IXGBE_RTRPT4C_LSP; ++ ++ IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg); ++ } ++ ++ /* ++ * Configure Rx packet plane (recycle mode; WSP) and ++ * enable arbiter ++ */ ++ reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC; ++ IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg); ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter ++ * @hw: pointer to hardware structure ++ * @dcb_config: pointer to ixgbe_dcb_config structure ++ * ++ * Configure Tx Descriptor Arbiter and credits for each traffic class. ++ */ ++s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, ++ struct ixgbe_dcb_config *dcb_config) ++{ ++ struct tc_bw_alloc *p; ++ u32 reg, max_credits; ++ u8 i; ++ ++ /* Clear the per-Tx queue credits; we use per-TC instead */ ++ for (i = 0; i < 128; i++) { ++ IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i); ++ IXGBE_WRITE_REG(hw, IXGBE_RTTDT1C, 0); ++ } ++ ++ /* Configure traffic class credits and priority */ ++ for (i = 0; i < dcb_config->num_tcs.pg_tcs; i++) { ++ p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG]; ++ max_credits = dcb_config->tc_config[i].desc_credits_max; ++ reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT; ++ reg |= p->data_credits_refill; ++ reg |= (u32)(p->bwg_id) << IXGBE_RTTDT2C_BWG_SHIFT; ++ ++ if (p->prio_type == prio_group) ++ reg |= IXGBE_RTTDT2C_GSP; ++ ++ if (p->prio_type == prio_link) ++ reg |= IXGBE_RTTDT2C_LSP; ++ ++ IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg); ++ } ++ ++ /* ++ * Configure Tx descriptor plane (recycle mode; WSP) and ++ * enable arbiter ++ */ ++ reg = IXGBE_RTTDCS_TDPAC | IXGBE_RTTDCS_TDRM; ++ IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg); ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter ++ * @hw: pointer to hardware structure ++ * @dcb_config: pointer to ixgbe_dcb_config structure ++ * ++ * Configure Tx Packet Arbiter and credits for each traffic class. ++ */ ++s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, ++ struct ixgbe_dcb_config *dcb_config) ++{ ++ struct tc_bw_alloc *p; ++ u32 reg; ++ u8 i, j; ++ ++ /* ++ * Disable the arbiter before changing parameters ++ * (always enable recycle mode; SP; arb delay) ++ */ ++ reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM | ++ (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT) | ++ IXGBE_RTTPCS_ARBDIS; ++ IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg); ++ ++ /* ++ * map all UPs to TCs. up_to_tc_bitmap for each TC has corresponding ++ * bits sets for the UPs that needs to be mappped to that TC. ++ * e.g if priorities 6 and 7 are to be mapped to a TC then the ++ * up_to_tc_bitmap value for that TC will be 11000000 in binary. 
++ */ ++ reg = 0; ++ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { ++ p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG]; ++ for (j = 0; j < MAX_USER_PRIORITY; j++) ++ if (p->up_to_tc_bitmap & (1 << j)) ++ reg |= (i << (j * IXGBE_RTTUP2TC_UP_SHIFT)); ++ } ++ IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg); ++ ++ /* Configure traffic class credits and priority */ ++ for (i = 0; i < dcb_config->num_tcs.pg_tcs; i++) { ++ p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG]; ++ reg = p->data_credits_refill; ++ reg |= (u32)(p->data_credits_max) << IXGBE_RTTPT2C_MCL_SHIFT; ++ reg |= (u32)(p->bwg_id) << IXGBE_RTTPT2C_BWG_SHIFT; ++ ++ if (p->prio_type == prio_group) ++ reg |= IXGBE_RTTPT2C_GSP; ++ ++ if (p->prio_type == prio_link) ++ reg |= IXGBE_RTTPT2C_LSP; ++ ++ IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg); ++ } ++ ++ /* ++ * Configure Tx packet plane (recycle mode; SP; arb delay) and ++ * enable arbiter ++ */ ++ reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM | ++ (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT); ++ IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg); ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_dcb_config_pfc_82599 - Configure priority flow control ++ * @hw: pointer to hardware structure ++ * @dcb_config: pointer to ixgbe_dcb_config structure ++ * ++ * Configure Priority Flow Control (PFC) for each traffic class. ++ */ ++s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, ++ struct ixgbe_dcb_config *dcb_config) ++{ ++ u32 i, reg, rx_pba_size; ++ ++ /* If PFC is disabled globally then fall back to LFC. */ ++ if (!dcb_config->pfc_mode_enable) { ++ for (i = 0; i < dcb_config->num_tcs.pg_tcs; i++) ++ hw->mac.ops.fc_enable(hw, i); ++ goto out; ++ } ++ ++ /* Configure PFC Tx thresholds per TC */ ++ for (i = 0; i < dcb_config->num_tcs.pg_tcs; i++) { ++ rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); ++ ++ reg = ((rx_pba_size >> 5) & 0xFFE0); ++ if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full || ++ dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx) ++ reg |= IXGBE_FCRTL_XONE; ++ IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg); ++ ++ reg = ((rx_pba_size >> 2) & 0xFFE0); ++ if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full || ++ dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx) ++ reg |= IXGBE_FCRTH_FCEN; ++ IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg); ++ } ++ ++ /* Configure pause time (2 TCs per register) */ ++ reg = hw->fc.pause_time | (hw->fc.pause_time << 16); ++ for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) ++ IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); ++ ++ /* Configure flow control refresh threshold value */ ++ IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); ++ ++ /* Enable Transmit PFC */ ++ reg = IXGBE_FCCFG_TFCE_PRIORITY; ++ IXGBE_WRITE_REG(hw, IXGBE_FCCFG, reg); ++ ++ /* ++ * Enable Receive PFC ++ * We will always honor XOFF frames we receive when ++ * we are in PFC mode. ++ */ ++ reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); ++ reg &= ~IXGBE_MFLCN_RFCE; ++ reg |= IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_DPF; ++ IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg); ++out: ++ return 0; ++} ++ ++/** ++ * ixgbe_dcb_config_tc_stats_82599 - Config traffic class statistics ++ * @hw: pointer to hardware structure ++ * ++ * Configure queue statistics registers, all queues belonging to same traffic ++ * class uses a single set of queue statistics counters. ++ */ ++s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw) ++{ ++ u32 reg = 0; ++ u8 i = 0; ++ ++ /* ++ * Receive Queues stats setting ++ * 32 RQSMR registers, each configuring 4 queues. 
++ * Set all 16 queues of each TC to the same stat ++ * with TC 'n' going to stat 'n'. ++ */ ++ for (i = 0; i < 32; i++) { ++ reg = 0x01010101 * (i / 4); ++ IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg); ++ } ++ /* ++ * Transmit Queues stats setting ++ * 32 TQSM registers, each controlling 4 queues. ++ * Set all queues of each TC to the same stat ++ * with TC 'n' going to stat 'n'. ++ * Tx queues are allocated non-uniformly to TCs: ++ * 32, 32, 16, 16, 8, 8, 8, 8. ++ */ ++ for (i = 0; i < 32; i++) { ++ if (i < 8) ++ reg = 0x00000000; ++ else if (i < 16) ++ reg = 0x01010101; ++ else if (i < 20) ++ reg = 0x02020202; ++ else if (i < 24) ++ reg = 0x03030303; ++ else if (i < 26) ++ reg = 0x04040404; ++ else if (i < 28) ++ reg = 0x05050505; ++ else if (i < 30) ++ reg = 0x06060606; ++ else ++ reg = 0x07070707; ++ IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg); ++ } ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_dcb_config_82599 - Configure general DCB parameters ++ * @hw: pointer to hardware structure ++ * @dcb_config: pointer to ixgbe_dcb_config structure ++ * ++ * Configure general DCB parameters. ++ */ ++s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw, ++ struct ixgbe_dcb_config *dcb_config) ++{ ++ u32 reg; ++ u32 q; ++ ++ /* Disable the Tx desc arbiter so that MTQC can be changed */ ++ reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS); ++ reg |= IXGBE_RTTDCS_ARBDIS; ++ IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg); ++ ++ /* Enable DCB for Rx with 8 TCs */ ++ reg = IXGBE_READ_REG(hw, IXGBE_MRQC); ++ switch (reg & IXGBE_MRQC_MRQE_MASK) { ++ case 0: ++ case IXGBE_MRQC_RT4TCEN: ++ /* RSS disabled cases */ ++ reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | IXGBE_MRQC_RT8TCEN; ++ break; ++ case IXGBE_MRQC_RSSEN: ++ case IXGBE_MRQC_RTRSS4TCEN: ++ /* RSS enabled cases */ ++ reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | IXGBE_MRQC_RTRSS8TCEN; ++ break; ++ default: ++ /* Unsupported value, assume stale data, overwrite no RSS */ ++ reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | IXGBE_MRQC_RT8TCEN; ++ } ++ if (dcb_config->num_tcs.pg_tcs == 4) { ++ /* Enable DCB for Rx with 4 TCs and VT Mode*/ ++ reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | IXGBE_MRQC_VMDQRT4TCEN; ++ } ++ IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg); ++ ++ /* Enable DCB for Tx with 8 TCs */ ++ if (dcb_config->num_tcs.pg_tcs == 8) ++ reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ; ++ else /* Enable DCB for Tx with 4 TCs and VT Mode*/ ++ reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_VT_ENA ++ | IXGBE_MTQC_4TC_4TQ; ++ IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg); ++ ++ /* Disable drop for all queues */ ++ for (q=0; q < 128; q++) { ++ IXGBE_WRITE_REG(hw, IXGBE_QDE, q << IXGBE_QDE_IDX_SHIFT); ++ } ++ ++ /* Enable the Tx desc arbiter */ ++ reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS); ++ reg &= ~IXGBE_RTTDCS_ARBDIS; ++ IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg); ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_dcb_hw_config_82599 - Configure and enable DCB ++ * @hw: pointer to hardware structure ++ * @dcb_config: pointer to ixgbe_dcb_config structure ++ * ++ * Configure dcb settings and enable dcb mode. 
++ */ ++s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, ++ struct ixgbe_dcb_config *dcb_config) ++{ ++ ++ ixgbe_dcb_config_packet_buffers_82599(hw, dcb_config); ++ ixgbe_dcb_config_82599(hw, dcb_config); ++ ixgbe_dcb_config_rx_arbiter_82599(hw, dcb_config); ++ ixgbe_dcb_config_tx_desc_arbiter_82599(hw, dcb_config); ++ ixgbe_dcb_config_tx_data_arbiter_82599(hw, dcb_config); ++ ixgbe_dcb_config_pfc_82599(hw, dcb_config); ++ ixgbe_dcb_config_tc_stats_82599(hw); ++ ++ ++ return 0; ++} ++ +diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_dcb_82599.h linux-2.6.22-50/drivers/net/ixgbe/ixgbe_dcb_82599.h +--- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_dcb_82599.h 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_dcb_82599.h 2010-08-25 17:56:26.000000000 -0400 +@@ -0,0 +1,128 @@ ++/******************************************************************************* ++ ++ Intel 10 Gigabit PCI Express Linux driver ++ Copyright(c) 1999 - 2010 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ You should have received a copy of the GNU General Public License along with ++ this program; if not, write to the Free Software Foundation, Inc., ++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#ifndef _DCB_82599_CONFIG_H_ ++#define _DCB_82599_CONFIG_H_ ++ ++/* DCB register definitions */ ++#define IXGBE_RTTDCS_TDPAC 0x00000001 /* 0 Round Robin, ++ * 1 WSP - Weighted Strict Priority ++ */ ++#define IXGBE_RTTDCS_VMPAC 0x00000002 /* 0 Round Robin, ++ * 1 WRR - Weighted Round Robin ++ */ ++#define IXGBE_RTTDCS_TDRM 0x00000010 /* Transmit Recycle Mode */ ++#define IXGBE_RTTDCS_BDPM 0x00400000 /* Bypass Data Pipe - must clear! */ ++#define IXGBE_RTTDCS_BPBFSM 0x00800000 /* Bypass PB Free Space - must ++ * clear! 
++ */ ++#define IXGBE_RTTDCS_SPEED_CHG 0x80000000 /* Link speed change */ ++ ++/* Receive UP2TC mapping */ ++#define IXGBE_RTRUP2TC_UP_SHIFT 3 ++/* Transmit UP2TC mapping */ ++#define IXGBE_RTTUP2TC_UP_SHIFT 3 ++ ++#define IXGBE_RTRPT4C_MCL_SHIFT 12 /* Offset to Max Credit Limit setting */ ++#define IXGBE_RTRPT4C_BWG_SHIFT 9 /* Offset to BWG index */ ++#define IXGBE_RTRPT4C_GSP 0x40000000 /* GSP enable bit */ ++#define IXGBE_RTRPT4C_LSP 0x80000000 /* LSP enable bit */ ++ ++#define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet ++ * buffers enable ++ */ ++#define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores ++ * (RSS) enable ++ */ ++ ++/* RTRPCS Bit Masks */ ++#define IXGBE_RTRPCS_RRM 0x00000002 /* Receive Recycle Mode enable */ ++/* Receive Arbitration Control: 0 Round Robin, 1 DFP */ ++#define IXGBE_RTRPCS_RAC 0x00000004 ++#define IXGBE_RTRPCS_ARBDIS 0x00000040 /* Arbitration disable bit */ ++ ++/* RTTDT2C Bit Masks */ ++#define IXGBE_RTTDT2C_MCL_SHIFT 12 ++#define IXGBE_RTTDT2C_BWG_SHIFT 9 ++#define IXGBE_RTTDT2C_GSP 0x40000000 ++#define IXGBE_RTTDT2C_LSP 0x80000000 ++ ++#define IXGBE_RTTPT2C_MCL_SHIFT 12 ++#define IXGBE_RTTPT2C_BWG_SHIFT 9 ++#define IXGBE_RTTPT2C_GSP 0x40000000 ++#define IXGBE_RTTPT2C_LSP 0x80000000 ++ ++/* RTTPCS Bit Masks */ ++#define IXGBE_RTTPCS_TPPAC 0x00000020 /* 0 Round Robin, ++ * 1 SP - Strict Priority ++ */ ++#define IXGBE_RTTPCS_ARBDIS 0x00000040 /* Arbiter disable */ ++#define IXGBE_RTTPCS_TPRM 0x00000100 /* Transmit Recycle Mode enable */ ++#define IXGBE_RTTPCS_ARBD_SHIFT 22 ++#define IXGBE_RTTPCS_ARBD_DCB 0x4 /* Arbitration delay in DCB mode */ ++ ++#define IXGBE_TXPBSIZE_20KB 0x00005000 /* 20KB Packet Buffer */ ++#define IXGBE_TXPBSIZE_40KB 0x0000A000 /* 40KB Packet Buffer */ ++#define IXGBE_RXPBSIZE_48KB 0x0000C000 /* 48KB Packet Buffer */ ++#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */ ++#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */ ++#define IXGBE_RXPBSIZE_128KB 0x00020000 /* 128KB Packet Buffer */ ++#define IXGBE_RXPBSIZE_MAX 0x00080000 /* 512KB Packet Buffer*/ ++#define IXGBE_TXPBSIZE_MAX 0x00028000 /* 160KB Packet Buffer*/ ++ ++#define IXGBE_TXPBTHRESH_DCB 0xA /* THRESH value for DCB mode */ ++#define IXGBE_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */ ++ ++ ++/* DCB hardware-specific driver APIs */ ++ ++/* DCB PFC functions */ ++s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, ++ struct ixgbe_dcb_config *dcb_config); ++s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *hw, ++ struct ixgbe_hw_stats *stats, ++ u8 tc_count); ++ ++/* DCB traffic class stats */ ++s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw); ++s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *hw, ++ struct ixgbe_hw_stats *stats, ++ u8 tc_count); ++ ++/* DCB config arbiters */ ++s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, ++ struct ixgbe_dcb_config *dcb_config); ++s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, ++ struct ixgbe_dcb_config *dcb_config); ++s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, ++ struct ixgbe_dcb_config *dcb_config); ++ ++/* DCB hw initialization */ ++s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, ++ struct ixgbe_dcb_config *config); ++ ++#endif /* _DCB_82599_CONFIG_H */ +diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_dcb.c linux-2.6.22-50/drivers/net/ixgbe/ixgbe_dcb.c +--- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_dcb.c 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_dcb.c 2010-08-25 
17:56:26.000000000 -0400 +@@ -0,0 +1,361 @@ ++/******************************************************************************* ++ ++ Intel 10 Gigabit PCI Express Linux driver ++ Copyright(c) 1999 - 2010 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ You should have received a copy of the GNU General Public License along with ++ this program; if not, write to the Free Software Foundation, Inc., ++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++ ++#include "ixgbe_type.h" ++#include "ixgbe_dcb.h" ++#include "ixgbe_dcb_82598.h" ++#include "ixgbe_dcb_82599.h" ++ ++/** ++ * ixgbe_dcb_config - Struct containing DCB settings. ++ * @dcb_config: Pointer to DCB config structure ++ * ++ * This function checks DCB rules for DCB settings. ++ * The following rules are checked: ++ * 1. The sum of bandwidth percentages of all Bandwidth Groups must total 100%. ++ * 2. The sum of bandwidth percentages of all Traffic Classes within a Bandwidth ++ * Group must total 100. ++ * 3. A Traffic Class should not be set to both Link Strict Priority ++ * and Group Strict Priority. ++ * 4. Link strict Bandwidth Groups can only have link strict traffic classes ++ * with zero bandwidth. ++ */ ++s32 ixgbe_dcb_check_config(struct ixgbe_dcb_config *dcb_config) ++{ ++ struct tc_bw_alloc *p; ++ s32 ret_val = 0; ++ u8 i, j, bw = 0, bw_id; ++ u8 bw_sum[2][MAX_BW_GROUP]; ++ bool link_strict[2][MAX_BW_GROUP]; ++ ++ memset(bw_sum, 0, sizeof(bw_sum)); ++ memset(link_strict, 0, sizeof(link_strict)); ++ ++ /* First Tx, then Rx */ ++ for (i = 0; i < 2; i++) { ++ /* Check each traffic class for rule violation */ ++ for (j = 0; j < dcb_config->num_tcs.pg_tcs; j++) { ++ p = &dcb_config->tc_config[j].path[i]; ++ ++ bw = p->bwg_percent; ++ bw_id = p->bwg_id; ++ ++ if (bw_id >= MAX_BW_GROUP) { ++ ret_val = DCB_ERR_CONFIG; ++ goto err_config; ++ } ++ if (p->prio_type == prio_link) { ++ link_strict[i][bw_id] = true; ++ /* Link strict should have zero bandwidth */ ++ if (bw) { ++ ret_val = DCB_ERR_LS_BW_NONZERO; ++ goto err_config; ++ } ++ } else if (!bw) { ++ /* ++ * Traffic classes without link strict ++ * should have non-zero bandwidth. ++ */ ++ ret_val = DCB_ERR_TC_BW_ZERO; ++ goto err_config; ++ } ++ bw_sum[i][bw_id] += bw; ++ } ++ ++ bw = 0; ++ ++ /* Check each bandwidth group for rule violation */ ++ for (j = 0; j < MAX_BW_GROUP; j++) { ++ bw += dcb_config->bw_percentage[i][j]; ++ /* ++ * Sum of bandwidth percentages of all traffic classes ++ * within a Bandwidth Group must total 100 except for ++ * link strict group (zero bandwidth). ++ */ ++ if (link_strict[i][j]) { ++ if (bw_sum[i][j]) { ++ /* ++ * Link strict group should have zero ++ * bandwidth. 
++ */ ++ ret_val = DCB_ERR_LS_BWG_NONZERO; ++ goto err_config; ++ } ++ } else if (bw_sum[i][j] != BW_PERCENT && ++ bw_sum[i][j] != 0) { ++ ret_val = DCB_ERR_TC_BW; ++ goto err_config; ++ } ++ } ++ ++ if (bw != BW_PERCENT) { ++ ret_val = DCB_ERR_BW_GROUP; ++ goto err_config; ++ } ++ } ++ ++ return DCB_SUCCESS; ++ ++err_config: ++ hw_dbg(hw, "DCB error code %d while checking %s settings.\n", ++ ret_val, (j == DCB_TX_CONFIG) ? "Tx" : "Rx"); ++ ++ return ret_val; ++} ++ ++/** ++ * ixgbe_dcb_calculate_tc_credits - Calculates traffic class credits ++ * @ixgbe_dcb_config: Struct containing DCB settings. ++ * @direction: Configuring either Tx or Rx. ++ * ++ * This function calculates the credits allocated to each traffic class. ++ * It should be called only after the rules are checked by ++ * ixgbe_dcb_check_config(). ++ */ ++s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw, ++ struct ixgbe_dcb_config *dcb_config, ++ u32 max_frame_size, ++ u8 direction) ++{ ++ struct tc_bw_alloc *p; ++ s32 ret_val = 0; ++ /* Initialization values default for Tx settings */ ++ u32 credit_refill = 0; ++ u32 credit_max = 0; ++ u32 minimal_credit_max = 0; ++ u16 link_percentage = 0; ++ u8 bw_percent = 0; ++ u8 i; ++ ++ if (dcb_config == NULL) { ++ ret_val = DCB_ERR_CONFIG; ++ goto out; ++ } ++ ++ /* Find out the link percentage for each TC first */ ++ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { ++ p = &dcb_config->tc_config[i].path[direction]; ++ bw_percent = dcb_config->bw_percentage[direction][p->bwg_id]; ++ ++ link_percentage = p->bwg_percent; ++ /* Must be careful of integer division for very small nums */ ++ link_percentage = (link_percentage * bw_percent) / 100; ++ if (p->bwg_percent > 0 && link_percentage == 0) ++ link_percentage = 1; ++ ++ /* Save link_percentage for reference */ ++ p->link_percent = (u8)link_percentage; ++ ++ /* Calculate credit refill and save it */ ++ credit_refill = link_percentage * MINIMUM_CREDIT_REFILL; ++ p->data_credits_refill = (u16)credit_refill; ++ ++ /* Calculate maximum credit for the TC */ ++ credit_max = (link_percentage * MAX_CREDIT) / 100; ++ ++ /* ++ * Adjustment based on rule checking, if the percentage ++ * of a TC is too small, the maximum credit may not be ++ * enough to send out a jumbo frame in data plane arbitration. ++ */ ++ ++ if (credit_max) { ++ minimal_credit_max = (max_frame_size + ++ (DCB_CREDIT_QUANTUM - 1)) / ++ DCB_CREDIT_QUANTUM; ++ ++ if (credit_max < minimal_credit_max) ++ credit_max = minimal_credit_max; ++ } ++ ++ if (direction == DCB_TX_CONFIG) { ++ /* ++ * Adjustment based on rule checking, if the ++ * percentage of a TC is too small, the maximum ++ * credit may not be enough to send out a TSO ++ * packet in descriptor plane arbitration. ++ */ ++ if (credit_max && (credit_max < MINIMUM_CREDIT_FOR_TSO) ++ && (hw->mac.type == ixgbe_mac_82598EB)) ++ credit_max = MINIMUM_CREDIT_FOR_TSO; ++ ++ dcb_config->tc_config[i].desc_credits_max = ++ (u16)credit_max; ++ } ++ ++ p->data_credits_max = (u16)credit_max; ++ } ++ ++out: ++ return ret_val; ++} ++ ++/** ++ * ixgbe_dcb_get_tc_stats - Returns status of each traffic class ++ * @hw: pointer to hardware structure ++ * @stats: pointer to statistics structure ++ * @tc_count: Number of elements in bwg_array. ++ * ++ * This function returns the status data for each of the Traffic Classes in use. 
++ */ ++s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats, ++ u8 tc_count) ++{ ++ s32 ret = 0; ++ if (hw->mac.type == ixgbe_mac_82598EB) ++ ret = ixgbe_dcb_get_tc_stats_82598(hw, stats, tc_count); ++ else if (hw->mac.type >= ixgbe_mac_82599EB) ++ ret = ixgbe_dcb_get_tc_stats_82599(hw, stats, tc_count); ++ return ret; ++} ++ ++/** ++ * ixgbe_dcb_get_pfc_stats - Returns CBFC status of each traffic class ++ * @hw: pointer to hardware structure ++ * @stats: pointer to statistics structure ++ * @tc_count: Number of elements in bwg_array. ++ * ++ * This function returns the CBFC status data for each of the Traffic Classes. ++ */ ++s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats, ++ u8 tc_count) ++{ ++ s32 ret = 0; ++ if (hw->mac.type == ixgbe_mac_82598EB) ++ ret = ixgbe_dcb_get_pfc_stats_82598(hw, stats, tc_count); ++ else if (hw->mac.type >= ixgbe_mac_82599EB) ++ ret = ixgbe_dcb_get_pfc_stats_82599(hw, stats, tc_count); ++ return ret; ++} ++ ++/** ++ * ixgbe_dcb_config_rx_arbiter - Config Rx arbiter ++ * @hw: pointer to hardware structure ++ * @dcb_config: pointer to ixgbe_dcb_config structure ++ * ++ * Configure Rx Data Arbiter and credits for each traffic class. ++ */ ++s32 ixgbe_dcb_config_rx_arbiter(struct ixgbe_hw *hw, ++ struct ixgbe_dcb_config *dcb_config) ++{ ++ s32 ret = 0; ++ if (hw->mac.type == ixgbe_mac_82598EB) ++ ret = ixgbe_dcb_config_rx_arbiter_82598(hw, dcb_config); ++ else if (hw->mac.type >= ixgbe_mac_82599EB) ++ ret = ixgbe_dcb_config_rx_arbiter_82599(hw, dcb_config); ++ return ret; ++} ++ ++/** ++ * ixgbe_dcb_config_tx_desc_arbiter - Config Tx Desc arbiter ++ * @hw: pointer to hardware structure ++ * @dcb_config: pointer to ixgbe_dcb_config structure ++ * ++ * Configure Tx Descriptor Arbiter and credits for each traffic class. ++ */ ++s32 ixgbe_dcb_config_tx_desc_arbiter(struct ixgbe_hw *hw, ++ struct ixgbe_dcb_config *dcb_config) ++{ ++ s32 ret = 0; ++ if (hw->mac.type == ixgbe_mac_82598EB) ++ ret = ixgbe_dcb_config_tx_desc_arbiter_82598(hw, dcb_config); ++ else if (hw->mac.type >= ixgbe_mac_82599EB) ++ ret = ixgbe_dcb_config_tx_desc_arbiter_82599(hw, dcb_config); ++ return ret; ++} ++ ++/** ++ * ixgbe_dcb_config_tx_data_arbiter - Config Tx data arbiter ++ * @hw: pointer to hardware structure ++ * @dcb_config: pointer to ixgbe_dcb_config structure ++ * ++ * Configure Tx Data Arbiter and credits for each traffic class. ++ */ ++s32 ixgbe_dcb_config_tx_data_arbiter(struct ixgbe_hw *hw, ++ struct ixgbe_dcb_config *dcb_config) ++{ ++ s32 ret = 0; ++ if (hw->mac.type == ixgbe_mac_82598EB) ++ ret = ixgbe_dcb_config_tx_data_arbiter_82598(hw, dcb_config); ++ else if (hw->mac.type >= ixgbe_mac_82599EB) ++ ret = ixgbe_dcb_config_tx_data_arbiter_82599(hw, dcb_config); ++ return ret; ++} ++ ++/** ++ * ixgbe_dcb_config_pfc - Config priority flow control ++ * @hw: pointer to hardware structure ++ * @dcb_config: pointer to ixgbe_dcb_config structure ++ * ++ * Configure Priority Flow Control for each traffic class. 
++ */ ++s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *hw, ++ struct ixgbe_dcb_config *dcb_config) ++{ ++ s32 ret = 0; ++ if (hw->mac.type == ixgbe_mac_82598EB) ++ ret = ixgbe_dcb_config_pfc_82598(hw, dcb_config); ++ else if (hw->mac.type >= ixgbe_mac_82599EB) ++ ret = ixgbe_dcb_config_pfc_82599(hw, dcb_config); ++ return ret; ++} ++ ++/** ++ * ixgbe_dcb_config_tc_stats - Config traffic class statistics ++ * @hw: pointer to hardware structure ++ * ++ * Configure queue statistics registers, all queues belonging to same traffic ++ * class uses a single set of queue statistics counters. ++ */ ++s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *hw) ++{ ++ s32 ret = 0; ++ if (hw->mac.type == ixgbe_mac_82598EB) ++ ret = ixgbe_dcb_config_tc_stats_82598(hw); ++ else if (hw->mac.type >= ixgbe_mac_82599EB) ++ ret = ixgbe_dcb_config_tc_stats_82599(hw); ++ return ret; ++} ++ ++/** ++ * ixgbe_dcb_hw_config - Config and enable DCB ++ * @hw: pointer to hardware structure ++ * @dcb_config: pointer to ixgbe_dcb_config structure ++ * ++ * Configure dcb settings and enable dcb mode. ++ */ ++s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw, ++ struct ixgbe_dcb_config *dcb_config) ++{ ++ s32 ret = 0; ++ if (hw->mac.type == ixgbe_mac_82598EB) ++ ret = ixgbe_dcb_hw_config_82598(hw, dcb_config); ++ else if (hw->mac.type >= ixgbe_mac_82599EB) ++ ret = ixgbe_dcb_hw_config_82599(hw, dcb_config); ++ return ret; ++} +diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_dcb.h linux-2.6.22-50/drivers/net/ixgbe/ixgbe_dcb.h +--- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_dcb.h 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_dcb.h 2010-08-25 17:56:26.000000000 -0400 +@@ -0,0 +1,193 @@ ++/******************************************************************************* ++ ++ Intel 10 Gigabit PCI Express Linux driver ++ Copyright(c) 1999 - 2010 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ You should have received a copy of the GNU General Public License along with ++ this program; if not, write to the Free Software Foundation, Inc., ++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#ifndef _DCB_CONFIG_H_ ++#define _DCB_CONFIG_H_ ++ ++#include "ixgbe_type.h" ++ ++/* DCB data structures */ ++ ++#define IXGBE_MAX_PACKET_BUFFERS 8 ++#define MAX_USER_PRIORITY 8 ++#define MAX_TRAFFIC_CLASS 8 ++#define MAX_BW_GROUP 8 ++#define BW_PERCENT 100 ++ ++#define DCB_TX_CONFIG 0 ++#define DCB_RX_CONFIG 1 ++ ++/* DCB error Codes */ ++#define DCB_SUCCESS 0 ++#define DCB_ERR_CONFIG -1 ++#define DCB_ERR_PARAM -2 ++ ++/* Transmit and receive Errors */ ++/* Error in bandwidth group allocation */ ++#define DCB_ERR_BW_GROUP -3 ++/* Error in traffic class bandwidth allocation */ ++#define DCB_ERR_TC_BW -4 ++/* Traffic class has both link strict and group strict enabled */ ++#define DCB_ERR_LS_GS -5 ++/* Link strict traffic class has non zero bandwidth */ ++#define DCB_ERR_LS_BW_NONZERO -6 ++/* Link strict bandwidth group has non zero bandwidth */ ++#define DCB_ERR_LS_BWG_NONZERO -7 ++/* Traffic class has zero bandwidth */ ++#define DCB_ERR_TC_BW_ZERO -8 ++ ++#define DCB_NOT_IMPLEMENTED 0x7FFFFFFF ++ ++struct dcb_pfc_tc_debug { ++ u8 tc; ++ u8 pause_status; ++ u64 pause_quanta; ++}; ++ ++enum strict_prio_type { ++ prio_none = 0, ++ prio_group, ++ prio_link ++}; ++ ++/* DCB capability definitions */ ++#define IXGBE_DCB_PG_SUPPORT 0x00000001 ++#define IXGBE_DCB_PFC_SUPPORT 0x00000002 ++#define IXGBE_DCB_BCN_SUPPORT 0x00000004 ++#define IXGBE_DCB_UP2TC_SUPPORT 0x00000008 ++#define IXGBE_DCB_GSP_SUPPORT 0x00000010 ++ ++#define IXGBE_DCB_8_TC_SUPPORT 0x80 ++ ++struct dcb_support { ++ /* DCB capabilities */ ++ u32 capabilities; ++ ++ /* Each bit represents a number of TCs configurable in the hw. ++ * If 8 traffic classes can be configured, the value is 0x80. ++ */ ++ u8 traffic_classes; ++ u8 pfc_traffic_classes; ++}; ++ ++/* Traffic class bandwidth allocation per direction */ ++struct tc_bw_alloc { ++ u8 bwg_id; /* Bandwidth Group (BWG) ID */ ++ u8 bwg_percent; /* % of BWG's bandwidth */ ++ u8 link_percent; /* % of link bandwidth */ ++ u8 up_to_tc_bitmap; /* User Priority to Traffic Class mapping */ ++ u16 data_credits_refill; /* Credit refill amount in 64B granularity */ ++ u16 data_credits_max; /* Max credits for a configured packet buffer ++ * in 64B granularity.*/ ++ enum strict_prio_type prio_type; /* Link or Group Strict Priority */ ++}; ++ ++enum dcb_pfc_type { ++ pfc_disabled = 0, ++ pfc_enabled_full, ++ pfc_enabled_tx, ++ pfc_enabled_rx ++}; ++ ++/* Traffic class configuration */ ++struct tc_configuration { ++ struct tc_bw_alloc path[2]; /* One each for Tx/Rx */ ++ enum dcb_pfc_type dcb_pfc; /* Class based flow control setting */ ++ ++ u16 desc_credits_max; /* For Tx Descriptor arbitration */ ++ u8 tc; /* Traffic class (TC) */ ++}; ++ ++enum dcb_rx_pba_cfg { ++ pba_equal, /* PBA[0-7] each use 64KB FIFO */ ++ pba_80_48 /* PBA[0-3] each use 80KB, PBA[4-7] each use 48KB */ ++}; ++ ++struct dcb_num_tcs { ++ u8 pg_tcs; ++ u8 pfc_tcs; ++}; ++ ++struct ixgbe_dcb_config { ++ struct tc_configuration tc_config[MAX_TRAFFIC_CLASS]; ++ struct dcb_support support; ++ struct dcb_num_tcs num_tcs; ++ u8 bw_percentage[2][MAX_BW_GROUP]; /* One each for Tx/Rx */ ++ bool pfc_mode_enable; ++ bool round_robin_enable; ++ ++ enum dcb_rx_pba_cfg rx_pba_cfg; ++ ++ u32 dcb_cfg_version; /* Not used...OS-specific? 
*/ ++ u32 link_speed; /* For bandwidth allocation validation purpose */ ++}; ++ ++/* DCB driver APIs */ ++ ++/* DCB rule checking function.*/ ++s32 ixgbe_dcb_check_config(struct ixgbe_dcb_config *config); ++ ++/* DCB credits calculation */ ++s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw, ++ struct ixgbe_dcb_config *config, ++ u32 max_frame_size, ++ u8 direction); ++ ++/* DCB PFC functions */ ++s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *hw, ++ struct ixgbe_dcb_config *dcb_config); ++s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats, ++ u8 tc_count); ++ ++/* DCB traffic class stats */ ++s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *); ++s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats, ++ u8 tc_count); ++ ++/* DCB config arbiters */ ++s32 ixgbe_dcb_config_tx_desc_arbiter(struct ixgbe_hw *hw, ++ struct ixgbe_dcb_config *dcb_config); ++s32 ixgbe_dcb_config_tx_data_arbiter(struct ixgbe_hw *hw, ++ struct ixgbe_dcb_config *dcb_config); ++s32 ixgbe_dcb_config_rx_arbiter(struct ixgbe_hw *hw, ++ struct ixgbe_dcb_config *dcb_config); ++ ++/* DCB hw initialization */ ++s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw, struct ixgbe_dcb_config *config); ++ ++ ++/* DCB definitions for credit calculation */ ++#define DCB_CREDIT_QUANTUM 64 ++#define MAX_CREDIT_REFILL 200 /* 200 * 64B = 12800B */ ++#define MINIMUM_CREDIT_REFILL 2 /* 2 * 64B = 128B */ ++#define DCB_MAX_TSO_SIZE (32 * 1024) /* MAX TSO packet size supported ++ * in DCB mode */ ++/* 513 for 32KB TSO packet */ ++#define MINIMUM_CREDIT_FOR_TSO ((DCB_MAX_TSO_SIZE / DCB_CREDIT_QUANTUM) + 1) ++#define MAX_CREDIT (2 * MAX_CREDIT_REFILL) ++ ++#endif /* _DCB_CONFIG_H */ +diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_dcb_nl.c linux-2.6.22-50/drivers/net/ixgbe/ixgbe_dcb_nl.c +--- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_dcb_nl.c 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_dcb_nl.c 2010-08-25 17:56:26.000000000 -0400 +@@ -0,0 +1,1923 @@ ++/******************************************************************************* ++ ++ Intel 10 Gigabit PCI Express Linux driver ++ Copyright(c) 1999 - 2010 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ You should have received a copy of the GNU General Public License along with ++ this program; if not, write to the Free Software Foundation, Inc., ++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#include "ixgbe.h" ++ ++#ifdef CONFIG_DCB ++#include ++#include "ixgbe_dcb_82598.h" ++#include "ixgbe_dcb_82599.h" ++#else ++#include ++#include ++#include ++#include ++#endif ++ ++/* Callbacks for DCB netlink in the kernel */ ++#define BIT_DCB_MODE 0x01 ++#define BIT_PFC 0x02 ++#define BIT_PG_RX 0x04 ++#define BIT_PG_TX 0x08 ++#define BIT_APP_UPCHG 0x10 ++#define BIT_RESETLINK 0x40 ++#define BIT_LINKSPEED 0x80 ++ ++/* Responses for the DCB_C_SET_ALL command */ ++#define DCB_HW_CHG_RST 0 /* DCB configuration changed with reset */ ++#define DCB_NO_HW_CHG 1 /* DCB configuration did not change */ ++#define DCB_HW_CHG 2 /* DCB configuration changed, no reset */ ++ ++#ifndef CONFIG_DCB ++/* DCB configuration commands */ ++enum { ++ DCB_C_UNDEFINED, ++ DCB_C_GSTATE, ++ DCB_C_SSTATE, ++ DCB_C_PG_STATS, ++ DCB_C_PGTX_GCFG, ++ DCB_C_PGTX_SCFG, ++ DCB_C_PGRX_GCFG, ++ DCB_C_PGRX_SCFG, ++ DCB_C_PFC_GCFG, ++ DCB_C_PFC_SCFG, ++ DCB_C_PFC_STATS, ++ DCB_C_GLINK_SPD, ++ DCB_C_SLINK_SPD, ++ DCB_C_SET_ALL, ++ DCB_C_GPERM_HWADDR, ++ __DCB_C_ENUM_MAX, ++}; ++ ++#define IXGBE_DCB_C_MAX (__DCB_C_ENUM_MAX - 1) ++ ++/* DCB configuration attributes */ ++enum { ++ DCB_A_UNDEFINED = 0, ++ DCB_A_IFNAME, ++ DCB_A_STATE, ++ DCB_A_PFC_STATS, ++ DCB_A_PFC_CFG, ++ DCB_A_PG_STATS, ++ DCB_A_PG_CFG, ++ DCB_A_LINK_SPD, ++ DCB_A_SET_ALL, ++ DCB_A_PERM_HWADDR, ++ __DCB_A_ENUM_MAX, ++}; ++ ++#define IXGBE_DCB_A_MAX (__DCB_A_ENUM_MAX - 1) ++ ++/* PERM HWADDR attributes */ ++enum { ++ PERM_HW_A_UNDEFINED, ++ PERM_HW_A_0, ++ PERM_HW_A_1, ++ PERM_HW_A_2, ++ PERM_HW_A_3, ++ PERM_HW_A_4, ++ PERM_HW_A_5, ++ PERM_HW_A_ALL, ++ __PERM_HW_A_ENUM_MAX, ++}; ++ ++#define IXGBE_DCB_PERM_HW_A_MAX (__PERM_HW_A_ENUM_MAX - 1) ++ ++/* PFC configuration attributes */ ++enum { ++ PFC_A_UP_UNDEFINED, ++ PFC_A_UP_0, ++ PFC_A_UP_1, ++ PFC_A_UP_2, ++ PFC_A_UP_3, ++ PFC_A_UP_4, ++ PFC_A_UP_5, ++ PFC_A_UP_6, ++ PFC_A_UP_7, ++ PFC_A_UP_MAX, /* Used as an iterator cap */ ++ PFC_A_UP_ALL, ++ __PFC_A_UP_ENUM_MAX, ++}; ++ ++#define IXGBE_DCB_PFC_A_UP_MAX (__PFC_A_UP_ENUM_MAX - 1) ++ ++/* Priority Group Traffic Class and Bandwidth Group ++ * configuration attributes ++ */ ++enum { ++ PG_A_UNDEFINED, ++ PG_A_TC_0, ++ PG_A_TC_1, ++ PG_A_TC_2, ++ PG_A_TC_3, ++ PG_A_TC_4, ++ PG_A_TC_5, ++ PG_A_TC_6, ++ PG_A_TC_7, ++ PG_A_TC_MAX, /* Used as an iterator cap */ ++ PG_A_TC_ALL, ++ PG_A_BWG_0, ++ PG_A_BWG_1, ++ PG_A_BWG_2, ++ PG_A_BWG_3, ++ PG_A_BWG_4, ++ PG_A_BWG_5, ++ PG_A_BWG_6, ++ PG_A_BWG_7, ++ PG_A_BWG_MAX, /* Used as an iterator cap */ ++ PG_A_BWG_ALL, ++ __PG_A_ENUM_MAX, ++}; ++ ++#define IXGBE_DCB_PG_A_MAX (__PG_A_ENUM_MAX - 1) ++ ++enum { ++ TC_A_PARAM_UNDEFINED, ++ TC_A_PARAM_STRICT_PRIO, ++ TC_A_PARAM_BW_GROUP_ID, ++ TC_A_PARAM_BW_PCT_IN_GROUP, ++ TC_A_PARAM_UP_MAPPING, ++ TC_A_PARAM_MAX, /* Used as an iterator cap */ ++ TC_A_PARAM_ALL, ++ __TC_A_PARAM_ENUM_MAX, ++}; ++ ++#define IXGBE_DCB_TC_A_PARAM_MAX (__TC_A_PARAM_ENUM_MAX - 1) ++ ++#define DCB_PROTO_VERSION 0x1 ++#define is_pci_device(dev) ((dev)->bus == &pci_bus_type) ++ ++static struct genl_family dcb_family = { ++ .id = GENL_ID_GENERATE, ++ .hdrsize = 0, ++ .name = "IXGBE_DCB", ++ .version = DCB_PROTO_VERSION, ++ .maxattr = IXGBE_DCB_A_MAX, ++}; ++ ++/* DCB NETLINK attributes policy */ ++static struct nla_policy dcb_genl_policy[IXGBE_DCB_A_MAX + 1] = { ++ [DCB_A_IFNAME] = {.type = NLA_STRING, .len = IFNAMSIZ - 1}, ++ [DCB_A_STATE] = {.type = NLA_U8}, ++ [DCB_A_PG_CFG] 
= {.type = NLA_NESTED}, ++ [DCB_A_PFC_CFG] = {.type = NLA_NESTED}, ++ [DCB_A_PFC_STATS] = {.type = NLA_NESTED}, ++ [DCB_A_PG_STATS] = {.type = NLA_NESTED}, ++ [DCB_A_LINK_SPD] = {.type = NLA_U8}, ++ [DCB_A_SET_ALL] = {.type = NLA_U8}, ++ [DCB_A_PERM_HWADDR] = {.type = NLA_NESTED}, ++}; ++ ++/* DCB_A_PERM_HWADDR nested attributes... an array. */ ++static struct nla_policy dcb_perm_hwaddr_nest[IXGBE_DCB_PERM_HW_A_MAX + 1] = { ++ [PERM_HW_A_0] = {.type = NLA_U8}, ++ [PERM_HW_A_1] = {.type = NLA_U8}, ++ [PERM_HW_A_2] = {.type = NLA_U8}, ++ [PERM_HW_A_3] = {.type = NLA_U8}, ++ [PERM_HW_A_4] = {.type = NLA_U8}, ++ [PERM_HW_A_5] = {.type = NLA_U8}, ++ [PERM_HW_A_ALL] = {.type = NLA_FLAG}, ++}; ++ ++/* DCB_A_PFC_CFG nested attributes...like an array. */ ++static struct nla_policy dcb_pfc_up_nest[IXGBE_DCB_PFC_A_UP_MAX + 1] = { ++ [PFC_A_UP_0] = {.type = NLA_U8}, ++ [PFC_A_UP_1] = {.type = NLA_U8}, ++ [PFC_A_UP_2] = {.type = NLA_U8}, ++ [PFC_A_UP_3] = {.type = NLA_U8}, ++ [PFC_A_UP_4] = {.type = NLA_U8}, ++ [PFC_A_UP_5] = {.type = NLA_U8}, ++ [PFC_A_UP_6] = {.type = NLA_U8}, ++ [PFC_A_UP_7] = {.type = NLA_U8}, ++ [PFC_A_UP_ALL] = {.type = NLA_FLAG}, ++}; ++ ++/* DCB_A_PG_CFG nested attributes...like a struct. */ ++static struct nla_policy dcb_pg_nest[IXGBE_DCB_PG_A_MAX + 1] = { ++ [PG_A_TC_0] = {.type = NLA_NESTED}, ++ [PG_A_TC_1] = {.type = NLA_NESTED}, ++ [PG_A_TC_2] = {.type = NLA_NESTED}, ++ [PG_A_TC_3] = {.type = NLA_NESTED}, ++ [PG_A_TC_4] = {.type = NLA_NESTED}, ++ [PG_A_TC_5] = {.type = NLA_NESTED}, ++ [PG_A_TC_6] = {.type = NLA_NESTED}, ++ [PG_A_TC_7] = {.type = NLA_NESTED}, ++ [PG_A_TC_ALL] = {.type = NLA_NESTED}, ++ [PG_A_BWG_0] = {.type = NLA_U8}, ++ [PG_A_BWG_1] = {.type = NLA_U8}, ++ [PG_A_BWG_2] = {.type = NLA_U8}, ++ [PG_A_BWG_3] = {.type = NLA_U8}, ++ [PG_A_BWG_4] = {.type = NLA_U8}, ++ [PG_A_BWG_5] = {.type = NLA_U8}, ++ [PG_A_BWG_6] = {.type = NLA_U8}, ++ [PG_A_BWG_7] = {.type = NLA_U8}, ++ [PG_A_BWG_ALL]= {.type = NLA_FLAG}, ++}; ++ ++/* TC_A_CLASS_X nested attributes. 
*/ ++static struct nla_policy dcb_tc_param_nest[IXGBE_DCB_TC_A_PARAM_MAX + 1] = { ++ [TC_A_PARAM_STRICT_PRIO] = {.type = NLA_U8}, ++ [TC_A_PARAM_BW_GROUP_ID] = {.type = NLA_U8}, ++ [TC_A_PARAM_BW_PCT_IN_GROUP] = {.type = NLA_U8}, ++ [TC_A_PARAM_UP_MAPPING] = {.type = NLA_U8}, ++ [TC_A_PARAM_ALL] = {.type = NLA_FLAG}, ++}; ++ ++static int ixgbe_dcb_check_adapter(struct net_device *netdev) ++{ ++ struct device *busdev; ++ struct pci_dev *pcidev; ++ ++ busdev = netdev->dev.parent; ++ if (!busdev) ++ return -EINVAL; ++ ++ if (!is_pci_device(busdev)) ++ return -EINVAL; ++ ++ pcidev = to_pci_dev(busdev); ++ if (!pcidev) ++ return -EINVAL; ++ ++ if (ixgbe_is_ixgbe(pcidev)) ++ return 0; ++ else ++ return -EINVAL; ++} ++#endif ++ ++#ifdef CONFIG_DCB ++int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg, ++ struct ixgbe_dcb_config *dst_dcb_cfg, int tc_max) ++{ ++ struct tc_configuration *src_tc_cfg = NULL; ++ struct tc_configuration *dst_tc_cfg = NULL; ++ int i; ++ ++ if (!src_dcb_cfg || !dst_dcb_cfg) ++ return -EINVAL; ++ ++ for (i = DCB_PG_ATTR_TC_0; i < tc_max + DCB_PG_ATTR_TC_0; i++) { ++ src_tc_cfg = &src_dcb_cfg->tc_config[i - DCB_PG_ATTR_TC_0]; ++ dst_tc_cfg = &dst_dcb_cfg->tc_config[i - DCB_PG_ATTR_TC_0]; ++ ++ dst_tc_cfg->path[DCB_TX_CONFIG].prio_type = ++ src_tc_cfg->path[DCB_TX_CONFIG].prio_type; ++ ++ dst_tc_cfg->path[DCB_TX_CONFIG].bwg_id = ++ src_tc_cfg->path[DCB_TX_CONFIG].bwg_id; ++ ++ dst_tc_cfg->path[DCB_TX_CONFIG].bwg_percent = ++ src_tc_cfg->path[DCB_TX_CONFIG].bwg_percent; ++ ++ dst_tc_cfg->path[DCB_TX_CONFIG].up_to_tc_bitmap = ++ src_tc_cfg->path[DCB_TX_CONFIG].up_to_tc_bitmap; ++ ++ dst_tc_cfg->path[DCB_RX_CONFIG].prio_type = ++ src_tc_cfg->path[DCB_RX_CONFIG].prio_type; ++ ++ dst_tc_cfg->path[DCB_RX_CONFIG].bwg_id = ++ src_tc_cfg->path[DCB_RX_CONFIG].bwg_id; ++ ++ dst_tc_cfg->path[DCB_RX_CONFIG].bwg_percent = ++ src_tc_cfg->path[DCB_RX_CONFIG].bwg_percent; ++ ++ dst_tc_cfg->path[DCB_RX_CONFIG].up_to_tc_bitmap = ++ src_tc_cfg->path[DCB_RX_CONFIG].up_to_tc_bitmap; ++ } ++ ++ for (i = DCB_PG_ATTR_BW_ID_0; i < DCB_PG_ATTR_BW_ID_MAX; i++) { ++ dst_dcb_cfg->bw_percentage[DCB_TX_CONFIG] ++ [i-DCB_PG_ATTR_BW_ID_0] = src_dcb_cfg->bw_percentage ++ [DCB_TX_CONFIG][i-DCB_PG_ATTR_BW_ID_0]; ++ dst_dcb_cfg->bw_percentage[DCB_RX_CONFIG] ++ [i-DCB_PG_ATTR_BW_ID_0] = src_dcb_cfg->bw_percentage ++ [DCB_RX_CONFIG][i-DCB_PG_ATTR_BW_ID_0]; ++ } ++ ++ for (i = DCB_PFC_UP_ATTR_0; i < DCB_PFC_UP_ATTR_MAX; i++) { ++ dst_dcb_cfg->tc_config[i - DCB_PFC_UP_ATTR_0].dcb_pfc = ++ src_dcb_cfg->tc_config[i - DCB_PFC_UP_ATTR_0].dcb_pfc; ++ } ++ dst_dcb_cfg->pfc_mode_enable = src_dcb_cfg->pfc_mode_enable; ++ ++ return 0; ++} ++#else ++int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg, ++ struct ixgbe_dcb_config *dst_dcb_cfg, int tc_max) ++{ ++ struct tc_configuration *src_tc_cfg = NULL; ++ struct tc_configuration *dst_tc_cfg = NULL; ++ int i; ++ ++ if (!src_dcb_cfg || !dst_dcb_cfg) ++ return -EINVAL; ++ ++ dst_dcb_cfg->link_speed = src_dcb_cfg->link_speed; ++ ++ for (i = PG_A_TC_0; i < tc_max + PG_A_TC_0; i++) { ++ src_tc_cfg = &src_dcb_cfg->tc_config[i - PG_A_TC_0]; ++ dst_tc_cfg = &dst_dcb_cfg->tc_config[i - PG_A_TC_0]; ++ ++ dst_tc_cfg->path[DCB_TX_CONFIG].prio_type = ++ src_tc_cfg->path[DCB_TX_CONFIG].prio_type; ++ ++ dst_tc_cfg->path[DCB_TX_CONFIG].bwg_id = ++ src_tc_cfg->path[DCB_TX_CONFIG].bwg_id; ++ ++ dst_tc_cfg->path[DCB_TX_CONFIG].bwg_percent = ++ src_tc_cfg->path[DCB_TX_CONFIG].bwg_percent; ++ ++ dst_tc_cfg->path[DCB_TX_CONFIG].up_to_tc_bitmap = ++ 
src_tc_cfg->path[DCB_TX_CONFIG].up_to_tc_bitmap; ++ ++ dst_tc_cfg->path[DCB_RX_CONFIG].prio_type = ++ src_tc_cfg->path[DCB_RX_CONFIG].prio_type; ++ ++ dst_tc_cfg->path[DCB_RX_CONFIG].bwg_id = ++ src_tc_cfg->path[DCB_RX_CONFIG].bwg_id; ++ ++ dst_tc_cfg->path[DCB_RX_CONFIG].bwg_percent = ++ src_tc_cfg->path[DCB_RX_CONFIG].bwg_percent; ++ ++ dst_tc_cfg->path[DCB_RX_CONFIG].up_to_tc_bitmap = ++ src_tc_cfg->path[DCB_RX_CONFIG].up_to_tc_bitmap; ++ } ++ ++ for (i = PG_A_BWG_0; i < PG_A_BWG_MAX; i++) { ++ dst_dcb_cfg->bw_percentage[DCB_TX_CONFIG][i - PG_A_BWG_0] = ++ src_dcb_cfg->bw_percentage[DCB_TX_CONFIG][i - PG_A_BWG_0]; ++ dst_dcb_cfg->bw_percentage[DCB_RX_CONFIG][i - PG_A_BWG_0] = ++ src_dcb_cfg->bw_percentage[DCB_RX_CONFIG][i - PG_A_BWG_0]; ++ } ++ ++ for (i = PFC_A_UP_0; i < PFC_A_UP_MAX; i++) { ++ dst_dcb_cfg->tc_config[i - PFC_A_UP_0].dcb_pfc = ++ src_dcb_cfg->tc_config[i - PFC_A_UP_0].dcb_pfc; ++ } ++ ++ return 0; ++} ++ ++static int ixgbe_nl_reply(u8 value, u8 cmd, u8 attr, struct genl_info *info) ++{ ++ struct sk_buff *dcb_skb = NULL; ++ void *data; ++ int ret; ++ ++ dcb_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); ++ if (!dcb_skb) ++ return -EINVAL; ++ ++ data = genlmsg_put_reply(dcb_skb, info, &dcb_family, 0, cmd); ++ if (!data) ++ goto err; ++ ++ ret = nla_put_u8(dcb_skb, attr, value); ++ if (ret) ++ goto err; ++ ++ /* end the message, assign the nlmsg_len. */ ++ genlmsg_end(dcb_skb, data); ++ ret = genlmsg_reply(dcb_skb, info); ++ if (ret) ++ goto err; ++ ++ return 0; ++ ++err: ++ kfree(dcb_skb); ++ return -EINVAL; ++} ++#endif ++ ++#ifdef CONFIG_DCB ++static u8 ixgbe_dcbnl_get_state(struct net_device *netdev) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ ++ return !!(adapter->flags & IXGBE_FLAG_DCB_ENABLED); ++} ++ ++static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) ++{ ++ u8 err = 0; ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ ++ if (state > 0) { ++ /* Turn on DCB */ ++ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) ++ goto out; ++ ++ if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { ++ DPRINTK(DRV, ERR, "Enable failed, needs MSI-X\n"); ++ err = 1; ++ goto out; ++ } ++ ++ if (netif_running(netdev)) ++#ifdef HAVE_NET_DEVICE_OPS ++ netdev->netdev_ops->ndo_stop(netdev); ++#else ++ netdev->stop(netdev); ++#endif ++ ixgbe_clear_interrupt_scheme(adapter); ++ if (adapter->hw.mac.type == ixgbe_mac_82598EB) { ++ adapter->last_lfc_mode = adapter->hw.fc.current_mode; ++ adapter->hw.fc.requested_mode = ixgbe_fc_none; ++ } ++ adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; ++ if (adapter->hw.mac.type == ixgbe_mac_82599EB) { ++ DPRINTK(DRV, INFO, "DCB enabled, " ++ "disabling Flow Director\n"); ++ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; ++ adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; ++ } ++ adapter->flags |= IXGBE_FLAG_DCB_ENABLED; ++ ixgbe_init_interrupt_scheme(adapter); ++ if (netif_running(netdev)) ++#ifdef HAVE_NET_DEVICE_OPS ++ netdev->netdev_ops->ndo_open(netdev); ++#else ++ netdev->open(netdev); ++#endif ++ } else { ++ /* Turn off DCB */ ++ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { ++ if (netif_running(netdev)) ++#ifdef HAVE_NET_DEVICE_OPS ++ netdev->netdev_ops->ndo_stop(netdev); ++#else ++ netdev->stop(netdev); ++#endif ++ ixgbe_clear_interrupt_scheme(adapter); ++ adapter->hw.fc.requested_mode = adapter->last_lfc_mode; ++ adapter->temp_dcb_cfg.pfc_mode_enable = false; ++ adapter->dcb_cfg.pfc_mode_enable = false; ++ adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; ++ adapter->flags |= IXGBE_FLAG_RSS_ENABLED; ++ if 
(adapter->hw.mac.type == ixgbe_mac_82599EB) ++ adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; ++ ixgbe_init_interrupt_scheme(adapter); ++ if (netif_running(netdev)) ++#ifdef HAVE_NET_DEVICE_OPS ++ netdev->netdev_ops->ndo_open(netdev); ++#else ++ netdev->open(netdev); ++#endif ++ } ++ } ++out: ++ return err; ++} ++#else ++static int ixgbe_dcb_gstate(struct sk_buff *skb, struct genl_info *info) ++{ ++ int ret = -ENOMEM; ++ struct net_device *netdev = NULL; ++ struct ixgbe_adapter *adapter = NULL; ++ ++ if (!info->attrs[DCB_A_IFNAME]) ++ return -EINVAL; ++ ++ netdev = dev_get_by_name(&init_net, ++ nla_data(info->attrs[DCB_A_IFNAME])); ++ if (!netdev) ++ return -EINVAL; ++ ++ ret = ixgbe_dcb_check_adapter(netdev); ++ if (ret) ++ goto err_out; ++ else ++ adapter = netdev_priv(netdev); ++ ++ ret = ixgbe_nl_reply(!!(adapter->flags & IXGBE_FLAG_DCB_ENABLED), ++ DCB_C_GSTATE, DCB_A_STATE, info); ++ if (ret) ++ goto err_out; ++ ++err_out: ++ dev_put(netdev); ++ return ret; ++} ++ ++static int ixgbe_dcb_sstate(struct sk_buff *skb, struct genl_info *info) ++{ ++ struct net_device *netdev = NULL; ++ struct ixgbe_adapter *adapter = NULL; ++ int ret = -EINVAL; ++ u8 value; ++ ++ if (!info->attrs[DCB_A_IFNAME] || !info->attrs[DCB_A_STATE]) ++ goto err; ++ ++ netdev = dev_get_by_name(&init_net, ++ nla_data(info->attrs[DCB_A_IFNAME])); ++ if (!netdev) ++ goto err; ++ ++ ret = ixgbe_dcb_check_adapter(netdev); ++ if (ret) ++ goto err_out; ++ else ++ adapter = netdev_priv(netdev); ++ ++ value = nla_get_u8(info->attrs[DCB_A_STATE]); ++ if ((value & 1) != value) { ++ DPRINTK(DRV, ERR, "Value is not 1 or 0, it is %d.\n", value); ++ } else { ++ switch (value) { ++ case 0: ++ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { ++ if (netdev->flags & IFF_UP) ++#ifdef HAVE_NET_DEVICE_OPS ++ netdev->netdev_ops->ndo_stop(netdev); ++#else ++ netdev->stop(netdev); ++#endif ++ ixgbe_clear_interrupt_scheme(adapter); ++ ++ adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; ++ if (adapter->flags & IXGBE_FLAG_RSS_CAPABLE) ++ adapter->flags |= ++ IXGBE_FLAG_RSS_ENABLED; ++ ixgbe_init_interrupt_scheme(adapter); ++ ixgbe_reset(adapter); ++ if (netdev->flags & IFF_UP) ++#ifdef HAVE_NET_DEVICE_OPS ++ netdev->netdev_ops->ndo_open(netdev); ++#else ++ netdev->open(netdev); ++#endif ++ break; ++ } else { ++ /* Nothing to do, already off */ ++ goto out; ++ } ++ case 1: ++ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { ++ /* Nothing to do, already on */ ++ goto out; ++ } else if (!(adapter->flags & IXGBE_FLAG_DCB_CAPABLE)) { ++ DPRINTK(DRV, ERR, "Enable failed. 
Make sure " ++ "the driver can enable MSI-X.\n"); ++ ret = -EINVAL; ++ goto err_out; ++ } else { ++ if (netdev->flags & IFF_UP) ++#ifdef HAVE_NET_DEVICE_OPS ++ netdev->netdev_ops->ndo_stop(netdev); ++#else ++ netdev->stop(netdev); ++#endif ++ ixgbe_clear_interrupt_scheme(adapter); ++ ++ adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; ++ adapter->flags |= IXGBE_FLAG_DCB_ENABLED; ++ adapter->dcb_cfg.support.capabilities = ++ (IXGBE_DCB_PG_SUPPORT | IXGBE_DCB_PFC_SUPPORT | ++ IXGBE_DCB_GSP_SUPPORT); ++ if (adapter->hw.mac.type == ixgbe_mac_82599EB) { ++ DPRINTK(DRV, INFO, "DCB enabled, " ++ "disabling Flow Director\n"); ++ adapter->flags &= ++ ~IXGBE_FLAG_FDIR_HASH_CAPABLE; ++ adapter->flags &= ++ ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; ++ adapter->dcb_cfg.support.capabilities |= ++ IXGBE_DCB_UP2TC_SUPPORT; ++ } ++ adapter->ring_feature[RING_F_DCB].indices = 8; ++ ixgbe_init_interrupt_scheme(adapter); ++ ixgbe_reset(adapter); ++ if (netdev->flags & IFF_UP) ++#ifdef HAVE_NET_DEVICE_OPS ++ netdev->netdev_ops->ndo_open(netdev); ++#else ++ netdev->open(netdev); ++#endif ++ break; ++ } ++ } ++ } ++ ++out: ++ ret = ixgbe_nl_reply(0, DCB_C_SSTATE, DCB_A_STATE, info); ++ if (ret) ++ goto err_out; ++ ++err_out: ++ dev_put(netdev); ++err: ++ return ret; ++} ++ ++static int ixgbe_dcb_glink_spd(struct sk_buff *skb, struct genl_info *info) ++{ ++ int ret = -ENOMEM; ++ struct net_device *netdev = NULL; ++ struct ixgbe_adapter *adapter = NULL; ++ ++ if (!info->attrs[DCB_A_IFNAME]) ++ return -EINVAL; ++ ++ netdev = dev_get_by_name(&init_net, ++ nla_data(info->attrs[DCB_A_IFNAME])); ++ if (!netdev) ++ return -EINVAL; ++ ++ ret = ixgbe_dcb_check_adapter(netdev); ++ if (ret) ++ goto err_out; ++ else ++ adapter = netdev_priv(netdev); ++ ++ ret = ixgbe_nl_reply(adapter->dcb_cfg.link_speed & 0xff, ++ DCB_C_GLINK_SPD, DCB_A_LINK_SPD, info); ++ if (ret) ++ goto err_out; ++ ++err_out: ++ dev_put(netdev); ++ return ret; ++} ++ ++static int ixgbe_dcb_slink_spd(struct sk_buff *skb, struct genl_info *info) ++{ ++ struct net_device *netdev = NULL; ++ struct ixgbe_adapter *adapter = NULL; ++ int ret = -EINVAL; ++ u8 value; ++ ++ if (!info->attrs[DCB_A_IFNAME] || !info->attrs[DCB_A_LINK_SPD]) ++ goto err; ++ ++ netdev = dev_get_by_name(&init_net, ++ nla_data(info->attrs[DCB_A_IFNAME])); ++ if (!netdev) ++ goto err; ++ ++ ret = ixgbe_dcb_check_adapter(netdev); ++ if (ret) ++ goto err_out; ++ else ++ adapter = netdev_priv(netdev); ++ ++ value = nla_get_u8(info->attrs[DCB_A_LINK_SPD]); ++ if (value > 9) { ++ DPRINTK(DRV, ERR, "Value is not 0 thru 9, it is %d.\n", value); ++ } else { ++ if (!adapter->dcb_set_bitmap && ++ ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg, ++ adapter->ring_feature[RING_F_DCB].indices)) { ++ ret = -EINVAL; ++ goto err_out; ++ } ++ ++ adapter->temp_dcb_cfg.link_speed = value; ++ adapter->dcb_set_bitmap |= BIT_LINKSPEED; ++ } ++ ++ ret = ixgbe_nl_reply(0, DCB_C_SLINK_SPD, DCB_A_LINK_SPD, info); ++ if (ret) ++ goto err_out; ++ ++err_out: ++ dev_put(netdev); ++err: ++ return ret; ++} ++#endif ++ ++#ifdef CONFIG_DCB ++static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev, ++ u8 *perm_addr) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ int i, j; ++ ++ memset(perm_addr, 0xff, MAX_ADDR_LEN); ++ ++ for (i = 0; i < netdev->addr_len; i++) ++ perm_addr[i] = adapter->hw.mac.perm_addr[i]; ++ ++ if (adapter->hw.mac.type == ixgbe_mac_82599EB) { ++ for (j = 0; j < netdev->addr_len; j++, i++) ++ perm_addr[i] = adapter->hw.mac.san_addr[j]; ++ } ++} ++#else ++static int 
ixgbe_dcb_gperm_hwaddr(struct sk_buff *skb, struct genl_info *info) ++{ ++ void *data; ++ struct sk_buff *dcb_skb = NULL; ++ struct nlattr *tb[IXGBE_DCB_PERM_HW_A_MAX + 1], *nest; ++ struct net_device *netdev = NULL; ++ struct ixgbe_adapter *adapter = NULL; ++ struct ixgbe_hw *hw = NULL; ++ int ret = -ENOMEM; ++ int i; ++ ++ if (!info->attrs[DCB_A_IFNAME] || !info->attrs[DCB_A_PERM_HWADDR]) ++ return -EINVAL; ++ ++ netdev = dev_get_by_name(&init_net, ++ nla_data(info->attrs[DCB_A_IFNAME])); ++ if (!netdev) ++ return -EINVAL; ++ ++ ret = ixgbe_dcb_check_adapter(netdev); ++ if (ret) ++ goto err_out; ++ else ++ adapter = netdev_priv(netdev); ++ ++ hw = &adapter->hw; ++ ++ ret = nla_parse_nested(tb, IXGBE_DCB_PERM_HW_A_MAX, ++ info->attrs[DCB_A_PERM_HWADDR], ++ dcb_perm_hwaddr_nest); ++ if (ret) ++ goto err; ++ ++ dcb_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); ++ if (!dcb_skb) ++ goto err; ++ ++ data = genlmsg_put_reply(dcb_skb, info, &dcb_family, 0, ++ DCB_C_GPERM_HWADDR); ++ if (!data) ++ goto err; ++ ++ nest = nla_nest_start(dcb_skb, DCB_A_PERM_HWADDR); ++ if (!nest) ++ goto err; ++ ++ for (i = 0; i < netdev->addr_len; i++) { ++ if (!tb[i+PERM_HW_A_0] && !tb[PERM_HW_A_ALL]) ++ goto err; ++ ++ ret = nla_put_u8(dcb_skb, DCB_A_PERM_HWADDR, ++ hw->mac.perm_addr[i]); ++ ++ if (ret) { ++ nla_nest_cancel(dcb_skb, nest); ++ goto err; ++ } ++ } ++ ++ nla_nest_end(dcb_skb, nest); ++ ++ genlmsg_end(dcb_skb, data); ++ ++ ret = genlmsg_reply(dcb_skb, info); ++ if (ret) ++ goto err; ++ ++ dev_put(netdev); ++ return 0; ++ ++err: ++ DPRINTK(DRV, ERR, "Error in get permanent hwaddr.\n"); ++ kfree(dcb_skb); ++err_out: ++ dev_put(netdev); ++ return ret; ++} ++#endif ++ ++#ifdef CONFIG_DCB ++static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc, ++ u8 prio, u8 bwg_id, u8 bw_pct, ++ u8 up_map) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ ++ if (prio != DCB_ATTR_VALUE_UNDEFINED) ++ adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type = prio; ++ if (bwg_id != DCB_ATTR_VALUE_UNDEFINED) ++ adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_id = bwg_id; ++ if (bw_pct != DCB_ATTR_VALUE_UNDEFINED) ++ adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent = ++ bw_pct; ++ if (up_map != DCB_ATTR_VALUE_UNDEFINED) ++ adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap = ++ up_map; ++ ++ if ((adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type != ++ adapter->dcb_cfg.tc_config[tc].path[0].prio_type) || ++ (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_id != ++ adapter->dcb_cfg.tc_config[tc].path[0].bwg_id) || ++ (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent != ++ adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent) || ++ (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap != ++ adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap)) { ++ adapter->dcb_set_bitmap |= BIT_PG_TX; ++ adapter->dcb_set_bitmap |= BIT_RESETLINK; ++ } ++} ++ ++static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id, ++ u8 bw_pct) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ ++ adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct; ++ ++ if (adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] != ++ adapter->dcb_cfg.bw_percentage[0][bwg_id]) { ++ adapter->dcb_set_bitmap |= BIT_PG_TX; ++ adapter->dcb_set_bitmap |= BIT_RESETLINK; ++ } ++} ++ ++static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc, ++ u8 prio, u8 bwg_id, u8 bw_pct, ++ u8 up_map) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ ++ if (prio != 
DCB_ATTR_VALUE_UNDEFINED) ++ adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type = prio; ++ if (bwg_id != DCB_ATTR_VALUE_UNDEFINED) ++ adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_id = bwg_id; ++ if (bw_pct != DCB_ATTR_VALUE_UNDEFINED) ++ adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent = ++ bw_pct; ++ if (up_map != DCB_ATTR_VALUE_UNDEFINED) ++ adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap = ++ up_map; ++ ++ if ((adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type != ++ adapter->dcb_cfg.tc_config[tc].path[1].prio_type) || ++ (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_id != ++ adapter->dcb_cfg.tc_config[tc].path[1].bwg_id) || ++ (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent != ++ adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent) || ++ (adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap != ++ adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap)) { ++ adapter->dcb_set_bitmap |= BIT_PG_RX; ++ adapter->dcb_set_bitmap |= BIT_RESETLINK; ++ } ++} ++ ++static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, ++ u8 bw_pct) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ ++ adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct; ++ ++ if (adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] != ++ adapter->dcb_cfg.bw_percentage[1][bwg_id]) { ++ adapter->dcb_set_bitmap |= BIT_PG_RX; ++ adapter->dcb_set_bitmap |= BIT_RESETLINK; ++ } ++} ++ ++static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, ++ u8 *prio, u8 *bwg_id, u8 *bw_pct, ++ u8 *up_map) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ ++ *prio = adapter->dcb_cfg.tc_config[tc].path[0].prio_type; ++ *bwg_id = adapter->dcb_cfg.tc_config[tc].path[0].bwg_id; ++ *bw_pct = adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent; ++ *up_map = adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap; ++} ++ ++static void ixgbe_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id, ++ u8 *bw_pct) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ ++ *bw_pct = adapter->dcb_cfg.bw_percentage[0][bwg_id]; ++} ++ ++static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc, ++ u8 *prio, u8 *bwg_id, u8 *bw_pct, ++ u8 *up_map) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ ++ *prio = adapter->dcb_cfg.tc_config[tc].path[1].prio_type; ++ *bwg_id = adapter->dcb_cfg.tc_config[tc].path[1].bwg_id; ++ *bw_pct = adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent; ++ *up_map = adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap; ++} ++ ++static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, ++ u8 *bw_pct) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ ++ *bw_pct = adapter->dcb_cfg.bw_percentage[1][bwg_id]; ++} ++#else ++static int ixgbe_dcb_pg_scfg(struct sk_buff *skb, struct genl_info *info, ++ int dir) ++{ ++ struct net_device *netdev = NULL; ++ struct ixgbe_adapter *adapter = NULL; ++ struct tc_configuration *tc_config = NULL; ++ struct tc_configuration *tc_tmpcfg = NULL; ++ struct nlattr *pg_tb[IXGBE_DCB_PG_A_MAX + 1]; ++ struct nlattr *param_tb[IXGBE_DCB_TC_A_PARAM_MAX + 1]; ++ int i, ret, tc_max; ++ u8 value; ++ u8 changed = 0; ++ ++ if (!info->attrs[DCB_A_IFNAME] || !info->attrs[DCB_A_PG_CFG]) ++ return -EINVAL; ++ ++ netdev = dev_get_by_name(&init_net, ++ nla_data(info->attrs[DCB_A_IFNAME])); ++ if (!netdev) ++ return -EINVAL; ++ ++ ret = ixgbe_dcb_check_adapter(netdev); ++ if (ret) ++ goto err; ++ else ++ adapter = netdev_priv(netdev); ++ ++ ret 
= nla_parse_nested(pg_tb, IXGBE_DCB_PG_A_MAX, ++ info->attrs[DCB_A_PG_CFG], dcb_pg_nest); ++ if (ret) ++ goto err; ++ ++ if (!adapter->dcb_set_bitmap && ++ ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg, ++ adapter->ring_feature[RING_F_DCB].indices)) ++ goto err; ++ ++ tc_max = adapter->ring_feature[RING_F_DCB].indices; ++ for (i = PG_A_TC_0; i < tc_max + PG_A_TC_0; i++) { ++ if (!pg_tb[i]) ++ continue; ++ ++ ret = nla_parse_nested(param_tb, IXGBE_DCB_TC_A_PARAM_MAX, ++ pg_tb[i], dcb_tc_param_nest); ++ if (ret) ++ goto err; ++ ++ tc_config = &adapter->dcb_cfg.tc_config[i - PG_A_TC_0]; ++ tc_tmpcfg = &adapter->temp_dcb_cfg.tc_config[i - PG_A_TC_0]; ++ if (param_tb[TC_A_PARAM_STRICT_PRIO]) { ++ value = nla_get_u8(param_tb[TC_A_PARAM_STRICT_PRIO]); ++ tc_tmpcfg->path[dir].prio_type = value; ++ if (tc_tmpcfg->path[dir].prio_type != ++ tc_config->path[dir].prio_type) ++ changed = 1; ++ } ++ if (param_tb[TC_A_PARAM_BW_GROUP_ID]) { ++ value = nla_get_u8(param_tb[TC_A_PARAM_BW_GROUP_ID]); ++ tc_tmpcfg->path[dir].bwg_id = value; ++ if (tc_tmpcfg->path[dir].bwg_id != ++ tc_config->path[dir].bwg_id) ++ changed = 1; ++ } ++ if (param_tb[TC_A_PARAM_BW_PCT_IN_GROUP]) { ++ value = nla_get_u8(param_tb[TC_A_PARAM_BW_PCT_IN_GROUP]); ++ tc_tmpcfg->path[dir].bwg_percent = value; ++ if (tc_tmpcfg->path[dir].bwg_percent != ++ tc_config->path[dir].bwg_percent) ++ changed = 1; ++ } ++ if (param_tb[TC_A_PARAM_UP_MAPPING]) { ++ value = nla_get_u8(param_tb[TC_A_PARAM_UP_MAPPING]); ++ tc_tmpcfg->path[dir].up_to_tc_bitmap = value; ++ if (tc_tmpcfg->path[dir].up_to_tc_bitmap != ++ tc_config->path[dir].up_to_tc_bitmap) ++ changed = 1; ++ } ++ } ++ ++ for (i = PG_A_BWG_0; i < PG_A_BWG_MAX; i++) { ++ if (!pg_tb[i]) ++ continue; ++ ++ value = nla_get_u8(pg_tb[i]); ++ adapter->temp_dcb_cfg.bw_percentage[dir][i-PG_A_BWG_0] = value; ++ ++ if (adapter->temp_dcb_cfg.bw_percentage[dir][i-PG_A_BWG_0] != ++ adapter->dcb_cfg.bw_percentage[dir][i-PG_A_BWG_0]) ++ changed = 1; ++ } ++ ++ adapter->temp_dcb_cfg.round_robin_enable = false; ++ ++ if (changed) { ++ if (dir == DCB_TX_CONFIG) ++ adapter->dcb_set_bitmap |= BIT_PG_TX; ++ else ++ adapter->dcb_set_bitmap |= BIT_PG_RX; ++ ++ adapter->dcb_set_bitmap |= BIT_RESETLINK; ++ } ++ ++ ret = ixgbe_nl_reply(0, (dir? 
DCB_C_PGRX_SCFG : DCB_C_PGTX_SCFG), ++ DCB_A_PG_CFG, info); ++ if (ret) ++ goto err; ++ ++err: ++ dev_put(netdev); ++ return ret; ++} ++ ++static int ixgbe_dcb_pgtx_scfg(struct sk_buff *skb, struct genl_info *info) ++{ ++ return ixgbe_dcb_pg_scfg(skb, info, DCB_TX_CONFIG); ++} ++ ++static int ixgbe_dcb_pgrx_scfg(struct sk_buff *skb, struct genl_info *info) ++{ ++ return ixgbe_dcb_pg_scfg(skb, info, DCB_RX_CONFIG); ++} ++ ++static int ixgbe_dcb_pg_gcfg(struct sk_buff *skb, struct genl_info *info, ++ int dir) ++{ ++ void *data; ++ struct sk_buff *dcb_skb = NULL; ++ struct nlattr *pg_nest, *param_nest, *tb; ++ struct nlattr *pg_tb[IXGBE_DCB_PG_A_MAX + 1]; ++ struct nlattr *param_tb[IXGBE_DCB_TC_A_PARAM_MAX + 1]; ++ struct net_device *netdev = NULL; ++ struct ixgbe_adapter *adapter = NULL; ++ struct tc_configuration *tc_config = NULL; ++ struct tc_bw_alloc *tc = NULL; ++ int ret = -ENOMEM; ++ int i, tc_max; ++ ++ if (!info->attrs[DCB_A_IFNAME] || !info->attrs[DCB_A_PG_CFG]) ++ return -EINVAL; ++ ++ netdev = dev_get_by_name(&init_net, ++ nla_data(info->attrs[DCB_A_IFNAME])); ++ if (!netdev) ++ return -EINVAL; ++ ++ ret = ixgbe_dcb_check_adapter(netdev); ++ if (ret) ++ goto err_out; ++ else ++ adapter = netdev_priv(netdev); ++ ++ ret = nla_parse_nested(pg_tb, IXGBE_DCB_PG_A_MAX, ++ info->attrs[DCB_A_PG_CFG], dcb_pg_nest); ++ if (ret) ++ goto err; ++ ++ dcb_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); ++ if (!dcb_skb) ++ goto err; ++ ++ data = genlmsg_put_reply(dcb_skb, info, &dcb_family, 0, ++ (dir) ? DCB_C_PGRX_GCFG : DCB_C_PGTX_GCFG); ++ ++ if (!data) ++ goto err; ++ ++ pg_nest = nla_nest_start(dcb_skb, DCB_A_PG_CFG); ++ if (!pg_nest) ++ goto err; ++ ++ tc_max = adapter->ring_feature[RING_F_DCB].indices; ++ for (i = PG_A_TC_0; i < tc_max + PG_A_TC_0; i++) { ++ if (!pg_tb[i] && !pg_tb[PG_A_TC_ALL]) ++ continue; ++ ++ if (pg_tb[PG_A_TC_ALL]) ++ tb = pg_tb[PG_A_TC_ALL]; ++ else ++ tb = pg_tb[i]; ++ ret = nla_parse_nested(param_tb, IXGBE_DCB_TC_A_PARAM_MAX, ++ tb, dcb_tc_param_nest); ++ if (ret) ++ goto err_pg; ++ ++ param_nest = nla_nest_start(dcb_skb, i); ++ if (!param_nest) ++ goto err_pg; ++ ++ tc_config = &adapter->dcb_cfg.tc_config[i - PG_A_TC_0]; ++ tc = &adapter->dcb_cfg.tc_config[i - PG_A_TC_0].path[dir]; ++ ++ if (param_tb[TC_A_PARAM_STRICT_PRIO] || ++ param_tb[TC_A_PARAM_ALL]) { ++ ret = nla_put_u8(dcb_skb, TC_A_PARAM_STRICT_PRIO, ++ tc->prio_type); ++ if (ret) ++ goto err_param; ++ } ++ if (param_tb[TC_A_PARAM_BW_GROUP_ID] || ++ param_tb[TC_A_PARAM_ALL]) { ++ ret = nla_put_u8(dcb_skb, TC_A_PARAM_BW_GROUP_ID, ++ tc->bwg_id); ++ if (ret) ++ goto err_param; ++ } ++ if (param_tb[TC_A_PARAM_BW_PCT_IN_GROUP] || ++ param_tb[TC_A_PARAM_ALL]) { ++ ret = nla_put_u8(dcb_skb, TC_A_PARAM_BW_PCT_IN_GROUP, ++ tc->bwg_percent); ++ if (ret) ++ goto err_param; ++ } ++ if (param_tb[TC_A_PARAM_UP_MAPPING] || ++ param_tb[TC_A_PARAM_ALL]) { ++ ret = nla_put_u8(dcb_skb, TC_A_PARAM_UP_MAPPING, ++ tc->up_to_tc_bitmap); ++ if (ret) ++ goto err_param; ++ } ++ nla_nest_end(dcb_skb, param_nest); ++ } ++ ++ for (i = PG_A_BWG_0; i < PG_A_BWG_MAX; i++) { ++ if (!pg_tb[i] && !pg_tb[PG_A_BWG_ALL]) ++ continue; ++ ++ ret = nla_put_u8(dcb_skb, i, ++ adapter->dcb_cfg.bw_percentage[dir][i-PG_A_BWG_0]); ++ ++ if (ret) ++ goto err_pg; ++ } ++ ++ nla_nest_end(dcb_skb, pg_nest); ++ ++ genlmsg_end(dcb_skb, data); ++ ret = genlmsg_reply(dcb_skb, info); ++ if (ret) ++ goto err; ++ ++ dev_put(netdev); ++ return 0; ++ ++err_param: ++ DPRINTK(DRV, ERR, "Error in get pg %s.\n", dir?"rx":"tx"); ++ nla_nest_cancel(dcb_skb, 
param_nest); ++err_pg: ++ nla_nest_cancel(dcb_skb, pg_nest); ++err: ++ kfree(dcb_skb); ++err_out: ++ dev_put(netdev); ++ return ret; ++} ++ ++static int ixgbe_dcb_pgtx_gcfg(struct sk_buff *skb, struct genl_info *info) ++{ ++ return ixgbe_dcb_pg_gcfg(skb, info, DCB_TX_CONFIG); ++} ++ ++static int ixgbe_dcb_pgrx_gcfg(struct sk_buff *skb, struct genl_info *info) ++{ ++ return ixgbe_dcb_pg_gcfg(skb, info, DCB_RX_CONFIG); ++} ++#endif ++ ++#ifdef CONFIG_DCB ++static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority, ++ u8 setting) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ ++ adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc = setting; ++ if (adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc != ++ adapter->dcb_cfg.tc_config[priority].dcb_pfc) { ++ adapter->dcb_set_bitmap |= BIT_PFC; ++ } ++} ++ ++static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority, ++ u8 *setting) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ ++ *setting = adapter->dcb_cfg.tc_config[priority].dcb_pfc; ++} ++#else ++static int ixgbe_dcb_spfccfg(struct sk_buff *skb, struct genl_info *info) ++{ ++ struct nlattr *tb[IXGBE_DCB_PFC_A_UP_MAX + 1]; ++ struct net_device *netdev = NULL; ++ struct ixgbe_adapter *adapter = NULL; ++ int i, ret = -ENOMEM; ++ u8 setting; ++ u8 changed = 0; ++ ++ netdev = dev_get_by_name(&init_net, ++ nla_data(info->attrs[DCB_A_IFNAME])); ++ if (!netdev) ++ return -EINVAL; ++ ++ adapter = netdev_priv(netdev); ++ ++ if (!info->attrs[DCB_A_IFNAME] || !info->attrs[DCB_A_PFC_CFG]) ++ return -EINVAL; ++ ++ ret = ixgbe_dcb_check_adapter(netdev); ++ if (ret) ++ goto err; ++ else ++ adapter = netdev_priv(netdev); ++ ++ ret = nla_parse_nested(tb, IXGBE_DCB_PFC_A_UP_MAX, ++ info->attrs[DCB_A_PFC_CFG], ++ dcb_pfc_up_nest); ++ if (ret) ++ goto err; ++ ++ if (!adapter->dcb_set_bitmap && ++ ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg, ++ adapter->ring_feature[RING_F_DCB].indices)) { ++ ret = -EINVAL; ++ goto err; ++ } ++ ++ for (i = PFC_A_UP_0; i < PFC_A_UP_MAX; i++) { ++ if (!tb[i]) ++ continue; ++ ++ setting = nla_get_u8(tb[i]); ++ adapter->temp_dcb_cfg.tc_config[i-PFC_A_UP_0].dcb_pfc = setting; ++ ++ if (adapter->temp_dcb_cfg.tc_config[i-PFC_A_UP_0].dcb_pfc != ++ adapter->dcb_cfg.tc_config[i-PFC_A_UP_0].dcb_pfc) ++ changed = 1; ++ } ++ ++ if (changed) ++ adapter->dcb_set_bitmap |= BIT_PFC; ++ ++ ret = ixgbe_nl_reply(0, DCB_C_PFC_SCFG, DCB_A_PFC_CFG, info); ++ if (ret) ++ goto err; ++ ++err: ++ dev_put(netdev); ++ return ret; ++} ++ ++static int ixgbe_dcb_gpfccfg(struct sk_buff *skb, struct genl_info *info) ++{ ++ void *data; ++ struct sk_buff *dcb_skb = NULL; ++ struct nlattr *tb[IXGBE_DCB_PFC_A_UP_MAX + 1], *nest; ++ struct net_device *netdev = NULL; ++ struct ixgbe_adapter *adapter = NULL; ++ int ret = -ENOMEM; ++ int i; ++ ++ if (!info->attrs[DCB_A_IFNAME] || !info->attrs[DCB_A_PFC_CFG]) ++ return -EINVAL; ++ ++ netdev = dev_get_by_name(&init_net, ++ nla_data(info->attrs[DCB_A_IFNAME])); ++ if (!netdev) ++ return -EINVAL; ++ ++ ret = ixgbe_dcb_check_adapter(netdev); ++ if (ret) ++ goto err_out; ++ else ++ adapter = netdev_priv(netdev); ++ ++ ret = nla_parse_nested(tb, IXGBE_DCB_PFC_A_UP_MAX, ++ info->attrs[DCB_A_PFC_CFG], dcb_pfc_up_nest); ++ if (ret) ++ goto err; ++ ++ dcb_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); ++ if (!dcb_skb) ++ goto err; ++ ++ data = genlmsg_put_reply(dcb_skb, info, &dcb_family, 0, ++ DCB_C_PFC_GCFG); ++ if (!data) ++ goto err; ++ ++ nest = nla_nest_start(dcb_skb, DCB_A_PFC_CFG); ++ if 
(!nest) ++ goto err; ++ ++ for (i = PFC_A_UP_0; i < PFC_A_UP_MAX; i++) { ++ if (!tb[i] && !tb[PFC_A_UP_ALL]) ++ continue; ++ ++ ret = nla_put_u8(dcb_skb, i, ++ adapter->dcb_cfg.tc_config[i-PFC_A_UP_0].dcb_pfc); ++ if (ret) { ++ nla_nest_cancel(dcb_skb, nest); ++ goto err; ++ } ++ } ++ ++ nla_nest_end(dcb_skb, nest); ++ ++ genlmsg_end(dcb_skb, data); ++ ++ ret = genlmsg_reply(dcb_skb, info); ++ if (ret) ++ goto err; ++ ++ dev_put(netdev); ++ return 0; ++ ++err: ++ DPRINTK(DRV, ERR, "Error in get pfc stats.\n"); ++ kfree(dcb_skb); ++err_out: ++ dev_put(netdev); ++ return ret; ++} ++#endif ++ ++#ifdef CONFIG_DCB ++static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ int ret; ++ ++ if (!adapter->dcb_set_bitmap) ++ return DCB_NO_HW_CHG; ++ ++ ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg, ++ adapter->ring_feature[RING_F_DCB].indices); ++ if (ret) ++ return DCB_NO_HW_CHG; ++ ++ /* Only take down the adapter if the configuration change ++ * requires a reset. ++ */ ++ if (adapter->dcb_set_bitmap & BIT_RESETLINK) { ++ while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) ++ msleep(1); ++ ++ if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) { ++ if (netif_running(netdev)) ++#ifdef HAVE_NET_DEVICE_OPS ++ netdev->netdev_ops->ndo_stop(netdev); ++#else ++ netdev->stop(netdev); ++#endif ++ ixgbe_clear_interrupt_scheme(adapter); ++ } else { ++ if (netif_running(netdev)) ++ ixgbe_down(adapter); ++ } ++ } ++ ++ if (adapter->dcb_cfg.pfc_mode_enable) { ++ switch (adapter->hw.mac.type) { ++ case ixgbe_mac_82599EB: ++ if (adapter->hw.fc.current_mode != ixgbe_fc_pfc) ++ adapter->last_lfc_mode = adapter->hw.fc.current_mode; ++ break; ++ default: ++ break; ++ } ++ adapter->hw.fc.requested_mode = ixgbe_fc_pfc; ++ } else { ++ switch (adapter->hw.mac.type) { ++ case ixgbe_mac_82598EB: ++ adapter->hw.fc.requested_mode = ixgbe_fc_none; ++ break; ++ case ixgbe_mac_82599EB: ++ adapter->hw.fc.requested_mode = adapter->last_lfc_mode; ++ break; ++ default: ++ break; ++ } ++ } ++ ++ if (adapter->dcb_set_bitmap & BIT_RESETLINK) { ++ if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) { ++ ixgbe_init_interrupt_scheme(adapter); ++ if (netif_running(netdev)) ++#ifdef HAVE_NET_DEVICE_OPS ++ netdev->netdev_ops->ndo_open(netdev); ++#else ++ netdev->open(netdev); ++#endif ++ } else { ++ if (netif_running(netdev)) ++ ixgbe_up(adapter); ++ } ++ ret = DCB_HW_CHG_RST; ++ } else if (adapter->dcb_set_bitmap & BIT_PFC) { ++ if (adapter->hw.mac.type == ixgbe_mac_82598EB) ++ ixgbe_dcb_config_pfc_82598(&adapter->hw, ++ &adapter->dcb_cfg); ++ else if (adapter->hw.mac.type == ixgbe_mac_82599EB) ++ ixgbe_dcb_config_pfc_82599(&adapter->hw, ++ &adapter->dcb_cfg); ++ ret = DCB_HW_CHG; ++ } ++ if (adapter->dcb_cfg.pfc_mode_enable) ++ adapter->hw.fc.current_mode = ixgbe_fc_pfc; ++ ++ if (adapter->dcb_set_bitmap & BIT_RESETLINK) ++ clear_bit(__IXGBE_RESETTING, &adapter->state); ++ adapter->dcb_set_bitmap = 0x00; ++ return ret; ++} ++#else ++static int ixgbe_dcb_set_all(struct sk_buff *skb, struct genl_info *info) ++{ ++ struct net_device *netdev = NULL; ++ struct ixgbe_adapter *adapter = NULL; ++ int ret = -ENOMEM; ++ u8 value; ++ u8 retval = 0; ++ ++ if (!info->attrs[DCB_A_IFNAME] || !info->attrs[DCB_A_SET_ALL]) ++ goto err; ++ ++ netdev = dev_get_by_name(&init_net, ++ nla_data(info->attrs[DCB_A_IFNAME])); ++ if (!netdev) ++ goto err; ++ ++ ret = ixgbe_dcb_check_adapter(netdev); ++ if (ret) ++ goto err_out; ++ else ++ adapter = netdev_priv(netdev); ++ ++ if 
(!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE)) { ++ ret = -EINVAL; ++ goto err_out; ++ } ++ ++ value = nla_get_u8(info->attrs[DCB_A_SET_ALL]); ++ if ((value & 1) != value) { ++ DPRINTK(DRV, ERR, "Value is not 1 or 0, it is %d.\n", value); ++ } else { ++ if (!adapter->dcb_set_bitmap) { ++ retval = 1; ++ goto out; ++ } ++ ++ while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) ++ msleep(1); ++ ++ ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, ++ &adapter->dcb_cfg, ++ adapter->ring_feature[RING_F_DCB].indices); ++ if (ret) { ++ clear_bit(__IXGBE_RESETTING, &adapter->state); ++ goto err_out; ++ } ++ ++ ixgbe_down(adapter); ++ ixgbe_up(adapter); ++ adapter->dcb_set_bitmap = 0x00; ++ clear_bit(__IXGBE_RESETTING, &adapter->state); ++ } ++ ++out: ++ ret = ixgbe_nl_reply(retval, DCB_C_SET_ALL, DCB_A_SET_ALL, info); ++ if (ret) ++ goto err_out; ++ ++err_out: ++ dev_put(netdev); ++err: ++ return ret; ++} ++#endif ++ ++#ifdef CONFIG_DCB ++static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ u8 rval = 0; ++ ++ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { ++ switch (capid) { ++ case DCB_CAP_ATTR_PG: ++ *cap = true; ++ break; ++ case DCB_CAP_ATTR_PFC: ++ *cap = true; ++ break; ++ case DCB_CAP_ATTR_UP2TC: ++ *cap = false; ++ break; ++ case DCB_CAP_ATTR_PG_TCS: ++ *cap = 0x80; ++ break; ++ case DCB_CAP_ATTR_PFC_TCS: ++ *cap = 0x80; ++ break; ++ case DCB_CAP_ATTR_GSP: ++ *cap = true; ++ break; ++ case DCB_CAP_ATTR_BCN: ++ *cap = false; ++ break; ++ default: ++ rval = -EINVAL; ++ break; ++ } ++ } else { ++ rval = -EINVAL; ++ } ++ ++ return rval; ++} ++ ++static u8 ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ u8 rval = 0; ++ ++ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { ++ switch (tcid) { ++ case DCB_NUMTCS_ATTR_PG: ++ *num = adapter->dcb_cfg.num_tcs.pg_tcs; ++ break; ++ case DCB_NUMTCS_ATTR_PFC: ++ *num = adapter->dcb_cfg.num_tcs.pfc_tcs; ++ break; ++ default: ++ rval = -EINVAL; ++ break; ++ } ++ } else { ++ rval = -EINVAL; ++ } ++ ++ return rval; ++} ++ ++static u8 ixgbe_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ u8 rval = 0; ++ ++ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { ++ switch (tcid) { ++ case DCB_NUMTCS_ATTR_PG: ++ adapter->dcb_cfg.num_tcs.pg_tcs = num; ++ break; ++ case DCB_NUMTCS_ATTR_PFC: ++ adapter->dcb_cfg.num_tcs.pfc_tcs = num; ++ break; ++ default: ++ rval = -EINVAL; ++ break; ++ } ++ } else { ++ rval = -EINVAL; ++ } ++ ++ return rval; ++} ++ ++static u8 ixgbe_dcbnl_getpfcstate(struct net_device *netdev) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ ++ return adapter->dcb_cfg.pfc_mode_enable; ++} ++ ++static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ ++ adapter->temp_dcb_cfg.pfc_mode_enable = state; ++ if (adapter->temp_dcb_cfg.pfc_mode_enable != ++ adapter->dcb_cfg.pfc_mode_enable) ++ adapter->dcb_set_bitmap |= BIT_PFC; ++ return; ++} ++ ++#ifdef HAVE_DCBNL_OPS_GETAPP ++/** ++ * ixgbe_dcbnl_getapp - retrieve the DCBX application user priority ++ * @netdev : the corresponding netdev ++ * @idtype : identifies the id as ether type or TCP/UDP port number ++ * @id: id is either ether type or TCP/UDP port number ++ * ++ * Returns : on success, returns a non-zero 802.1p user priority bitmap ++ * otherwise returns 0 as the invalid 
user priority bitmap to indicate an ++ * error. ++ */ ++static u8 ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id) ++{ ++ u8 rval = 0; ++ ++ switch (idtype) { ++ case DCB_APP_IDTYPE_ETHTYPE: ++#ifdef IXGBE_FCOE ++ if (id == ETH_P_FCOE) ++ rval = ixgbe_fcoe_getapp(netdev_priv(netdev)); ++#endif ++ break; ++ case DCB_APP_IDTYPE_PORTNUM: ++ break; ++ default: ++ break; ++ } ++ return rval; ++} ++ ++/** ++ * ixgbe_dcbnl_setapp - set the DCBX application user priority ++ * @netdev : the corresponding netdev ++ * @idtype : identifies the id as ether type or TCP/UDP port number ++ * @id: id is either ether type or TCP/UDP port number ++ * @up: the 802.1p user priority bitmap ++ * ++ * Returns : 0 on success or 1 on error ++ */ ++static u8 ixgbe_dcbnl_setapp(struct net_device *netdev, ++ u8 idtype, u16 id, u8 up) ++{ ++ u8 rval = 1; ++ ++ switch (idtype) { ++ case DCB_APP_IDTYPE_ETHTYPE: ++#ifdef IXGBE_FCOE ++ if (id == ETH_P_FCOE) { ++ u8 tc; ++ struct ixgbe_adapter *adapter; ++ ++ adapter = netdev_priv(netdev); ++ tc = adapter->fcoe.tc; ++ rval = ixgbe_fcoe_setapp(adapter, up); ++ if ((!rval) && (tc != adapter->fcoe.tc) && ++ (adapter->flags & IXGBE_FLAG_DCB_ENABLED) && ++ (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) { ++ adapter->dcb_set_bitmap |= BIT_APP_UPCHG; ++ adapter->dcb_set_bitmap |= BIT_RESETLINK; ++ } ++ } ++#endif ++ break; ++ case DCB_APP_IDTYPE_PORTNUM: ++ break; ++ default: ++ break; ++ } ++ return rval; ++} ++#endif /* HAVE_DCBNL_OPS_GETAPP */ ++ ++#else ++#endif ++ ++#ifdef CONFIG_DCB ++struct dcbnl_rtnl_ops dcbnl_ops = { ++ .getstate = ixgbe_dcbnl_get_state, ++ .setstate = ixgbe_dcbnl_set_state, ++ .getpermhwaddr = ixgbe_dcbnl_get_perm_hw_addr, ++ .setpgtccfgtx = ixgbe_dcbnl_set_pg_tc_cfg_tx, ++ .setpgbwgcfgtx = ixgbe_dcbnl_set_pg_bwg_cfg_tx, ++ .setpgtccfgrx = ixgbe_dcbnl_set_pg_tc_cfg_rx, ++ .setpgbwgcfgrx = ixgbe_dcbnl_set_pg_bwg_cfg_rx, ++ .getpgtccfgtx = ixgbe_dcbnl_get_pg_tc_cfg_tx, ++ .getpgbwgcfgtx = ixgbe_dcbnl_get_pg_bwg_cfg_tx, ++ .getpgtccfgrx = ixgbe_dcbnl_get_pg_tc_cfg_rx, ++ .getpgbwgcfgrx = ixgbe_dcbnl_get_pg_bwg_cfg_rx, ++ .setpfccfg = ixgbe_dcbnl_set_pfc_cfg, ++ .getpfccfg = ixgbe_dcbnl_get_pfc_cfg, ++ .setall = ixgbe_dcbnl_set_all, ++ .getcap = ixgbe_dcbnl_getcap, ++ .getnumtcs = ixgbe_dcbnl_getnumtcs, ++ .setnumtcs = ixgbe_dcbnl_setnumtcs, ++ .getpfcstate = ixgbe_dcbnl_getpfcstate, ++ .setpfcstate = ixgbe_dcbnl_setpfcstate, ++#ifdef HAVE_DCBNL_OPS_GETAPP ++ .getapp = ixgbe_dcbnl_getapp, ++ .setapp = ixgbe_dcbnl_setapp, ++#endif ++}; ++#else ++/* DCB Generic NETLINK command Definitions */ ++/* Get DCB Admin Mode */ ++static struct genl_ops ixgbe_dcb_genl_c_gstate = { ++ .cmd = DCB_C_GSTATE, ++ .flags = GENL_ADMIN_PERM, ++ .policy = dcb_genl_policy, ++ .doit = ixgbe_dcb_gstate, ++ .dumpit = NULL, ++}; ++ ++/* Set DCB Admin Mode */ ++static struct genl_ops ixgbe_dcb_genl_c_sstate = { ++ .cmd = DCB_C_SSTATE, ++ .flags = GENL_ADMIN_PERM, ++ .policy = dcb_genl_policy, ++ .doit = ixgbe_dcb_sstate, ++ .dumpit = NULL, ++}; ++ ++/* Set TX Traffic Attributes */ ++static struct genl_ops ixgbe_dcb_genl_c_spgtx = { ++ .cmd = DCB_C_PGTX_SCFG, ++ .flags = GENL_ADMIN_PERM, ++ .policy = dcb_genl_policy, ++ .doit = ixgbe_dcb_pgtx_scfg, ++ .dumpit = NULL, ++}; ++ ++/* Set RX Traffic Attributes */ ++static struct genl_ops ixgbe_dcb_genl_c_spgrx = { ++ .cmd = DCB_C_PGRX_SCFG, ++ .flags = GENL_ADMIN_PERM, ++ .policy = dcb_genl_policy, ++ .doit = ixgbe_dcb_pgrx_scfg, ++ .dumpit = NULL, ++}; ++ ++/* Set PFC CFG */ ++static struct genl_ops ixgbe_dcb_genl_c_spfc = { ++ 
.cmd = DCB_C_PFC_SCFG, ++ .flags = GENL_ADMIN_PERM, ++ .policy = dcb_genl_policy, ++ .doit = ixgbe_dcb_spfccfg, ++ .dumpit = NULL, ++}; ++ ++/* Get TX Traffic Attributes */ ++static struct genl_ops ixgbe_dcb_genl_c_gpgtx = { ++ .cmd = DCB_C_PGTX_GCFG, ++ .flags = GENL_ADMIN_PERM, ++ .policy = dcb_genl_policy, ++ .doit = ixgbe_dcb_pgtx_gcfg, ++ .dumpit = NULL, ++}; ++ ++/* Get RX Traffic Attributes */ ++static struct genl_ops ixgbe_dcb_genl_c_gpgrx = { ++ .cmd = DCB_C_PGRX_GCFG, ++ .flags = GENL_ADMIN_PERM, ++ .policy = dcb_genl_policy, ++ .doit = ixgbe_dcb_pgrx_gcfg, ++ .dumpit = NULL, ++}; ++ ++/* Get PFC CFG */ ++static struct genl_ops ixgbe_dcb_genl_c_gpfc = { ++ .cmd = DCB_C_PFC_GCFG, ++ .flags = GENL_ADMIN_PERM, ++ .policy = dcb_genl_policy, ++ .doit = ixgbe_dcb_gpfccfg, ++ .dumpit = NULL, ++}; ++ ++ ++/* Get Link Speed setting */ ++static struct genl_ops ixgbe_dcb_genl_c_glink_spd = { ++ .cmd = DCB_C_GLINK_SPD, ++ .flags = GENL_ADMIN_PERM, ++ .policy = dcb_genl_policy, ++ .doit = ixgbe_dcb_glink_spd, ++ .dumpit = NULL, ++}; ++ ++/* Set Link Speed setting */ ++static struct genl_ops ixgbe_dcb_genl_c_slink_spd = { ++ .cmd = DCB_C_SLINK_SPD, ++ .flags = GENL_ADMIN_PERM, ++ .policy = dcb_genl_policy, ++ .doit = ixgbe_dcb_slink_spd, ++ .dumpit = NULL, ++}; ++ ++/* Set all "set" feature */ ++static struct genl_ops ixgbe_dcb_genl_c_set_all= { ++ .cmd = DCB_C_SET_ALL, ++ .flags = GENL_ADMIN_PERM, ++ .policy = dcb_genl_policy, ++ .doit = ixgbe_dcb_set_all, ++ .dumpit = NULL, ++}; ++ ++/* Get permanent HW address */ ++static struct genl_ops ixgbe_dcb_genl_c_gperm_hwaddr = { ++ .cmd = DCB_C_GPERM_HWADDR, ++ .flags = GENL_ADMIN_PERM, ++ .policy = dcb_genl_policy, ++ .doit = ixgbe_dcb_gperm_hwaddr, ++ .dumpit = NULL, ++}; ++ ++/** ++ * ixgbe_dcb_netlink_register - Initialize the NETLINK communication channel ++ * ++ * Description: ++ * Call out to the DCB components so they can register their families and ++ * commands with Generic NETLINK mechanism. Return zero on success and ++ * non-zero on failure. 
++ *
++ */
++int ixgbe_dcb_netlink_register(void)
++{
++	int ret = 1;
++
++	/* consider writing as:
++	 * ret = genl_register_family(aaa)
++	 * || genl_register_ops(bbb, bbb)
++	 * || genl_register_ops(ccc, ccc);
++	 * if (ret)
++	 * goto err;
++	 */
++	ret = genl_register_family(&dcb_family);
++	if (ret)
++		return ret;
++
++	ret = genl_register_ops(&dcb_family, &ixgbe_dcb_genl_c_gstate);
++	if (ret)
++		goto err;
++
++	ret = genl_register_ops(&dcb_family, &ixgbe_dcb_genl_c_sstate);
++	if (ret)
++		goto err;
++
++	ret = genl_register_ops(&dcb_family, &ixgbe_dcb_genl_c_spgtx);
++	if (ret)
++		goto err;
++
++	ret = genl_register_ops(&dcb_family, &ixgbe_dcb_genl_c_spgrx);
++	if (ret)
++		goto err;
++
++	ret = genl_register_ops(&dcb_family, &ixgbe_dcb_genl_c_spfc);
++	if (ret)
++		goto err;
++
++	ret = genl_register_ops(&dcb_family, &ixgbe_dcb_genl_c_gpfc);
++	if (ret)
++		goto err;
++
++	ret = genl_register_ops(&dcb_family, &ixgbe_dcb_genl_c_gpgtx);
++	if (ret)
++		goto err;
++
++	ret = genl_register_ops(&dcb_family, &ixgbe_dcb_genl_c_gpgrx);
++	if (ret)
++		goto err;
++
++
++	ret = genl_register_ops(&dcb_family, &ixgbe_dcb_genl_c_glink_spd);
++	if (ret)
++		goto err;
++
++	ret = genl_register_ops(&dcb_family, &ixgbe_dcb_genl_c_slink_spd);
++	if (ret)
++		goto err;
++
++	ret = genl_register_ops(&dcb_family, &ixgbe_dcb_genl_c_set_all);
++	if (ret)
++		goto err;
++
++	ret = genl_register_ops(&dcb_family, &ixgbe_dcb_genl_c_gperm_hwaddr);
++	if (ret)
++		goto err;
++
++	return 0;
++
++err:
++	genl_unregister_family(&dcb_family);
++	return ret;
++}
++
++int ixgbe_dcb_netlink_unregister(void)
++{
++	return genl_unregister_family(&dcb_family);
++}
++#endif
+diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_ethtool.c linux-2.6.22-50/drivers/net/ixgbe/ixgbe_ethtool.c
+--- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_ethtool.c	1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_ethtool.c	2010-08-25 17:56:26.000000000 -0400
+@@ -0,0 +1,2551 @@
++/*******************************************************************************
++
++ Intel 10 Gigabit PCI Express Linux driver
++ Copyright(c) 1999 - 2010 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ You should have received a copy of the GNU General Public License along with
++ this program; if not, write to the Free Software Foundation, Inc.,
++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ e1000-devel Mailing List
++ Intel Corporation, 5200 N.E.
Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++/* ethtool support for ixgbe */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#ifdef SIOCETHTOOL ++#include ++ ++#include "ixgbe.h" ++ ++#ifndef ETH_GSTRING_LEN ++#define ETH_GSTRING_LEN 32 ++#endif ++ ++#define IXGBE_ALL_RAR_ENTRIES 16 ++ ++#ifdef ETHTOOL_OPS_COMPAT ++#include "kcompat_ethtool.c" ++#endif ++#ifdef ETHTOOL_GSTATS ++struct ixgbe_stats { ++ char stat_string[ETH_GSTRING_LEN]; ++ int sizeof_stat; ++ int stat_offset; ++}; ++ ++#define IXGBE_STAT(m) sizeof(((struct ixgbe_adapter *)0)->m), \ ++ offsetof(struct ixgbe_adapter, m) ++static struct ixgbe_stats ixgbe_gstrings_stats[] = { ++ {"rx_packets", IXGBE_STAT(net_stats.rx_packets)}, ++ {"tx_packets", IXGBE_STAT(net_stats.tx_packets)}, ++ {"rx_bytes", IXGBE_STAT(net_stats.rx_bytes)}, ++ {"tx_bytes", IXGBE_STAT(net_stats.tx_bytes)}, ++ {"rx_pkts_nic", IXGBE_STAT(stats.gprc)}, ++ {"tx_pkts_nic", IXGBE_STAT(stats.gptc)}, ++ {"rx_bytes_nic", IXGBE_STAT(stats.gorc)}, ++ {"tx_bytes_nic", IXGBE_STAT(stats.gotc)}, ++ {"lsc_int", IXGBE_STAT(lsc_int)}, ++ {"tx_busy", IXGBE_STAT(tx_busy)}, ++ {"non_eop_descs", IXGBE_STAT(non_eop_descs)}, ++ {"rx_errors", IXGBE_STAT(net_stats.rx_errors)}, ++ {"tx_errors", IXGBE_STAT(net_stats.tx_errors)}, ++ {"rx_dropped", IXGBE_STAT(net_stats.rx_dropped)}, ++#ifndef CONFIG_IXGBE_NAPI ++ {"rx_dropped_backlog", IXGBE_STAT(rx_dropped_backlog)}, ++#endif ++ {"tx_dropped", IXGBE_STAT(net_stats.tx_dropped)}, ++ {"multicast", IXGBE_STAT(net_stats.multicast)}, ++ {"broadcast", IXGBE_STAT(stats.bprc)}, ++ {"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) }, ++ {"collisions", IXGBE_STAT(net_stats.collisions)}, ++ {"rx_over_errors", IXGBE_STAT(net_stats.rx_over_errors)}, ++ {"rx_crc_errors", IXGBE_STAT(net_stats.rx_crc_errors)}, ++ {"rx_frame_errors", IXGBE_STAT(net_stats.rx_frame_errors)}, ++ {"rx_fifo_errors", IXGBE_STAT(net_stats.rx_fifo_errors)}, ++ {"rx_missed_errors", IXGBE_STAT(net_stats.rx_missed_errors)}, ++ {"tx_aborted_errors", IXGBE_STAT(net_stats.tx_aborted_errors)}, ++ {"tx_carrier_errors", IXGBE_STAT(net_stats.tx_carrier_errors)}, ++ {"tx_fifo_errors", IXGBE_STAT(net_stats.tx_fifo_errors)}, ++ {"tx_heartbeat_errors", IXGBE_STAT(net_stats.tx_heartbeat_errors)}, ++ {"tx_timeout_count", IXGBE_STAT(tx_timeout_count)}, ++ {"tx_restart_queue", IXGBE_STAT(restart_queue)}, ++ {"rx_long_length_errors", IXGBE_STAT(stats.roc)}, ++ {"rx_short_length_errors", IXGBE_STAT(stats.ruc)}, ++ {"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)}, ++ {"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)}, ++ {"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)}, ++ {"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)}, ++ {"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)}, ++#ifndef IXGBE_NO_LLI ++ {"low_latency_interrupt", IXGBE_STAT(lli_int)}, ++#endif ++ {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)}, ++ {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)}, ++#ifndef IXGBE_NO_LRO ++ {"lro_aggregated", IXGBE_STAT(lro_stats.coal)}, ++ {"lro_flushed", IXGBE_STAT(lro_stats.flushed)}, ++ {"lro_recycled", IXGBE_STAT(lro_stats.recycled)}, ++#endif /* IXGBE_NO_LRO */ ++ {"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)}, ++ {"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)}, ++ {"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)}, ++ {"rx_flm", IXGBE_STAT(flm)}, ++#ifdef HAVE_TX_MQ ++ {"fdir_match", IXGBE_STAT(stats.fdirmatch)}, ++ {"fdir_miss", 
IXGBE_STAT(stats.fdirmiss)}, ++#endif /* HAVE_TX_MQ */ ++#ifdef IXGBE_FCOE ++ {"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)}, ++ {"fcoe_last_errors", IXGBE_STAT(stats.fclast)}, ++ {"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)}, ++ {"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)}, ++ {"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)}, ++ {"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)}, ++ {"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)}, ++#endif /* IXGBE_FCOE */ ++}; ++ ++#define IXGBE_QUEUE_STATS_LEN \ ++ ((((struct ixgbe_adapter *)netdev_priv(netdev))->num_tx_queues + \ ++ ((struct ixgbe_adapter *)netdev_priv(netdev))->num_rx_queues) * \ ++ (sizeof(struct ixgbe_queue_stats) / sizeof(u64))) ++#define IXGBE_VF_STATS_LEN \ ++ ((((struct ixgbe_adapter *)netdev_priv(netdev))->num_vfs) * \ ++ (sizeof(struct vf_stats) / sizeof(u64))) ++#define IXGBE_PB_STATS_LEN ( \ ++ (((struct ixgbe_adapter *)netdev_priv(netdev))->flags & \ ++ IXGBE_FLAG_DCB_ENABLED) ? \ ++ (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \ ++ sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \ ++ sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \ ++ sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \ ++ / sizeof(u64) : 0) ++#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_PB_STATS_LEN + IXGBE_QUEUE_STATS_LEN + IXGBE_VF_STATS_LEN) ++#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats) ++#endif /* ETHTOOL_GSTATS */ ++#ifdef ETHTOOL_TEST ++static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = { ++ "Register test (offline)", "Eeprom test (offline)", ++ "Interrupt test (offline)", "Loopback test (offline)", ++ "Link test (on/offline)" ++}; ++#define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN ++#endif /* ETHTOOL_TEST */ ++ ++static int ixgbe_get_settings(struct net_device *netdev, ++ struct ethtool_cmd *ecmd) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ struct ixgbe_hw *hw = &adapter->hw; ++ u32 link_speed = 0; ++ bool link_up; ++ ++ ecmd->supported = SUPPORTED_10000baseT_Full; ++ ecmd->autoneg = AUTONEG_ENABLE; ++ ecmd->transceiver = XCVR_EXTERNAL; ++ if ((hw->phy.media_type == ixgbe_media_type_copper) || ++ (hw->phy.multispeed_fiber)) { ++ ecmd->supported |= (SUPPORTED_1000baseT_Full | ++ SUPPORTED_Autoneg); ++ ++ ecmd->advertising = ADVERTISED_Autoneg; ++ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) ++ ecmd->advertising |= ADVERTISED_10000baseT_Full; ++ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) ++ ecmd->advertising |= ADVERTISED_1000baseT_Full; ++ /* ++ * It's possible that phy.autoneg_advertised may not be ++ * set yet. If so display what the default would be - ++ * both 1G and 10G supported. 
++ */ ++ if (!(ecmd->advertising & (ADVERTISED_1000baseT_Full | ++ ADVERTISED_10000baseT_Full))) ++ ecmd->advertising |= (ADVERTISED_10000baseT_Full | ++ ADVERTISED_1000baseT_Full); ++ ++ if (hw->phy.media_type == ixgbe_media_type_copper) { ++ ecmd->supported |= SUPPORTED_TP; ++ ecmd->advertising |= ADVERTISED_TP; ++ ecmd->port = PORT_TP; ++ } else { ++ ecmd->supported |= SUPPORTED_FIBRE; ++ ecmd->advertising |= ADVERTISED_FIBRE; ++ ecmd->port = PORT_FIBRE; ++ } ++ } else if (hw->phy.media_type == ixgbe_media_type_backplane) { ++ /* Set as FIBRE until SERDES defined in kernel */ ++ if (hw->device_id == IXGBE_DEV_ID_82598_BX) { ++ ecmd->supported = (SUPPORTED_1000baseT_Full | ++ SUPPORTED_FIBRE); ++ ecmd->advertising = (ADVERTISED_1000baseT_Full | ++ ADVERTISED_FIBRE); ++ ecmd->port = PORT_FIBRE; ++ ecmd->autoneg = AUTONEG_DISABLE; ++ } else if ((hw->device_id == IXGBE_DEV_ID_82599_COMBO_BACKPLANE) || ++ (hw->device_id == IXGBE_DEV_ID_82599_KX4_MEZZ)) { ++ ecmd->supported |= (SUPPORTED_1000baseT_Full | ++ SUPPORTED_Autoneg | ++ SUPPORTED_FIBRE); ++ ecmd->advertising = (ADVERTISED_10000baseT_Full | ++ ADVERTISED_1000baseT_Full | ++ ADVERTISED_Autoneg | ++ ADVERTISED_FIBRE); ++ ecmd->port = PORT_FIBRE; ++ } else { ++ ecmd->supported |= (SUPPORTED_1000baseT_Full | ++ SUPPORTED_FIBRE); ++ ecmd->advertising = (ADVERTISED_10000baseT_Full | ++ ADVERTISED_1000baseT_Full | ++ ADVERTISED_FIBRE); ++ ecmd->port = PORT_FIBRE; ++ } ++ } else { ++ ecmd->supported |= SUPPORTED_FIBRE; ++ ecmd->advertising = (ADVERTISED_10000baseT_Full | ++ ADVERTISED_FIBRE); ++ ecmd->port = PORT_FIBRE; ++ ecmd->autoneg = AUTONEG_DISABLE; ++ } ++ ++#ifdef HAVE_ETHTOOL_SFP_DISPLAY_PORT ++ /* Get PHY type */ ++ switch (adapter->hw.phy.type) { ++ case ixgbe_phy_tn: ++ case ixgbe_phy_cu_unknown: ++ /* Copper 10G-BASET */ ++ ecmd->port = PORT_TP; ++ break; ++ case ixgbe_phy_qt: ++ ecmd->port = PORT_FIBRE; ++ break; ++ case ixgbe_phy_nl: ++ case ixgbe_phy_sfp_passive_tyco: ++ case ixgbe_phy_sfp_passive_unknown: ++ case ixgbe_phy_sfp_ftl: ++ case ixgbe_phy_sfp_avago: ++ case ixgbe_phy_sfp_intel: ++ case ixgbe_phy_sfp_unknown: ++ switch (adapter->hw.phy.sfp_type) { ++ /* SFP+ devices, further checking needed */ ++ case ixgbe_sfp_type_da_cu: ++ case ixgbe_sfp_type_da_cu_core0: ++ case ixgbe_sfp_type_da_cu_core1: ++ ecmd->port = PORT_DA; ++ break; ++ case ixgbe_sfp_type_sr: ++ case ixgbe_sfp_type_lr: ++ case ixgbe_sfp_type_srlr_core0: ++ case ixgbe_sfp_type_srlr_core1: ++ ecmd->port = PORT_FIBRE; ++ break; ++ case ixgbe_sfp_type_not_present: ++ ecmd->port = PORT_NONE; ++ break; ++ case ixgbe_sfp_type_1g_cu_core0: ++ case ixgbe_sfp_type_1g_cu_core1: ++ ecmd->port = PORT_TP; ++ ecmd->supported = SUPPORTED_TP; ++ ecmd->advertising = (ADVERTISED_1000baseT_Full | ++ ADVERTISED_TP); ++ break; ++ case ixgbe_sfp_type_unknown: ++ default: ++ ecmd->port = PORT_OTHER; ++ break; ++ } ++ break; ++ case ixgbe_phy_xaui: ++ ecmd->port = PORT_NONE; ++ break; ++ case ixgbe_phy_unknown: ++ case ixgbe_phy_generic: ++ case ixgbe_phy_sfp_unsupported: ++ default: ++ ecmd->port = PORT_OTHER; ++ break; ++ } ++#endif ++ ++ if (!in_interrupt()) { ++ hw->mac.ops.check_link(hw, &link_speed, &link_up, false); ++ } else { ++ /* ++ * this case is a special workaround for RHEL5 bonding ++ * that calls this routine from interrupt context ++ */ ++ link_speed = adapter->link_speed; ++ link_up = adapter->link_up; ++ } ++ ++ if (link_up) { ++ ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? 
++ SPEED_10000 : SPEED_1000; ++ ecmd->duplex = DUPLEX_FULL; ++ } else { ++ ecmd->speed = -1; ++ ecmd->duplex = -1; ++ } ++ ++ return 0; ++} ++ ++static int ixgbe_set_settings(struct net_device *netdev, ++ struct ethtool_cmd *ecmd) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ struct ixgbe_hw *hw = &adapter->hw; ++ u32 advertised, old; ++ s32 err = 0; ++ ++ if ((hw->phy.media_type == ixgbe_media_type_copper) || ++ (hw->phy.multispeed_fiber)) { ++ /* 10000/copper and 1000/copper must autoneg ++ * this function does not support any duplex forcing, but can ++ * limit the advertising of the adapter to only 10000 or 1000 */ ++ if (ecmd->autoneg == AUTONEG_DISABLE) ++ return -EINVAL; ++ ++ old = hw->phy.autoneg_advertised; ++ advertised = 0; ++ if (ecmd->advertising & ADVERTISED_10000baseT_Full) ++ advertised |= IXGBE_LINK_SPEED_10GB_FULL; ++ ++ if (ecmd->advertising & ADVERTISED_1000baseT_Full) ++ advertised |= IXGBE_LINK_SPEED_1GB_FULL; ++ ++ if (old == advertised) ++ return err; ++ /* this sets the link speed and restarts auto-neg */ ++ hw->mac.autotry_restart = true; ++ err = hw->mac.ops.setup_link(hw, advertised, true, true); ++ if (err) { ++ DPRINTK(PROBE, INFO, ++ "setup link failed with code %d\n", err); ++ hw->mac.ops.setup_link(hw, old, true, true); ++ } ++ } else { ++ /* in this case we currently only support 10Gb/FULL */ ++ if ((ecmd->autoneg == AUTONEG_ENABLE) || ++ (ecmd->advertising != ADVERTISED_10000baseT_Full) || ++ (ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)) ++ return -EINVAL; ++ } ++ ++ return err; ++} ++ ++static void ixgbe_get_pauseparam(struct net_device *netdev, ++ struct ethtool_pauseparam *pause) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ struct ixgbe_hw *hw = &adapter->hw; ++ ++ /* ++ * Flow Control Autoneg isn't on if ++ * - we didn't ask for it OR ++ * - it failed, we know this by tx & rx being off ++ */ ++ if (hw->fc.disable_fc_autoneg || (hw->fc.current_mode == ixgbe_fc_none)) ++ pause->autoneg = 0; ++ else ++ pause->autoneg = 1; ++ ++#ifdef CONFIG_DCB ++ if (hw->fc.current_mode == ixgbe_fc_pfc) { ++ pause->rx_pause = 0; ++ pause->tx_pause = 0; ++ return; ++ } ++#endif ++ ++ if (hw->fc.current_mode == ixgbe_fc_rx_pause) { ++ pause->rx_pause = 1; ++ } else if (hw->fc.current_mode == ixgbe_fc_tx_pause) { ++ pause->tx_pause = 1; ++ } else if (hw->fc.current_mode == ixgbe_fc_full) { ++ pause->rx_pause = 1; ++ pause->tx_pause = 1; ++ } ++} ++ ++static int ixgbe_set_pauseparam(struct net_device *netdev, ++ struct ethtool_pauseparam *pause) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ struct ixgbe_hw *hw = &adapter->hw; ++ struct ixgbe_fc_info fc; ++ ++ if (adapter->dcb_cfg.pfc_mode_enable || ++ ((hw->mac.type == ixgbe_mac_82598EB) && ++ (adapter->flags & IXGBE_FLAG_DCB_ENABLED))) ++ return -EINVAL; ++ ++ fc = hw->fc; ++ ++ if (pause->autoneg != AUTONEG_ENABLE) ++ fc.disable_fc_autoneg = true; ++ else ++ fc.disable_fc_autoneg = false; ++ ++ if ((pause->rx_pause && pause->tx_pause) || pause->autoneg) ++ fc.requested_mode = ixgbe_fc_full; ++ else if (pause->rx_pause && !pause->tx_pause) ++ fc.requested_mode = ixgbe_fc_rx_pause; ++ else if (!pause->rx_pause && pause->tx_pause) ++ fc.requested_mode = ixgbe_fc_tx_pause; ++ else if (!pause->rx_pause && !pause->tx_pause) ++ fc.requested_mode = ixgbe_fc_none; ++ else ++ return -EINVAL; ++ ++ adapter->last_lfc_mode = fc.requested_mode; ++ ++ /* if the thing changed then we'll update and use new autoneg */ ++ if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) 
{ ++ hw->fc = fc; ++ if (netif_running(netdev)) ++ ixgbe_reinit_locked(adapter); ++ else ++ ixgbe_reset(adapter); ++ } ++ ++ return 0; ++} ++ ++static u32 ixgbe_get_rx_csum(struct net_device *netdev) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ return (adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED); ++} ++ ++static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ if (data) ++ adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED; ++ else ++ adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED; ++ ++ return 0; ++} ++ ++static u32 ixgbe_get_tx_csum(struct net_device *netdev) ++{ ++ return (netdev->features & NETIF_F_IP_CSUM) != 0; ++} ++ ++static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data) ++{ ++ if (data) ++#ifdef NETIF_F_IPV6_CSUM ++ netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); ++ else ++ netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); ++#else ++ netdev->features |= NETIF_F_IP_CSUM; ++ else ++ netdev->features &= ~NETIF_F_IP_CSUM; ++#endif ++ ++ return 0; ++} ++ ++#ifdef NETIF_F_TSO ++static int ixgbe_set_tso(struct net_device *netdev, u32 data) ++{ ++#ifndef HAVE_NETDEV_VLAN_FEATURES ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++#endif /* HAVE_NETDEV_VLAN_FEATURES */ ++ if (data) { ++ netdev->features |= NETIF_F_TSO; ++#ifdef NETIF_F_TSO6 ++ netdev->features |= NETIF_F_TSO6; ++#endif ++ } else { ++ netdev->features &= ~NETIF_F_TSO; ++#ifdef NETIF_F_TSO6 ++ netdev->features &= ~NETIF_F_TSO6; ++#endif ++#ifndef HAVE_NETDEV_VLAN_FEATURES ++#ifdef NETIF_F_HW_VLAN_TX ++ /* disable TSO on all VLANs if they're present */ ++ if (adapter->vlgrp) { ++ int i; ++ struct net_device *v_netdev; ++ for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { ++ v_netdev = ++ vlan_group_get_device(adapter->vlgrp, i); ++ if (v_netdev) { ++ v_netdev->features &= ~NETIF_F_TSO; ++#ifdef NETIF_F_TSO6 ++ v_netdev->features &= ~NETIF_F_TSO6; ++#endif ++ vlan_group_set_device(adapter->vlgrp, i, ++ v_netdev); ++ } ++ } ++ } ++#endif ++#endif /* HAVE_NETDEV_VLAN_FEATURES */ ++ } ++ return 0; ++} ++#endif /* NETIF_F_TSO */ ++ ++static u32 ixgbe_get_msglevel(struct net_device *netdev) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ return adapter->msg_enable; ++} ++ ++static void ixgbe_set_msglevel(struct net_device *netdev, u32 data) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ adapter->msg_enable = data; ++} ++ ++static int ixgbe_get_regs_len(struct net_device *netdev) ++{ ++#define IXGBE_REGS_LEN 1128 ++ return IXGBE_REGS_LEN * sizeof(u32); ++} ++ ++#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_ ++ ++ ++static void ixgbe_get_regs(struct net_device *netdev, struct ethtool_regs *regs, ++ void *p) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ struct ixgbe_hw *hw = &adapter->hw; ++ u32 *regs_buff = p; ++ u8 i; ++ ++ memset(p, 0, IXGBE_REGS_LEN * sizeof(u32)); ++ ++ regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id; ++ ++ /* General Registers */ ++ regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL); ++ regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS); ++ regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); ++ regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP); ++ regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP); ++ regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL); ++ regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER); ++ regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER); ++ ++ /* NVM Register */ ++ regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC); ++ regs_buff[9] = 
IXGBE_READ_REG(hw, IXGBE_EERD); ++ regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA); ++ regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL); ++ regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA); ++ regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL); ++ regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA); ++ regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT); ++ regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP); ++ regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC); ++ ++ /* Interrupt */ ++ /* don't read EICR because it can clear interrupt causes, instead ++ * read EICS which is a shadow but doesn't clear EICR */ ++ regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS); ++ regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS); ++ regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS); ++ regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC); ++ regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC); ++ regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM); ++ regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0)); ++ regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0)); ++ regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT); ++ regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA); ++ regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0)); ++ regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE); ++ ++ /* Flow Control */ ++ regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP); ++ regs_buff[31] = IXGBE_READ_REG(hw, IXGBE_FCTTV(0)); ++ regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1)); ++ regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2)); ++ regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3)); ++ for (i = 0; i < 8; i++) ++ switch (hw->mac.type) { ++ case ixgbe_mac_82598EB: ++ regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i)); ++ regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i)); ++ break; ++ case ixgbe_mac_82599EB: ++ regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i)); ++ regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); ++ break; ++ default: ++ break; ++ } ++ regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV); ++ regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS); ++ ++ /* Receive DMA */ ++ for (i = 0; i < 64; i++) ++ regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i)); ++ for (i = 0; i < 64; i++) ++ regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i)); ++ for (i = 0; i < 64; i++) ++ regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i)); ++ for (i = 0; i < 64; i++) ++ regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i)); ++ for (i = 0; i < 64; i++) ++ regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i)); ++ for (i = 0; i < 64; i++) ++ regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); ++ for (i = 0; i < 16; i++) ++ regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i)); ++ for (i = 0; i < 16; i++) ++ regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); ++ regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); ++ for (i = 0; i < 8; i++) ++ regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); ++ regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL); ++ regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN); ++ ++ /* Receive */ ++ regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM); ++ regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL); ++ for (i = 0; i < 16; i++) ++ regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i)); ++ for (i = 0; i < 16; i++) ++ regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i)); ++ regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)); ++ regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL); ++ regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); ++ regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL); ++ 
regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC); ++ regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); ++ for (i = 0; i < 8; i++) ++ regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i)); ++ for (i = 0; i < 8; i++) ++ regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i)); ++ regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP); ++ ++ /* Transmit */ ++ for (i = 0; i < 32; i++) ++ regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i)); ++ for (i = 0; i < 32; i++) ++ regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i)); ++ for (i = 0; i < 32; i++) ++ regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i)); ++ for (i = 0; i < 32; i++) ++ regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i)); ++ for (i = 0; i < 32; i++) ++ regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i)); ++ for (i = 0; i < 32; i++) ++ regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); ++ for (i = 0; i < 32; i++) ++ regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i)); ++ for (i = 0; i < 32; i++) ++ regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i)); ++ regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL); ++ for (i = 0; i < 16; i++) ++ regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i)); ++ regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG); ++ for (i = 0; i < 8; i++) ++ regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i)); ++ regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP); ++ ++ /* Wake Up */ ++ regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC); ++ regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC); ++ regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS); ++ regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV); ++ regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT); ++ regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT); ++ regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL); ++ regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM); ++ regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0)); ++ ++ /* DCB */ ++ regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS); ++ regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS); ++ regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); ++ regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR); ++ for (i = 0; i < 8; i++) ++ regs_buff[833 + i] = IXGBE_READ_REG(hw, IXGBE_RT2CR(i)); ++ for (i = 0; i < 8; i++) ++ regs_buff[841 + i] = IXGBE_READ_REG(hw, IXGBE_RT2SR(i)); ++ for (i = 0; i < 8; i++) ++ regs_buff[849 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i)); ++ for (i = 0; i < 8; i++) ++ regs_buff[857 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i)); ++ for (i = 0; i < 8; i++) ++ regs_buff[865 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); ++ for (i = 0; i < 8; i++) ++ regs_buff[873 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); ++ ++ /* Statistics */ ++ regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs); ++ regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc); ++ regs_buff[883] = IXGBE_GET_STAT(adapter, errbc); ++ regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc); ++ for (i = 0; i < 8; i++) ++ regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]); ++ regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc); ++ regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc); ++ regs_buff[895] = IXGBE_GET_STAT(adapter, rlec); ++ regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc); ++ regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc); ++ regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc); ++ regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc); ++ for (i = 0; i < 8; i++) ++ regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]); ++ for (i = 0; i < 8; i++) ++ regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]); ++ for (i = 0; i < 8; i++) 
++ regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]); ++ for (i = 0; i < 8; i++) ++ regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]); ++ regs_buff[932] = IXGBE_GET_STAT(adapter, prc64); ++ regs_buff[933] = IXGBE_GET_STAT(adapter, prc127); ++ regs_buff[934] = IXGBE_GET_STAT(adapter, prc255); ++ regs_buff[935] = IXGBE_GET_STAT(adapter, prc511); ++ regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023); ++ regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522); ++ regs_buff[938] = IXGBE_GET_STAT(adapter, gprc); ++ regs_buff[939] = IXGBE_GET_STAT(adapter, bprc); ++ regs_buff[940] = IXGBE_GET_STAT(adapter, mprc); ++ regs_buff[941] = IXGBE_GET_STAT(adapter, gptc); ++ regs_buff[942] = IXGBE_GET_STAT(adapter, gorc); ++ regs_buff[944] = IXGBE_GET_STAT(adapter, gotc); ++ for (i = 0; i < 8; i++) ++ regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]); ++ regs_buff[954] = IXGBE_GET_STAT(adapter, ruc); ++ regs_buff[955] = IXGBE_GET_STAT(adapter, rfc); ++ regs_buff[956] = IXGBE_GET_STAT(adapter, roc); ++ regs_buff[957] = IXGBE_GET_STAT(adapter, rjc); ++ regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc); ++ regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc); ++ regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc); ++ regs_buff[961] = IXGBE_GET_STAT(adapter, tor); ++ regs_buff[963] = IXGBE_GET_STAT(adapter, tpr); ++ regs_buff[964] = IXGBE_GET_STAT(adapter, tpt); ++ regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64); ++ regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127); ++ regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255); ++ regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511); ++ regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023); ++ regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522); ++ regs_buff[971] = IXGBE_GET_STAT(adapter, mptc); ++ regs_buff[972] = IXGBE_GET_STAT(adapter, bptc); ++ regs_buff[973] = IXGBE_GET_STAT(adapter, xec); ++ for (i = 0; i < 16; i++) ++ regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]); ++ for (i = 0; i < 16; i++) ++ regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]); ++ for (i = 0; i < 16; i++) ++ regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]); ++ for (i = 0; i < 16; i++) ++ regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]); ++ ++ /* MAC */ ++ regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG); ++ regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); ++ regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); ++ regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0); ++ regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1); ++ regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); ++ regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); ++ regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP); ++ regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP); ++ regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0); ++ regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1); ++ regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP); ++ regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA); ++ regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE); ++ regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD); ++ regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS); ++ regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA); ++ regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD); ++ regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD); ++ regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD); ++ regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG); ++ regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1); ++ regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2); ++ regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS); ++ regs_buff[1062] = 
IXGBE_READ_REG(hw, IXGBE_SERDESC); ++ regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS); ++ regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC); ++ regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS); ++ regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2); ++ regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3); ++ regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1); ++ regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2); ++ regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL); ++ ++ /* Diagnostic */ ++ regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL); ++ for (i = 0; i < 8; i++) ++ regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i)); ++ regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN); ++ for (i = 0; i < 4; i++) ++ regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i)); ++ regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE); ++ regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL); ++ for (i = 0; i < 8; i++) ++ regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i)); ++ regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN); ++ for (i = 0; i < 4; i++) ++ regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i)); ++ regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE); ++ regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL); ++ regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0); ++ regs_buff[1103] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA1); ++ regs_buff[1104] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA2); ++ regs_buff[1105] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA3); ++ regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL); ++ regs_buff[1107] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA0); ++ regs_buff[1108] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA1); ++ regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2); ++ regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3); ++ for (i = 0; i < 8; i++) ++ regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i)); ++ regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL); ++ regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1); ++ regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2); ++ regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1); ++ regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2); ++ regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS); ++ regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL); ++ regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC); ++ regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC); ++} ++ ++static int ixgbe_get_eeprom_len(struct net_device *netdev) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ return adapter->hw.eeprom.word_size * 2; ++} ++ ++static int ixgbe_get_eeprom(struct net_device *netdev, ++ struct ethtool_eeprom *eeprom, u8 *bytes) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ struct ixgbe_hw *hw = &adapter->hw; ++ u16 *eeprom_buff; ++ int first_word, last_word, eeprom_len; ++ int ret_val = 0; ++ u16 i; ++ ++ if (eeprom->len == 0) ++ return -EINVAL; ++ ++ eeprom->magic = hw->vendor_id | (hw->device_id << 16); ++ ++ first_word = eeprom->offset >> 1; ++ last_word = (eeprom->offset + eeprom->len - 1) >> 1; ++ eeprom_len = last_word - first_word + 1; ++ ++ eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL); ++ if (!eeprom_buff) ++ return -ENOMEM; ++ ++ for (i = 0; i < eeprom_len; i++) { ++ if ((ret_val = ixgbe_read_eeprom(hw, first_word + i, ++ &eeprom_buff[i]))) ++ break; ++ } ++ ++ /* Device's eeprom is always little-endian, word addressable */ ++ for (i = 0; i < eeprom_len; i++) ++ le16_to_cpus(&eeprom_buff[i]); ++ ++ memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len); ++ 
kfree(eeprom_buff); ++ ++ return ret_val; ++} ++ ++static int ixgbe_set_eeprom(struct net_device *netdev, ++ struct ethtool_eeprom *eeprom, u8 *bytes) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ struct ixgbe_hw *hw = &adapter->hw; ++ u16 *eeprom_buff; ++ void *ptr; ++ int max_len, first_word, last_word, ret_val = 0; ++ u16 i; ++ ++ if (eeprom->len == 0) ++ return -EOPNOTSUPP; ++ ++ if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) ++ return -EFAULT; ++ ++ max_len = hw->eeprom.word_size * 2; ++ ++ first_word = eeprom->offset >> 1; ++ last_word = (eeprom->offset + eeprom->len - 1) >> 1; ++ eeprom_buff = kmalloc(max_len, GFP_KERNEL); ++ if (!eeprom_buff) ++ return -ENOMEM; ++ ++ ptr = (void *)eeprom_buff; ++ ++ if (eeprom->offset & 1) { ++ /* need read/modify/write of first changed EEPROM word */ ++ /* only the second byte of the word is being modified */ ++ ret_val = ixgbe_read_eeprom(hw, first_word, &eeprom_buff[0]); ++ ptr++; ++ } ++ if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { ++ /* need read/modify/write of last changed EEPROM word */ ++ /* only the first byte of the word is being modified */ ++ ret_val = ixgbe_read_eeprom(hw, last_word, ++ &eeprom_buff[last_word - first_word]); ++ } ++ ++ /* Device's eeprom is always little-endian, word addressable */ ++ for (i = 0; i < last_word - first_word + 1; i++) ++ le16_to_cpus(&eeprom_buff[i]); ++ ++ memcpy(ptr, bytes, eeprom->len); ++ ++ for (i = 0; i <= (last_word - first_word); i++) ++ ret_val |= ixgbe_write_eeprom(hw, first_word + i, eeprom_buff[i]); ++ ++ /* Update the checksum */ ++ ixgbe_update_eeprom_checksum(hw); ++ ++ kfree(eeprom_buff); ++ return ret_val; ++} ++ ++static void ixgbe_get_drvinfo(struct net_device *netdev, ++ struct ethtool_drvinfo *drvinfo) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ char firmware_version[32]; ++ ++ strncpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver)); ++ strncpy(drvinfo->version, ixgbe_driver_version, ++ sizeof(drvinfo->version)); ++ ++ snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d", ++ (adapter->eeprom_version & 0xF000) >> 12, ++ (adapter->eeprom_version & 0x0FF0) >> 4, ++ adapter->eeprom_version & 0x000F); ++ ++ strncpy(drvinfo->fw_version, firmware_version, ++ sizeof(drvinfo->fw_version)); ++ strncpy(drvinfo->bus_info, pci_name(adapter->pdev), ++ sizeof(drvinfo->bus_info)); ++ drvinfo->n_stats = IXGBE_STATS_LEN; ++ drvinfo->testinfo_len = IXGBE_TEST_LEN; ++ drvinfo->regdump_len = ixgbe_get_regs_len(netdev); ++} ++ ++static void ixgbe_get_ringparam(struct net_device *netdev, ++ struct ethtool_ringparam *ring) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ struct ixgbe_ring *tx_ring = adapter->tx_ring[0]; ++ struct ixgbe_ring *rx_ring = adapter->rx_ring[0]; ++ ++ ring->rx_max_pending = IXGBE_MAX_RXD; ++ ring->tx_max_pending = IXGBE_MAX_TXD; ++ ring->rx_mini_max_pending = 0; ++ ring->rx_jumbo_max_pending = 0; ++ ring->rx_pending = rx_ring->count; ++ ring->tx_pending = tx_ring->count; ++ ring->rx_mini_pending = 0; ++ ring->rx_jumbo_pending = 0; ++} ++ ++static int ixgbe_set_ringparam(struct net_device *netdev, ++ struct ethtool_ringparam *ring) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ struct ixgbe_ring *temp_tx_ring, *temp_rx_ring; ++ int i, err = 0; ++ u32 new_rx_count, new_tx_count; ++ bool need_update = false; ++ ++ if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) ++ return -EINVAL; ++ ++ new_rx_count = max(ring->rx_pending, (u32)IXGBE_MIN_RXD); ++ 
new_rx_count = min(new_rx_count, (u32)IXGBE_MAX_RXD); ++ new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE); ++ ++ new_tx_count = max(ring->tx_pending, (u32)IXGBE_MIN_TXD); ++ new_tx_count = min(new_tx_count, (u32)IXGBE_MAX_TXD); ++ new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE); ++ ++ if ((new_tx_count == adapter->tx_ring[0]->count) && ++ (new_rx_count == adapter->rx_ring[0]->count)) { ++ /* nothing to do */ ++ return 0; ++ } ++ ++ while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) ++ msleep(1); ++ ++ if (!netif_running(adapter->netdev)) { ++ for (i = 0; i < adapter->num_tx_queues; i++) ++ adapter->tx_ring[i]->count = new_tx_count; ++ for (i = 0; i < adapter->num_rx_queues; i++) ++ adapter->rx_ring[i]->count = new_rx_count; ++ adapter->tx_ring_count = new_tx_count; ++ adapter->rx_ring_count = new_rx_count; ++ goto clear_reset; ++ } ++ ++ temp_tx_ring = vmalloc(adapter->num_tx_queues * sizeof(struct ixgbe_ring)); ++ if (!temp_tx_ring) { ++ err = -ENOMEM; ++ goto clear_reset; ++ } ++ ++ if (new_tx_count != adapter->tx_ring_count) { ++ for (i = 0; i < adapter->num_tx_queues; i++) { ++ memcpy(&temp_tx_ring[i], adapter->tx_ring[i], ++ sizeof(struct ixgbe_ring)); ++ temp_tx_ring[i].count = new_tx_count; ++ err = ixgbe_setup_tx_resources(&temp_tx_ring[i]); ++ if (err) { ++ while (i) { ++ i--; ++ ixgbe_free_tx_resources(&temp_tx_ring[i]); ++ } ++ goto clear_reset; ++ } ++ } ++ need_update = true; ++ } ++ ++ temp_rx_ring = vmalloc(adapter->num_rx_queues * sizeof(struct ixgbe_ring)); ++ if (!temp_rx_ring) { ++ err = -ENOMEM; ++ goto err_setup; ++ } ++ ++ if (new_rx_count != adapter->rx_ring_count) { ++ for (i = 0; i < adapter->num_rx_queues; i++) { ++ memcpy(&temp_rx_ring[i], adapter->rx_ring[i], ++ sizeof(struct ixgbe_ring)); ++ temp_rx_ring[i].count = new_rx_count; ++ err = ixgbe_setup_rx_resources(&temp_rx_ring[i]); ++ if (err) { ++ while (i) { ++ i--; ++ ixgbe_free_rx_resources(&temp_rx_ring[i]); ++ } ++ goto err_setup; ++ } ++ } ++ need_update = true; ++ } ++ ++ /* if rings need to be updated, here's the place to do it in one shot */ ++ if (need_update) { ++ ixgbe_down(adapter); ++ ++ /* tx */ ++ if (new_tx_count != adapter->tx_ring_count) { ++ for (i = 0; i < adapter->num_tx_queues; i++) { ++ ixgbe_free_tx_resources(adapter->tx_ring[i]); ++ memcpy(adapter->tx_ring[i], &temp_tx_ring[i], ++ sizeof(struct ixgbe_ring)); ++ } ++ adapter->tx_ring_count = new_tx_count; ++ } ++ ++ /* rx */ ++ if (new_rx_count != adapter->rx_ring_count) { ++ for (i = 0; i < adapter->num_rx_queues; i++) { ++ ixgbe_free_rx_resources(adapter->rx_ring[i]); ++ memcpy(adapter->rx_ring[i], &temp_rx_ring[i], ++ sizeof(struct ixgbe_ring)); ++ } ++ adapter->rx_ring_count = new_rx_count; ++ } ++ ixgbe_up(adapter); ++ } ++ ++ vfree(temp_rx_ring); ++err_setup: ++ vfree(temp_tx_ring); ++clear_reset: ++ clear_bit(__IXGBE_RESETTING, &adapter->state); ++ return err; ++} ++ ++#ifndef HAVE_ETHTOOL_GET_SSET_COUNT ++static int ixgbe_get_stats_count(struct net_device *netdev) ++{ ++ return IXGBE_STATS_LEN; ++} ++ ++#else /* HAVE_ETHTOOL_GET_SSET_COUNT */ ++static int ixgbe_get_sset_count(struct net_device *netdev, int sset) ++{ ++ switch (sset) { ++ case ETH_SS_TEST: ++ return IXGBE_TEST_LEN; ++ case ETH_SS_STATS: ++ return IXGBE_STATS_LEN; ++#ifdef NETIF_F_NTUPLE ++ case ETH_SS_NTUPLE_FILTERS: ++ return (ETHTOOL_MAX_NTUPLE_LIST_ENTRY * ++ ETHTOOL_MAX_NTUPLE_STRING_PER_ENTRY); ++#endif /* NETIF_F_NTUPLE */ ++ default: ++ return -EOPNOTSUPP; ++ } ++} ++ ++#endif /* 
HAVE_ETHTOOL_GET_SSET_COUNT */ ++static void ixgbe_get_ethtool_stats(struct net_device *netdev, ++ struct ethtool_stats *stats, u64 *data) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ u64 *queue_stat; ++ int stat_count = sizeof(struct ixgbe_queue_stats) / sizeof(u64); ++ int j, k; ++ int i; ++ ++ ixgbe_update_stats(adapter); ++ for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { ++ char *p = (char *)adapter + ixgbe_gstrings_stats[i].stat_offset; ++ data[i] = (ixgbe_gstrings_stats[i].sizeof_stat == ++ sizeof(u64)) ? *(u64 *)p : *(u32 *)p; ++ } ++ for (j = 0; j < adapter->num_tx_queues; j++) { ++ queue_stat = (u64 *)&adapter->tx_ring[j]->stats; ++ for (k = 0; k < stat_count; k++) ++ data[i + k] = queue_stat[k]; ++ i += k; ++ } ++ for (j = 0; j < adapter->num_rx_queues; j++) { ++ queue_stat = (u64 *)&adapter->rx_ring[j]->stats; ++ for (k = 0; k < stat_count; k++) ++ data[i + k] = queue_stat[k]; ++ i += k; ++ } ++ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { ++ for (j = 0; j < MAX_TX_PACKET_BUFFERS; j++) { ++ data[i++] = adapter->stats.pxontxc[j]; ++ data[i++] = adapter->stats.pxofftxc[j]; ++ } ++ for (j = 0; j < MAX_RX_PACKET_BUFFERS; j++) { ++ data[i++] = adapter->stats.pxonrxc[j]; ++ data[i++] = adapter->stats.pxoffrxc[j]; ++ } ++ } ++ stat_count = sizeof(struct vf_stats) / sizeof(u64); ++ for(j = 0; j < adapter->num_vfs; j++) { ++ queue_stat = (u64 *)&adapter->vfinfo[j].vfstats; ++ for (k = 0; k < stat_count; k++) { ++ data[i + k] = queue_stat[k]; ++ } ++ queue_stat = (u64 *)&adapter->vfinfo[j].saved_rst_vfstats; ++ for (k = 0; k < stat_count; k++) { ++ data[i + k] += queue_stat[k]; ++ } ++ i += k; ++ } ++} ++ ++static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, ++ u8 *data) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ char *p = (char *)data; ++ int i; ++ ++ switch (stringset) { ++ case ETH_SS_TEST: ++ memcpy(data, *ixgbe_gstrings_test, ++ IXGBE_TEST_LEN * ETH_GSTRING_LEN); ++ break; ++ case ETH_SS_STATS: ++ for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { ++ memcpy(p, ixgbe_gstrings_stats[i].stat_string, ++ ETH_GSTRING_LEN); ++ p += ETH_GSTRING_LEN; ++ } ++ for (i = 0; i < adapter->num_tx_queues; i++) { ++ sprintf(p, "tx_queue_%u_packets", i); ++ p += ETH_GSTRING_LEN; ++ sprintf(p, "tx_queue_%u_bytes", i); ++ p += ETH_GSTRING_LEN; ++ } ++ for (i = 0; i < adapter->num_rx_queues; i++) { ++ sprintf(p, "rx_queue_%u_packets", i); ++ p += ETH_GSTRING_LEN; ++ sprintf(p, "rx_queue_%u_bytes", i); ++ p += ETH_GSTRING_LEN; ++ } ++ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { ++ for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) { ++ sprintf(p, "tx_pb_%u_pxon", i); ++ p += ETH_GSTRING_LEN; ++ sprintf(p, "tx_pb_%u_pxoff", i); ++ p += ETH_GSTRING_LEN; ++ } ++ for (i = 0; i < MAX_RX_PACKET_BUFFERS; i++) { ++ sprintf(p, "rx_pb_%u_pxon", i); ++ p += ETH_GSTRING_LEN; ++ sprintf(p, "rx_pb_%u_pxoff", i); ++ p += ETH_GSTRING_LEN; ++ } ++ } ++ for (i = 0; i < adapter->num_vfs; i++) { ++ sprintf(p, "VF %d Rx Packets", i); ++ p += ETH_GSTRING_LEN; ++ sprintf(p, "VF %d Rx Bytes", i); ++ p += ETH_GSTRING_LEN; ++ sprintf(p, "VF %d Tx Packets", i); ++ p += ETH_GSTRING_LEN; ++ sprintf(p, "VF %d Tx Bytes", i); ++ p += ETH_GSTRING_LEN; ++ sprintf(p, "VF %d MC Packets", i); ++ p += ETH_GSTRING_LEN; ++ } ++ /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */ ++ break; ++ } ++} ++ ++static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ bool link_up; ++ u32 link_speed = 0; ++ *data = 0; ++ ++ 
hw->mac.ops.check_link(hw, &link_speed, &link_up, true); ++ if (link_up) ++ return *data; ++ else ++ *data = 1; ++ return *data; ++} ++ ++/* ethtool register test data */ ++struct ixgbe_reg_test { ++ u16 reg; ++ u8 array_len; ++ u8 test_type; ++ u32 mask; ++ u32 write; ++}; ++ ++/* In the hardware, registers are laid out either singly, in arrays ++ * spaced 0x40 bytes apart, or in contiguous tables. We assume ++ * most tests take place on arrays or single registers (handled ++ * as a single-element array) and special-case the tables. ++ * Table tests are always pattern tests. ++ * ++ * We also make provision for some required setup steps by specifying ++ * registers to be written without any read-back testing. ++ */ ++ ++#define PATTERN_TEST 1 ++#define SET_READ_TEST 2 ++#define WRITE_NO_TEST 3 ++#define TABLE32_TEST 4 ++#define TABLE64_TEST_LO 5 ++#define TABLE64_TEST_HI 6 ++ ++/* default 82599 register test */ ++static struct ixgbe_reg_test reg_test_82599[] = { ++ { IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, ++ { IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, ++ { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 }, ++ { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, ++ { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, ++ { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE }, ++ { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, ++ { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 }, ++ { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, ++ { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, ++ { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 }, ++ { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 }, ++ { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF }, ++ { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { 0, 0, 0, 0 } ++}; ++ ++/* default 82598 register test */ ++static struct ixgbe_reg_test reg_test_82598[] = { ++ { IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, ++ { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, ++ { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 }, ++ { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, ++ { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, ++ /* Enable all four RX queues before testing. */ ++ { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE }, ++ /* RDH is read-only for 82598, only test RDT. 
*/ ++ { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, ++ { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 }, ++ { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, ++ { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF }, ++ { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, ++ { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, ++ { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 }, ++ { IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 }, ++ { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF }, ++ { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { 0, 0, 0, 0 } ++}; ++ ++#define REG_PATTERN_TEST(R, M, W) \ ++{ \ ++ u32 pat, val, before; \ ++ const u32 _test[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \ ++ for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { \ ++ before = readl(adapter->hw.hw_addr + R); \ ++ writel((_test[pat] & W), (adapter->hw.hw_addr + R)); \ ++ val = readl(adapter->hw.hw_addr + R); \ ++ if (val != (_test[pat] & W & M)) { \ ++ DPRINTK(DRV, ERR, "pattern test reg %04X failed: got "\ ++ "0x%08X expected 0x%08X\n", \ ++ R, val, (_test[pat] & W & M)); \ ++ *data = R; \ ++ writel(before, adapter->hw.hw_addr + R); \ ++ return 1; \ ++ } \ ++ writel(before, adapter->hw.hw_addr + R); \ ++ } \ ++} ++ ++#define REG_SET_AND_CHECK(R, M, W) \ ++{ \ ++ u32 val, before; \ ++ before = readl(adapter->hw.hw_addr + R); \ ++ writel((W & M), (adapter->hw.hw_addr + R)); \ ++ val = readl(adapter->hw.hw_addr + R); \ ++ if ((W & M) != (val & M)) { \ ++ DPRINTK(DRV, ERR, "set/check reg %04X test failed: got 0x%08X "\ ++ "expected 0x%08X\n", R, (val & M), (W & M)); \ ++ *data = R; \ ++ writel(before, (adapter->hw.hw_addr + R)); \ ++ return 1; \ ++ } \ ++ writel(before, (adapter->hw.hw_addr + R)); \ ++} ++ ++static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) ++{ ++ struct ixgbe_reg_test *test; ++ u32 value, before, after; ++ u32 i, toggle; ++ ++ switch (adapter->hw.mac.type) { ++ case ixgbe_mac_82598EB: ++ toggle = 0x7FFFF3FF; ++ test = reg_test_82598; ++ break; ++ case ixgbe_mac_82599EB: ++ toggle = 0x7FFFF30F; ++ test = reg_test_82599; ++ break; ++ default: ++ *data = 1; ++ return 1; ++ break; ++ } ++ ++ /* ++ * Because the status register is such a special case, ++ * we handle it separately from the rest of the register ++ * tests. Some bits are read-only, some toggle, and some ++ * are writeable on newer MACs. ++ */ ++ before = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS); ++ value = (IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle); ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, toggle); ++ after = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle; ++ if (value != after) { ++ DPRINTK(DRV, ERR, "failed STATUS register test got: " ++ "0x%08X expected: 0x%08X\n", after, value); ++ *data = 1; ++ return 1; ++ } ++ /* restore previous status */ ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, before); ++ ++ /* ++ * Perform the remainder of the register test, looping through ++ * the test table until we either fail or reach the null entry. 
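++ * Array-style entries step 0x40 bytes per index; table entries step 4 or 8 bytes.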
++ */ ++ while (test->reg) { ++ for (i = 0; i < test->array_len; i++) { ++ switch (test->test_type) { ++ case PATTERN_TEST: ++ REG_PATTERN_TEST(test->reg + (i * 0x40), ++ test->mask, ++ test->write); ++ break; ++ case SET_READ_TEST: ++ REG_SET_AND_CHECK(test->reg + (i * 0x40), ++ test->mask, ++ test->write); ++ break; ++ case WRITE_NO_TEST: ++ writel(test->write, ++ (adapter->hw.hw_addr + test->reg) ++ + (i * 0x40)); ++ break; ++ case TABLE32_TEST: ++ REG_PATTERN_TEST(test->reg + (i * 4), ++ test->mask, ++ test->write); ++ break; ++ case TABLE64_TEST_LO: ++ REG_PATTERN_TEST(test->reg + (i * 8), ++ test->mask, ++ test->write); ++ break; ++ case TABLE64_TEST_HI: ++ REG_PATTERN_TEST((test->reg + 4) + (i * 8), ++ test->mask, ++ test->write); ++ break; ++ } ++ } ++ test++; ++ } ++ ++ *data = 0; ++ return 0; ++} ++ ++static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data) ++{ ++ if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL)) ++ *data = 1; ++ else ++ *data = 0; ++ return *data; ++} ++ ++static irqreturn_t ixgbe_test_intr(int irq, void *data) ++{ ++ struct net_device *netdev = (struct net_device *) data; ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ ++ adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR); ++ ++ return IRQ_HANDLED; ++} ++ ++static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data) ++{ ++ struct net_device *netdev = adapter->netdev; ++ u32 mask, i = 0, shared_int = true; ++ u32 irq = adapter->pdev->irq; ++ ++ *data = 0; ++ ++ /* Hook up test interrupt handler just for this test */ ++ if (adapter->msix_entries) { ++ /* NOTE: we don't test MSI-X interrupts here, yet */ ++ return 0; ++ } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { ++ shared_int = false; ++ if (request_irq(irq, &ixgbe_test_intr, 0, netdev->name, ++ netdev)) { ++ *data = 1; ++ return -1; ++ } ++ } else if (!request_irq(irq, &ixgbe_test_intr, IRQF_PROBE_SHARED, ++ netdev->name, netdev)) { ++ shared_int = false; ++ } else if (request_irq(irq, &ixgbe_test_intr, IRQF_SHARED, ++ netdev->name, netdev)) { ++ *data = 1; ++ return -1; ++ } ++ DPRINTK(HW, INFO, "testing %s interrupt\n", ++ (shared_int ? "shared" : "unshared")); ++ ++ /* Disable all the interrupts */ ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); ++ msleep(10); ++ ++ /* Test each interrupt */ ++ for (; i < 10; i++) { ++ /* Interrupt to test */ ++ mask = 1 << i; ++ ++ if (!shared_int) { ++ /* ++ * Disable the interrupts to be reported in ++ * the cause register and then force the same ++ * interrupt and see if one gets posted. If ++ * an interrupt was posted to the bus, the ++ * test failed. ++ */ ++ adapter->test_icr = 0; ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ++ ~mask & 0x00007FFF); ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, ++ ~mask & 0x00007FFF); ++ msleep(10); ++ ++ if (adapter->test_icr & mask) { ++ *data = 3; ++ break; ++ } ++ } ++ ++ /* ++ * Enable the interrupt to be reported in the cause ++ * register and then force the same interrupt and see ++ * if one gets posted. If an interrupt was not posted ++ * to the bus, the test failed. ++ */ ++ adapter->test_icr = 0; ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); ++ msleep(10); ++ ++ if (!(adapter->test_icr &mask)) { ++ *data = 4; ++ break; ++ } ++ ++ if (!shared_int) { ++ /* ++ * Disable the other interrupts to be reported in ++ * the cause register and then force the other ++ * interrupts and see if any get posted. 
If ++ * an interrupt was posted to the bus, the ++ * test failed. ++ */ ++ adapter->test_icr = 0; ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ++ ~mask & 0x00007FFF); ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, ++ ~mask & 0x00007FFF); ++ msleep(10); ++ ++ if (adapter->test_icr) { ++ *data = 5; ++ break; ++ } ++ } ++ } ++ ++ /* Disable all the interrupts */ ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); ++ msleep(10); ++ ++ /* Unhook test interrupt handler */ ++ free_irq(irq, netdev); ++ ++ return *data; ++} ++ ++static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter) ++{ ++ struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; ++ struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; ++ struct ixgbe_hw *hw = &adapter->hw; ++ u32 reg_ctl; ++ ++ /* shut down the DMA engines now so they can be reinitialized later */ ++ ++ /* first Rx */ ++ reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); ++ reg_ctl &= ~IXGBE_RXCTRL_RXEN; ++ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl); ++ reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->reg_idx)); ++ reg_ctl &= ~IXGBE_RXDCTL_ENABLE; ++ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->reg_idx), reg_ctl); ++ ++ /* now Tx */ ++ reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx)); ++ reg_ctl &= ~IXGBE_TXDCTL_ENABLE; ++ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl); ++ ++ switch (hw->mac.type) { ++ case ixgbe_mac_82599EB: ++ reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); ++ reg_ctl &= ~IXGBE_DMATXCTL_TE; ++ IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl); ++ break; ++ default: ++ break; ++ } ++ ++ ixgbe_reset(adapter); ++ ++ ixgbe_free_tx_resources(&adapter->test_tx_ring); ++ ixgbe_free_rx_resources(&adapter->test_rx_ring); ++} ++ ++static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) ++{ ++ struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; ++ struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; ++ u32 rctl, reg_data; ++ int ret_val; ++ int err; ++ ++ /* Setup Tx descriptor ring and Tx buffers */ ++ tx_ring->count = IXGBE_DEFAULT_TXD; ++ tx_ring->queue_index = 0; ++ tx_ring->dev = pci_dev_to_dev(adapter->pdev); ++ tx_ring->netdev = adapter->netdev; ++ tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx; ++ tx_ring->numa_node = adapter->node; ++ ++ err = ixgbe_setup_tx_resources(tx_ring); ++ if (err) ++ return 1; ++ ++ switch (adapter->hw.mac.type) { ++ case ixgbe_mac_82599EB: ++ reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL); ++ reg_data |= IXGBE_DMATXCTL_TE; ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data); ++ break; ++ default: ++ break; ++ } ++ ++ ixgbe_configure_tx_ring(adapter, tx_ring); ++ ++ /* Setup Rx Descriptor ring and Rx buffers */ ++ rx_ring->count = IXGBE_DEFAULT_RXD; ++ rx_ring->queue_index = 0; ++ rx_ring->dev = pci_dev_to_dev(adapter->pdev); ++ rx_ring->netdev = adapter->netdev; ++ rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx; ++ rx_ring->rx_buf_len = IXGBE_RXBUFFER_2048; ++ rx_ring->numa_node = adapter->node; ++ ++ err = ixgbe_setup_rx_resources(rx_ring); ++ if (err) { ++ ret_val = 4; ++ goto err_nomem; ++ } ++ ++ rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL); ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl & ~IXGBE_RXCTRL_RXEN); ++ ++ ixgbe_configure_rx_ring(adapter, rx_ring); ++ ++ rctl |= IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS; ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl); ++ ++ return 0; ++ ++err_nomem: ++ ixgbe_free_desc_rings(adapter); ++ return ret_val; ++} ++ ++static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter) ++{ ++ struct ixgbe_hw *hw = 
&adapter->hw; ++ u32 reg_data; ++ ++ /* right now we only support MAC loopback in the driver */ ++ reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0); ++ /* Setup MAC loopback */ ++ reg_data |= IXGBE_HLREG0_LPBK; ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data); ++ ++ reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); ++ reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE; ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_data); ++ ++ reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_AUTOC); ++ reg_data &= ~IXGBE_AUTOC_LMS_MASK; ++ reg_data |= IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU; ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_AUTOC, reg_data); ++ IXGBE_WRITE_FLUSH(&adapter->hw); ++ msleep(10); ++ ++ /* Disable Atlas Tx lanes; re-enabled in reset path */ ++ if (hw->mac.type == ixgbe_mac_82598EB) { ++ u8 atlas; ++ ++ ixgbe_read_analog_reg8(&adapter->hw, ++ IXGBE_ATLAS_PDN_LPBK, &atlas); ++ atlas |= IXGBE_ATLAS_PDN_TX_REG_EN; ++ ixgbe_write_analog_reg8(&adapter->hw, ++ IXGBE_ATLAS_PDN_LPBK, atlas); ++ ++ ixgbe_read_analog_reg8(&adapter->hw, ++ IXGBE_ATLAS_PDN_10G, &atlas); ++ atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL; ++ ixgbe_write_analog_reg8(&adapter->hw, ++ IXGBE_ATLAS_PDN_10G, atlas); ++ ++ ixgbe_read_analog_reg8(&adapter->hw, ++ IXGBE_ATLAS_PDN_1G, &atlas); ++ atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL; ++ ixgbe_write_analog_reg8(&adapter->hw, ++ IXGBE_ATLAS_PDN_1G, atlas); ++ ++ ixgbe_read_analog_reg8(&adapter->hw, ++ IXGBE_ATLAS_PDN_AN, &atlas); ++ atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL; ++ ixgbe_write_analog_reg8(&adapter->hw, ++ IXGBE_ATLAS_PDN_AN, atlas); ++ } ++ ++ return 0; ++} ++ ++static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter) ++{ ++ u32 reg_data; ++ ++ reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0); ++ reg_data &= ~IXGBE_HLREG0_LPBK; ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data); ++} ++ ++static void ixgbe_create_lbtest_frame(struct sk_buff *skb, ++ unsigned int frame_size) ++{ ++ memset(skb->data, 0xFF, frame_size); ++ frame_size &= ~1; ++ memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); ++ memset(&skb->data[frame_size / 2 + 10], 0xBE, 1); ++ memset(&skb->data[frame_size / 2 + 12], 0xAF, 1); ++} ++ ++static int ixgbe_check_lbtest_frame(struct sk_buff *skb, ++ unsigned int frame_size) ++{ ++ frame_size &= ~1; ++ if (*(skb->data + 3) == 0xFF) { ++ if ((*(skb->data + frame_size / 2 + 10) == 0xBE) && ++ (*(skb->data + frame_size / 2 + 12) == 0xAF)) { ++ return 0; ++ } ++ } ++ return 13; ++} ++ ++static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring, ++ struct ixgbe_ring *tx_ring, ++ unsigned int size) ++{ ++ union ixgbe_adv_rx_desc *rx_desc; ++ struct ixgbe_rx_buffer *rx_buffer_info; ++ struct ixgbe_tx_buffer *tx_buffer_info; ++ const int bufsz = rx_ring->rx_buf_len; ++ u32 staterr; ++ u16 rx_ntc, tx_ntc, count = 0; ++ ++ /* initialize next to clean and descriptor values */ ++ rx_ntc = rx_ring->next_to_clean; ++ tx_ntc = tx_ring->next_to_clean; ++ rx_desc = IXGBE_RX_DESC_ADV(rx_ring, rx_ntc); ++ staterr = le32_to_cpu(rx_desc->wb.upper.status_error); ++ ++ while (staterr & IXGBE_RXD_STAT_DD) { ++ /* check Rx buffer */ ++ rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc]; ++ ++ /* unmap Rx buffer, will be remapped by alloc_rx_buffers */ ++ dma_unmap_single(rx_ring->dev, ++ rx_buffer_info->dma, ++ bufsz, ++ DMA_FROM_DEVICE); ++ rx_buffer_info->dma = 0; ++ ++ /* verify contents of skb */ ++ if (!ixgbe_check_lbtest_frame(rx_buffer_info->skb, size)) ++ count++; ++ ++ /* unmap buffer on Tx side */ ++ 
tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc]; ++ ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); ++ ++ /* increment Rx/Tx next to clean counters */ ++ rx_ntc++; ++ if (rx_ntc == rx_ring->count) ++ rx_ntc = 0; ++ tx_ntc++; ++ if (tx_ntc == tx_ring->count) ++ tx_ntc = 0; ++ ++ /* fetch next descriptor */ ++ rx_desc = IXGBE_RX_DESC_ADV(rx_ring, rx_ntc); ++ staterr = le32_to_cpu(rx_desc->wb.upper.status_error); ++ } ++ ++ /* re-map buffers to ring, store next to clean values */ ++ ixgbe_alloc_rx_buffers(rx_ring, count); ++ rx_ring->next_to_clean = rx_ntc; ++ tx_ring->next_to_clean = tx_ntc; ++ ++ return count; ++} ++ ++static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter) ++{ ++ struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; ++ struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; ++ int i, j, lc, good_cnt, ret_val = 0; ++ unsigned int size = 1024; ++ netdev_tx_t tx_ret_val; ++ struct sk_buff *skb; ++ ++ /* allocate test skb */ ++ skb = alloc_skb(size, GFP_KERNEL); ++ if (!skb) ++ return 11; ++ ++ /* place data into test skb */ ++ ixgbe_create_lbtest_frame(skb, size); ++ skb_put(skb, size); ++ ++ /* ++ * Calculate the loop count based on the largest descriptor ring ++ * The idea is to wrap the largest ring a number of times using 64 ++ * send/receive pairs during each loop ++ */ ++ ++ if (rx_ring->count <= tx_ring->count) ++ lc = ((tx_ring->count / 64) * 2) + 1; ++ else ++ lc = ((rx_ring->count / 64) * 2) + 1; ++ ++ for (j = 0; j <= lc; j++) { ++ /* reset count of good packets */ ++ good_cnt = 0; ++ ++ /* place 64 packets on the transmit queue*/ ++ for (i = 0; i < 64; i++) { ++ skb_get(skb); ++ tx_ret_val = ixgbe_xmit_frame_ring(skb, ++ adapter, ++ tx_ring); ++ if (tx_ret_val == NETDEV_TX_OK) ++ good_cnt++; ++ } ++ ++ if (good_cnt != 64) { ++ ret_val = 12; ++ break; ++ } ++ ++ /* allow 200 milliseconds for packets to go from Tx to Rx */ ++ msleep(200); ++ ++ good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size); ++ if (good_cnt != 64) { ++ ret_val = 13; ++ break; ++ } ++ } ++ ++ /* free the original skb */ ++ kfree_skb(skb); ++ ++ return ret_val; ++} ++ ++static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data) ++{ ++ *data = ixgbe_setup_desc_rings(adapter); ++ if (*data) ++ goto out; ++ *data = ixgbe_setup_loopback_test(adapter); ++ if (*data) ++ goto err_loopback; ++ *data = ixgbe_run_loopback_test(adapter); ++ ixgbe_loopback_cleanup(adapter); ++ ++err_loopback: ++ ixgbe_free_desc_rings(adapter); ++out: ++ return *data; ++} ++ ++#ifndef HAVE_ETHTOOL_GET_SSET_COUNT ++static int ixgbe_diag_test_count(struct net_device *netdev) ++{ ++ return IXGBE_TEST_LEN; ++} ++ ++#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ ++static void ixgbe_diag_test(struct net_device *netdev, ++ struct ethtool_test *eth_test, u64 *data) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ bool if_running = netif_running(netdev); ++ ++ set_bit(__IXGBE_TESTING, &adapter->state); ++ if (eth_test->flags == ETH_TEST_FL_OFFLINE) { ++ /* Offline tests */ ++ ++ DPRINTK(HW, INFO, "offline testing starting\n"); ++ ++ /* Link test performed before hardware reset so autoneg doesn't ++ * interfere with test result */ ++ if (ixgbe_link_test(adapter, &data[4])) ++ eth_test->flags |= ETH_TEST_FL_FAILED; ++ ++ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { ++ int i; ++ for (i = 0; i < adapter->num_vfs; i++) { ++ if (adapter->vfinfo[i].clear_to_send) { ++ DPRINTK(DRV, WARNING, "Please take " ++ "active VFS offline and " ++ "restart the adapter before " ++ "running NIC 
diagnostics\n"); ++ data[0] = 1; ++ data[1] = 1; ++ data[2] = 1; ++ data[3] = 1; ++ clear_bit(__IXGBE_TESTING, ++ &adapter->state); ++ goto skip_ol_tests; ++ } ++ } ++ } ++ ++ if (if_running) ++ /* indicate we're in test mode */ ++ dev_close(netdev); ++ else ++ ixgbe_reset(adapter); ++ ++ DPRINTK(HW, INFO, "register testing starting\n"); ++ if (ixgbe_reg_test(adapter, &data[0])) ++ eth_test->flags |= ETH_TEST_FL_FAILED; ++ ++ ixgbe_reset(adapter); ++ DPRINTK(HW, INFO, "eeprom testing starting\n"); ++ if (ixgbe_eeprom_test(adapter, &data[1])) ++ eth_test->flags |= ETH_TEST_FL_FAILED; ++ ++ ixgbe_reset(adapter); ++ DPRINTK(HW, INFO, "interrupt testing starting\n"); ++ if (ixgbe_intr_test(adapter, &data[2])) ++ eth_test->flags |= ETH_TEST_FL_FAILED; ++ ++ /* If SRIOV or VMDq is enabled then skip MAC ++ * loopback diagnostic. */ ++ if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED | ++ IXGBE_FLAG_VMDQ_ENABLED)) { ++ DPRINTK(HW, INFO, "skip MAC loopback diagnostic in VT " ++ "mode\n"); ++ data[3] = 0; ++ goto skip_loopback; ++ } ++ ++ ixgbe_reset(adapter); ++ DPRINTK(HW, INFO, "loopback testing starting\n"); ++ if (ixgbe_loopback_test(adapter, &data[3])) ++ eth_test->flags |= ETH_TEST_FL_FAILED; ++ ++skip_loopback: ++ ixgbe_reset(adapter); ++ ++ clear_bit(__IXGBE_TESTING, &adapter->state); ++ if (if_running) ++ dev_open(netdev); ++ } else { ++ DPRINTK(HW, INFO, "online testing starting\n"); ++ /* Online tests */ ++ if (ixgbe_link_test(adapter, &data[4])) ++ eth_test->flags |= ETH_TEST_FL_FAILED; ++ ++ /* Online tests aren't run; pass by default */ ++ data[0] = 0; ++ data[1] = 0; ++ data[2] = 0; ++ data[3] = 0; ++ ++ clear_bit(__IXGBE_TESTING, &adapter->state); ++ } ++skip_ol_tests: ++ msleep_interruptible(4 * 1000); ++} ++ ++static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter, ++ struct ethtool_wolinfo *wol) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ int retval = 1; ++ ++ switch(hw->device_id) { ++ case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: ++ /* All except this subdevice support WOL */ ++ if (hw->subsystem_device_id == ++ IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) { ++ wol->supported = 0; ++ break; ++ } ++ case IXGBE_DEV_ID_82599_KX4: ++ retval = 0; ++ break; ++ default: ++ wol->supported = 0; ++ } ++ ++ return retval; ++} ++ ++static void ixgbe_get_wol(struct net_device *netdev, ++ struct ethtool_wolinfo *wol) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ ++ wol->supported = WAKE_UCAST | WAKE_MCAST | ++ WAKE_BCAST | WAKE_MAGIC; ++ wol->wolopts = 0; ++ ++ if (ixgbe_wol_exclusion(adapter, wol) || ++ !device_can_wakeup(&adapter->pdev->dev)) ++ return; ++ ++ if (adapter->wol & IXGBE_WUFC_EX) ++ wol->wolopts |= WAKE_UCAST; ++ if (adapter->wol & IXGBE_WUFC_MC) ++ wol->wolopts |= WAKE_MCAST; ++ if (adapter->wol & IXGBE_WUFC_BC) ++ wol->wolopts |= WAKE_BCAST; ++ if (adapter->wol & IXGBE_WUFC_MAG) ++ wol->wolopts |= WAKE_MAGIC; ++} ++ ++static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ ++ if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) ++ return -EOPNOTSUPP; ++ ++ if (ixgbe_wol_exclusion(adapter, wol)) ++ return wol->wolopts ? 
-EOPNOTSUPP : 0; ++ ++ adapter->wol = 0; ++ ++ if (wol->wolopts & WAKE_UCAST) ++ adapter->wol |= IXGBE_WUFC_EX; ++ if (wol->wolopts & WAKE_MCAST) ++ adapter->wol |= IXGBE_WUFC_MC; ++ if (wol->wolopts & WAKE_BCAST) ++ adapter->wol |= IXGBE_WUFC_BC; ++ if (wol->wolopts & WAKE_MAGIC) ++ adapter->wol |= IXGBE_WUFC_MAG; ++ ++ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); ++ ++ return 0; ++} ++ ++static int ixgbe_nway_reset(struct net_device *netdev) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ ++ if (netif_running(netdev)) ++ ixgbe_reinit_locked(adapter); ++ ++ return 0; ++} ++ ++static int ixgbe_phys_id(struct net_device *netdev, u32 data) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ u32 led_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_LEDCTL); ++ u32 i; ++ ++ if (!data || data > 300) ++ data = 300; ++ ++ for (i = 0; i < (data * 1000); i += 400) { ++ ixgbe_led_on(&adapter->hw, IXGBE_LED_ON); ++ msleep_interruptible(200); ++ ixgbe_led_off(&adapter->hw, IXGBE_LED_ON); ++ msleep_interruptible(200); ++ } ++ ++ /* Restore LED settings */ ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, led_reg); ++ ++ return 0; ++} ++ ++static int ixgbe_get_coalesce(struct net_device *netdev, ++ struct ethtool_coalesce *ec) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ ++ ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0]->work_limit; ++#ifndef CONFIG_IXGBE_NAPI ++ ec->rx_max_coalesced_frames_irq = adapter->rx_ring[0]->work_limit; ++#endif ++ ++ /* only valid if in constant ITR mode */ ++ switch (adapter->rx_itr_setting) { ++ case 0: ++ /* throttling disabled */ ++ ec->rx_coalesce_usecs = 0; ++ break; ++ case 1: ++ /* dynamic ITR mode */ ++ ec->rx_coalesce_usecs = 1; ++ break; ++ default: ++ /* fixed interrupt rate mode */ ++ ec->rx_coalesce_usecs = 1000000/adapter->rx_eitr_param; ++ break; ++ } ++ ++ /* if in mixed tx/rx queues per vector mode, report only rx settings */ ++ if (adapter->q_vector[0]->txr_count && adapter->q_vector[0]->rxr_count) ++ return 0; ++ ++ /* only valid if in constant ITR mode */ ++ switch (adapter->tx_itr_setting) { ++ case 0: ++ /* throttling disabled */ ++ ec->tx_coalesce_usecs = 0; ++ break; ++ case 1: ++ /* dynamic ITR mode */ ++ ec->tx_coalesce_usecs = 1; ++ break; ++ default: ++ ec->tx_coalesce_usecs = 1000000/adapter->tx_eitr_param; ++ break; ++ } ++ ++ return 0; ++} ++ ++/* ++ * this function must be called before setting the new value of ++ * rx_itr_setting ++ */ ++static bool ixgbe_reenable_rsc(struct ixgbe_adapter *adapter, ++ struct ethtool_coalesce *ec) ++{ ++ /* check the old value and enable RSC if necessary */ ++ if ((adapter->rx_itr_setting == 0) && ++ (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) { ++ adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; ++ adapter->netdev->features |= NETIF_F_LRO; ++ DPRINTK(PROBE, INFO, "rx-usecs set to %d, re-enabling RSC\n", ++ ec->rx_coalesce_usecs); ++ return true; ++ } ++ return false; ++} ++ ++static int ixgbe_set_coalesce(struct net_device *netdev, ++ struct ethtool_coalesce *ec) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ struct ixgbe_q_vector *q_vector; ++ int i; ++ bool need_reset = false; ++ ++ /* don't accept tx specific changes if we've got mixed RxTx vectors */ ++ if (adapter->q_vector[0]->txr_count && adapter->q_vector[0]->rxr_count ++ && ec->tx_coalesce_usecs) ++ return -EINVAL; ++ ++ if (ec->tx_max_coalesced_frames_irq) ++ adapter->tx_ring[0]->work_limit = ec->tx_max_coalesced_frames_irq; ++ ++#ifndef CONFIG_IXGBE_NAPI ++ if 
(ec->rx_max_coalesced_frames_irq) ++ adapter->rx_ring[0]->work_limit = ec->rx_max_coalesced_frames_irq; ++ ++#endif ++ if (ec->rx_coalesce_usecs > 1) { ++ u32 max_int; ++ if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) ++ max_int = IXGBE_MAX_RSC_INT_RATE; ++ else ++ max_int = IXGBE_MAX_INT_RATE; ++ ++ /* check the limits */ ++ if ((1000000/ec->rx_coalesce_usecs > max_int) || ++ (1000000/ec->rx_coalesce_usecs < IXGBE_MIN_INT_RATE)) ++ return -EINVAL; ++ ++ /* check the old value and enable RSC if necessary */ ++ need_reset = ixgbe_reenable_rsc(adapter, ec); ++ ++ /* store the value in ints/second */ ++ adapter->rx_eitr_param = 1000000/ec->rx_coalesce_usecs; ++ ++ /* static value of interrupt rate */ ++ adapter->rx_itr_setting = adapter->rx_eitr_param; ++ /* clear the lower bit as its used for dynamic state */ ++ adapter->rx_itr_setting &= ~1; ++ } else if (ec->rx_coalesce_usecs == 1) { ++ /* check the old value and enable RSC if necessary */ ++ need_reset = ixgbe_reenable_rsc(adapter, ec); ++ ++ /* 1 means dynamic mode */ ++ adapter->rx_eitr_param = 20000; ++ adapter->rx_itr_setting = 1; ++ } else { ++ /* ++ * any other value means disable eitr, which is best ++ * served by setting the interrupt rate very high ++ */ ++ adapter->rx_eitr_param = IXGBE_MAX_INT_RATE; ++ adapter->rx_itr_setting = 0; ++ ++ /* ++ * if hardware RSC is enabled, disable it when ++ * setting low latency mode, to avoid errata, assuming ++ * that when the user set low latency mode they want ++ * it at the cost of anything else ++ */ ++ if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { ++ adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; ++ netdev->features &= ~NETIF_F_LRO; ++ DPRINTK(PROBE, INFO, ++ "rx-usecs set to 0, disabling RSC\n"); ++ need_reset = true; ++ } ++ } ++ ++ if (ec->tx_coalesce_usecs > 1) { ++ /* ++ * don't have to worry about max_int as above because ++ * tx vectors don't do hardware RSC (an rx function) ++ */ ++ /* check the limits */ ++ if ((1000000/ec->tx_coalesce_usecs > IXGBE_MAX_INT_RATE) || ++ (1000000/ec->tx_coalesce_usecs < IXGBE_MIN_INT_RATE)) ++ return -EINVAL; ++ ++ /* store the value in ints/second */ ++ adapter->tx_eitr_param = 1000000/ec->tx_coalesce_usecs; ++ ++ /* static value of interrupt rate */ ++ adapter->tx_itr_setting = adapter->tx_eitr_param; ++ ++ /* clear the lower bit as its used for dynamic state */ ++ adapter->tx_itr_setting &= ~1; ++ } else if (ec->tx_coalesce_usecs == 1) { ++ /* 1 means dynamic mode */ ++ adapter->tx_eitr_param = 10000; ++ adapter->tx_itr_setting = 1; ++ } else { ++ adapter->tx_eitr_param = IXGBE_MAX_INT_RATE; ++ adapter->tx_itr_setting = 0; ++ } ++ ++ /* MSI/MSIx Interrupt Mode */ ++ if (adapter->flags & ++ (IXGBE_FLAG_MSIX_ENABLED | IXGBE_FLAG_MSI_ENABLED)) { ++ int num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; ++ for (i = 0; i < num_vectors; i++) { ++ q_vector = adapter->q_vector[i]; ++ if (q_vector->txr_count && !q_vector->rxr_count) ++ /* tx only */ ++ q_vector->eitr = adapter->tx_eitr_param; ++ else ++ /* rx only or mixed */ ++ q_vector->eitr = adapter->rx_eitr_param; ++ ixgbe_write_eitr(q_vector); ++ } ++ /* Legacy Interrupt Mode */ ++ } else { ++ q_vector = adapter->q_vector[0]; ++ q_vector->eitr = adapter->rx_eitr_param; ++ ixgbe_write_eitr(q_vector); ++ } ++ ++ /* ++ * do reset here at the end to make sure EITR==0 case is handled ++ * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings ++ * also locks in RSC enable/disable which requires reset ++ */ ++ if (need_reset) { ++ if (netif_running(netdev)) ++ 
ixgbe_reinit_locked(adapter); ++ else ++ ixgbe_reset(adapter); ++ } ++ ++ return 0; ++} ++ ++#ifdef ETHTOOL_GFLAGS ++static int ixgbe_set_flags(struct net_device *netdev, u32 data) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ bool need_reset = false; ++ int rc; ++ ++ rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_LRO | ETH_FLAG_NTUPLE); ++ if (rc) ++ return rc; ++ ++ /* if state changes we need to update adapter->flags and reset */ ++ if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) { ++ /* ++ * cast both to bool and verify if they are set the same ++ * but only enable RSC if itr is non-zero, as ++ * itr=0 and RSC are mutually exclusive ++ */ ++ if (((!!(data & ETH_FLAG_LRO)) != ++ (!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) && ++ adapter->rx_itr_setting && ++ (adapter->rx_itr_setting <= IXGBE_MAX_RSC_INT_RATE)) { ++ adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED; ++ switch (adapter->hw.mac.type) { ++ case ixgbe_mac_82599EB: ++ need_reset = true; ++ goto skip_lro; ++ default: ++ break; ++ } ++ } else if (!adapter->rx_itr_setting || ++ (adapter->rx_itr_setting > IXGBE_MAX_RSC_INT_RATE)) { ++ netdev->features &= ~NETIF_F_LRO; ++ goto skip_lro; ++ } ++ } ++#ifndef IXGBE_NO_LRO ++ /* ++ * Cast both to bool and verify if they are set the same ++ * and don't set LRO if RSC enabled. ++ */ ++ if (((!!(data & ETH_FLAG_LRO)) != ++ (!!(adapter->flags2 & IXGBE_FLAG2_SWLRO_ENABLED))) && ++ (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) { ++ int i; ++ adapter->flags2 ^= IXGBE_FLAG2_SWLRO_ENABLED; ++ for (i = 0; i < adapter->num_rx_queues; i++) { ++ if (adapter->flags2 & IXGBE_FLAG2_SWLRO_ENABLED) ++ set_ring_lro_enabled(adapter->rx_ring[i]); ++ else ++ clear_ring_lro_enabled(adapter->rx_ring[i]); ++ } ++ } ++ ++#endif /* IXGBE_NO_LRO */ ++skip_lro: ++ ++#ifdef NETIF_F_NTUPLE ++ /* ++ * Check if Flow Director n-tuple support was enabled or disabled. If ++ * the state changed, we need to reset. ++ */ ++ if ((adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) && ++ (!(data & ETH_FLAG_NTUPLE))) { ++ /* turn off Flow Director perfect, set hash and reset */ ++ int i; ++ adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; ++ adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; ++ for (i = 0; i < adapter->num_tx_queues; i++) ++ adapter->tx_ring[i]->atr_sample_rate = 20; ++ need_reset = true; ++ } else if ((!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) && ++ (data & ETH_FLAG_NTUPLE)) { ++ /* turn off Flow Director hash, enable perfect and reset */ ++ int i; ++ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; ++ adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; ++ for (i = 0; i < adapter->num_tx_queues; i++) ++ adapter->tx_ring[i]->atr_sample_rate = 0; ++ need_reset = true; ++ } else { ++ /* no state change */ ++ } ++ ++#endif /* NETIF_F_NTUPLE */ ++ if (need_reset) { ++ if (netif_running(netdev)) ++ ixgbe_reinit_locked(adapter); ++ else ++ ixgbe_reset(adapter); ++ } ++ ++ return 0; ++} ++#endif /* ETHTOOL_GFLAGS */ ++ ++#ifdef NETIF_F_NTUPLE ++static int ixgbe_set_rx_ntuple(struct net_device *dev, ++ struct ethtool_rx_ntuple *cmd) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(dev); ++ struct ethtool_rx_ntuple_flow_spec fs = cmd->fs; ++ struct ixgbe_atr_input input_struct; ++ struct ixgbe_atr_input_masks input_masks; ++ int target_queue; ++ ++ if (adapter->hw.mac.type == ixgbe_mac_82598EB) ++ return -EOPNOTSUPP; ++ ++ /* ++ * Don't allow programming if the action is a queue greater than ++ * the number of online Tx queues. 
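++ * The special DROP action is still accepted here; it is steered to the last RX queue below.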
++ */ ++ if ((fs.action >= adapter->num_tx_queues) || ++ (fs.action < ETHTOOL_RXNTUPLE_ACTION_DROP)) ++ return -EINVAL; ++ ++ memset(&input_struct, 0, sizeof(struct ixgbe_atr_input)); ++ memset(&input_masks, 0, sizeof(struct ixgbe_atr_input_masks)); ++ ++ input_masks.src_ip_mask = fs.m_u.tcp_ip4_spec.ip4src; ++ input_masks.dst_ip_mask = fs.m_u.tcp_ip4_spec.ip4dst; ++ input_masks.src_port_mask = fs.m_u.tcp_ip4_spec.psrc; ++ input_masks.dst_port_mask = fs.m_u.tcp_ip4_spec.pdst; ++ input_masks.vlan_id_mask = fs.vlan_tag_mask; ++ /* only use the lowest 2 bytes for flex bytes */ ++ input_masks.data_mask = (fs.data_mask & 0xffff); ++ ++ switch (fs.flow_type) { ++ case TCP_V4_FLOW: ++ ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_TCP); ++ break; ++ case UDP_V4_FLOW: ++ ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_UDP); ++ break; ++ case SCTP_V4_FLOW: ++ ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_SCTP); ++ break; ++ default: ++ return -1; ++ } ++ ++ /* Mask bits from the inputs based on user-supplied mask */ ++ ixgbe_atr_set_src_ipv4_82599(&input_struct, ++ (fs.h_u.tcp_ip4_spec.ip4src & ~fs.m_u.tcp_ip4_spec.ip4src)); ++ ixgbe_atr_set_dst_ipv4_82599(&input_struct, ++ (fs.h_u.tcp_ip4_spec.ip4dst & ~fs.m_u.tcp_ip4_spec.ip4dst)); ++ /* 82599 expects these to be byte-swapped for perfect filtering */ ++ ixgbe_atr_set_src_port_82599(&input_struct, ++ ((ntohs(fs.h_u.tcp_ip4_spec.psrc)) & ~fs.m_u.tcp_ip4_spec.psrc)); ++ ixgbe_atr_set_dst_port_82599(&input_struct, ++ ((ntohs(fs.h_u.tcp_ip4_spec.pdst)) & ~fs.m_u.tcp_ip4_spec.pdst)); ++ ++ /* VLAN and Flex bytes are either completely masked or not */ ++ if (!fs.vlan_tag_mask) ++ ixgbe_atr_set_vlan_id_82599(&input_struct, fs.vlan_tag); ++ ++ if (!input_masks.data_mask) ++ /* make sure we only use the first 2 bytes of user data */ ++ ixgbe_atr_set_flex_byte_82599(&input_struct, ++ (fs.data & 0xffff)); ++ ++ /* determine if we need to drop or route the packet */ ++ if (fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP) ++ target_queue = MAX_RX_QUEUES - 1; ++ else ++ target_queue = fs.action; ++ ++ spin_lock(&adapter->fdir_perfect_lock); ++ ixgbe_fdir_add_perfect_filter_82599(&adapter->hw, &input_struct, ++ &input_masks, 0, target_queue); ++ spin_unlock(&adapter->fdir_perfect_lock); ++ ++ return 0; ++} ++ ++#endif /* NETIF_F_NTUPLE */ ++static struct ethtool_ops ixgbe_ethtool_ops = { ++ .get_settings = ixgbe_get_settings, ++ .set_settings = ixgbe_set_settings, ++ .get_drvinfo = ixgbe_get_drvinfo, ++ .get_regs_len = ixgbe_get_regs_len, ++ .get_regs = ixgbe_get_regs, ++ .get_wol = ixgbe_get_wol, ++ .set_wol = ixgbe_set_wol, ++ .nway_reset = ixgbe_nway_reset, ++ .get_link = ethtool_op_get_link, ++ .get_eeprom_len = ixgbe_get_eeprom_len, ++ .get_eeprom = ixgbe_get_eeprom, ++ .set_eeprom = ixgbe_set_eeprom, ++ .get_ringparam = ixgbe_get_ringparam, ++ .set_ringparam = ixgbe_set_ringparam, ++ .get_pauseparam = ixgbe_get_pauseparam, ++ .set_pauseparam = ixgbe_set_pauseparam, ++ .get_rx_csum = ixgbe_get_rx_csum, ++ .set_rx_csum = ixgbe_set_rx_csum, ++ .get_tx_csum = ixgbe_get_tx_csum, ++ .set_tx_csum = ixgbe_set_tx_csum, ++ .get_sg = ethtool_op_get_sg, ++ .set_sg = ethtool_op_set_sg, ++ .get_msglevel = ixgbe_get_msglevel, ++ .set_msglevel = ixgbe_set_msglevel, ++#ifdef NETIF_F_TSO ++ .get_tso = ethtool_op_get_tso, ++ .set_tso = ixgbe_set_tso, ++#endif ++#ifndef HAVE_ETHTOOL_GET_SSET_COUNT ++ .self_test_count = ixgbe_diag_test_count, ++#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ ++ .self_test = ixgbe_diag_test, ++ .get_strings = 
ixgbe_get_strings, ++ .phys_id = ixgbe_phys_id, ++#ifndef HAVE_ETHTOOL_GET_SSET_COUNT ++ .get_stats_count = ixgbe_get_stats_count, ++#else /* HAVE_ETHTOOL_GET_SSET_COUNT */ ++ .get_sset_count = ixgbe_get_sset_count, ++#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ ++ .get_ethtool_stats = ixgbe_get_ethtool_stats, ++#ifdef ETHTOOL_GPERMADDR ++ .get_perm_addr = ethtool_op_get_perm_addr, ++#endif ++ .get_coalesce = ixgbe_get_coalesce, ++ .set_coalesce = ixgbe_set_coalesce, ++#ifdef ETHTOOL_GFLAGS ++ .get_flags = ethtool_op_get_flags, ++ .set_flags = ixgbe_set_flags, ++#endif ++#ifdef NETIF_F_NTUPLE ++ .set_rx_ntuple = ixgbe_set_rx_ntuple, ++#endif /* NETIF_F_NTUPLE */ ++}; ++ ++void ixgbe_set_ethtool_ops(struct net_device *netdev) ++{ ++ SET_ETHTOOL_OPS(netdev, &ixgbe_ethtool_ops); ++} ++#endif /* SIOCETHTOOL */ +diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_fcoe.c linux-2.6.22-50/drivers/net/ixgbe/ixgbe_fcoe.c +--- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_fcoe.c 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_fcoe.c 2010-08-25 17:56:26.000000000 -0400 +@@ -0,0 +1,768 @@ ++/******************************************************************************* ++ ++ Intel 10 Gigabit PCI Express Linux driver ++ Copyright(c) 1999 - 2010 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ You should have received a copy of the GNU General Public License along with ++ this program; if not, write to the Free Software Foundation, Inc., ++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#include "ixgbe.h" ++ ++ ++#ifdef IXGBE_FCOE ++#ifdef CONFIG_DCB ++#include "ixgbe_dcb_82599.h" ++#endif /* CONFIG_DCB */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/** ++ * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type ++ * @rx_desc: advanced rx descriptor ++ * ++ * Returns : true if it is FCoE pkt ++ */ ++static inline bool ixgbe_rx_is_fcoe(union ixgbe_adv_rx_desc *rx_desc) ++{ ++ u16 p; ++ ++ p = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info); ++ if (p & IXGBE_RXDADV_PKTTYPE_ETQF) { ++ p &= IXGBE_RXDADV_PKTTYPE_ETQF_MASK; ++ p >>= IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT; ++ return p == IXGBE_ETQF_FILTER_FCOE; ++ } ++ return false; ++} ++ ++/** ++ * ixgbe_fcoe_clear_ddp - clear the given ddp context ++ * @ddp - ptr to the ixgbe_fcoe_ddp ++ * ++ * Returns : none ++ * ++ */ ++static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp) ++{ ++ ddp->len = 0; ++ ddp->err = 0; ++ ddp->udl = NULL; ++ ddp->udp = 0UL; ++ ddp->sgl = NULL; ++ ddp->sgc = 0; ++} ++ ++/** ++ * ixgbe_fcoe_ddp_put - free the ddp context for a given xid ++ * @netdev: the corresponding net_device ++ * @xid: the xid that corresponding ddp will be freed ++ * ++ * This is the implementation of net_device_ops.ndo_fcoe_ddp_done ++ * and it is expected to be called by ULD, i.e., FCP layer of libfc ++ * to release the corresponding ddp context when the I/O is done. ++ * ++ * Returns : data length already ddp-ed in bytes ++ */ ++int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid) ++{ ++ int len = 0; ++ struct ixgbe_fcoe *fcoe; ++ struct ixgbe_adapter *adapter; ++ struct ixgbe_fcoe_ddp *ddp; ++ ++ if (!netdev) ++ goto out_ddp_put; ++ ++ if (xid >= IXGBE_FCOE_DDP_MAX) ++ goto out_ddp_put; ++ ++ adapter = netdev_priv(netdev); ++ fcoe = &adapter->fcoe; ++ ddp = &fcoe->ddp[xid]; ++ if (!ddp->udl) ++ goto out_ddp_put; ++ ++ len = ddp->len; ++ /* if there an error, force to invalidate ddp context */ ++ if (ddp->err) { ++ spin_lock_bh(&fcoe->lock); ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLT, 0); ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLTRW, ++ (xid | IXGBE_FCFLTRW_WE)); ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCBUFF, 0); ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW, ++ (xid | IXGBE_FCDMARW_WE)); ++ spin_unlock_bh(&fcoe->lock); ++ } ++ if (ddp->sgl) ++ pci_unmap_sg(adapter->pdev, ddp->sgl, ddp->sgc, ++ DMA_FROM_DEVICE); ++ pci_pool_free(fcoe->pool, ddp->udl, ddp->udp); ++ ixgbe_fcoe_clear_ddp(ddp); ++ ++out_ddp_put: ++ return len; ++} ++ ++/** ++ * ixgbe_fcoe_ddp_get - called to set up ddp context ++ * @netdev: the corresponding net_device ++ * @xid: the exchange id requesting ddp ++ * @sgl: the scatter-gather list for this request ++ * @sgc: the number of scatter-gather items ++ * ++ * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup ++ * and is expected to be called from ULD, e.g., FCP layer of libfc ++ * to set up ddp for the corresponding xid of the given sglist for ++ * the corresponding I/O. 
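++ * The scatter-gather list is packed into 4KB-aligned chunks before being ++ * written to the per-xid user descriptor list (udl).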
++ * ++ * Returns : 1 for success and 0 for no ddp ++ */ ++int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, ++ struct scatterlist *sgl, unsigned int sgc) ++{ ++ struct ixgbe_adapter *adapter; ++ struct ixgbe_hw *hw; ++ struct ixgbe_fcoe *fcoe; ++ struct ixgbe_fcoe_ddp *ddp; ++ struct scatterlist *sg; ++ unsigned int i, j, dmacount; ++ unsigned int len; ++ static const unsigned int bufflen = 4096; ++ unsigned int firstoff = 0; ++ unsigned int lastsize; ++ unsigned int thisoff = 0; ++ unsigned int thislen = 0; ++ u32 fcbuff, fcdmarw, fcfltrw; ++ dma_addr_t addr; ++ ++ if (!netdev || !sgl || !sgc) ++ return 0; ++ ++ adapter = netdev_priv(netdev); ++ if (xid >= IXGBE_FCOE_DDP_MAX) { ++ DPRINTK(DRV, WARNING, "xid=0x%x out-of-range\n", xid); ++ return 0; ++ } ++ ++ fcoe = &adapter->fcoe; ++ if (!fcoe->pool) { ++ DPRINTK(DRV, WARNING, "xid=0x%x no ddp pool for fcoe\n", xid); ++ return 0; ++ } ++ ++ ddp = &fcoe->ddp[xid]; ++ if (ddp->sgl) { ++ DPRINTK(DRV, ERR, "xid 0x%x w/ non-null sgl=%p nents=%d\n", ++ xid, ddp->sgl, ddp->sgc); ++ return 0; ++ } ++ ixgbe_fcoe_clear_ddp(ddp); ++ ++ /* setup dma from scsi command sgl */ ++ dmacount = pci_map_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE); ++ if (dmacount == 0) { ++ DPRINTK(DRV, ERR, "xid 0x%x DMA map error\n", xid); ++ return 0; ++ } ++ ++ /* alloc the udl from our ddp pool */ ++ ddp->udl = pci_pool_alloc(fcoe->pool, GFP_ATOMIC, &ddp->udp); ++ if (!ddp->udl) { ++ DPRINTK(DRV, ERR, "failed allocated ddp context\n"); ++ goto out_noddp_unmap; ++ } ++ ddp->sgl = sgl; ++ ddp->sgc = sgc; ++ ++ j = 0; ++ for_each_sg(sgl, sg, dmacount, i) { ++ addr = sg_dma_address(sg); ++ len = sg_dma_len(sg); ++ while (len) { ++ /* max number of buffers allowed in one DDP context */ ++ if (j >= IXGBE_BUFFCNT_MAX) { ++ DPRINTK(DRV, ERR, "xid=%x:%d,%d,%d:addr=%llx " ++ "not enough descriptors\n", ++ xid, i, j, dmacount, (u64)addr); ++ goto out_noddp_free; ++ } ++ ++ /* get the offset of length of current buffer */ ++ thisoff = addr & ((dma_addr_t)bufflen - 1); ++ thislen = min((bufflen - thisoff), len); ++ /* ++ * all but the 1st buffer (j == 0) ++ * must be aligned on bufflen ++ */ ++ if ((j != 0) && (thisoff)) ++ goto out_noddp_free; ++ /* ++ * all but the last buffer ++ * ((i == (dmacount - 1)) && (thislen == len)) ++ * must end at bufflen ++ */ ++ if (((i != (dmacount - 1)) || (thislen != len)) ++ && ((thislen + thisoff) != bufflen)) ++ goto out_noddp_free; ++ ++ ddp->udl[j] = (u64)(addr - thisoff); ++ /* only the first buffer may have none-zero offset */ ++ if (j == 0) ++ firstoff = thisoff; ++ len -= thislen; ++ addr += thislen; ++ j++; ++ } ++ } ++ /* only the last buffer may have non-full bufflen */ ++ lastsize = thisoff + thislen; ++ ++ fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT); ++ fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT); ++ fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT); ++ fcbuff |= (IXGBE_FCBUFF_VALID); ++ ++ fcdmarw = xid; ++ fcdmarw |= IXGBE_FCDMARW_WE; ++ fcdmarw |= (lastsize << IXGBE_FCDMARW_LASTSIZE_SHIFT); ++ ++ fcfltrw = xid; ++ fcfltrw |= IXGBE_FCFLTRW_WE; ++ ++ /* program DMA context */ ++ hw = &adapter->hw; ++ spin_lock_bh(&fcoe->lock); ++ IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32)); ++ IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32); ++ IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff); ++ IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, fcdmarw); ++ /* program filter context */ ++ IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0); ++ IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID); ++ IXGBE_WRITE_REG(hw, 
IXGBE_FCFLTRW, fcfltrw); ++ spin_unlock_bh(&fcoe->lock); ++ ++ return 1; ++ ++out_noddp_free: ++ pci_pool_free(fcoe->pool, ddp->udl, ddp->udp); ++ ixgbe_fcoe_clear_ddp(ddp); ++ ++out_noddp_unmap: ++ pci_unmap_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE); ++ return 0; ++} ++ ++/** ++ * ixgbe_fcoe_ddp - check ddp status and mark it done ++ * @adapter: ixgbe adapter ++ * @rx_desc: advanced rx descriptor ++ * @skb: the skb holding the received data ++ * ++ * This checks ddp status. ++ * ++ * Returns : < 0 indicates an error or not a FCiE ddp, 0 indicates ++ * not passing the skb to ULD, > 0 indicates is the length of data ++ * being ddped. ++ */ ++int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, ++ union ixgbe_adv_rx_desc *rx_desc, ++ struct sk_buff *skb) ++{ ++ u16 xid; ++ u32 fctl; ++ u32 sterr, fceofe, fcerr, fcstat; ++ int rc = -EINVAL; ++ struct ixgbe_fcoe *fcoe; ++ struct ixgbe_fcoe_ddp *ddp; ++ struct fc_frame_header *fh; ++ ++ if (!ixgbe_rx_is_fcoe(rx_desc)) ++ goto ddp_out; ++ ++ skb->ip_summed = CHECKSUM_UNNECESSARY; ++ sterr = le32_to_cpu(rx_desc->wb.upper.status_error); ++ fcerr = (sterr & IXGBE_RXDADV_ERR_FCERR); ++ fceofe = (sterr & IXGBE_RXDADV_ERR_FCEOFE); ++ if (fcerr == IXGBE_FCERR_BADCRC) ++ skb->ip_summed = CHECKSUM_NONE; ++ ++ if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q)) ++ fh = (struct fc_frame_header *)(skb->data + ++ sizeof(struct vlan_hdr) + sizeof(struct fcoe_hdr)); ++ else ++ fh = (struct fc_frame_header *)(skb->data + ++ sizeof(struct fcoe_hdr)); ++ fctl = ntoh24(fh->fh_f_ctl); ++ if (fctl & FC_FC_EX_CTX) ++ xid = be16_to_cpu(fh->fh_ox_id); ++ else ++ xid = be16_to_cpu(fh->fh_rx_id); ++ ++ if (xid >= IXGBE_FCOE_DDP_MAX) ++ goto ddp_out; ++ ++ fcoe = &adapter->fcoe; ++ ddp = &fcoe->ddp[xid]; ++ if (!ddp->udl) ++ goto ddp_out; ++ ++ ddp->err = (fcerr | fceofe); ++ if (ddp->err) ++ goto ddp_out; ++ ++ fcstat = (sterr & IXGBE_RXDADV_STAT_FCSTAT); ++ if (fcstat) { ++ /* update length of DDPed data */ ++ ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); ++ /* unmap the sg list when FCP_RSP is received */ ++ if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_FCPRSP) { ++ pci_unmap_sg(adapter->pdev, ddp->sgl, ++ ddp->sgc, DMA_FROM_DEVICE); ++ ddp->sgl = NULL; ++ ddp->sgc = 0; ++ } ++ /* return 0 to bypass going to ULD for DDPed data */ ++ if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_DDP) ++ rc = 0; ++ else if (ddp->len) ++ rc = ddp->len; ++ } ++ ++ddp_out: ++ return rc; ++} ++ ++/** ++ * ixgbe_fso - ixgbe FCoE Sequence Offload (FSO) ++ * @tx_ring: tx desc ring ++ * @skb: associated skb ++ * @tx_flags: tx flags ++ * @hdr_len: hdr_len to be returned ++ * ++ * This sets up large send offload for FCoE ++ * ++ * Returns : 0 indicates no FSO, > 0 for FSO, < 0 for error ++ */ ++int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb, ++ u32 tx_flags, u8 *hdr_len) ++{ ++ struct fc_frame_header *fh; ++ u32 vlan_macip_lens; ++ u32 fcoe_sof_eof = 0; ++ u32 mss_l4len_idx; ++ u8 sof, eof; ++ ++#ifdef NETIF_F_FSO ++ if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type != SKB_GSO_FCOE) { ++ dev_err(tx_ring->dev, "Wrong gso type %d:expecting SKB_GSO_FCOE\n", ++ skb_shinfo(skb)->gso_type); ++ return -EINVAL; ++ } ++ ++#endif ++ /* resets the header to point fcoe/fc */ ++ skb_set_network_header(skb, skb->mac_len); ++ skb_set_transport_header(skb, skb->mac_len + ++ sizeof(struct fcoe_hdr)); ++ ++ /* sets up SOF and ORIS */ ++ sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof; ++ switch (sof) { ++ case FC_SOF_I2: ++ fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_ORIS; ++ break; ++ case FC_SOF_I3: ++ 
fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF | ++ IXGBE_ADVTXD_FCOEF_ORIS; ++ break; ++ case FC_SOF_N2: ++ break; ++ case FC_SOF_N3: ++ fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF; ++ break; ++ default: ++ dev_warn(tx_ring->dev, "unknown sof = 0x%x\n", sof); ++ return -EINVAL; ++ } ++ ++ /* the first byte of the last dword is EOF */ ++ skb_copy_bits(skb, skb->len - 4, &eof, 1); ++ /* sets up EOF and ORIE */ ++ switch (eof) { ++ case FC_EOF_N: ++ fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N; ++ break; ++ case FC_EOF_T: ++ /* lso needs ORIE */ ++ if (skb_is_gso(skb)) ++ fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N | ++ IXGBE_ADVTXD_FCOEF_ORIE; ++ else ++ fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_T; ++ break; ++ case FC_EOF_NI: ++ fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_NI; ++ break; ++ case FC_EOF_A: ++ fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A; ++ break; ++ default: ++ dev_warn(tx_ring->dev, "unknown eof = 0x%x\n", eof); ++ return -EINVAL; ++ } ++ ++ /* sets up PARINC indicating data offset */ ++ fh = (struct fc_frame_header *)skb_transport_header(skb); ++ if (fh->fh_f_ctl[2] & FC_FC_REL_OFF) ++ fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_PARINC; ++ ++ /* include trailer in headlen as it is replicated per frame */ ++ *hdr_len = sizeof(struct fcoe_crc_eof); ++ ++ /* hdr_len includes fc_hdr if FCoE lso is enabled */ ++ if (skb_is_gso(skb)) ++ *hdr_len += skb_transport_offset(skb) + ++ sizeof(struct fc_frame_header); ++ ++ /* mss_l4len_id: use 1 for FSO as TSO, no need for L4LEN */ ++ mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; ++ mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT; ++ ++ /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ ++ vlan_macip_lens = skb_transport_offset(skb) + ++ sizeof(struct fc_frame_header); ++ vlan_macip_lens |= (skb_transport_offset(skb) - 4) ++ << IXGBE_ADVTXD_MACLEN_SHIFT; ++ vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; ++ ++ /* write context desc */ ++ ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fcoe_sof_eof, ++ IXGBE_ADVTXT_TUCMD_FCOE, mss_l4len_idx); ++ ++ return skb_is_gso(skb); ++} ++ ++/** ++ * ixgbe_configure_fcoe - configures registers for fcoe at start ++ * @adapter: ptr to ixgbe adapter ++ * ++ * This sets up FCoE related registers ++ * ++ * Returns : none ++ */ ++void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) ++{ ++ int i, fcoe_q, fcoe_i; ++ struct ixgbe_hw *hw = &adapter->hw; ++ struct ixgbe_fcoe *fcoe = &adapter->fcoe; ++ struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; ++#ifdef CONFIG_DCB ++ u8 tc; ++ u32 up2tc; ++ ++#endif /* CONFIG_DCB */ ++ /* create the pool for ddp if not created yet */ ++ if (!fcoe->pool) { ++ /* allocate ddp pool */ ++ fcoe->pool = pci_pool_create("ixgbe_fcoe_ddp", ++ adapter->pdev, IXGBE_FCPTR_MAX, ++ IXGBE_FCPTR_ALIGN, PAGE_SIZE); ++ if (!fcoe->pool) ++ DPRINTK(DRV, ERR, ++ "failed to allocated FCoE DDP pool\n"); ++ ++ spin_lock_init(&fcoe->lock); ++ } ++ ++ /* Enable L2 eth type filter for FCoE */ ++ IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE), ++ (ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN)); ++ /* Enable L2 eth type filter for FIP */ ++ IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP), ++ (ETH_P_FIP | IXGBE_ETQF_FILTER_EN)); ++ if (adapter->ring_feature[RING_F_FCOE].indices) { ++ /* Use multiple rx queues for FCoE by redirection table */ ++ for (i = 0; i < IXGBE_FCRETA_SIZE; i++) { ++ fcoe_i = f->mask + i % f->indices; ++ fcoe_i &= IXGBE_FCRETA_ENTRY_MASK; ++ fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; ++ IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q); ++ } ++ 
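/* enable the redirection table and clear the single-queue FCoE filter mapping */ ++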
IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA); ++ IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0); ++ } else { ++ /* Use single rx queue for FCoE */ ++ fcoe_i = f->mask; ++ fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; ++ IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, 0); ++ IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), ++ IXGBE_ETQS_QUEUE_EN | ++ (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT)); ++ } ++ /* Enable L2 eth type filter for FIP */ ++ IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP), ++ (ETH_P_FIP | IXGBE_ETQF_FILTER_EN)); ++ /* send FIP frames to the first FCoE queue */ ++ fcoe_i = f->mask; ++ fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; ++ IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP), ++ IXGBE_ETQS_QUEUE_EN | ++ (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT)); ++ ++ IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, ++ IXGBE_FCRXCTRL_FCOELLI | ++ IXGBE_FCRXCTRL_FCCRCBO | ++ (FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT)); ++#ifdef CONFIG_DCB ++ ++ up2tc = IXGBE_READ_REG(&adapter->hw, IXGBE_RTTUP2TC); ++ for (i = 0; i < MAX_USER_PRIORITY; i++) { ++ tc = (u8)(up2tc >> (i * IXGBE_RTTUP2TC_UP_SHIFT)); ++ tc &= (MAX_TRAFFIC_CLASS - 1); ++ if (fcoe->tc == tc) { ++ fcoe->up = i; ++ break; ++ } ++ } ++#endif /* CONFIG_DCB */ ++} ++ ++/** ++ * ixgbe_cleanup_fcoe - release all fcoe ddp context resources ++ * @adapter : ixgbe adapter ++ * ++ * Cleans up outstanding ddp context resources ++ * ++ * Returns : none ++ */ ++void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter) ++{ ++ int i; ++ struct ixgbe_fcoe *fcoe = &adapter->fcoe; ++ ++ /* release ddp resource */ ++ if (fcoe->pool) { ++ for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++) ++ ixgbe_fcoe_ddp_put(adapter->netdev, i); ++ pci_pool_destroy(fcoe->pool); ++ fcoe->pool = NULL; ++ } ++} ++ ++#ifdef HAVE_NETDEV_OPS_FCOE_ENABLE ++/** ++ * ixgbe_fcoe_enable - turn on FCoE offload feature ++ * @netdev: the corresponding netdev ++ * ++ * Turns on FCoE offload feature in 82599. ++ * ++ * Returns : 0 indicates success or -EINVAL on failure ++ */ ++int ixgbe_fcoe_enable(struct net_device *netdev) ++{ ++ int rc = -EINVAL; ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ struct ixgbe_fcoe *fcoe = &adapter->fcoe; ++ ++ ++ if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE)) ++ goto out_enable; ++ ++ atomic_inc(&fcoe->refcnt); ++ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) ++ goto out_enable; ++ ++ DPRINTK(DRV, INFO, "Enabling FCoE offload features.\n"); ++ if (netif_running(netdev)) ++ netdev->netdev_ops->ndo_stop(netdev); ++ ++ ixgbe_clear_interrupt_scheme(adapter); ++ ++ adapter->flags |= IXGBE_FLAG_FCOE_ENABLED; ++ adapter->ring_feature[RING_F_FCOE].indices = IXGBE_FCRETA_SIZE; ++ netdev->features |= NETIF_F_FCOE_CRC; ++ netdev->features |= NETIF_F_FSO; ++ netdev->features |= NETIF_F_FCOE_MTU; ++ netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1; ++ ++ ixgbe_init_interrupt_scheme(adapter); ++ netdev_features_change(netdev); ++ ++ if (netif_running(netdev)) ++ netdev->netdev_ops->ndo_open(netdev); ++ rc = 0; ++ ++out_enable: ++ return rc; ++} ++ ++/** ++ * ixgbe_fcoe_disable - turn off FCoE offload feature ++ * @netdev: the corresponding netdev ++ * ++ * Turns off FCoE offload feature in 82599. 
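++ * The interrupt scheme is rebuilt so the queues reserved for FCoE are released.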
++ * ++ * Returns : 0 indicates success or -EINVAL on failure ++ */ ++int ixgbe_fcoe_disable(struct net_device *netdev) ++{ ++ int rc = -EINVAL; ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ struct ixgbe_fcoe *fcoe = &adapter->fcoe; ++ ++ if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) || ++ !(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) ++ goto out_disable; ++ ++ if (!atomic_dec_and_test(&fcoe->refcnt)) ++ goto out_disable; ++ ++ DPRINTK(DRV, INFO, "Disabling FCoE offload features.\n"); ++ netdev->features &= ~NETIF_F_FCOE_CRC; ++ netdev->features &= ~NETIF_F_FSO; ++ netdev->features &= ~NETIF_F_FCOE_MTU; ++ netdev->fcoe_ddp_xid = 0; ++ netdev_features_change(netdev); ++ ++ if (netif_running(netdev)) ++ netdev->netdev_ops->ndo_stop(netdev); ++ ++ ixgbe_clear_interrupt_scheme(adapter); ++ adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; ++ adapter->ring_feature[RING_F_FCOE].indices = 0; ++ ixgbe_cleanup_fcoe(adapter); ++ ixgbe_init_interrupt_scheme(adapter); ++ ++ if (netif_running(netdev)) ++ netdev->netdev_ops->ndo_open(netdev); ++ rc = 0; ++ ++out_disable: ++ return rc; ++} ++#endif /* HAVE_NETDEV_OPS_FCOE_ENABLE */ ++ ++#ifdef CONFIG_DCB ++#ifdef HAVE_DCBNL_OPS_GETAPP ++/** ++ * ixgbe_fcoe_getapp - retrieves current user priority bitmap for FCoE ++ * @adapter : ixgbe adapter ++ * ++ * Finds out the corresponding user priority bitmap from the current ++ * traffic class that FCoE belongs to. Returns 0 as the invalid user ++ * priority bitmap to indicate an error. ++ * ++ * Returns : 802.1p user priority bitmap for FCoE ++ */ ++u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter) ++{ ++ return 1 << adapter->fcoe.up; ++} ++ ++/** ++ * ixgbe_fcoe_setapp - sets the user priority bitmap for FCoE ++ * @adapter : ixgbe adapter ++ * @up : 802.1p user priority bitmap ++ * ++ * Finds out the traffic class from the input user priority ++ * bitmap for FCoE. ++ * ++ * Returns : 0 on success otherwise returns 1 on error ++ */ ++u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up) ++{ ++ int i; ++ u32 up2tc; ++ ++ /* valid user priority bitmap must not be 0 */ ++ if (up) { ++ /* from user priority to the corresponding traffic class */ ++ up2tc = IXGBE_READ_REG(&adapter->hw, IXGBE_RTTUP2TC); ++ for (i = 0; i < MAX_USER_PRIORITY; i++) { ++ if (up & (1 << i)) { ++ up2tc >>= (i * IXGBE_RTTUP2TC_UP_SHIFT); ++ up2tc &= (MAX_TRAFFIC_CLASS - 1); ++ adapter->fcoe.tc = (u8)up2tc; ++ adapter->fcoe.up = i; ++ return 0; ++ } ++ } ++ } ++ ++ return 1; ++} ++#endif /* HAVE_DCBNL_OPS_GETAPP */ ++#endif /* CONFIG_DCB */ ++ ++#ifdef HAVE_NETDEV_OPS_FCOE_GETWWN ++/** ++ * ixgbe_fcoe_get_wwn - get world wide name for the node or the port ++ * @netdev : ixgbe adapter ++ * @wwn : the world wide name ++ * @type: the type of world wide name ++ * ++ * Returns the node or port world wide name if both the prefix and the san ++ * mac address are valid, then the wwn is formed based on the NAA-2 for ++ * IEEE Extended name identifier (ref. to T10 FC-LS Spec., Sec. 15.3). 
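++ * The 64-bit name is the 16-bit prefix followed by the 48-bit SAN MAC address.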
++ * ++ * Returns : 0 on success ++ */ ++int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type) ++{ ++ int rc = -EINVAL; ++ u16 prefix = 0xffff; ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ struct ixgbe_mac_info *mac = &adapter->hw.mac; ++ ++ switch (type) { ++ case NETDEV_FCOE_WWNN: ++ prefix = mac->wwnn_prefix; ++ break; ++ case NETDEV_FCOE_WWPN: ++ prefix = mac->wwpn_prefix; ++ break; ++ default: ++ break; ++ } ++ ++ if ((prefix != 0xffff) && ++ is_valid_ether_addr(mac->san_addr)) { ++ *wwn = ((u64) prefix << 48) | ++ ((u64) mac->san_addr[0] << 40) | ++ ((u64) mac->san_addr[1] << 32) | ++ ((u64) mac->san_addr[2] << 24) | ++ ((u64) mac->san_addr[3] << 16) | ++ ((u64) mac->san_addr[4] << 8) | ++ ((u64) mac->san_addr[5]); ++ rc = 0; ++ } ++ return rc; ++} ++#endif /* HAVE_NETDEV_OPS_FCOE_GETWWN */ ++#endif /* IXGBE_FCOE */ +diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_fcoe.h linux-2.6.22-50/drivers/net/ixgbe/ixgbe_fcoe.h +--- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_fcoe.h 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_fcoe.h 2010-08-25 17:56:26.000000000 -0400 +@@ -0,0 +1,81 @@ ++/******************************************************************************* ++ ++ Intel 10 Gigabit PCI Express Linux driver ++ Copyright(c) 1999 - 2010 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ You should have received a copy of the GNU General Public License along with ++ this program; if not, write to the Free Software Foundation, Inc., ++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#ifndef _IXGBE_FCOE_H ++#define _IXGBE_FCOE_H ++ ++#ifdef IXGBE_FCOE ++ ++#include ++#include ++ ++/* shift bits within STAT fo FCSTAT */ ++#define IXGBE_RXDADV_FCSTAT_SHIFT 4 ++ ++/* ddp user buffer */ ++#define IXGBE_BUFFCNT_MAX 256 /* 8 bits bufcnt */ ++#define IXGBE_FCPTR_ALIGN 16 ++#define IXGBE_FCPTR_MAX (IXGBE_BUFFCNT_MAX * sizeof(dma_addr_t)) ++#define IXGBE_FCBUFF_4KB 0x0 ++#define IXGBE_FCBUFF_8KB 0x1 ++#define IXGBE_FCBUFF_16KB 0x2 ++#define IXGBE_FCBUFF_64KB 0x3 ++#define IXGBE_FCBUFF_MAX 65536 /* 64KB max */ ++#define IXGBE_FCBUFF_MIN 4096 /* 4KB min */ ++#define IXGBE_FCOE_DDP_MAX 512 /* 9 bits xid */ ++ ++/* Default traffic class to use for FCoE */ ++#define IXGBE_FCOE_DEFTC 3 ++ ++/* fcerr */ ++#define IXGBE_FCERR_BADCRC 0x00100000 ++#define IXGBE_FCERR_EOFSOF 0x00200000 ++#define IXGBE_FCERR_NOFIRST 0x00300000 ++#define IXGBE_FCERR_OOOSEQ 0x00400000 ++#define IXGBE_FCERR_NODMA 0x00500000 ++#define IXGBE_FCERR_PKTLOST 0x00600000 ++ ++struct ixgbe_fcoe_ddp { ++ int len; ++ u32 err; ++ unsigned int sgc; ++ struct scatterlist *sgl; ++ dma_addr_t udp; ++ u64 *udl; ++}; ++ ++struct ixgbe_fcoe { ++ u8 tc; ++ u8 up; ++ atomic_t refcnt; ++ spinlock_t lock; ++ struct pci_pool *pool; ++ struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX]; ++}; ++#endif /* IXGBE_FCOE */ ++ ++#endif /* _IXGBE_FCOE_H */ +diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe.h linux-2.6.22-50/drivers/net/ixgbe/ixgbe.h +--- linux-2.6.22-40/drivers/net/ixgbe/ixgbe.h 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe.h 2010-08-25 17:56:26.000000000 -0400 +@@ -0,0 +1,701 @@ ++/******************************************************************************* ++ ++ Intel 10 Gigabit PCI Express Linux driver ++ Copyright(c) 1999 - 2010 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ You should have received a copy of the GNU General Public License along with ++ this program; if not, write to the Free Software Foundation, Inc., ++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#ifndef _IXGBE_H_ ++#define _IXGBE_H_ ++ ++#ifndef IXGBE_NO_LRO ++#include ++#endif ++ ++#include ++#include ++#include ++ ++#ifdef SIOCETHTOOL ++#include ++#endif ++#ifdef NETIF_F_HW_VLAN_TX ++#include ++#endif ++#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) ++#define IXGBE_DCA ++#include ++#endif ++#include "ixgbe_dcb.h" ++ ++#include "kcompat.h" ++ ++#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) ++#define IXGBE_FCOE ++#include "ixgbe_fcoe.h" ++#endif /* CONFIG_FCOE or CONFIG_FCOE_MODULE */ ++ ++#include "ixgbe_api.h" ++ ++#define PFX "ixgbe: " ++#define DPRINTK(nlevel, klevel, fmt, args...) 
\ ++ ((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \ ++ printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \ ++ __FUNCTION__ , ## args))) ++ ++/* TX/RX descriptor defines */ ++#define IXGBE_DEFAULT_TXD 512 ++#define IXGBE_MAX_TXD 4096 ++#define IXGBE_MIN_TXD 64 ++ ++#define IXGBE_DEFAULT_RXD 512 ++#define IXGBE_MAX_RXD 4096 ++#define IXGBE_MIN_RXD 64 ++ ++ ++/* flow control */ ++#define IXGBE_DEFAULT_FCRTL 0x10000 ++#define IXGBE_MIN_FCRTL 0x40 ++#define IXGBE_MAX_FCRTL 0x7FF80 ++#define IXGBE_DEFAULT_FCRTH 0x20000 ++#define IXGBE_MIN_FCRTH 0x600 ++#define IXGBE_MAX_FCRTH 0x7FFF0 ++#define IXGBE_DEFAULT_FCPAUSE 0xFFFF ++#define IXGBE_MIN_FCPAUSE 0 ++#define IXGBE_MAX_FCPAUSE 0xFFFF ++ ++/* Supported Rx Buffer Sizes */ ++#define IXGBE_RXBUFFER_512 512 /* Used for packet split */ ++#define IXGBE_RXBUFFER_2048 2048 ++#define IXGBE_RXBUFFER_4096 4096 ++#define IXGBE_RXBUFFER_8192 8192 ++#define IXGBE_MAX_RXBUFFER 16384 /* largest size for single descriptor */ ++ ++/* ++ * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN mans we ++ * reserve 2 more, and skb_shared_info adds an additional 384 bytes more, ++ * this adds up to 512 bytes of extra data meaning the smallest allocation ++ * we could have is 1K. ++ * i.e. RXBUFFER_512 --> size-1024 slab ++ */ ++#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_512 ++ ++#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) ++ ++/* Maxium string size for the PBA string from the eeprom */ ++#define IXGBE_PBA_LEN 20 ++ ++/* How many Rx Buffers do we bundle into one write to the hardware ? */ ++#define IXGBE_RX_BUFFER_WRITE 16 /* Must be power of 2 */ ++ ++#define IXGBE_TX_FLAGS_CSUM (u32)(1) ++#define IXGBE_TX_FLAGS_VLAN (u32)(1 << 1) ++#define IXGBE_TX_FLAGS_TSO (u32)(1 << 2) ++#define IXGBE_TX_FLAGS_IPV4 (u32)(1 << 3) ++#define IXGBE_TX_FLAGS_FCOE (u32)(1 << 4) ++#define IXGBE_TX_FLAGS_FSO (u32)(1 << 5) ++#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000 ++#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 ++#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29 ++#define IXGBE_TX_FLAGS_VLAN_SHIFT 16 ++ ++#define IXGBE_MAX_RX_DESC_POLL 10 ++ ++#define IXGBE_MAX_RSC_INT_RATE 162760 ++ ++#define IXGBE_MAX_VF_MC_ENTRIES 30 ++#define IXGBE_MAX_VF_FUNCTIONS 64 ++#define IXGBE_MAX_VFTA_ENTRIES 128 ++#define MAX_EMULATION_MAC_ADDRS 16 ++ ++#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \ ++ { \ ++ u32 current_counter = IXGBE_READ_REG(hw, reg); \ ++ if (current_counter < last_counter) \ ++ counter += 0x100000000LL; \ ++ last_counter = current_counter; \ ++ counter &= 0xFFFFFFFF00000000LL; \ ++ counter |= current_counter; \ ++ } ++ ++#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \ ++ { \ ++ u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \ ++ u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \ ++ u64 current_counter = (current_counter_msb << 32) | \ ++ current_counter_lsb; \ ++ if (current_counter < last_counter) \ ++ counter += 0x1000000000LL; \ ++ last_counter = current_counter; \ ++ counter &= 0xFFFFFFF000000000LL; \ ++ counter |= current_counter; \ ++ } ++ ++struct vf_stats { ++ u64 gprc; ++ u64 gorc; ++ u64 gptc; ++ u64 gotc; ++ u64 mprc; ++}; ++ ++struct vf_data_storage { ++ unsigned char vf_mac_addresses[ETH_ALEN]; ++ u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES]; ++ u16 num_vf_mc_hashes; ++ u16 default_vf_vlan_id; ++ u16 vlans_enabled; ++ bool clear_to_send; ++ struct vf_stats vfstats; ++ struct vf_stats last_vfstats; ++ struct vf_stats saved_rst_vfstats; ++ bool pf_set_mac; ++ u16 
pf_vlan; /* When set, guest VLAN config not allowed. */ ++ u16 pf_qos; ++}; ++ ++#ifndef IXGBE_NO_LRO ++#define IXGBE_LRO_MAX 32 /*Maximum number of LRO descriptors*/ ++#define IXGBE_LRO_GLOBAL 10 ++ ++struct ixgbe_lro_stats { ++ u32 flushed; ++ u32 coal; ++ u32 recycled; ++}; ++ ++struct ixgbe_lro_desc { ++ struct hlist_node lro_node; ++ struct sk_buff *skb; ++ u32 source_ip; ++ u32 dest_ip; ++ u16 source_port; ++ u16 dest_port; ++ u16 vlan_tag; ++ u16 len; ++ u32 next_seq; ++ u32 ack_seq; ++ u16 window; ++ u16 mss; ++ u16 opt_bytes; ++ u16 psh:1; ++ u32 tsval; ++ u32 tsecr; ++ u32 append_cnt; ++}; ++ ++struct ixgbe_lro_list { ++ struct hlist_head active; ++ struct hlist_head free; ++ int active_cnt; ++ struct ixgbe_lro_stats stats; ++}; ++ ++#endif /* IXGBE_NO_LRO */ ++/* wrapper around a pointer to a socket buffer, ++ * so a DMA handle can be stored along with the buffer */ ++struct ixgbe_tx_buffer { ++ struct sk_buff *skb; ++ dma_addr_t dma; ++ unsigned long time_stamp; ++ u16 length; ++ u16 next_to_watch; ++ unsigned int bytecount; ++ u16 gso_segs; ++ u8 mapped_as_page; ++}; ++ ++struct ixgbe_rx_buffer { ++ struct sk_buff *skb; ++ dma_addr_t dma; ++ struct page *page; ++ dma_addr_t page_dma; ++ unsigned int page_offset; ++}; ++ ++struct ixgbe_queue_stats { ++ u64 packets; ++ u64 bytes; ++}; ++ ++struct ixgbe_tx_queue_stats { ++ u64 restart_queue; ++ u64 tx_busy; ++}; ++ ++struct ixgbe_rx_queue_stats { ++ u64 rsc_count; ++ u64 rsc_flush; ++ u64 non_eop_descs; ++ u64 alloc_rx_page_failed; ++ u64 alloc_rx_buff_failed; ++}; ++ ++enum ixbge_ring_state_t { ++ __IXGBE_TX_FDIR_INIT_DONE, ++ __IXGBE_TX_DETECT_HANG, ++ __IXGBE_RX_PS_ENABLED, ++ __IXGBE_RX_RSC_ENABLED, ++#ifndef IXGBE_NO_LRO ++ __IXGBE_RX_LRO_ENABLED, ++#endif ++}; ++ ++#define ring_is_ps_enabled(ring) \ ++ test_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state) ++#define set_ring_ps_enabled(ring) \ ++ set_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state) ++#define clear_ring_ps_enabled(ring) \ ++ clear_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state) ++#define check_for_tx_hang(ring) \ ++ test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state) ++#define set_check_for_tx_hang(ring) \ ++ set_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state) ++#define clear_check_for_tx_hang(ring) \ ++ clear_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state) ++#define ring_is_rsc_enabled(ring) \ ++ test_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state) ++#define set_ring_rsc_enabled(ring) \ ++ set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state) ++#define clear_ring_rsc_enabled(ring) \ ++ clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state) ++#ifndef IXGBE_NO_LRO ++#define ring_is_lro_enabled(ring) \ ++ test_bit(__IXGBE_RX_LRO_ENABLED, &(ring)->state) ++#define set_ring_lro_enabled(ring) \ ++ set_bit(__IXGBE_RX_LRO_ENABLED, &(ring)->state) ++#define clear_ring_lro_enabled(ring) \ ++ clear_bit(__IXGBE_RX_LRO_ENABLED, &(ring)->state) ++#endif /* IXGBE_NO_LRO */ ++struct ixgbe_ring { ++ void *desc; /* descriptor ring memory */ ++ struct device *dev; /* device for dma mapping */ ++ struct net_device *netdev; /* netdev ring belongs to */ ++ union { ++ struct ixgbe_tx_buffer *tx_buffer_info; ++ struct ixgbe_rx_buffer *rx_buffer_info; ++ }; ++ unsigned long state; ++ u8 atr_sample_rate; ++ u8 atr_count; ++ u16 count; /* amount of descriptors */ ++ u16 rx_buf_len; ++ u16 next_to_use; ++ u16 next_to_clean; ++ ++ u8 queue_index; /* needed for multiqueue queue management */ ++ u8 reg_idx; /* holds the special value that gets the ++ * hardware register offset associated ++ * with this ring, which is different ++ 
* for DCB and RSS modes */ ++ ++ u16 work_limit; /* max work per interrupt */ ++ ++ u8 __iomem *tail; ++ ++ unsigned int total_bytes; ++ unsigned int total_packets; ++ ++ struct ixgbe_queue_stats stats; ++ union { ++ struct ixgbe_tx_queue_stats tx_stats; ++ struct ixgbe_rx_queue_stats rx_stats; ++ }; ++ int numa_node; ++ unsigned int size; /* length in bytes */ ++ dma_addr_t dma; /* phys. address of descriptor ring */ ++ struct ixgbe_q_vector *q_vector; /* backpointer to host q_vector */ ++} ____cacheline_internodealigned_in_smp; ++ ++enum ixgbe_ring_f_enum { ++ RING_F_NONE = 0, ++ RING_F_DCB, ++ RING_F_VMDQ, ++ RING_F_RSS, ++ RING_F_FDIR, ++#ifdef IXGBE_FCOE ++ RING_F_FCOE, ++#endif /* IXGBE_FCOE */ ++ RING_F_ARRAY_SIZE /* must be last in enum set */ ++}; ++ ++#define IXGBE_MAX_DCB_INDICES 8 ++#define IXGBE_MAX_RSS_INDICES 16 ++#define IXGBE_MAX_VMDQ_INDICES 64 ++#define IXGBE_MAX_FDIR_INDICES 64 ++#ifdef IXGBE_FCOE ++#define IXGBE_MAX_FCOE_INDICES 8 ++#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES) ++#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES) ++#else ++#define MAX_RX_QUEUES IXGBE_MAX_FDIR_INDICES ++#define MAX_TX_QUEUES IXGBE_MAX_FDIR_INDICES ++#endif /* IXGBE_FCOE */ ++struct ixgbe_ring_feature { ++ int indices; ++ int mask; ++}; ++ ++ ++#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \ ++ ? 8 : 1) ++#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS ++ ++/* MAX_MSIX_Q_VECTORS of these are allocated, ++ * but we only use one per queue-specific vector. ++ */ ++struct ixgbe_q_vector { ++ struct ixgbe_adapter *adapter; ++ unsigned int v_idx; /* index of q_vector within array, also used for ++ * finding the bit in EICR and friends that ++ * represents the vector for this ring */ ++ int cpu; /* cpu for DCA */ ++#ifdef CONFIG_IXGBE_NAPI ++ struct napi_struct napi; ++#endif ++ DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */ ++ DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */ ++ u8 rxr_count; /* Rx ring count assigned to this vector */ ++ u8 txr_count; /* Tx ring count assigned to this vector */ ++ u8 tx_itr; ++ u8 rx_itr; ++ u32 eitr; ++ struct ixgbe_lro_list *lrolist; /* LRO list for queue vector*/ ++ char name[IFNAMSIZ + 9]; ++#ifndef HAVE_NETDEV_NAPI_LIST ++ struct net_device poll_dev; ++#endif ++} ____cacheline_internodealigned_in_smp; ++ ++ ++/* Helper macros to switch between ints/sec and what the register uses. ++ * And yes, it's the same math going both ways. The lowest value ++ * supported by all of the ixgbe hardware is 8. ++ */ ++#define EITR_INTS_PER_SEC_TO_REG(_eitr) \ ++ ((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8) ++#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG ++ ++#define IXGBE_DESC_UNUSED(R) \ ++ ((((R)->next_to_clean > (R)->next_to_use) ? 
0 : (R)->count) + \ ++ (R)->next_to_clean - (R)->next_to_use - 1) ++ ++#define IXGBE_RX_DESC_ADV(R, i) \ ++ (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i])) ++#define IXGBE_TX_DESC_ADV(R, i) \ ++ (&(((union ixgbe_adv_tx_desc *)((R)->desc))[i])) ++#define IXGBE_TX_CTXTDESC_ADV(R, i) \ ++ (&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i])) ++ ++#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128 ++#ifdef IXGBE_FCOE ++/* use 3K as the baby jumbo frame size for FCoE */ ++#define IXGBE_FCOE_JUMBO_FRAME_SIZE 3072 ++#endif /* IXGBE_FCOE */ ++ ++#ifdef IXGBE_TCP_TIMER ++#define TCP_TIMER_VECTOR 1 ++#else ++#define TCP_TIMER_VECTOR 0 ++#endif ++#define OTHER_VECTOR 1 ++#define NON_Q_VECTORS (OTHER_VECTOR + TCP_TIMER_VECTOR) ++ ++#define IXGBE_MAX_MSIX_VECTORS_82599 64 ++#define IXGBE_MAX_MSIX_Q_VECTORS_82599 64 ++#define IXGBE_MAX_MSIX_Q_VECTORS_82598 16 ++#define IXGBE_MAX_MSIX_VECTORS_82598 18 ++ ++/* ++ * Only for array allocations in our adapter struct. On 82598, there will be ++ * unused entries in the array, but that's not a big deal. Also, in 82599, ++ * we can actually assign 64 queue vectors based on our extended-extended ++ * interrupt registers. This is different than 82598, which is limited to 16. ++ */ ++#define MAX_MSIX_Q_VECTORS IXGBE_MAX_MSIX_Q_VECTORS_82599 ++#define MAX_MSIX_COUNT IXGBE_MAX_MSIX_VECTORS_82599 ++ ++#define MIN_MSIX_Q_VECTORS 2 ++#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS) ++ ++/* board specific private data structure */ ++struct ixgbe_adapter { ++ struct timer_list watchdog_timer; ++#ifdef NETIF_F_HW_VLAN_TX ++ struct vlan_group *vlgrp; ++#endif ++ u16 bd_number; ++ struct work_struct reset_task; ++ struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; ++ struct ixgbe_dcb_config dcb_cfg; ++ struct ixgbe_dcb_config temp_dcb_cfg; ++ u8 dcb_set_bitmap; ++ enum ixgbe_fc_mode last_lfc_mode; ++ ++ /* Interrupt Throttle Rate */ ++ u32 rx_itr_setting; ++ u32 tx_itr_setting; ++ u16 eitr_low; ++ u16 eitr_high; ++ ++ /* TX */ ++ struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp; ++ int num_tx_queues; ++ u32 tx_timeout_count; ++ ++ u64 restart_queue; ++ u64 lsc_int; ++ ++ /* RX */ ++ struct ixgbe_ring *rx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp; ++ int num_rx_queues; ++ int num_rx_pools; /* == num_rx_queues in 82598 */ ++ int num_rx_queues_per_pool; /* 1 if 82598, can be many if 82599 */ ++ u64 hw_csum_rx_error; ++ u64 hw_rx_no_dma_resources; ++ u64 non_eop_descs; ++#ifndef CONFIG_IXGBE_NAPI ++ u64 rx_dropped_backlog; /* count drops from rx intr handler */ ++#endif ++ int num_msix_vectors; ++ int max_msix_q_vectors; /* true count of q_vectors for device */ ++ struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE]; ++ struct msix_entry *msix_entries; ++#ifdef IXGBE_TCP_TIMER ++ irqreturn_t (*msix_handlers[MAX_MSIX_COUNT])(int irq, void *data, ++ struct pt_regs *regs); ++#endif ++ ++ u32 alloc_rx_page_failed; ++ u32 alloc_rx_buff_failed; ++ ++ /* Some features need tri-state capability, ++ * thus the additional *_CAPABLE flags. 
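The CAPABLE/ENABLED split mentioned above gives each feature three effective states: unsupported, supported but currently off, and on. A tiny standalone sketch of that convention, with made-up flag names and bit positions:

    #include <stdio.h>
    #include <stdint.h>

    /* Hypothetical tri-state feature flags, mirroring the CAPABLE/ENABLED split. */
    #define DEMO_FLAG_FOO_CAPABLE  (1u << 0)
    #define DEMO_FLAG_FOO_ENABLED  (1u << 1)

    static const char *foo_state(uint32_t flags)
    {
        if (!(flags & DEMO_FLAG_FOO_CAPABLE))
            return "unsupported";
        return (flags & DEMO_FLAG_FOO_ENABLED) ? "enabled" : "supported, disabled";
    }

    int main(void)
    {
        printf("%s\n", foo_state(0));
        printf("%s\n", foo_state(DEMO_FLAG_FOO_CAPABLE));
        printf("%s\n", foo_state(DEMO_FLAG_FOO_CAPABLE | DEMO_FLAG_FOO_ENABLED));
        return 0;
    }
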
++ */ ++ u32 flags; ++#define IXGBE_FLAG_RX_CSUM_ENABLED (u32)(1) ++#define IXGBE_FLAG_MSI_CAPABLE (u32)(1 << 1) ++#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 2) ++#define IXGBE_FLAG_MSIX_CAPABLE (u32)(1 << 3) ++#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 4) ++#ifndef IXGBE_NO_LLI ++#define IXGBE_FLAG_LLI_PUSH (u32)(1 << 5) ++#endif ++#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 6) ++#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 7) ++#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 8) ++#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 9) ++#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 10) ++#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 11) ++#define IXGBE_FLAG_DCA_ENABLED_DATA (u32)(1 << 12) ++#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 13) ++#define IXGBE_FLAG_DCB_ENABLED (u32)(1 << 14) ++#define IXGBE_FLAG_DCB_CAPABLE (u32)(1 << 15) ++#define IXGBE_FLAG_RSS_ENABLED (u32)(1 << 16) ++#define IXGBE_FLAG_RSS_CAPABLE (u32)(1 << 17) ++#define IXGBE_FLAG_VMDQ_CAPABLE (u32)(1 << 18) ++#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 19) ++#define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 20) ++#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 22) ++#define IXGBE_FLAG_IN_SFP_LINK_TASK (u32)(1 << 24) ++#define IXGBE_FLAG_IN_SFP_MOD_TASK (u32)(1 << 25) ++#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 26) ++#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 27) ++#ifdef IXGBE_FCOE ++#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 28) ++#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 29) ++#endif /* IXGBE_FCOE */ ++#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 30) ++#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 31) ++ ++ u32 flags2; ++#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1) ++#define IXGBE_FLAG2_RSC_ENABLED (u32)(1 << 1) ++#define IXGBE_FLAG2_SWLRO_ENABLED (u32)(1 << 2) ++#define IXGBE_FLAG2_VMDQ_DEFAULT_OVERRIDE (u32)(1 << 3) ++#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE (u32)(1 << 5) ++ ++/* default to trying for four seconds */ ++#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ) ++ ++ /* OS defined structs */ ++ struct net_device *netdev; ++ struct pci_dev *pdev; ++ struct net_device_stats net_stats; ++#ifndef IXGBE_NO_LRO ++ struct ixgbe_lro_stats lro_stats; ++#endif ++ ++#ifdef ETHTOOL_TEST ++ u32 test_icr; ++ struct ixgbe_ring test_tx_ring; ++ struct ixgbe_ring test_rx_ring; ++#endif ++ ++ /* structs defined in ixgbe_hw.h */ ++ struct ixgbe_hw hw; ++ u16 msg_enable; ++ struct ixgbe_hw_stats stats; ++#ifndef IXGBE_NO_LLI ++ u32 lli_port; ++ u32 lli_size; ++ u64 lli_int; ++ u32 lli_etype; ++ u32 lli_vlan_pri; ++#endif /* IXGBE_NO_LLI */ ++ /* Interrupt Throttle Rate */ ++ u32 rx_eitr_param; ++ u32 tx_eitr_param; ++ ++ unsigned long state; ++ u32 *config_space; ++ u64 tx_busy; ++ unsigned int tx_ring_count; ++ unsigned int rx_ring_count; ++ ++ u32 link_speed; ++ bool link_up; ++ unsigned long link_check_timeout; ++ ++ struct work_struct watchdog_task; ++ struct work_struct sfp_task; ++ struct timer_list sfp_timer; ++ struct work_struct multispeed_fiber_task; ++ struct work_struct sfp_config_module_task; ++ u64 flm; ++ u32 fdir_pballoc; ++ u32 atr_sample_rate; ++ spinlock_t fdir_perfect_lock; ++ struct work_struct fdir_reinit_task; ++#ifdef IXGBE_FCOE ++ struct ixgbe_fcoe fcoe; ++#endif /* IXGBE_FCOE */ ++ u64 rsc_total_count; ++ u64 rsc_total_flush; ++ u32 wol; ++ u16 eeprom_version; ++ bool netdev_registered; ++ char lsc_int_name[IFNAMSIZ + 9]; ++#ifdef IXGBE_TCP_TIMER ++ char tcp_timer_name[IFNAMSIZ + 9]; ++#endif ++ struct work_struct check_overtemp_task; ++ u32 interrupt_event; ++ ++ DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS); ++ unsigned int 
num_vfs; ++ bool repl_enable; ++ bool l2switch_enable; ++ struct vf_data_storage *vfinfo; ++ int node; ++}; ++ ++ ++enum ixbge_state_t { ++ __IXGBE_TESTING, ++ __IXGBE_RESETTING, ++ __IXGBE_DOWN, ++ __IXGBE_SFP_MODULE_NOT_FOUND ++}; ++ ++struct ixgbe_rsc_cb { ++ dma_addr_t dma; ++ u16 skb_cnt; ++ bool delay_unmap; ++}; ++#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb) ++ ++extern struct dcbnl_rtnl_ops dcbnl_ops; ++extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg, ++ struct ixgbe_dcb_config *dst_dcb_cfg, int tc_max); ++ ++/* needed by ixgbe_main.c */ ++extern int ixgbe_validate_mac_addr(u8 *mc_addr); ++extern void ixgbe_check_options(struct ixgbe_adapter *adapter); ++extern void ixgbe_assign_netdev_ops(struct net_device *netdev); ++ ++/* needed by ixgbe_ethtool.c */ ++extern char ixgbe_driver_name[]; ++extern const char ixgbe_driver_version[]; ++ ++extern int ixgbe_up(struct ixgbe_adapter *adapter); ++extern void ixgbe_down(struct ixgbe_adapter *adapter); ++extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter); ++extern void ixgbe_reset(struct ixgbe_adapter *adapter); ++extern void ixgbe_set_ethtool_ops(struct net_device *netdev); ++extern int ixgbe_setup_rx_resources(struct ixgbe_ring *); ++extern int ixgbe_setup_tx_resources(struct ixgbe_ring *); ++extern void ixgbe_free_rx_resources(struct ixgbe_ring *); ++extern void ixgbe_free_tx_resources(struct ixgbe_ring *); ++extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,struct ixgbe_ring *); ++extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,struct ixgbe_ring *); ++extern void ixgbe_update_stats(struct ixgbe_adapter *adapter); ++extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter); ++extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter); ++extern bool ixgbe_is_ixgbe(struct pci_dev *pcidev); ++extern netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, ++ struct ixgbe_adapter *, ++ struct ixgbe_ring *); ++extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *, ++ struct ixgbe_tx_buffer *); ++extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16); ++extern void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, struct ixgbe_ring *); ++extern void clear_rscctl(struct ixgbe_adapter *adapter, struct ixgbe_ring *); ++extern void ixgbe_set_rx_mode(struct net_device *netdev); ++extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32); ++extern void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector); ++#ifdef ETHTOOL_OPS_COMPAT ++extern int ethtool_ioctl(struct ifreq *ifr); ++#endif ++extern int ixgbe_dcb_netlink_register(void); ++extern int ixgbe_dcb_netlink_unregister(void); ++ ++ ++#ifdef IXGBE_FCOE ++extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter); ++extern int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb, ++ u32 tx_flags, u8 *hdr_len); ++extern void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter); ++extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, ++ union ixgbe_adv_rx_desc *rx_desc, ++ struct sk_buff *skb); ++extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, ++ struct scatterlist *sgl, unsigned int sgc); ++extern int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid); ++#ifdef HAVE_NETDEV_OPS_FCOE_ENABLE ++extern int ixgbe_fcoe_enable(struct net_device *netdev); ++extern int ixgbe_fcoe_disable(struct net_device *netdev); ++#endif /* HAVE_NETDEV_OPS_FCOE_ENABLE */ ++#ifdef CONFIG_DCB ++#ifdef HAVE_DCBNL_OPS_GETAPP ++extern u8 ixgbe_fcoe_getapp(struct ixgbe_adapter 
*adapter); ++extern u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up); ++#endif /* HAVE_DCBNL_OPS_GETAPP */ ++#endif /* CONFIG_DCB */ ++#ifdef HAVE_NETDEV_OPS_FCOE_GETWWN ++extern int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type); ++#endif ++#endif /* IXGBE_FCOE */ ++ ++ ++#endif /* _IXGBE_H_ */ +diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_main.c linux-2.6.22-50/drivers/net/ixgbe/ixgbe_main.c +--- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_main.c 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_main.c 2010-08-25 17:56:26.000000000 -0400 +@@ -0,0 +1,8742 @@ ++/******************************************************************************* ++ ++ Intel 10 Gigabit PCI Express Linux driver ++ Copyright(c) 1999 - 2010 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ You should have received a copy of the GNU General Public License along with ++ this program; if not, write to the Free Software Foundation, Inc., ++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++ ++/****************************************************************************** ++ Copyright (c)2006 - 2007 Myricom, Inc. 
for some LRO specific code ++******************************************************************************/ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#ifdef NETIF_F_TSO ++#include ++#ifdef NETIF_F_TSO6 ++#include ++#endif ++#endif ++#ifdef SIOCETHTOOL ++#include ++#endif ++#ifdef NETIF_F_HW_VLAN_TX ++#include ++#endif ++ ++ ++#include "ixgbe.h" ++ ++#include "ixgbe_sriov.h" ++ ++char ixgbe_driver_name[] = "ixgbe"; ++static const char ixgbe_driver_string[] = ++ "Intel(R) 10 Gigabit PCI Express Network Driver"; ++#define DRV_HW_PERF ++ ++#ifndef CONFIG_IXGBE_NAPI ++#define DRIVERNAPI ++#else ++#define DRIVERNAPI "-NAPI" ++#endif ++ ++#define FPGA ++ ++#define DRV_VERSION "2.1.4" DRIVERNAPI DRV_HW_PERF FPGA ++const char ixgbe_driver_version[] = DRV_VERSION; ++static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation."; ++/* ixgbe_pci_tbl - PCI Device ID Table ++ * ++ * Wildcard entries (PCI_ANY_ID) should come last ++ * Last entry must be all 0s ++ * ++ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, ++ * Class, Class Mask, private data (not used) } ++ */ ++static struct pci_device_id ixgbe_pci_tbl[] = { ++ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598_BX)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598AT)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598AT2)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598EB_CX4)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598EB_XF_LR)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_KX4)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_XAUI_LOM)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_KR)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_SFP)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_SFP_EM)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_T3_LOM)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_CX4)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE)}, ++ /* required last entry */ ++ {0, } ++}; ++MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl); ++ ++#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) ++static int ixgbe_notify_dca(struct notifier_block *, unsigned long event, ++ void *p); ++static struct notifier_block dca_notifier = { ++ .notifier_call = ixgbe_notify_dca, ++ .next = NULL, ++ .priority = 0 ++}; ++ ++#endif ++MODULE_AUTHOR("Intel Corporation, "); ++MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver"); ++MODULE_LICENSE("GPL"); ++MODULE_VERSION(DRV_VERSION); ++ ++#define DEFAULT_DEBUG_LEVEL_SHIFT 3 ++ ++static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ u32 gcr; ++ u32 gpie; ++ u32 vmdctl; ++ ++#ifdef CONFIG_PCI_IOV ++ /* disable iov and allow time for transactions to clear */ ++ pci_disable_sriov(adapter->pdev); ++#endif ++ ++ /* turn off device IOV mode */ ++ gcr = IXGBE_READ_REG(hw, 
IXGBE_GCR_EXT); ++ gcr &= ~(IXGBE_GCR_EXT_SRIOV); ++ IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr); ++ gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); ++ gpie &= ~IXGBE_GPIE_VTMODE_MASK; ++ IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); ++ ++ /* set default pool back to 0 */ ++ vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); ++ vmdctl &= ~IXGBE_VT_CTL_POOL_MASK; ++ IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl); ++ ++ /* take a breather then clean up driver data */ ++ msleep(100); ++ if (adapter->vfinfo) ++ kfree(adapter->vfinfo); ++ adapter->vfinfo = NULL; ++ ++ adapter->num_vfs = 0; ++ adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; ++} ++ ++static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter) ++{ ++ u32 ctrl_ext; ++ ++ /* Let firmware take over control of h/w */ ++ ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ++ ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD); ++} ++ ++static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter) ++{ ++ u32 ctrl_ext; ++ ++ /* Let firmware know the driver has taken over */ ++ ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ++ ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD); ++} ++ ++/* ++ * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors ++ * @adapter: pointer to adapter struct ++ * @direction: 0 for Rx, 1 for Tx, -1 for other causes ++ * @queue: queue to map the corresponding interrupt to ++ * @msix_vector: the vector to map to the corresponding queue ++ * ++ */ ++static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction, ++ u8 queue, u8 msix_vector) ++{ ++ u32 ivar, index; ++ struct ixgbe_hw *hw = &adapter->hw; ++ switch (hw->mac.type) { ++ case ixgbe_mac_82598EB: ++ msix_vector |= IXGBE_IVAR_ALLOC_VAL; ++ if (direction == -1) ++ direction = 0; ++ index = (((direction * 64) + queue) >> 2) & 0x1F; ++ ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); ++ ivar &= ~(0xFF << (8 * (queue & 0x3))); ++ ivar |= (msix_vector << (8 * (queue & 0x3))); ++ IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar); ++ break; ++ case ixgbe_mac_82599EB: ++ if (direction == -1) { ++ /* other causes */ ++ msix_vector |= IXGBE_IVAR_ALLOC_VAL; ++ index = ((queue & 1) * 8); ++ ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC); ++ ivar &= ~(0xFF << index); ++ ivar |= (msix_vector << index); ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar); ++ break; ++ } else { ++ /* tx or rx causes */ ++ msix_vector |= IXGBE_IVAR_ALLOC_VAL; ++ index = ((16 * (queue & 1)) + (8 * direction)); ++ ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1)); ++ ivar &= ~(0xFF << index); ++ ivar |= (msix_vector << index); ++ IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar); ++ break; ++ } ++ default: ++ break; ++ } ++} ++ ++static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, ++ u64 qmask) ++{ ++ u32 mask; ++ ++ switch (adapter->hw.mac.type) { ++ case ixgbe_mac_82598EB: ++ mask = (IXGBE_EIMS_RTX_QUEUE & qmask); ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); ++ break; ++ case ixgbe_mac_82599EB: ++ mask = (qmask & 0xFFFFFFFF); ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask); ++ mask = (qmask >> 32); ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask); ++ break; ++ default: ++ break; ++ } ++} ++ ++void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring, ++ struct ixgbe_tx_buffer *tx_buffer_info) ++{ ++ if (tx_buffer_info->dma) { ++ if (tx_buffer_info->mapped_as_page) ++ dma_unmap_page(tx_ring->dev, ++ tx_buffer_info->dma, ++ tx_buffer_info->length, ++ 
DMA_TO_DEVICE); ++ else ++ dma_unmap_single(tx_ring->dev, ++ tx_buffer_info->dma, ++ tx_buffer_info->length, ++ DMA_TO_DEVICE); ++ tx_buffer_info->dma = 0; ++ } ++ if (tx_buffer_info->skb) { ++ dev_kfree_skb_any(tx_buffer_info->skb); ++ tx_buffer_info->skb = NULL; ++ } ++ tx_buffer_info->time_stamp = 0; ++ /* tx_buffer_info must be completely set up in the transmit path */ ++} ++ ++/** ++ * ixgbe_tx_xon_state - check the tx ring xon state ++ * @adapter: the ixgbe adapter ++ * @tx_ring: the corresponding tx_ring ++ * ++ * If not in DCB mode, checks TFCS.TXOFF, otherwise, find out the ++ * corresponding TC of this tx_ring when checking TFCS. ++ * ++ * Returns : true if in xon state (currently not paused) ++ */ ++static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter, ++ struct ixgbe_ring *tx_ring) ++{ ++ u32 txoff = IXGBE_TFCS_TXOFF; ++ ++ if ((adapter->flags & IXGBE_FLAG_DCB_CAPABLE) && ++ adapter->dcb_cfg.pfc_mode_enable) { ++ int tc = 0; ++ int dcb_i = adapter->ring_feature[RING_F_DCB].indices; ++ u8 reg_idx = tx_ring->reg_idx; ++ ++ if (adapter->hw.mac.type == ixgbe_mac_82598EB) { ++ tc = reg_idx >> 2; ++ txoff = IXGBE_TFCS_TXOFF0; ++ } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { ++ tc = 0; ++ txoff = IXGBE_TFCS_TXOFF; ++ if (dcb_i == 8) { ++ /* TC0, TC1 */ ++ tc = reg_idx >> 5; ++ if (tc == 2) /* TC2, TC3 */ ++ tc += (reg_idx - 64) >> 4; ++ else if (tc == 3) /* TC4, TC5, TC6, TC7 */ ++ tc += 1 + ((reg_idx - 96) >> 3); ++ } else if (dcb_i == 4) { ++ /* TC0, TC1 */ ++ tc = reg_idx >> 6; ++ if (tc == 1) { ++ tc += (reg_idx - 64) >> 5; ++ if (tc == 2) /* TC2, TC3 */ ++ tc += (reg_idx - 96) >> 4; ++ } ++ } ++ } ++ txoff <<= tc; ++ } ++ return IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & txoff; ++} ++ ++static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter, ++ struct ixgbe_ring *tx_ring, ++ unsigned int eop) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ u32 head, tail; ++ ++ /* Detect a transmit hang in hardware, this serializes the ++ * check with the clearing of time_stamp and movement of eop */ ++ head = IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)); ++ tail = IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)); ++ clear_check_for_tx_hang(tx_ring); ++ if ((head != tail) && ++ tx_ring->tx_buffer_info[eop].time_stamp && ++ time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) && ++ ixgbe_tx_xon_state(adapter, tx_ring)) { ++ /* detected Tx unit hang */ ++ union ixgbe_adv_tx_desc *tx_desc; ++ tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop); ++ DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n" ++ " Tx Queue <%d>\n" ++ " TDH, TDT <%x>, <%x>\n" ++ " next_to_use <%x>\n" ++ " next_to_clean <%x>\n", ++ tx_ring->queue_index, head, tail, ++ tx_ring->next_to_use, eop); ++ DPRINTK(DRV, ERR, "tx_buffer_info[next_to_clean]\n" ++ " time_stamp <%lx>\n" ++ " jiffies <%lx>\n", ++ tx_ring->tx_buffer_info[eop].time_stamp, jiffies); ++ ++ netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); ++ ++ return true; ++ } ++ ++ return false; ++} ++ ++#define IXGBE_MAX_TXD_PWR 14 ++#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR) ++ ++/* Tx Descriptors needed, worst case */ ++#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \ ++ (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 
1 : 0)) ++#ifdef MAX_SKB_FRAGS ++#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4) ++#else ++#define DESC_NEEDED 4 ++#endif ++ ++static void ixgbe_tx_timeout(struct net_device *netdev); ++ ++/** ++ * ixgbe_clean_tx_irq - Reclaim resources after transmit completes ++ * @q_vector: structure containing interrupt and ring information ++ * @tx_ring: tx ring to clean ++ **/ ++static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, ++ struct ixgbe_ring *tx_ring) ++{ ++ struct ixgbe_adapter *adapter = q_vector->adapter; ++ union ixgbe_adv_tx_desc *tx_desc, *eop_desc; ++ struct ixgbe_tx_buffer *tx_buffer_info; ++ unsigned int total_bytes = 0, total_packets = 0; ++ u16 i, eop, count = 0; ++ ++ i = tx_ring->next_to_clean; ++ eop = tx_ring->tx_buffer_info[i].next_to_watch; ++ eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop); ++ ++ while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) && ++ (count < tx_ring->work_limit)) { ++ bool cleaned = false; ++ rmb(); /* read buffer_info after eop_desc */ ++ for ( ; !cleaned; count++) { ++ tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i); ++ tx_buffer_info = &tx_ring->tx_buffer_info[i]; ++ ++ tx_desc->wb.status = 0; ++ cleaned = (i == eop); ++ ++ i++; ++ if (i == tx_ring->count) ++ i = 0; ++ ++ if (cleaned && tx_buffer_info->skb) { ++ total_bytes += tx_buffer_info->bytecount; ++ total_packets += tx_buffer_info->gso_segs; ++ } ++ ++ ixgbe_unmap_and_free_tx_resource(tx_ring, ++ tx_buffer_info); ++ } ++ ++ eop = tx_ring->tx_buffer_info[i].next_to_watch; ++ eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop); ++ } ++ ++ tx_ring->next_to_clean = i; ++ tx_ring->total_bytes += total_bytes; ++ tx_ring->total_packets += total_packets; ++ tx_ring->stats.packets += total_packets; ++ tx_ring->stats.bytes += total_bytes; ++ ++ ++ if (check_for_tx_hang(tx_ring) && ++ ixgbe_check_tx_hang(adapter, tx_ring, i)) { ++ /* schedule immediate reset if we believe we hung */ ++ DPRINTK(PROBE, INFO, ++ "tx hang %d detected, resetting adapter\n", ++ adapter->tx_timeout_count + 1); ++ ixgbe_tx_timeout(tx_ring->netdev); ++ ++ /* the adapter is about to reset, no point in enabling stuff */ ++ return true; ++ } ++ ++#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) ++ if (unlikely(count && netif_carrier_ok(tx_ring->netdev) && ++ (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { ++ /* Make sure that anybody stopping the queue after this ++ * sees the new next_to_clean. 
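The wake test that follows leans on the IXGBE_DESC_UNUSED() arithmetic defined earlier: free slots are counted with wraparound between next_to_clean and next_to_use, always leaving one descriptor unused so a full ring is distinguishable from an empty one. A self-contained sketch of the same arithmetic with illustrative sample values:

    #include <stdio.h>

    /* Same wraparound arithmetic as the IXGBE_DESC_UNUSED() macro: free slots
     * between next_to_clean and next_to_use, minus the one reserved slot. */
    static unsigned int desc_unused(unsigned int count,
                                    unsigned int next_to_clean,
                                    unsigned int next_to_use)
    {
        return ((next_to_clean > next_to_use ? 0 : count) +
                next_to_clean - next_to_use - 1);
    }

    int main(void)
    {
        /* sample ring of 512 descriptors; values are illustrative only */
        printf("%u\n", desc_unused(512, 10, 10));   /* idle ring: 511 free */
        printf("%u\n", desc_unused(512, 100, 90));  /* clean ahead of use: 9 free */
        printf("%u\n", desc_unused(512, 90, 100));  /* wrapped: 501 free */
        return 0;
    }
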
++ */ ++ smp_mb(); ++#ifdef HAVE_TX_MQ ++ if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) ++ && !test_bit(__IXGBE_DOWN, &adapter->state)) { ++ netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index); ++ ++tx_ring->tx_stats.restart_queue; ++ } ++#else ++ if (netif_queue_stopped(tx_ring->netdev) && ++ !test_bit(__IXGBE_DOWN, &adapter->state)) { ++ netif_wake_queue(tx_ring->netdev); ++ ++tx_ring->tx_stats.restart_queue; ++ } ++#endif ++ } ++ ++#ifndef CONFIG_IXGBE_NAPI ++ /* re-arm the interrupt */ ++ if ((count >= tx_ring->work_limit) && ++ (!test_bit(__IXGBE_DOWN, &adapter->state))) ++ ixgbe_irq_rearm_queues(adapter, ((u64)1 << q_vector->v_idx)); ++ ++#endif ++ return (count < tx_ring->work_limit); ++} ++ ++static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, ++ struct ixgbe_ring *rx_ring, ++ int cpu) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ u32 rxctrl; ++ u8 reg_idx = rx_ring->reg_idx; ++ ++ rxctrl = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(reg_idx)); ++ switch (hw->mac.type) { ++ case ixgbe_mac_82598EB: ++ rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK; ++ rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); ++ break; ++ case ixgbe_mac_82599EB: ++ rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599; ++ rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) << ++ IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599); ++ break; ++ default: ++ break; ++ } ++ rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN; ++ rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN; ++ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED_DATA) { ++ /* just do the header data when in Packet Split mode */ ++ if (ring_is_ps_enabled(rx_ring)) ++ rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN; ++ else ++ rxctrl |= IXGBE_DCA_RXCTRL_DATA_DCA_EN; ++ } ++ rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN); ++ rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN | ++ IXGBE_DCA_RXCTRL_DESC_HSRO_EN); ++ IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl); ++} ++ ++static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter, ++ struct ixgbe_ring *tx_ring, ++ int cpu) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ u32 txctrl; ++ u8 reg_idx = tx_ring->reg_idx; ++ ++ switch (hw->mac.type) { ++ case ixgbe_mac_82598EB: ++ txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(reg_idx)); ++ txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK; ++ txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); ++ txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN; ++ txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; ++ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(reg_idx), txctrl); ++ break; ++ case ixgbe_mac_82599EB: ++ txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx)); ++ txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599; ++ txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) << ++ IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599); ++ txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN; ++ txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; ++ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx), txctrl); ++ break; ++ default: ++ break; ++ } ++} ++ ++static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector) ++{ ++ struct ixgbe_adapter *adapter = q_vector->adapter; ++ int cpu = get_cpu(); ++ long r_idx; ++ int i; ++ ++ if (q_vector->cpu == cpu) ++ goto out_no_update; ++ ++ r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); ++ for (i = 0; i < q_vector->txr_count; i++) { ++ ixgbe_update_tx_dca(adapter, adapter->tx_ring[r_idx], cpu); ++ r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, ++ r_idx + 1); ++ } ++ ++ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); ++ for (i = 0; i < q_vector->rxr_count; i++) { ++ ixgbe_update_rx_dca(adapter, 
adapter->rx_ring[r_idx], cpu); ++ r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, ++ r_idx + 1); ++ } ++ ++ q_vector->cpu = cpu; ++out_no_update: ++ put_cpu(); ++} ++ ++static void ixgbe_setup_dca(struct ixgbe_adapter *adapter) ++{ ++ int num_q_vectors; ++ int i; ++ ++ if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED)) ++ return; ++ ++ /* always use CB2 mode, difference is masked in the CB driver */ ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2); ++ ++ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) ++ num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; ++ else ++ num_q_vectors = 1; ++ ++ for (i = 0; i < num_q_vectors; i++) { ++ adapter->q_vector[i]->cpu = -1; ++ ixgbe_update_dca(adapter->q_vector[i]); ++ } ++} ++ ++#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) ++static int __ixgbe_notify_dca(struct device *dev, void *data) ++{ ++ struct ixgbe_adapter *adapter = dev_get_drvdata(dev); ++ unsigned long event = *(unsigned long *)data; ++ ++ if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED)) ++ return 0; ++ ++ switch (event) { ++ case DCA_PROVIDER_ADD: ++ /* if we're already enabled, don't do it again */ ++ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) ++ break; ++ if (dca_add_requester(dev) == 0) { ++ adapter->flags |= IXGBE_FLAG_DCA_ENABLED; ++ ixgbe_setup_dca(adapter); ++ break; ++ } ++ /* Fall Through since DCA is disabled. */ ++ case DCA_PROVIDER_REMOVE: ++ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { ++ dca_remove_requester(dev); ++ adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED; ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1); ++ } ++ break; ++ } ++ ++ return 0; ++} ++ ++#endif /* CONFIG_DCA or CONFIG_DCA_MODULE */ ++/** ++ * ixgbe_receive_skb - Send a completed packet up the stack ++ * @q_vector: structure containing interrupt and ring information ++ * @skb: packet to send up ++ * @vlan_tag: vlan tag for packet ++ **/ ++static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector, ++ struct sk_buff *skb, u16 vlan_tag) ++{ ++ struct ixgbe_adapter *adapter = q_vector->adapter; ++ int ret = NET_RX_SUCCESS; ++ ++#ifdef CONFIG_IXGBE_NAPI ++ if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) { ++#ifdef NETIF_F_HW_VLAN_TX ++ if (vlan_tag & VLAN_VID_MASK) { ++ if (adapter->vlgrp) ++ vlan_gro_receive(&q_vector->napi, ++ adapter->vlgrp, ++ vlan_tag, skb); ++ else ++ dev_kfree_skb_any(skb); ++ } else { ++ napi_gro_receive(&q_vector->napi, skb); ++ } ++#else ++ napi_gro_receive(&q_vector->napi, skb); ++#endif ++ } else { ++#endif /* CONFIG_IXGBE_NAPI */ ++ ++#ifdef NETIF_F_HW_VLAN_TX ++ if (vlan_tag & VLAN_VID_MASK) { ++ if (adapter->vlgrp) ++ ret = vlan_hwaccel_rx(skb, ++ adapter->vlgrp, ++ vlan_tag); ++ else ++ dev_kfree_skb_any(skb); ++ } else { ++ ret = netif_rx(skb); ++ } ++#else ++ ret = netif_rx(skb); ++#endif ++#ifndef CONFIG_IXGBE_NAPI ++ if (ret == NET_RX_DROP) ++ adapter->rx_dropped_backlog++; ++#endif ++#ifdef CONFIG_IXGBE_NAPI ++ } ++#endif /* CONFIG_IXGBE_NAPI */ ++} ++ ++/** ++ * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum ++ * @adapter: address of board private structure ++ * @rx_desc: current Rx descriptor being processed ++ * @skb: skb currently being received and modified ++ **/ ++static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter, ++ union ixgbe_adv_rx_desc *rx_desc, ++ struct sk_buff *skb) ++{ ++ u32 status_err = le32_to_cpu(rx_desc->wb.upper.status_error); ++ skb->ip_summed = CHECKSUM_NONE; ++ ++ /* Rx csum disabled */ ++ if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED)) ++ return; ++ ++ /* if IP and error */ ++ 
if ((status_err & IXGBE_RXD_STAT_IPCS) && ++ (status_err & IXGBE_RXDADV_ERR_IPE)) { ++ adapter->hw_csum_rx_error++; ++ return; ++ } ++ ++ if (!(status_err & IXGBE_RXD_STAT_L4CS)) ++ return; ++ ++ if (status_err & IXGBE_RXDADV_ERR_TCPE) { ++ u16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; ++ ++ /* ++ * 82599 errata, UDP frames with a 0 checksum can be marked as ++ * checksum errors. ++ */ ++ if ((pkt_info & IXGBE_RXDADV_PKTTYPE_UDP) && ++ (adapter->hw.mac.type == ixgbe_mac_82599EB)) ++ return; ++ ++ adapter->hw_csum_rx_error++; ++ return; ++ } ++ ++ /* It must be a TCP or UDP packet with a valid checksum */ ++ skb->ip_summed = CHECKSUM_UNNECESSARY; ++} ++ ++static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val) ++{ ++ /* ++ * Force memory writes to complete before letting h/w ++ * know there are new descriptors to fetch. (Only ++ * applicable for weak-ordered memory model archs, ++ * such as IA-64). ++ */ ++ wmb(); ++ writel(val, rx_ring->tail); ++} ++ ++/** ++ * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split ++ * @rx_ring: ring to place buffers on ++ * @cleaned_count: number of buffers to replace ++ **/ ++void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count) ++{ ++ union ixgbe_adv_rx_desc *rx_desc; ++ struct ixgbe_rx_buffer *bi; ++ struct sk_buff *skb; ++ u16 i = rx_ring->next_to_use; ++ ++ /* do nothing if no valid netdev defined */ ++ if (!rx_ring->netdev) ++ return; ++ ++ while (cleaned_count--) { ++ rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i); ++ bi = &rx_ring->rx_buffer_info[i]; ++ skb = bi->skb; ++ ++ if (!skb) { ++ skb = netdev_alloc_skb_ip_align(rx_ring->netdev, ++ rx_ring->rx_buf_len); ++ if (!skb) { ++ rx_ring->rx_stats.alloc_rx_buff_failed++; ++ goto no_buffers; ++ } ++ /* initialize queue mapping */ ++ skb_record_rx_queue(skb, rx_ring->queue_index); ++ bi->skb = skb; ++ } ++ ++ if (!bi->dma) { ++ bi->dma = dma_map_single(rx_ring->dev, ++ skb->data, ++ rx_ring->rx_buf_len, ++ DMA_FROM_DEVICE); ++ if (dma_mapping_error(rx_ring->dev, bi->dma)) { ++ rx_ring->rx_stats.alloc_rx_buff_failed++; ++ bi->dma = 0; ++ goto no_buffers; ++ } ++ } ++ ++ if (ring_is_ps_enabled(rx_ring)) { ++ if (!bi->page) { ++ bi->page = netdev_alloc_page(rx_ring->netdev); ++ if (!bi->page) { ++ rx_ring->rx_stats.alloc_rx_page_failed++; ++ goto no_buffers; ++ } ++ } ++ ++ if (!bi->page_dma) { ++ /* use a half page if we're re-using */ ++ bi->page_offset ^= PAGE_SIZE / 2; ++ bi->page_dma = dma_map_page(rx_ring->dev, ++ bi->page, ++ bi->page_offset, ++ PAGE_SIZE / 2, ++ DMA_FROM_DEVICE); ++ if (dma_mapping_error(rx_ring->dev, ++ bi->page_dma)) { ++ rx_ring->rx_stats.alloc_rx_page_failed++; ++ bi->page_dma = 0; ++ goto no_buffers; ++ } ++ } ++ ++ /* Refresh the desc even if buffer_addrs didn't change ++ * because each write-back erases this info. */ ++ rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); ++ rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); ++ } else { ++ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); ++ rx_desc->read.hdr_addr = 0; ++ } ++ ++ i++; ++ if (i == rx_ring->count) ++ i = 0; ++ } ++ ++no_buffers: ++ if (rx_ring->next_to_use != i) { ++ rx_ring->next_to_use = i; ++ ixgbe_release_rx_desc(rx_ring, i); ++ } ++} ++ ++static inline u16 ixgbe_get_hlen(union ixgbe_adv_rx_desc *rx_desc) ++{ ++ /* HW will not DMA in data larger than the given buffer, even if it ++ * parses the (NFS, of course) header to be larger. In that case, it ++ * fills the header buffer and spills the rest into the page. 
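The body below extracts the header length from a packed descriptor word with a mask and shift, then caps it at the 512-byte header buffer. A standalone sketch of that extraction; the mask, shift and field layout used here are placeholders, not the real IXGBE_RXDADV_* definitions:

    #include <stdio.h>
    #include <stdint.h>

    /* Placeholder field layout: header length assumed to occupy bits 5..15 of
     * the packed hdr_info word. Illustrative values, not the hardware ones. */
    #define DEMO_HDRBUFLEN_MASK   0xFFE0u
    #define DEMO_HDRBUFLEN_SHIFT  5
    #define DEMO_RX_HDR_SIZE      512   /* cap, like IXGBE_RX_HDR_SIZE */

    static uint16_t demo_get_hlen(uint16_t hdr_info)
    {
        uint16_t hlen = (hdr_info & DEMO_HDRBUFLEN_MASK) >> DEMO_HDRBUFLEN_SHIFT;

        if (hlen > DEMO_RX_HDR_SIZE)
            hlen = DEMO_RX_HDR_SIZE;
        return hlen;
    }

    int main(void)
    {
        printf("hlen = %u\n", (unsigned)demo_get_hlen(0x0C40)); /* sample word */
        return 0;
    }
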
++ */ ++ u16 hdr_info = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info); ++ u16 hlen = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >> ++ IXGBE_RXDADV_HDRBUFLEN_SHIFT; ++ if (hlen > IXGBE_RX_HDR_SIZE) ++ hlen = IXGBE_RX_HDR_SIZE; ++ return hlen; ++} ++/** ++ * ixgbe_transform_rsc_queue - change rsc queue into a full packet ++ * @skb: pointer to the last skb in the rsc queue ++ * ++ * This function changes a queue full of hw rsc buffers into a completed ++ * packet. It uses the ->prev pointers to find the first packet and then ++ * turns it into the frag list owner. ++ **/ ++static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb) ++{ ++ unsigned int frag_list_size = 0; ++ unsigned int skb_cnt = 1; ++ ++ while (skb->prev) { ++ struct sk_buff *prev = skb->prev; ++ frag_list_size += skb->len; ++ skb->prev = NULL; ++ skb = prev; ++ skb_cnt++; ++ } ++ ++ skb_shinfo(skb)->frag_list = skb->next; ++ skb->next = NULL; ++ skb->len += frag_list_size; ++ skb->data_len += frag_list_size; ++ skb->truesize += frag_list_size; ++ IXGBE_RSC_CB(skb)->skb_cnt = skb_cnt; ++ ++ return skb; ++} ++ ++#ifndef IXGBE_NO_LRO ++/** ++ * ixgbe_can_lro - returns true if packet is TCP/IPV4 and LRO is enabled ++ * @rx_ring: structure containing ring specific data ++ * @rx_desc: pointer to the rx descriptor ++ * ++ **/ ++static inline bool ixgbe_can_lro(struct ixgbe_ring *rx_ring, ++ union ixgbe_adv_rx_desc *rx_desc) ++{ ++ u16 pkt_info = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info); ++ ++ return (ring_is_lro_enabled(rx_ring) && ++ !(rx_ring->netdev->flags & IFF_PROMISC) && ++ (pkt_info & IXGBE_RXDADV_PKTTYPE_IPV4) && ++ (pkt_info & IXGBE_RXDADV_PKTTYPE_TCP)); ++} ++ ++/** ++ * ixgbe_lro_flush - Indicate packets to upper layer. ++ * ++ * Update IP and TCP header part of head skb if more than one ++ * skb's chained and indicate packets to upper layer. 
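When segments are coalesced, the flush path below rewrites the IP total length and recomputes the header checksum (the kernel helper ip_fast_csum does this in the driver). The checksum itself is the usual RFC 1071 one's-complement sum over the header words; a minimal userspace sketch, independent of the kernel helper:

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    /* RFC 1071 style one's-complement checksum over an IPv4 header given as
     * 16-bit words; the checksum field itself must be zeroed beforehand. */
    static uint16_t ipv4_header_checksum(const uint16_t *words, size_t nwords)
    {
        uint32_t sum = 0;

        for (size_t i = 0; i < nwords; i++)
            sum += words[i];
        while (sum >> 16)
            sum = (sum & 0xFFFF) + (sum >> 16);
        return (uint16_t)~sum;
    }

    int main(void)
    {
        /* Sample 20-byte header with the checksum word zeroed; prints 0xb861. */
        uint16_t hdr[10] = { 0x4500, 0x0073, 0x0000, 0x4000, 0x4011,
                             0x0000, 0xc0a8, 0x0001, 0xc0a8, 0x00c7 };

        printf("checksum = 0x%04x\n", (unsigned)ipv4_header_checksum(hdr, 10));
        return 0;
    }
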
++ **/ ++static void ixgbe_lro_flush(struct ixgbe_q_vector *q_vector, ++ struct ixgbe_lro_desc *lrod) ++{ ++ struct ixgbe_lro_list *lrolist = q_vector->lrolist; ++ struct iphdr *iph; ++ struct tcphdr *th; ++ struct sk_buff *skb; ++ u32 *ts_ptr; ++ ++ hlist_del(&lrod->lro_node); ++ lrolist->active_cnt--; ++ ++ skb = lrod->skb; ++ lrod->skb = NULL; ++ ++ if (lrod->append_cnt) { ++ /* take the lro queue and convert to skb format */ ++ skb = ixgbe_transform_rsc_queue(skb); ++ ++ /* incorporate ip header and re-calculate checksum */ ++ iph = (struct iphdr *)skb->data; ++ iph->tot_len = ntohs(skb->len); ++ iph->check = 0; ++ iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); ++ ++ /* incorporate the latest ack into the tcp header */ ++ th = (struct tcphdr *) ((char *)skb->data + sizeof(*iph)); ++ th->ack_seq = lrod->ack_seq; ++ th->psh = lrod->psh; ++ th->window = lrod->window; ++ th->check = 0; ++ ++ /* incorporate latest timestamp into the tcp header */ ++ if (lrod->opt_bytes) { ++ ts_ptr = (u32 *)(th + 1); ++ ts_ptr[1] = htonl(lrod->tsval); ++ ts_ptr[2] = lrod->tsecr; ++ } ++ } ++ ++#ifdef NETIF_F_TSO ++ skb_shinfo(skb)->gso_size = lrod->mss; ++#endif ++ ixgbe_receive_skb(q_vector, skb, lrod->vlan_tag); ++ lrolist->stats.flushed++; ++ ++ ++ hlist_add_head(&lrod->lro_node, &lrolist->free); ++} ++ ++static void ixgbe_lro_flush_all(struct ixgbe_q_vector *q_vector) ++{ ++ struct ixgbe_lro_desc *lrod; ++ struct hlist_node *node, *node2; ++ struct ixgbe_lro_list *lrolist = q_vector->lrolist; ++ ++ hlist_for_each_entry_safe(lrod, node, node2, &lrolist->active, lro_node) ++ ixgbe_lro_flush(q_vector, lrod); ++} ++ ++/* ++ * ixgbe_lro_header_ok - Main LRO function. ++ **/ ++static u16 ixgbe_lro_header_ok(struct sk_buff *new_skb, struct iphdr *iph, ++ struct tcphdr *th) ++{ ++ int opt_bytes, tcp_data_len; ++ u32 *ts_ptr = NULL; ++ ++ /* If we see CE codepoint in IP header, packet is not mergeable */ ++ if (INET_ECN_is_ce(ipv4_get_dsfield(iph))) ++ return -1; ++ ++ /* ensure there are no options */ ++ if ((iph->ihl << 2) != sizeof(*iph)) ++ return -1; ++ ++ /* .. and the packet is not fragmented */ ++ if (iph->frag_off & htons(IP_MF|IP_OFFSET)) ++ return -1; ++ ++ /* ensure no bits set besides ack or psh */ ++ if (th->fin || th->syn || th->rst || ++ th->urg || th->ece || th->cwr || !th->ack) ++ return -1; ++ ++ /* ensure that the checksum is valid */ ++ if (new_skb->ip_summed != CHECKSUM_UNNECESSARY) ++ return -1; ++ ++ /* ++ * check for timestamps. Since the only option we handle are timestamps, ++ * we only have to handle the simple case of aligned timestamps ++ */ ++ ++ opt_bytes = (th->doff << 2) - sizeof(*th); ++ if (opt_bytes != 0) { ++ ts_ptr = (u32 *)(th + 1); ++ if ((opt_bytes != TCPOLEN_TSTAMP_ALIGNED) || ++ (*ts_ptr != ntohl((TCPOPT_NOP << 24) | ++ (TCPOPT_NOP << 16) | (TCPOPT_TIMESTAMP << 8) | ++ TCPOLEN_TIMESTAMP))) { ++ return -1; ++ } ++ } ++ ++ tcp_data_len = ntohs(iph->tot_len) - (th->doff << 2) - sizeof(*iph); ++ ++ return tcp_data_len; ++} ++ ++/** ++ * ixgbe_lro_queue - if able, queue skb into lro chain ++ * @q_vector: structure containing interrupt and ring information ++ * @new_skb: pointer to current skb being checked ++ * @tag: vlan tag for skb ++ * ++ * Checks whether the skb given is eligible for LRO and if that's ++ * fine chains it to the existing lro_skb based on flowid. If an LRO for ++ * the flow doesn't exist create one. 
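The queueing path below first matches the incoming segment against open LRO descriptors on the (source IP, destination IP, source port, destination port, VLAN) tuple. A toy self-contained version of just that match, with invented types and sample values:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    /* Invented flow key mirroring the fields the LRO descriptor compares. */
    struct demo_flow_key {
        uint32_t saddr, daddr;
        uint16_t sport, dport;
        uint16_t vlan;
    };

    static bool demo_flow_match(const struct demo_flow_key *a,
                                const struct demo_flow_key *b)
    {
        return a->saddr == b->saddr && a->daddr == b->daddr &&
               a->sport == b->sport && a->dport == b->dport &&
               a->vlan  == b->vlan;
    }

    int main(void)
    {
        struct demo_flow_key open_flow = { 0xc0a80001, 0xc0a800c7, 2049, 49152, 0 };
        struct demo_flow_key incoming  = { 0xc0a80001, 0xc0a800c7, 2049, 49152, 0 };

        printf("match: %s\n", demo_flow_match(&open_flow, &incoming) ? "yes" : "no");
        return 0;
    }
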
++ **/ ++static struct sk_buff *ixgbe_lro_queue(struct ixgbe_q_vector *q_vector, ++ struct sk_buff *new_skb, ++ u16 tag) ++{ ++ struct sk_buff *lro_skb; ++ struct ixgbe_lro_desc *lrod; ++ struct hlist_node *node; ++ struct skb_shared_info *new_skb_info = skb_shinfo(new_skb); ++ struct ixgbe_lro_list *lrolist = q_vector->lrolist; ++ struct iphdr *iph = (struct iphdr *)new_skb->data; ++ struct tcphdr *th = (struct tcphdr *)(iph + 1); ++ int tcp_data_len = ixgbe_lro_header_ok(new_skb, iph, th); ++ u16 opt_bytes = (th->doff << 2) - sizeof(*th); ++ u32 *ts_ptr = (opt_bytes ? (u32 *)(th + 1) : NULL); ++ u32 seq = ntohl(th->seq); ++ ++ /* ++ * we have a packet that might be eligible for LRO, ++ * so see if it matches anything we might expect ++ */ ++ hlist_for_each_entry(lrod, node, &lrolist->active, lro_node) { ++ if (lrod->source_port != th->source || ++ lrod->dest_port != th->dest || ++ lrod->source_ip != iph->saddr || ++ lrod->dest_ip != iph->daddr || ++ lrod->vlan_tag != tag) ++ continue; ++ ++ /* malformed header, no tcp data, resultant packet would be too large */ ++ if (tcp_data_len <= 0 || (tcp_data_len + lrod->len) > 65535) { ++ ixgbe_lro_flush(q_vector, lrod); ++ break; ++ } ++ ++ /* out of order packet */ ++ if (seq != lrod->next_seq) { ++ ixgbe_lro_flush(q_vector, lrod); ++ tcp_data_len = -1; ++ break; ++ } ++ ++ /* packet without timestamp, or timestamp suddenly added to flow */ ++ if (lrod->opt_bytes != opt_bytes) { ++ ixgbe_lro_flush(q_vector, lrod); ++ break; ++ } ++ ++ if (opt_bytes) { ++ u32 tsval = ntohl(*(ts_ptr + 1)); ++ /* make sure timestamp values are increasing */ ++ if (opt_bytes != lrod->opt_bytes || ++ lrod->tsval > tsval || *(ts_ptr + 2) == 0) { ++ ixgbe_lro_flush(q_vector, lrod); ++ tcp_data_len = -1; ++ break; ++ } ++ ++ lrod->tsval = tsval; ++ lrod->tsecr = *(ts_ptr + 2); ++ } ++ ++ /* remove any padding from the end of the skb */ ++ __pskb_trim(new_skb, ntohs(iph->tot_len)); ++ /* Remove IP and TCP header*/ ++ skb_pull(new_skb, ntohs(iph->tot_len) - tcp_data_len); ++ ++ lrod->next_seq += tcp_data_len; ++ lrod->ack_seq = th->ack_seq; ++ lrod->window = th->window; ++ lrod->len += tcp_data_len; ++ lrod->psh |= th->psh; ++ lrod->append_cnt++; ++ lrolist->stats.coal++; ++ ++ if (tcp_data_len > lrod->mss) ++ lrod->mss = tcp_data_len; ++ ++ lro_skb = lrod->skb; ++ ++ /* if header is empty pull pages into current skb */ ++ if (!skb_headlen(new_skb) && ++ ((skb_shinfo(lro_skb)->nr_frags + ++ skb_shinfo(new_skb)->nr_frags) <= MAX_SKB_FRAGS )) { ++ struct skb_shared_info *lro_skb_info = skb_shinfo(lro_skb); ++ ++ /* copy frags into the last skb */ ++ memcpy(lro_skb_info->frags + lro_skb_info->nr_frags, ++ new_skb_info->frags, ++ new_skb_info->nr_frags * sizeof(skb_frag_t)); ++ ++ lro_skb_info->nr_frags += new_skb_info->nr_frags; ++ lro_skb->len += tcp_data_len; ++ lro_skb->data_len += tcp_data_len; ++ lro_skb->truesize += tcp_data_len; ++ ++ new_skb_info->nr_frags = 0; ++ new_skb->truesize -= tcp_data_len; ++ new_skb->len = new_skb->data_len = 0; ++ new_skb->data = skb_mac_header(new_skb); ++ __pskb_trim(new_skb, 0); ++ new_skb->protocol = 0; ++ lrolist->stats.recycled++; ++ } else { ++ /* Chain this new skb in frag_list */ ++ new_skb->prev = lro_skb; ++ lro_skb->next = new_skb; ++ lrod->skb = new_skb ; ++ new_skb = NULL; ++ } ++ ++ if (lrod->psh) ++ ixgbe_lro_flush(q_vector, lrod); ++ ++ return new_skb; ++ } ++ ++ /* start a new packet */ ++ if (tcp_data_len > 0 && !hlist_empty(&lrolist->free) && !th->psh) { ++ lrod = hlist_entry(lrolist->free.first, struct ixgbe_lro_desc, 
++ lro_node); ++ ++ lrod->skb = new_skb; ++ lrod->source_ip = iph->saddr; ++ lrod->dest_ip = iph->daddr; ++ lrod->source_port = th->source; ++ lrod->dest_port = th->dest; ++ lrod->vlan_tag = tag; ++ lrod->len = new_skb->len; ++ lrod->next_seq = seq + tcp_data_len; ++ lrod->ack_seq = th->ack_seq; ++ lrod->window = th->window; ++ lrod->mss = tcp_data_len; ++ lrod->opt_bytes = opt_bytes; ++ lrod->psh = 0; ++ lrod->append_cnt = 0; ++ ++ /* record timestamp if it is present */ ++ if (opt_bytes) { ++ lrod->tsval = ntohl(*(ts_ptr + 1)); ++ lrod->tsecr = *(ts_ptr + 2); ++ } ++ /* remove first packet from freelist.. */ ++ hlist_del(&lrod->lro_node); ++ /* .. and insert at the front of the active list */ ++ hlist_add_head(&lrod->lro_node, &lrolist->active); ++ lrolist->active_cnt++; ++ lrolist->stats.coal++; ++ return NULL; ++ } ++ ++ /* packet not handled by any of the above, pass it to the stack */ ++ ixgbe_receive_skb(q_vector, new_skb, tag); ++ return NULL; ++} ++ ++static void ixgbe_lro_ring_exit(struct ixgbe_lro_list *lrolist) ++{ ++ struct hlist_node *node, *node2; ++ struct ixgbe_lro_desc *lrod; ++ ++ hlist_for_each_entry_safe(lrod, node, node2, &lrolist->active, ++ lro_node) { ++ hlist_del(&lrod->lro_node); ++ kfree(lrod); ++ } ++ ++ hlist_for_each_entry_safe(lrod, node, node2, &lrolist->free, ++ lro_node) { ++ hlist_del(&lrod->lro_node); ++ kfree(lrod); ++ } ++} ++ ++static void ixgbe_lro_ring_init(struct ixgbe_lro_list *lrolist) ++{ ++ int j, bytes; ++ struct ixgbe_lro_desc *lrod; ++ ++ bytes = sizeof(struct ixgbe_lro_desc); ++ ++ INIT_HLIST_HEAD(&lrolist->free); ++ INIT_HLIST_HEAD(&lrolist->active); ++ ++ for (j = 0; j < IXGBE_LRO_MAX; j++) { ++ lrod = kzalloc(bytes, GFP_KERNEL); ++ if (lrod != NULL) { ++ INIT_HLIST_NODE(&lrod->lro_node); ++ hlist_add_head(&lrod->lro_node, &lrolist->free); ++ } ++ } ++} ++ ++#endif /* IXGBE_NO_LRO */ ++ ++static inline bool ixgbe_get_rsc_state(union ixgbe_adv_rx_desc *rx_desc) ++{ ++ return !!(le32_to_cpu(rx_desc->wb.lower.lo_dword.data) & ++ IXGBE_RXDADV_RSCCNT_MASK); ++} ++ ++static void ixgbe_rx_status_indication(u32 staterr, ++ struct ixgbe_adapter *adapter) ++{ ++ switch (adapter->hw.mac.type) { ++ case ixgbe_mac_82599EB: ++ if (staterr & IXGBE_RXD_STAT_FLM) ++ adapter->flm++; ++#ifndef IXGBE_NO_LLI ++ if (staterr & IXGBE_RXD_STAT_DYNINT) ++ adapter->lli_int++; ++#endif /* IXGBE_NO_LLI */ ++ break; ++ case ixgbe_mac_82598EB: ++#ifndef IXGBE_NO_LLI ++ if (staterr & IXGBE_RXD_STAT_DYNINT) ++ adapter->lli_int++; ++#endif /* IXGBE_NO_LLI */ ++ break; ++ default: ++ break; ++ } ++} ++ ++#ifdef CONFIG_IXGBE_NAPI ++static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, ++ struct ixgbe_ring *rx_ring, ++ int *work_done, int work_to_do) ++#else ++static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, ++ struct ixgbe_ring *rx_ring) ++#endif ++{ ++ struct ixgbe_adapter *adapter = q_vector->adapter; ++ union ixgbe_adv_rx_desc *rx_desc, *next_rxd; ++ struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer; ++ struct sk_buff *skb; ++ unsigned int total_rx_bytes = 0, total_rx_packets = 0; ++ const int current_node = numa_node_id(); ++#ifdef IXGBE_FCOE ++ int ddp_bytes = 0; ++#endif /* IXGBE_FCOE */ ++ u32 staterr; ++ u16 vlan_tag, i; ++ u16 cleaned_count = 0; ++#ifndef CONFIG_IXGBE_NAPI ++ u16 work_to_do = rx_ring->work_limit, local_work_done = 0; ++ u16 *work_done = &local_work_done; ++#endif ++ bool pkt_is_rsc = false; ++ ++ i = rx_ring->next_to_clean; ++ rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i); ++ staterr = 
le32_to_cpu(rx_desc->wb.upper.status_error); ++ ++ while (staterr & IXGBE_RXD_STAT_DD) { ++ u32 upper_len = 0; ++ ++ rx_buffer_info = &rx_ring->rx_buffer_info[i]; ++ ++ skb = rx_buffer_info->skb; ++ rx_buffer_info->skb = NULL; ++ prefetch(skb->data - NET_IP_ALIGN); ++ ++ if (ring_is_rsc_enabled(rx_ring)) ++ pkt_is_rsc = ixgbe_get_rsc_state(rx_desc); ++ ++ /* if this is a skb from previous receive dma will be 0 */ ++ if (rx_buffer_info->dma) { ++ u16 hlen; ++ if (pkt_is_rsc && !(staterr & IXGBE_RXD_STAT_EOP) && ++ !skb->prev) { ++ /* ++ * When HWRSC is enabled, delay unmapping ++ * of the first packet. It carries the ++ * header information, HW may still ++ * access the header after the writeback. ++ * Only unmap it when EOP is reached ++ */ ++ IXGBE_RSC_CB(skb)->delay_unmap = true; ++ IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma; ++ } else { ++ dma_unmap_single(rx_ring->dev, ++ rx_buffer_info->dma, ++ rx_ring->rx_buf_len, ++ DMA_FROM_DEVICE); ++ } ++ rx_buffer_info->dma = 0; ++ ++ if (ring_is_ps_enabled(rx_ring)) { ++ hlen = ixgbe_get_hlen(rx_desc); ++ upper_len = le16_to_cpu(rx_desc->wb.upper.length); ++ } else { ++ hlen = le16_to_cpu(rx_desc->wb.upper.length); ++ } ++ ++ /* small packet padding for queue-to-queue loopback */ ++ if ((staterr & IXGBE_RXD_STAT_LB) ++ && hlen < 60 && upper_len == 0) { ++ memset(skb->data + hlen, 0, 60 - hlen); ++ hlen = 60; ++ } ++ ++ skb_put(skb, hlen); ++ } else { ++ /* assume packet split since header is unmapped */ ++ upper_len = le16_to_cpu(rx_desc->wb.upper.length); ++ } ++ ++ if (upper_len) { ++ dma_unmap_page(rx_ring->dev, ++ rx_buffer_info->page_dma, ++ PAGE_SIZE / 2, ++ DMA_FROM_DEVICE); ++ rx_buffer_info->page_dma = 0; ++ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, ++ rx_buffer_info->page, ++ rx_buffer_info->page_offset, ++ upper_len); ++ ++ if ((page_count(rx_buffer_info->page) == 1) && ++ (page_to_nid(rx_buffer_info->page) == current_node)) ++ get_page(rx_buffer_info->page); ++ else ++ rx_buffer_info->page = NULL; ++ ++ skb->len += upper_len; ++ skb->data_len += upper_len; ++ skb->truesize += upper_len; ++ } ++ ++ i++; ++ if (i == rx_ring->count) ++ i = 0; ++ ++ next_rxd = IXGBE_RX_DESC_ADV(rx_ring, i); ++ prefetch(next_rxd); ++ cleaned_count++; ++ ++ if (pkt_is_rsc) { ++ u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >> ++ IXGBE_RXDADV_NEXTP_SHIFT; ++ next_buffer = &rx_ring->rx_buffer_info[nextp]; ++ } else { ++ next_buffer = &rx_ring->rx_buffer_info[i]; ++ } ++ ++ if (!(staterr & IXGBE_RXD_STAT_EOP)) { ++ if (ring_is_ps_enabled(rx_ring)) { ++ rx_buffer_info->skb = next_buffer->skb; ++ rx_buffer_info->dma = next_buffer->dma; ++ next_buffer->skb = skb; ++ next_buffer->dma = 0; ++ } else { ++ skb->next = next_buffer->skb; ++ skb->next->prev = skb; ++ } ++ rx_ring->rx_stats.non_eop_descs++; ++ goto next_desc; ++ } ++ ++ ixgbe_rx_status_indication(staterr, adapter); ++ if (skb->prev) { ++ skb = ixgbe_transform_rsc_queue(skb); ++ /* if we got here without RSC the packet is invalid */ ++ if (!pkt_is_rsc) { ++ __pskb_trim(skb, 0); ++ rx_buffer_info->skb = skb; ++ goto next_desc; ++ } ++ } ++ ++ if (ring_is_rsc_enabled(rx_ring)) { ++ if (IXGBE_RSC_CB(skb)->delay_unmap) { ++ dma_unmap_single(rx_ring->dev, ++ IXGBE_RSC_CB(skb)->dma, ++ rx_ring->rx_buf_len, ++ DMA_FROM_DEVICE); ++ IXGBE_RSC_CB(skb)->dma = 0; ++ IXGBE_RSC_CB(skb)->delay_unmap = false; ++ } ++ } ++ if (pkt_is_rsc) { ++ if (ring_is_ps_enabled(rx_ring)) ++ rx_ring->rx_stats.rsc_count += skb_shinfo(skb)->nr_frags; ++ else ++ rx_ring->rx_stats.rsc_count += IXGBE_RSC_CB(skb)->skb_cnt; ++ 
rx_ring->rx_stats.rsc_flush++; ++ } ++ ++ /* ERR_MASK will only have valid bits if EOP set */ ++ if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) { ++ /* trim packet back to size 0 and recycle it */ ++ __pskb_trim(skb, 0); ++ rx_buffer_info->skb = skb; ++ goto next_desc; ++ } ++ ++ ixgbe_rx_checksum(adapter, rx_desc, skb); ++ ++ /* probably a little skewed due to removing CRC */ ++ total_rx_bytes += skb->len; ++ total_rx_packets++; ++ ++ skb->protocol = eth_type_trans(skb, rx_ring->netdev); ++ ++#ifdef IXGBE_FCOE ++ /* if ddp, not passing to ULD unless for FCP_RSP or error */ ++ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { ++ ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb); ++ if (!ddp_bytes) { ++ rx_ring->netdev->last_rx = jiffies; ++ goto next_desc; ++ } ++ } ++ ++#endif /* IXGBE_FCOE */ ++ vlan_tag = ((staterr & IXGBE_RXD_STAT_VP) ? ++ le16_to_cpu(rx_desc->wb.upper.vlan) : 0); ++ ++#ifndef IXGBE_NO_LRO ++ if (ixgbe_can_lro(rx_ring, rx_desc)) ++ rx_buffer_info->skb = ixgbe_lro_queue(q_vector, skb, vlan_tag); ++ else ++#endif ++ ixgbe_receive_skb(q_vector, skb, vlan_tag); ++ ++ rx_ring->netdev->last_rx = jiffies; ++ ++next_desc: ++ rx_desc->wb.upper.status_error = 0; ++ ++ (*work_done)++; ++ if (*work_done >= work_to_do) ++ break; ++ ++ /* return some buffers to hardware, one at a time is too slow */ ++ if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) { ++ ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); ++ cleaned_count = 0; ++ } ++ ++ /* use prefetched values */ ++ rx_desc = next_rxd; ++ staterr = le32_to_cpu(rx_desc->wb.upper.status_error); ++ } ++ ++#ifndef IXGBE_NO_LRO ++ if (ring_is_lro_enabled(rx_ring)) ++ ixgbe_lro_flush_all(q_vector); ++ ++#endif /* IXGBE_NO_LRO */ ++ rx_ring->next_to_clean = i; ++ cleaned_count = IXGBE_DESC_UNUSED(rx_ring); ++ ++ if (cleaned_count) ++ ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); ++ ++#ifdef IXGBE_FCOE ++ /* include DDPed FCoE data */ ++ if (ddp_bytes > 0) { ++ unsigned int mss; ++ ++ mss = rx_ring->netdev->mtu - sizeof(struct fcoe_hdr) - ++ sizeof(struct fc_frame_header) - ++ sizeof(struct fcoe_crc_eof); ++ if (mss > 512) ++ mss &= ~511; ++ total_rx_bytes += ddp_bytes; ++ total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss); ++ } ++#endif /* IXGBE_FCOE */ ++ ++ rx_ring->total_packets += total_rx_packets; ++ rx_ring->total_bytes += total_rx_bytes; ++ rx_ring->stats.packets += total_rx_packets; ++ rx_ring->stats.bytes += total_rx_bytes; ++#ifndef CONFIG_IXGBE_NAPI ++ ++ /* re-arm the interrupt if we had to bail early and have more work */ ++ if ((*work_done >= work_to_do) && ++ (!test_bit(__IXGBE_DOWN, &adapter->state))) ++ ixgbe_irq_rearm_queues(adapter, ((u64)1 << q_vector->v_idx)); ++ ++ return local_work_done; ++#endif ++} ++ ++#ifdef CONFIG_IXGBE_NAPI ++static int ixgbe_clean_rxonly(struct napi_struct *, int); ++#endif ++/** ++ * ixgbe_configure_msix - Configure MSI-X hardware ++ * @adapter: board private structure ++ * ++ * ixgbe_configure_msix sets up the hardware to properly generate MSI-X ++ * interrupts. ++ **/ ++static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) ++{ ++ struct ixgbe_q_vector *q_vector; ++ int i, q_vectors, v_idx, r_idx; ++ u32 mask; ++ ++ q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; ++ ++ /* ++ * Populate the IVAR table and set the ITR values to the ++ * corresponding register. ++ */ ++ for (v_idx = 0; v_idx < q_vectors; v_idx++) { ++ q_vector = adapter->q_vector[v_idx]; ++ /* XXX for_each_bit(...) 
*/ ++ r_idx = find_first_bit(q_vector->rxr_idx, ++ adapter->num_rx_queues); ++ ++ for (i = 0; i < q_vector->rxr_count; i++) { ++ u8 reg_idx = adapter->rx_ring[r_idx]->reg_idx; ++ ixgbe_set_ivar(adapter, 0, reg_idx, v_idx); ++ r_idx = find_next_bit(q_vector->rxr_idx, ++ adapter->num_rx_queues, ++ r_idx + 1); ++ } ++ r_idx = find_first_bit(q_vector->txr_idx, ++ adapter->num_tx_queues); ++ ++ for (i = 0; i < q_vector->txr_count; i++) { ++ u8 reg_idx = adapter->tx_ring[r_idx]->reg_idx; ++ ixgbe_set_ivar(adapter, 1, reg_idx, v_idx); ++ r_idx = find_next_bit(q_vector->txr_idx, ++ adapter->num_tx_queues, ++ r_idx + 1); ++ } ++ ++ if (q_vector->txr_count && !q_vector->rxr_count) ++ /* tx only vector */ ++ q_vector->eitr = adapter->tx_eitr_param; ++ else if (q_vector->rxr_count) ++ /* rx or rx/tx vector */ ++ q_vector->eitr = adapter->rx_eitr_param; ++ ++ ixgbe_write_eitr(q_vector); ++ } ++ ++ switch (adapter->hw.mac.type) { ++ case ixgbe_mac_82598EB: ++ ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX, ++ v_idx); ++ break; ++ case ixgbe_mac_82599EB: ++ ixgbe_set_ivar(adapter, -1, 1, v_idx); ++ break; ++ ++ default: ++ break; ++ } ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950); ++#ifdef IXGBE_TCP_TIMER ++ ixgbe_set_ivar(adapter, -1, 0, ++v_idx); ++#endif /* IXGBE_TCP_TIMER */ ++ ++ /* set up to autoclear timer, and the vectors */ ++ mask = IXGBE_EIMS_ENABLE_MASK; ++ mask &= ~(IXGBE_EIMS_OTHER | ++ IXGBE_EIMS_MAILBOX | ++ IXGBE_EIMS_LSC); ++ ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask); ++} ++ ++enum latency_range { ++ lowest_latency = 0, ++ low_latency = 1, ++ bulk_latency = 2, ++ latency_invalid = 255 ++}; ++ ++/** ++ * ixgbe_update_itr - update the dynamic ITR value based on statistics ++ * @adapter: pointer to adapter ++ * @eitr: eitr setting (ints per sec) to give last timeslice ++ * @itr_setting: current throttle rate in ints/second ++ * @packets: the number of packets during this measurement interval ++ * @bytes: the number of bytes during this measurement interval ++ * ++ * Stores a new ITR value based on packets and byte ++ * counts during the last interrupt. The advantage of per interrupt ++ * computation is faster updates and more accurate ITR for the current ++ * traffic pattern. Constants in this function were computed ++ * based on theoretical maximum wire speed and thresholds were set based ++ * on testing data as well as attempting to minimize response time ++ * while increasing bulk throughput. ++ * this functionality is controlled by the InterruptThrottleRate module ++ * parameter (see ixgbe_param.c) ++ **/ ++static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter, ++ u32 eitr, u8 itr_setting, ++ int packets, int bytes) ++{ ++ unsigned int retval = itr_setting; ++ u32 timepassed_us; ++ u64 bytes_perint; ++ ++ if (packets == 0) ++ goto update_itr_done; ++ ++ ++ /* simple throttlerate management ++ * 0-20MB/s lowest (100000 ints/s) ++ * 20-100MB/s low (20000 ints/s) ++ * 100-1249MB/s bulk (8000 ints/s) ++ */ ++ /* what was last interrupt timeslice? 
*/ ++ timepassed_us = 1000000/eitr; ++ bytes_perint = bytes / timepassed_us; /* bytes/usec */ ++ ++ switch (itr_setting) { ++ case lowest_latency: ++ if (bytes_perint > adapter->eitr_low) { ++ retval = low_latency; ++ } ++ break; ++ case low_latency: ++ if (bytes_perint > adapter->eitr_high) { ++ retval = bulk_latency; ++ } ++ else if (bytes_perint <= adapter->eitr_low) { ++ retval = lowest_latency; ++ } ++ break; ++ case bulk_latency: ++ if (bytes_perint <= adapter->eitr_high) { ++ retval = low_latency; ++ } ++ break; ++ } ++ ++update_itr_done: ++ return retval; ++} ++ ++/** ++ * ixgbe_write_eitr - write EITR register in hardware specific way ++ * @q_vector: structure containing interrupt and ring information ++ * ++ * This function is made to be called by ethtool and by the driver ++ * when it needs to update EITR registers at runtime. Hardware ++ * specific quirks/differences are taken care of here. ++ */ ++void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector) ++{ ++ struct ixgbe_adapter *adapter = q_vector->adapter; ++ struct ixgbe_hw *hw = &adapter->hw; ++ int v_idx = q_vector->v_idx; ++ u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr); ++ ++ switch (adapter->hw.mac.type) { ++ case ixgbe_mac_82598EB: ++ /* must write high and low 16 bits to reset counter */ ++ itr_reg |= (itr_reg << 16); ++ break; ++ case ixgbe_mac_82599EB: ++ /* ++ * 82599 can support a value of zero, so allow it for ++ * max interrupt rate, but there is an errata where it can ++ * not be zero with RSC ++ */ ++ if (itr_reg == 8 && ++ !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) ++ itr_reg = 0; ++ ++ /* ++ * set the WDIS bit to not clear the timer bits and cause an ++ * immediate assertion of the interrupt ++ */ ++ itr_reg |= IXGBE_EITR_CNT_WDIS; ++ break; ++ default: ++ break; ++ } ++ IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg); ++} ++ ++static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector) ++{ ++ struct ixgbe_adapter *adapter = q_vector->adapter; ++ u32 new_itr; ++ u8 current_itr, ret_itr; ++ int i, r_idx; ++ struct ixgbe_ring *rx_ring = NULL, *tx_ring = NULL; ++ ++ r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); ++ for (i = 0; i < q_vector->txr_count; i++) { ++ tx_ring = adapter->tx_ring[r_idx]; ++ ret_itr = ixgbe_update_itr(adapter, q_vector->eitr, ++ q_vector->tx_itr, ++ tx_ring->total_packets, ++ tx_ring->total_bytes); ++ /* if the result for this queue would decrease interrupt ++ * rate for this vector then use that result */ ++ q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ? ++ q_vector->tx_itr - 1 : ret_itr); ++ r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, ++ r_idx + 1); ++ } ++ ++ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); ++ for (i = 0; i < q_vector->rxr_count; i++) { ++ rx_ring = adapter->rx_ring[r_idx]; ++ ret_itr = ixgbe_update_itr(adapter, q_vector->eitr, ++ q_vector->rx_itr, ++ rx_ring->total_packets, ++ rx_ring->total_bytes); ++ /* if the result for this queue would decrease interrupt ++ * rate for this vector then use that result */ ++ q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ? 
++ q_vector->rx_itr - 1 : ret_itr); ++ r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, ++ r_idx + 1); ++ } ++ ++ current_itr = max(q_vector->rx_itr, q_vector->tx_itr); ++ ++ switch (current_itr) { ++ /* counts and packets in update_itr are dependent on these numbers */ ++ case lowest_latency: ++ new_itr = 100000; ++ break; ++ case low_latency: ++ new_itr = 20000; /* aka hwitr = ~200 */ ++ break; ++ case bulk_latency: ++ default: ++ new_itr = 8000; ++ break; ++ } ++ ++ if (new_itr != q_vector->eitr) { ++ /* do an exponential smoothing */ ++ new_itr = ((q_vector->eitr * 9) + new_itr)/10; ++ ++ /* save the algorithm value here, not the smoothed one */ ++ q_vector->eitr = new_itr; ++ ++ ixgbe_write_eitr(q_vector); ++ } ++} ++ ++/** ++ * ixgbe_check_overtemp_task - worker thread to check over tempurature ++ * @work: pointer to work_struct containing our data ++ **/ ++static void ixgbe_check_overtemp_task(struct work_struct *work) ++{ ++ struct ixgbe_adapter *adapter = container_of(work, ++ struct ixgbe_adapter, ++ check_overtemp_task); ++ struct ixgbe_hw *hw = &adapter->hw; ++ u32 eicr = adapter->interrupt_event; ++ ++ if (test_bit(__IXGBE_DOWN, &adapter->state)) ++ return; ++ ++ if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)) ++ return; ++ ++ switch (hw->device_id) { ++ case IXGBE_DEV_ID_82599_T3_LOM: { ++ u32 autoneg; ++ bool link_up = false; ++ ++ if (hw->mac.ops.check_link) ++ hw->mac.ops.check_link(hw, &autoneg, &link_up, false); ++ ++ /* ++ * Since the warning interrupt is for both ports ++ * we don't have to check if: ++ * - This interrupt wasn't for our port. ++ * - We may have missed the interrupt so always have to ++ * check if we got a LSC ++ */ ++ if (((eicr & IXGBE_EICR_GPI_SDP0) && link_up) && ++ (!(eicr & IXGBE_EICR_LSC))) ++ return; ++ ++ /* Check if this is not due to overtemp */ ++ if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP) ++ return; ++ } ++ break; ++ default: ++ if (!(eicr & IXGBE_EICR_GPI_SDP0)) ++ return; ++ break; ++ } ++ DPRINTK(PROBE, CRIT, "Network adapter has been stopped because it has " ++ "over heated. Restart the computer. 
If the problem persists, " ++ "power off the system and replace the adapter\n"); ++ /* write to clear the interrupt */ ++ IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0); ++} ++ ++static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ ++ if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) && ++ (eicr & IXGBE_EICR_GPI_SDP1)) { ++ DPRINTK(PROBE, CRIT, "Fan has stopped, replace the adapter\n"); ++ /* write to clear the interrupt */ ++ IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); ++ } ++} ++ ++static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ ++ if (eicr & IXGBE_EICR_GPI_SDP2) { ++ /* Clear the interrupt */ ++ IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2); ++ if (!test_bit(__IXGBE_DOWN, &adapter->state)) ++ schedule_work(&adapter->sfp_config_module_task); ++ } ++ ++ if (eicr & IXGBE_EICR_GPI_SDP1) { ++ /* Clear the interrupt */ ++ IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); ++ if (!test_bit(__IXGBE_DOWN, &adapter->state)) ++ schedule_work(&adapter->multispeed_fiber_task); ++ } ++} ++ ++static void ixgbe_check_lsc(struct ixgbe_adapter *adapter) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ ++ adapter->lsc_int++; ++ adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; ++ adapter->link_check_timeout = jiffies; ++ if (!test_bit(__IXGBE_DOWN, &adapter->state)) { ++ IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC); ++ IXGBE_WRITE_FLUSH(hw); ++ schedule_work(&adapter->watchdog_task); ++ } ++} ++ ++static irqreturn_t ixgbe_msix_lsc(int irq, void *data) ++{ ++ struct net_device *netdev = data; ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ struct ixgbe_hw *hw = &adapter->hw; ++ u32 eicr; ++ ++ /* ++ * Workaround for Silicon errata. Use clear-by-write instead ++ * of clear-by-read. Reading with EICS will return the ++ * interrupt causes without clearing, which later be done ++ * with the write to EICR. 
++ */ ++ eicr = IXGBE_READ_REG(hw, IXGBE_EICS); ++ IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr); ++ ++ if (eicr & IXGBE_EICR_LSC) ++ ixgbe_check_lsc(adapter); ++ ++ if (eicr & IXGBE_EICR_MAILBOX) ++ ixgbe_msg_task(adapter); ++ ++ switch (hw->mac.type) { ++ case ixgbe_mac_82599EB: ++ if (eicr & IXGBE_EICR_ECC) { ++ DPRINTK(LINK, INFO, "Received unrecoverable ECC Err, " ++ "please reboot\n"); ++ IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC); ++ } ++#ifdef HAVE_TX_MQ ++ /* Handle Flow Director Full threshold interrupt */ ++ if (eicr & IXGBE_EICR_FLOW_DIR) { ++ int i; ++ IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_FLOW_DIR); ++ /* Disable transmits before FDIR Re-initialization */ ++ netif_tx_stop_all_queues(netdev); ++ for (i = 0; i < adapter->num_tx_queues; i++) { ++ struct ixgbe_ring *tx_ring = ++ adapter->tx_ring[i]; ++ if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE, ++ &tx_ring->state)) ++ schedule_work(&adapter->fdir_reinit_task); ++ } ++ } ++#endif ++ ixgbe_check_sfp_event(adapter, eicr); ++ adapter->interrupt_event = eicr; ++ if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && ++ ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) { ++ adapter->interrupt_event = eicr; ++ schedule_work(&adapter->check_overtemp_task); ++ } ++ break; ++ default: ++ break; ++ } ++ ++ ixgbe_check_fan_failure(adapter, eicr); ++ ++ /* re-enable the original interrupt state, no lsc, no queues */ ++ if (!test_bit(__IXGBE_DOWN, &adapter->state)) ++ IXGBE_WRITE_REG(hw, IXGBE_EIMS, eicr & ++ ~(IXGBE_EIMS_LSC | IXGBE_EIMS_RTX_QUEUE)); ++ ++ return IRQ_HANDLED; ++} ++ ++#ifdef IXGBE_TCP_TIMER ++static irqreturn_t ixgbe_msix_pba(int irq, void *data) ++{ ++ struct net_device *netdev = data; ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ int i; ++ ++ u32 pba = readl(adapter->msix_addr + IXGBE_MSIXPBA); ++ for (i = 0; i < MAX_MSIX_COUNT; i++) { ++ if (pba & (1 << i)) ++ adapter->msix_handlers[i](irq, data, regs); ++ else ++ adapter->pba_zero[i]++; ++ } ++ ++ adapter->msix_pba++; ++ return IRQ_HANDLED; ++} ++ ++static irqreturn_t ixgbe_msix_tcp_timer(int irq, void *data) ++{ ++ struct net_device *netdev = data; ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ ++ adapter->msix_tcp_timer++; ++ ++ return IRQ_HANDLED; ++} ++ ++#endif /* IXGBE_TCP_TIMER */ ++void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter, u64 qmask) ++{ ++ u32 mask; ++ struct ixgbe_hw *hw = &adapter->hw; ++ ++ switch (hw->mac.type) { ++ case ixgbe_mac_82598EB: ++ mask = (IXGBE_EIMS_RTX_QUEUE & qmask); ++ IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask); ++ break; ++ case ixgbe_mac_82599EB: ++ mask = (qmask & 0xFFFFFFFF); ++ if (mask) ++ IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); ++ mask = (qmask >> 32); ++ if (mask) ++ IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); ++ break; ++ default: ++ break; ++ } ++ /* skip the flush */ ++} ++ ++void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter, u64 qmask) ++{ ++ u32 mask; ++ struct ixgbe_hw *hw = &adapter->hw; ++ ++ switch (hw->mac.type) { ++ case ixgbe_mac_82598EB: ++ mask = (IXGBE_EIMS_RTX_QUEUE & qmask); ++ IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask); ++ break; ++ case ixgbe_mac_82599EB: ++ mask = (qmask & 0xFFFFFFFF); ++ if (mask) ++ IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask); ++ mask = (qmask >> 32); ++ if (mask) ++ IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask); ++ break; ++ default: ++ break; ++ } ++ /* skip the flush */ ++} ++ ++static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data) ++{ ++ struct ixgbe_q_vector *q_vector = data; ++ struct ixgbe_adapter *adapter = 
q_vector->adapter; ++ struct ixgbe_ring *tx_ring; ++ int i, r_idx; ++#ifndef CONFIG_IXGBE_NAPI ++ bool tx_clean_complete = false; ++#endif ++ ++ if (!q_vector->txr_count) ++ return IRQ_HANDLED; ++ ++#ifndef CONFIG_IXGBE_NAPI ++ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) ++ ixgbe_update_dca(q_vector); ++#endif ++ r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); ++ for (i = 0; i < q_vector->txr_count; i++) { ++ tx_ring = adapter->tx_ring[r_idx]; ++ tx_ring->total_bytes = 0; ++ tx_ring->total_packets = 0; ++#ifndef CONFIG_IXGBE_NAPI ++ tx_clean_complete = ixgbe_clean_tx_irq(q_vector, tx_ring); ++#endif ++ r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, ++ r_idx + 1); ++ } ++ ++#ifdef CONFIG_IXGBE_NAPI ++ /* EIAM disabled interrupts (on this vector) for us */ ++ napi_schedule(&q_vector->napi); ++#else ++ if (!test_bit(__IXGBE_DOWN, &adapter->state)) { ++ u64 eics = ((u64)1 << q_vector->v_idx); ++ ixgbe_irq_enable_queues(adapter, eics); ++ if (!tx_clean_complete) ++ ixgbe_irq_rearm_queues(adapter, eics); ++ } ++#endif ++ /* ++ * possibly later we can enable tx auto-adjustment if necessary ++ * ++ if (adapter->itr_setting & 1) ++ ixgbe_set_itr_msix(q_vector); ++ */ ++ ++ return IRQ_HANDLED; ++} ++ ++/** ++ * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues) ++ * @irq: unused ++ * @data: pointer to our q_vector struct for this interrupt vector ++ **/ ++static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data) ++{ ++ struct ixgbe_q_vector *q_vector = data; ++ struct ixgbe_adapter *adapter = q_vector->adapter; ++ struct ixgbe_ring *rx_ring; ++ int r_idx; ++ int i; ++#ifndef CONFIG_IXGBE_NAPI ++ bool rx_clean_complete = false; ++#endif ++ ++ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) ++ ixgbe_update_dca(q_vector); ++ ++ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); ++ for (i = 0; i < q_vector->rxr_count; i++) { ++ rx_ring = adapter->rx_ring[r_idx]; ++ rx_ring->total_bytes = 0; ++ rx_ring->total_packets = 0; ++#ifndef CONFIG_IXGBE_NAPI ++ rx_clean_complete = ixgbe_clean_rx_irq(q_vector, rx_ring); ++ ++ r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, ++ r_idx + 1); ++ } ++ ++ if (adapter->rx_itr_setting & 1) ++ ixgbe_set_itr_msix(q_vector); ++ if (!test_bit(__IXGBE_DOWN, &adapter->state)) { ++ u64 eics = ((u64)1 << q_vector->v_idx); ++ ixgbe_irq_enable_queues(adapter, eics); ++ if (!rx_clean_complete) ++ ixgbe_irq_rearm_queues(adapter, eics); ++ } ++#else ++ r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, ++ r_idx + 1); ++ } ++ ++ if (!q_vector->rxr_count) ++ return IRQ_HANDLED; ++ ++ /* EIAM disabled interrupts (on this vector) for us */ ++ napi_schedule(&q_vector->napi); ++#endif ++ ++ return IRQ_HANDLED; ++} ++ ++static irqreturn_t ixgbe_msix_clean_many(int irq, void *data) ++{ ++ struct ixgbe_q_vector *q_vector = data; ++ struct ixgbe_adapter *adapter = q_vector->adapter; ++ struct ixgbe_ring *ring; ++ int r_idx; ++ int i; ++#ifndef CONFIG_IXGBE_NAPI ++ bool clean_complete = true; ++#endif ++ ++ if (!q_vector->txr_count && !q_vector->rxr_count) ++ return IRQ_HANDLED; ++ ++#ifndef CONFIG_IXGBE_NAPI ++ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) ++ ixgbe_update_dca(q_vector); ++#endif ++ r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); ++ for (i = 0; i < q_vector->txr_count; i++) { ++ ring = adapter->tx_ring[r_idx]; ++ ring->total_bytes = 0; ++ ring->total_packets = 0; ++#ifndef CONFIG_IXGBE_NAPI ++ clean_complete = ixgbe_clean_tx_irq(q_vector, ring); ++#endif ++ r_idx 
= find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, ++ r_idx + 1); ++ } ++ ++ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); ++ for (i = 0; i < q_vector->rxr_count; i++) { ++ ring = adapter->rx_ring[r_idx]; ++ ring->total_bytes = 0; ++ ring->total_packets = 0; ++#ifndef CONFIG_IXGBE_NAPI ++ clean_complete &= ixgbe_clean_rx_irq(q_vector, ring); ++ ++ r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, ++ r_idx + 1); ++ } ++ ++ if (adapter->rx_itr_setting & 1) ++ ixgbe_set_itr_msix(q_vector); ++ if (!test_bit(__IXGBE_DOWN, &adapter->state)) { ++ u64 eics = ((u64)1 << q_vector->v_idx); ++ ixgbe_irq_enable_queues(adapter, eics); ++ if (!clean_complete) ++ ixgbe_irq_rearm_queues(adapter, eics); ++ } ++#else ++ r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, ++ r_idx + 1); ++ } ++ ++ /* EIAM disabled interrupts (on this vector) for us */ ++ napi_schedule(&q_vector->napi); ++#endif ++ ++ return IRQ_HANDLED; ++} ++ ++#ifdef CONFIG_IXGBE_NAPI ++/** ++ * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine ++ * @napi: napi struct with our devices info in it ++ * @budget: amount of work driver is allowed to do this pass, in packets ++ * ++ * This function is optimized for cleaning one queue only on a single ++ * q_vector!!! ++ **/ ++static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget) ++{ ++ struct ixgbe_q_vector *q_vector = ++ container_of(napi, struct ixgbe_q_vector, napi); ++ struct ixgbe_adapter *adapter = q_vector->adapter; ++ struct ixgbe_ring *rx_ring = NULL; ++ int work_done = 0; ++ long r_idx; ++ ++ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) ++ ixgbe_update_dca(q_vector); ++ ++ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); ++ rx_ring = adapter->rx_ring[r_idx]; ++ ++ ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget); ++ ++#ifndef HAVE_NETDEV_NAPI_LIST ++ if (!netif_running(adapter->netdev)) ++ work_done = 0; ++ ++#endif ++ /* If all Rx work done, exit the polling mode */ ++ if (work_done < budget) { ++ napi_complete(napi); ++ if (adapter->rx_itr_setting & 1) ++ ixgbe_set_itr_msix(q_vector); ++ if (!test_bit(__IXGBE_DOWN, &adapter->state)) ++ ixgbe_irq_enable_queues(adapter, ++ ((u64)1 << q_vector->v_idx)); ++ } ++ ++ return work_done; ++} ++ ++/** ++ * ixgbe_clean_rxtx_many - msix (aka one shot) rx clean routine ++ * @napi: napi struct with our devices info in it ++ * @budget: amount of work driver is allowed to do this pass, in packets ++ * ++ * This function will clean more than one rx queue associated with a ++ * q_vector. 
++ **/ ++static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget) ++{ ++ struct ixgbe_q_vector *q_vector = ++ container_of(napi, struct ixgbe_q_vector, napi); ++ struct ixgbe_adapter *adapter = q_vector->adapter; ++ struct ixgbe_ring *ring = NULL; ++ long r_idx; ++ int work_done = 0, total_work = 0, i; ++ bool rx_clean_complete = true, tx_clean_complete = true; ++ ++ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) ++ ixgbe_update_dca(q_vector); ++ ++ r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); ++ for (i = 0; i < q_vector->txr_count; i++) { ++ ring = adapter->tx_ring[r_idx]; ++ tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring); ++ r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, ++ r_idx + 1); ++ } ++ ++ /* attempt to distribute budget to each queue fairly, but don't allow ++ * the budget to go below 1 because we'll exit polling */ ++ budget /= (q_vector->rxr_count ?: 1); ++ budget = max(budget, 1); ++ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); ++ for (i = 0; i < q_vector->rxr_count; i++) { ++ work_done = 0; ++ ring = adapter->rx_ring[r_idx]; ++ ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget); ++ total_work += work_done; ++ rx_clean_complete &= (work_done < budget); ++ r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, ++ r_idx + 1); ++ } ++ ++ if (!tx_clean_complete || !rx_clean_complete) ++ work_done = budget; ++ ++#ifndef HAVE_NETDEV_NAPI_LIST ++ if (!netif_running(adapter->netdev)) ++ work_done = 0; ++ ++#endif ++ /* If all Rx work done, exit the polling mode */ ++ if (work_done < budget) { ++ napi_complete(napi); ++ if (adapter->rx_itr_setting & 1) ++ ixgbe_set_itr_msix(q_vector); ++ if (!test_bit(__IXGBE_DOWN, &adapter->state)) ++ ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx)); ++ } ++ ++ return work_done; ++} ++ ++/** ++ * ixgbe_clean_txonly - msix (aka one shot) tx clean routine ++ * @napi: napi struct with our devices info in it ++ * @budget: amount of work driver is allowed to do this pass, in packets ++ * ++ * This function is optimized for cleaning one queue only on a single ++ * q_vector!!! 
++ **/ ++static int ixgbe_clean_txonly(struct napi_struct *napi, int budget) ++{ ++ struct ixgbe_q_vector *q_vector = ++ container_of(napi, struct ixgbe_q_vector, napi); ++ struct ixgbe_adapter *adapter = q_vector->adapter; ++ struct ixgbe_ring *tx_ring = NULL; ++ long r_idx; ++ int work_done = 0; ++ ++ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) ++ ixgbe_update_dca(q_vector); ++ ++ r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); ++ tx_ring = adapter->tx_ring[r_idx]; ++ ++ if (!ixgbe_clean_tx_irq(q_vector, tx_ring)) ++ work_done = budget; ++ ++#ifndef HAVE_NETDEV_NAPI_LIST ++ if (!netif_running(adapter->netdev)) ++ work_done = 0; ++ ++#endif ++ /* If all Tx work done, exit the polling mode */ ++ if (work_done < budget) { ++ napi_complete(napi); ++ if (adapter->tx_itr_setting & 1) ++ ixgbe_set_itr_msix(q_vector); ++ if (!test_bit(__IXGBE_DOWN, &adapter->state)) ++ ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx)); ++ } ++ ++ return work_done; ++} ++ ++#endif /* CONFIG_IXGBE_NAPI */ ++static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx, ++ int r_idx) ++{ ++ struct ixgbe_q_vector *q_vector = a->q_vector[v_idx]; ++ struct ixgbe_ring *rx_ring = a->rx_ring[r_idx]; ++ ++ set_bit(r_idx, q_vector->rxr_idx); ++ q_vector->rxr_count++; ++ rx_ring->q_vector = q_vector; ++} ++ ++static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx, ++ int t_idx) ++{ ++ struct ixgbe_q_vector *q_vector = a->q_vector[v_idx]; ++ struct ixgbe_ring *tx_ring = a->tx_ring[t_idx]; ++ ++ set_bit(t_idx, q_vector->txr_idx); ++ q_vector->txr_count++; ++ tx_ring->q_vector = q_vector; ++} ++ ++/** ++ * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors ++ * @adapter: board private structure to initialize ++ * ++ * This function maps descriptor rings to the queue-specific vectors ++ * we were allotted through the MSI-X enabling code. Ideally, we'd have ++ * one vector per ring/queue, but on a constrained vector budget, we ++ * group the rings as "efficiently" as possible. You would add new ++ * mapping configurations in here. ++ **/ ++static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter) ++{ ++ int q_vectors; ++ int q_split; ++ int v_start = 0; ++ int rxr_idx = 0, txr_idx = 0; ++ int rxr_remaining = adapter->num_rx_queues; ++ int txr_remaining = adapter->num_tx_queues; ++ int i, j; ++ int rqpv, tqpv; ++ int err = 0; ++ ++ /* No mapping required if MSI-X is disabled. */ ++ if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) ++ goto out; ++ ++ q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; ++ ++ /* ++ * The ideal configuration... ++ * We have enough vectors to map one per queue. ++ */ ++ if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) { ++ for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++) ++ map_vector_to_rxq(adapter, v_start, rxr_idx); ++ ++ for (; txr_idx < txr_remaining; v_start++, txr_idx++) ++ map_vector_to_txq(adapter, v_start, txr_idx); ++ goto out; ++ } ++ ++ /* ++ * If we don't have enough vectors for a 1-to-1 ++ * mapping, we'll have to group them so there are ++ * multiple queues per vector. ++ */ ++ /* Re-adjusting *qpv takes care of the remainder. 
*/ ++ ++ q_split = q_vectors; ++ ++ for (i = v_start; i < q_split; i++) { ++ rqpv = DIV_ROUND_UP(rxr_remaining, q_split - i); ++ for (j = 0; j < rqpv; j++) { ++ map_vector_to_rxq(adapter, i, rxr_idx); ++ rxr_idx++; ++ rxr_remaining--; ++ } ++ } ++ ++ ++ for (i = v_start; i < q_vectors; i++) { ++ tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i); ++ for (j = 0; j < tqpv; j++) { ++ map_vector_to_txq(adapter, i, txr_idx); ++ txr_idx++; ++ txr_remaining--; ++ } ++ } ++ ++out: ++ return err; ++} ++ ++/** ++ * ixgbe_request_msix_irqs - Initialize MSI-X interrupts ++ * @adapter: board private structure ++ * ++ * ixgbe_request_msix_irqs allocates MSI-X vectors and requests ++ * interrupts from the kernel. ++ **/ ++static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) ++{ ++ struct net_device *netdev = adapter->netdev; ++ irqreturn_t (*handler)(int, void *); ++ int i, vector, q_vectors, err; ++ int ri = 0, ti = 0; ++ ++ /* Decrement for Other and TCP Timer vectors */ ++ q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; ++ ++#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count) \ ++ ? &ixgbe_msix_clean_many : \ ++ (_v)->rxr_count ? &ixgbe_msix_clean_rx : \ ++ (_v)->txr_count ? &ixgbe_msix_clean_tx : \ ++ NULL) ++ for (vector = 0; vector < q_vectors; vector++) { ++ struct ixgbe_q_vector *q_vector = adapter->q_vector[vector]; ++ handler = SET_HANDLER(q_vector); ++ ++ if (handler == &ixgbe_msix_clean_rx) { ++ sprintf(q_vector->name, "%s-%s-%d", ++ netdev->name, "rx", ri++); ++ } else if (handler == &ixgbe_msix_clean_tx) { ++ sprintf(q_vector->name, "%s-%s-%d", ++ netdev->name, "tx", ti++); ++ } else if (handler == &ixgbe_msix_clean_many) { ++ sprintf(q_vector->name, "%s-%s-%d", ++ netdev->name, "TxRx", ri++); ++ ti++; ++ } else { ++ /* skip this unused q_vector */ ++ continue; ++ } ++ err = request_irq(adapter->msix_entries[vector].vector, ++ handler, 0, q_vector->name, ++ q_vector); ++ if (err) { ++ DPRINTK(PROBE, ERR, ++ "request_irq failed for MSIX interrupt " ++ "Error: %d\n", err); ++ goto free_queue_irqs; ++ } ++ } ++ ++ sprintf(adapter->lsc_int_name, "%s:lsc", netdev->name); ++ err = request_irq(adapter->msix_entries[vector].vector, ++ &ixgbe_msix_lsc, 0, adapter->lsc_int_name, netdev); ++ if (err) { ++ DPRINTK(PROBE, ERR, ++ "request_irq for msix_lsc failed: %d\n", err); ++ goto free_queue_irqs; ++ } ++ ++#ifdef IXGBE_TCP_TIMER ++ vector++; ++ sprintf(adapter->tcp_timer_name, "%s:timer", netdev->name); ++ err = request_irq(adapter->msix_entries[vector].vector, ++ &ixgbe_msix_tcp_timer, 0, adapter->tcp_timer_name, ++ netdev); ++ if (err) { ++ DPRINTK(PROBE, ERR, ++ "request_irq for msix_tcp_timer failed: %d\n", err); ++ /* Free "Other" interrupt */ ++ free_irq(adapter->msix_entries[--vector].vector, netdev); ++ goto free_queue_irqs; ++ } ++ ++#endif ++ return 0; ++ ++free_queue_irqs: ++ for (i = vector - 1; i >= 0; i--) ++ free_irq(adapter->msix_entries[--vector].vector, ++ adapter->q_vector[i]); ++ adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; ++ pci_disable_msix(adapter->pdev); ++ kfree(adapter->msix_entries); ++ adapter->msix_entries = NULL; ++ return err; ++} ++ ++static void ixgbe_set_itr(struct ixgbe_adapter *adapter) ++{ ++ struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; ++ u8 current_itr; ++ u32 new_itr = q_vector->eitr; ++ struct ixgbe_ring *rx_ring = adapter->rx_ring[0]; ++ struct ixgbe_ring *tx_ring = adapter->tx_ring[0]; ++ ++ q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr, ++ q_vector->tx_itr, ++ tx_ring->total_packets, ++ tx_ring->total_bytes); 
++ q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr, ++ q_vector->rx_itr, ++ rx_ring->total_packets, ++ rx_ring->total_bytes); ++ ++ current_itr = max(q_vector->rx_itr, q_vector->tx_itr); ++ ++ switch (current_itr) { ++ /* counts and packets in update_itr are dependent on these numbers */ ++ case lowest_latency: ++ new_itr = 100000; ++ break; ++ case low_latency: ++ new_itr = 20000; /* aka hwitr = ~200 */ ++ break; ++ case bulk_latency: ++ new_itr = 8000; ++ break; ++ default: ++ break; ++ } ++ ++ if (new_itr != q_vector->eitr) { ++ ++ /* do an exponential smoothing */ ++ new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100); ++ ++ /* save the algorithm value here */ ++ q_vector->eitr = new_itr; ++ ++ ixgbe_write_eitr(q_vector); ++ } ++ ++ return; ++} ++ ++/** ++ * ixgbe_irq_enable - Enable default interrupt generation settings ++ * @adapter: board private structure ++ **/ ++static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, bool flush) ++{ ++ u32 mask; ++ u64 qmask; ++ ++ mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE); ++ qmask = ~0; ++ ++ /* don't reenable LSC while waiting for link */ ++ if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) ++ mask &= ~IXGBE_EIMS_LSC; ++ if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) ++ mask |= IXGBE_EIMS_GPI_SDP0; ++ if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) ++ mask |= IXGBE_EIMS_GPI_SDP1; ++ switch (adapter->hw.mac.type) { ++ case ixgbe_mac_82599EB: ++ mask |= IXGBE_EIMS_ECC; ++ mask |= IXGBE_EIMS_GPI_SDP1; ++ mask |= IXGBE_EIMS_GPI_SDP2; ++ mask |= IXGBE_EIMS_MAILBOX; ++ break; ++ default: ++ break; ++ } ++ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || ++ adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) ++ mask |= IXGBE_EIMS_FLOW_DIR; ++ ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); ++ if (queues) ++ ixgbe_irq_enable_queues(adapter, qmask); ++ if (flush) ++ IXGBE_WRITE_FLUSH(&adapter->hw); ++ ++ if (adapter->num_vfs > 32) { ++ u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1; ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel); ++ } ++} ++ ++/** ++ * ixgbe_intr - legacy mode Interrupt Handler ++ * @irq: interrupt number ++ * @data: pointer to a network interface device structure ++ **/ ++static irqreturn_t ixgbe_intr(int irq, void *data) ++{ ++ struct net_device *netdev = data; ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ struct ixgbe_hw *hw = &adapter->hw; ++ struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; ++ u32 eicr; ++ ++ /* ++ * Workaround of Silicon errata on 82598. Mask the interrupt ++ * before the read of EICR. ++ */ ++ IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); ++ ++ /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read ++ * therefore no explict interrupt disable is necessary */ ++ eicr = IXGBE_READ_REG(hw, IXGBE_EICR); ++ if (!eicr) { ++ /* ++ * shared interrupt alert! ++ * make sure interrupts are enabled because the read will ++ * have disabled interrupts due to EIAM ++ * finish the workaround of silicon errata on 82598. Unmask ++ * the interrupt that we masked before the EICR read. 
++ */ ++ if (!test_bit(__IXGBE_DOWN, &adapter->state)) ++ ixgbe_irq_enable(adapter, true, true); ++ return IRQ_NONE; /* Not our interrupt */ ++ } ++ ++ if (eicr & IXGBE_EICR_LSC) ++ ixgbe_check_lsc(adapter); ++ ++ switch (hw->mac.type) { ++ case ixgbe_mac_82599EB: ++ if (eicr & IXGBE_EICR_ECC) ++ DPRINTK(LINK, INFO, "Received unrecoverable ECC Err, " ++ "please reboot\n"); ++ ixgbe_check_sfp_event(adapter, eicr); ++ if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && ++ ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) { ++ adapter->interrupt_event = eicr; ++ schedule_work(&adapter->check_overtemp_task); ++ } ++ break; ++ default: ++ break; ++ } ++ ++ ixgbe_check_fan_failure(adapter, eicr); ++ ++#ifdef CONFIG_IXGBE_NAPI ++ if (napi_schedule_prep(&(q_vector->napi))) { ++ adapter->tx_ring[0]->total_packets = 0; ++ adapter->tx_ring[0]->total_bytes = 0; ++ adapter->rx_ring[0]->total_packets = 0; ++ adapter->rx_ring[0]->total_bytes = 0; ++ /* would disable interrupts here but EIAM disabled it */ ++ __napi_schedule(&(q_vector->napi)); ++ } ++ ++ /* ++ * re-enable link(maybe) and non-queue interrupts, no flush. ++ * ixgbe_poll will re-enable the queue interrupts ++ */ ++ if (!test_bit(__IXGBE_DOWN, &adapter->state)) ++ ixgbe_irq_enable(adapter, false, false); ++#else ++ adapter->tx_ring[0]->total_packets = 0; ++ adapter->tx_ring[0]->total_bytes = 0; ++ adapter->rx_ring[0]->total_packets = 0; ++ adapter->rx_ring[0]->total_bytes = 0; ++ ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]); ++ ixgbe_clean_rx_irq(q_vector, adapter->rx_ring[0]); ++ ++ /* dynamically adjust throttle */ ++ if (adapter->rx_itr_setting & 1) ++ ixgbe_set_itr(adapter); ++ ++ /* ++ * Workaround of Silicon errata on 82598. Unmask ++ * the interrupt that we masked before the EICR read ++ * no flush of the re-enable is necessary here ++ */ ++ if (!test_bit(__IXGBE_DOWN, &adapter->state)) ++ ixgbe_irq_enable(adapter, true, false); ++#endif ++ return IRQ_HANDLED; ++} ++ ++static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter) ++{ ++ int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; ++ ++ for (i = 0; i < adapter->num_rx_queues; i++) ++ adapter->rx_ring[i]->q_vector = NULL; ++ for (i = 0; i < adapter->num_tx_queues; i++) ++ adapter->tx_ring[i]->q_vector = NULL; ++ ++ for (i = 0; i < q_vectors; i++) { ++ struct ixgbe_q_vector *q_vector = adapter->q_vector[i]; ++ bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES); ++ bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES); ++ q_vector->rxr_count = 0; ++ q_vector->txr_count = 0; ++ q_vector->eitr = adapter->rx_eitr_param; ++ } ++} ++ ++/** ++ * ixgbe_request_irq - initialize interrupts ++ * @adapter: board private structure ++ * ++ * Attempts to configure interrupts using the best available ++ * capabilities of the hardware and kernel. 
++ **/ ++static int ixgbe_request_irq(struct ixgbe_adapter *adapter) ++{ ++ struct net_device *netdev = adapter->netdev; ++ int err; ++ ++#ifdef HAVE_DEVICE_NUMA_NODE ++ DPRINTK(TX_ERR, INFO, "numa_node before request_irq %d\n", ++ dev_to_node(&adapter->pdev->dev)); ++#endif /* HAVE_DEVICE_NUMA_NODE */ ++ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { ++ err = ixgbe_request_msix_irqs(adapter); ++ } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { ++ err = request_irq(adapter->pdev->irq, &ixgbe_intr, 0, ++ netdev->name, netdev); ++ } else { ++ err = request_irq(adapter->pdev->irq, &ixgbe_intr, IRQF_SHARED, ++ netdev->name, netdev); ++ } ++ ++ if (err) ++ DPRINTK(PROBE, ERR, "request_irq failed, Error %d\n", err); ++ ++ return err; ++} ++ ++static void ixgbe_free_irq(struct ixgbe_adapter *adapter) ++{ ++ struct net_device *netdev = adapter->netdev; ++ ++ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { ++ int i, q_vectors; ++ ++ q_vectors = adapter->num_msix_vectors; ++ i = q_vectors - 1; ++#ifdef IXGBE_TCP_TIMER ++ free_irq(adapter->msix_entries[i].vector, netdev); ++ i--; ++#endif ++ free_irq(adapter->msix_entries[i].vector, netdev); ++ i--; ++ ++ /* free only the irqs that were actually requested */ ++ for (; i >= 0; i--) { ++ if (adapter->q_vector[i]->rxr_count || ++ adapter->q_vector[i]->txr_count) ++ free_irq(adapter->msix_entries[i].vector, ++ adapter->q_vector[i]); ++ } ++ ++ ixgbe_reset_q_vectors(adapter); ++ } else { ++ free_irq(adapter->pdev->irq, netdev); ++ } ++} ++ ++/** ++ * ixgbe_irq_disable - Mask off interrupt generation on the NIC ++ * @adapter: board private structure ++ **/ ++static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) ++{ ++ switch (adapter->hw.mac.type) { ++ case ixgbe_mac_82598EB: ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); ++ break; ++ case ixgbe_mac_82599EB: ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); ++ if (adapter->num_vfs > 32) ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0); ++ break; ++ default: ++ break; ++ } ++ IXGBE_WRITE_FLUSH(&adapter->hw); ++ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { ++ int i; ++ for (i = 0; i < adapter->num_msix_vectors; i++) ++ synchronize_irq(adapter->msix_entries[i].vector); ++ } else { ++ synchronize_irq(adapter->pdev->irq); ++ } ++} ++ ++/** ++ * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts ++ * ++ **/ ++static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ ++ IXGBE_WRITE_REG(hw, IXGBE_EITR(0), ++ EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr_param)); ++ ++ ixgbe_set_ivar(adapter, 0, 0, 0); ++ ixgbe_set_ivar(adapter, 1, 0, 0); ++ ++ map_vector_to_rxq(adapter, 0, 0); ++ map_vector_to_txq(adapter, 0, 0); ++ ++ DPRINTK(HW, INFO, "Legacy interrupt IVAR setup done\n"); ++} ++ ++/** ++ * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset ++ * @adapter: board private structure ++ * @ring: structure containing ring specific data ++ * ++ * Configure the Tx descriptor ring after a reset. 
++ **/ ++void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, ++ struct ixgbe_ring *ring) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ u64 tdba = ring->dma; ++ int wait_loop = 10; ++ u32 txdctl; ++ u8 reg_idx = ring->reg_idx; ++ ++ /* disable queue to avoid issues while updating state */ ++ txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); ++ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), ++ txdctl & ~IXGBE_TXDCTL_ENABLE); ++ IXGBE_WRITE_FLUSH(hw); ++ ++ IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx), ++ (tdba & DMA_BIT_MASK(32))); ++ IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32)); ++ IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx), ++ ring->count * sizeof(union ixgbe_adv_tx_desc)); ++ IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0); ++ IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0); ++ ring->tail = hw->hw_addr + IXGBE_TDT(reg_idx); ++ ++ /* configure fetching thresholds */ ++ if (adapter->rx_itr_setting == 0) { ++ /* cannot set wthresh when itr==0 */ ++ txdctl &= ~0x007F0000; ++ } else { ++ /* enable WTHRESH=8 descriptors, to encourage burst writeback */ ++ txdctl |= (8 << 16); ++ } ++ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { ++ /* PThresh workaround for Tx hang with DFP enabled. */ ++ txdctl |= 32; ++ } ++ ++ /* reinitialize flowdirector state */ ++ set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state); ++ ++ /* enable queue */ ++ txdctl |= IXGBE_TXDCTL_ENABLE; ++ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl); ++ ++ /* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */ ++ if (hw->mac.type == ixgbe_mac_82598EB && ++ !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) ++ return; ++ ++ /* poll to verify queue is enabled */ ++ do { ++ msleep(1); ++ txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); ++ } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE)); ++ if (!wait_loop) ++ DPRINTK(DRV, ERR, "Could not enable Tx Queue %d\n", reg_idx); ++} ++ ++static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ u32 rttdcs; ++ u32 mask; ++ ++ if (hw->mac.type == ixgbe_mac_82598EB) ++ return; ++ ++ /* disable the arbiter while setting MTQC */ ++ rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS); ++ rttdcs |= IXGBE_RTTDCS_ARBDIS; ++ IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); ++ ++ /* set transmit pool layout */ ++ mask = IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_VMDQ_ENABLED; ++ mask |= IXGBE_FLAG_DCB_ENABLED; ++ switch (adapter->flags & mask) { ++ ++ case (IXGBE_FLAG_VMDQ_ENABLED): ++ case (IXGBE_FLAG_SRIOV_ENABLED): ++ case (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_VMDQ_ENABLED): ++ IXGBE_WRITE_REG(hw, IXGBE_MTQC, ++ (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF)); ++ break; ++ case (IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_DCB_ENABLED): ++ case (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_DCB_ENABLED): ++ case (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_VMDQ_ENABLED ++ | IXGBE_FLAG_DCB_ENABLED): ++ IXGBE_WRITE_REG(hw, IXGBE_MTQC, ++ (IXGBE_MTQC_RT_ENA ++ | IXGBE_MTQC_VT_ENA ++ | IXGBE_MTQC_4TC_4TQ)); ++ break; ++ ++ case (IXGBE_FLAG_DCB_ENABLED): ++ IXGBE_WRITE_REG(hw, IXGBE_MTQC, ++ IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ); ++ break; ++ ++ default: ++ IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB); ++ break; ++ } ++ ++ /* re-enable the arbiter */ ++ rttdcs &= ~IXGBE_RTTDCS_ARBDIS; ++ IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); ++} ++ ++/** ++ * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset ++ * @adapter: board private structure ++ * ++ * Configure the Tx unit of the MAC after a reset. 
++ **/ ++static void ixgbe_configure_tx(struct ixgbe_adapter *adapter) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ u32 dmatxctl; ++ u32 i; ++ ++#ifdef CONFIG_NETDEVICES_MULTIQUEUE ++ if (adapter->num_tx_queues > 1) ++ adapter->netdev->features |= NETIF_F_MULTI_QUEUE; ++ else ++ adapter->netdev->features &= ~NETIF_F_MULTI_QUEUE; ++ ++#endif ++ ixgbe_setup_mtqc(adapter); ++ ++ if (hw->mac.type != ixgbe_mac_82598EB) { ++ /* DMATXCTL.EN must be before Tx queues are enabled */ ++ dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); ++ dmatxctl |= IXGBE_DMATXCTL_TE; ++ IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl); ++ } ++ ++ /* Setup the HW Tx Head and Tail descriptor pointers */ ++ for (i = 0; i < adapter->num_tx_queues; i++) ++ ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]); ++} ++ ++#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 ++ ++static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, ++ struct ixgbe_ring *rx_ring) ++{ ++ u32 srrctl; ++ u8 reg_idx = rx_ring->reg_idx; ++ ++ switch (adapter->hw.mac.type) { ++ case ixgbe_mac_82598EB: { ++ struct ixgbe_ring_feature *feature = adapter->ring_feature; ++ /* program one srrctl register per VMDq index */ ++ if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) { ++ unsigned long mask; ++ long shift, len; ++ mask = (unsigned long) feature[RING_F_VMDQ].mask; ++ len = sizeof(feature[RING_F_VMDQ].mask) * 8; ++ shift = find_first_bit(&mask, len); ++ reg_idx = (reg_idx & mask) >> shift; ++ } else { ++ /* ++ * if VMDq is not active we must program one srrctl ++ * register per RSS queue since we have enabled ++ * RDRXCTL.MVMEN ++ */ ++ const int mask = feature[RING_F_RSS].mask; ++ reg_idx = reg_idx & mask; ++ } ++ } ++ break; ++ case ixgbe_mac_82599EB: ++ default: ++ break; ++ } ++ ++ srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx)); ++ ++ srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; ++ srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; ++ if (adapter->num_vfs) ++ srrctl |= IXGBE_SRRCTL_DROP_EN; ++ ++ srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & ++ IXGBE_SRRCTL_BSIZEHDR_MASK; ++ ++ if (ring_is_ps_enabled(rx_ring)) { ++#if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER ++ srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; ++#else ++ srrctl |= (PAGE_SIZE / 2) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; ++#endif ++ srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; ++ } else { ++ srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >> ++ IXGBE_SRRCTL_BSIZEPKT_SHIFT; ++ srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; ++ } ++ ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx), srrctl); ++} ++ ++static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D, ++ 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE, ++ 0x6A3E67EA, 0x14364D17, 0x3BED200D}; ++ u32 mrqc = 0, reta = 0; ++ u32 rxcsum; ++ int i, j; ++ int mask; ++ ++ /* Fill out hash function seeds */ ++ for (i = 0; i < 10; i++) ++ IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]); ++ ++ /* Fill out redirection table */ ++ for (i = 0, j = 0; i < 128; i++, j++) { ++ if (j == adapter->ring_feature[RING_F_RSS].indices) ++ j = 0; ++ /* reta = 4-byte sliding window of ++ * 0x00..(indices-1)(indices-1)00..etc. 
*/ ++ reta = (reta << 8) | (j * 0x11); ++ if ((i & 3) == 3) ++ IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); ++ } ++ ++ /* Disable indicating checksum in descriptor, enables RSS hash */ ++ rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); ++ rxcsum |= IXGBE_RXCSUM_PCSD; ++ IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); ++ ++ if (adapter->hw.mac.type == ixgbe_mac_82598EB) ++ mask = adapter->flags & IXGBE_FLAG_RSS_ENABLED; ++ else ++ mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED ++ | IXGBE_FLAG_DCB_ENABLED ++ | IXGBE_FLAG_VMDQ_ENABLED ++ | IXGBE_FLAG_SRIOV_ENABLED ++ ); ++ ++ switch (mask) { ++ case (IXGBE_FLAG_RSS_ENABLED): ++ mrqc = IXGBE_MRQC_RSSEN; ++ break; ++ case (IXGBE_FLAG_SRIOV_ENABLED): ++ mrqc = IXGBE_MRQC_VMDQEN; ++ break; ++ case (IXGBE_FLAG_VMDQ_ENABLED): ++ case (IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED): ++ mrqc = IXGBE_MRQC_VMDQEN; ++ break; ++ case (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_VMDQ_ENABLED): ++ if (adapter->ring_feature[RING_F_RSS].indices == 4) ++ mrqc = IXGBE_MRQC_VMDQRSS32EN; ++ else if (adapter->ring_feature[RING_F_RSS].indices == 2) ++ mrqc = IXGBE_MRQC_VMDQRSS64EN; ++ else ++ mrqc = IXGBE_MRQC_VMDQEN; ++ break; ++ case (IXGBE_FLAG_DCB_ENABLED | IXGBE_FLAG_VMDQ_ENABLED): ++ case (IXGBE_FLAG_DCB_ENABLED | IXGBE_FLAG_VMDQ_ENABLED ++ | IXGBE_FLAG_SRIOV_ENABLED): ++ mrqc = IXGBE_MRQC_VMDQRT4TCEN; /* 4 TCs */ ++ break; ++ case (IXGBE_FLAG_DCB_ENABLED): ++ mrqc = IXGBE_MRQC_RT8TCEN; ++ break; ++ default: ++ break; ++ } ++ ++ /* Perform hash on these packet types */ ++ mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 ++ | IXGBE_MRQC_RSS_FIELD_IPV4_TCP ++ | IXGBE_MRQC_RSS_FIELD_IPV6 ++ | IXGBE_MRQC_RSS_FIELD_IPV6_TCP; ++ ++ IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); ++} ++ ++/** ++ * ixgbe_configure_rscctl - enable RSC for the indicated ring ++ * @adapter: address of board private structure ++ * @ring: structure containing ring specific data ++ **/ ++void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, ++ struct ixgbe_ring *ring) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ u32 rscctrl; ++ int rx_buf_len; ++ u8 reg_idx = ring->reg_idx; ++ ++ if (!ring_is_rsc_enabled(ring)) ++ return; ++ ++ rx_buf_len = ring->rx_buf_len; ++ rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx)); ++ rscctrl |= IXGBE_RSCCTL_RSCEN; ++ /* ++ * we must limit the number of descriptors so that ++ * the total size of max desc * buf_len is not greater ++ * than 65535 ++ */ ++ if (ring_is_ps_enabled(ring)) { ++#if (MAX_SKB_FRAGS > 16) ++ rscctrl |= IXGBE_RSCCTL_MAXDESC_16; ++#elif (MAX_SKB_FRAGS > 8) ++ rscctrl |= IXGBE_RSCCTL_MAXDESC_8; ++#elif (MAX_SKB_FRAGS > 4) ++ rscctrl |= IXGBE_RSCCTL_MAXDESC_4; ++#else ++ rscctrl |= IXGBE_RSCCTL_MAXDESC_1; ++#endif ++ } else { ++ if (rx_buf_len < IXGBE_RXBUFFER_4096) ++ rscctrl |= IXGBE_RSCCTL_MAXDESC_16; ++ else if (rx_buf_len < IXGBE_RXBUFFER_8192) ++ rscctrl |= IXGBE_RSCCTL_MAXDESC_8; ++ else ++ rscctrl |= IXGBE_RSCCTL_MAXDESC_4; ++ } ++ ++ IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl); ++} ++ ++/** ++ * ixgbe_clear_rscctl - disable RSC for the indicated ring ++ * @adapter: address of board private structure ++ * @ring: structure containing ring specific data ++ **/ ++void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter, ++ struct ixgbe_ring *ring) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ u32 rscctrl; ++ u8 reg_idx = ring->reg_idx; ++ ++ rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx)); ++ rscctrl &= ~IXGBE_RSCCTL_RSCEN; ++ IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl); ++ ++ clear_ring_rsc_enabled(ring); ++} ++ ++/** ++ * ixgbe_set_uta - Set 
unicast filter table address ++ * @adapter: board private structure ++ * ++ * The unicast table address is a register array of 32-bit registers. ++ * The table is meant to be used in a way similar to how the MTA is used ++ * however due to certain limitations in the hardware it is necessary to ++ * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous ++ * enable bit to allow vlan tag stripping when promiscuous mode is enabled ++ **/ ++static void ixgbe_set_uta(struct ixgbe_adapter *adapter) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ int i; ++ ++ /* The UTA table only exists on 82599 hardware and newer */ ++ if (hw->mac.type < ixgbe_mac_82599EB) ++ return; ++ ++ /* we only need to do this if VMDq is enabled */ ++ if (!(adapter->flags & ++ (IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED))) ++ return; ++ ++ for (i = 0; i < 128; i++) ++ IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0); ++} ++ ++static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter, ++ struct ixgbe_ring *ring) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ int wait_loop = IXGBE_MAX_RX_DESC_POLL; ++ u32 rxdctl; ++ u8 reg_idx = ring->reg_idx; ++ ++ /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */ ++ if (hw->mac.type == ixgbe_mac_82598EB && ++ !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) ++ return; ++ ++ do { ++ msleep(1); ++ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); ++ } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE)); ++ ++ if (!wait_loop) { ++ DPRINTK(DRV, ERR, "RXDCTL.ENABLE on Rx queue %d " ++ "not set within the polling period\n", reg_idx); ++ } ++} ++ ++void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, ++ struct ixgbe_ring *ring) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ u64 rdba = ring->dma; ++ u32 rxdctl; ++ u8 reg_idx = ring->reg_idx; ++ ++ /* disable queue to avoid issues while updating state */ ++ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); ++ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), ++ rxdctl & ~IXGBE_RXDCTL_ENABLE); ++ IXGBE_WRITE_FLUSH(hw); ++ ++ IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32))); ++ IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32)); ++ IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx), ++ ring->count * sizeof(union ixgbe_adv_rx_desc)); ++ IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0); ++ IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0); ++ ring->tail = hw->hw_addr + IXGBE_RDT(reg_idx); ++ ++ ixgbe_configure_srrctl(adapter, ring); ++ ixgbe_configure_rscctl(adapter, ring); ++ ++ if (hw->mac.type == ixgbe_mac_82598EB) { ++ /* ++ * enable cache line friendly hardware writes: ++ * PTHRESH=32 descriptors (half the internal cache), ++ * this also removes ugly rx_no_buffer_count increment ++ * HTHRESH=4 descriptors (to minimize latency on fetch) ++ * WTHRESH=8 burst writeback up to two cache lines ++ */ ++ rxdctl &= ~0x3FFFFF; ++ rxdctl |= 0x080420; ++ } ++ ++ /* enable receive descriptor ring */ ++ rxdctl |= IXGBE_RXDCTL_ENABLE; ++ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); ++ ++ ixgbe_rx_desc_queue_enable(adapter, ring); ++ ixgbe_alloc_rx_buffers(ring, IXGBE_DESC_UNUSED(ring)); ++} ++ ++static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ int p; ++ ++ /* PSRTYPE must be initialized in non 82598 adapters */ ++ u32 psrtype = IXGBE_PSRTYPE_TCPHDR | ++ IXGBE_PSRTYPE_UDPHDR | ++ IXGBE_PSRTYPE_IPV4HDR | ++ IXGBE_PSRTYPE_L2HDR | ++ IXGBE_PSRTYPE_IPV6HDR; ++ ++ if (hw->mac.type == ixgbe_mac_82598EB) ++ return; ++ ++ if (adapter->flags & 
IXGBE_FLAG_RSS_ENABLED) ++ psrtype |= (adapter->num_rx_queues_per_pool << 29); ++ ++ for (p = 0; p < adapter->num_rx_pools; p++) ++ IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(p)), psrtype); ++} ++ ++static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ u32 gcr_ext; ++ u32 vt_reg; ++ u32 vt_reg_bits; ++ u32 pool; ++ u32 vmdctl; ++ ++ if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED || ++ adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) ++ return; ++ ++ switch (hw->mac.type) { ++ case ixgbe_mac_82598EB: ++ vt_reg = IXGBE_VMD_CTL; ++ vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN; ++ vmdctl = IXGBE_READ_REG(hw, vt_reg); ++ IXGBE_WRITE_REG(hw, vt_reg, vmdctl | vt_reg_bits); ++ break; ++ case ixgbe_mac_82599EB: ++ vt_reg = IXGBE_VT_CTL; ++ vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN ++ | IXGBE_VT_CTL_REPLEN; ++ if (adapter->num_vfs) { ++ vt_reg_bits &= ~IXGBE_VT_CTL_POOL_MASK; ++ vt_reg_bits |= (adapter->num_vfs << ++ IXGBE_VT_CTL_POOL_SHIFT); ++ } ++ vmdctl = IXGBE_READ_REG(hw, vt_reg); ++ IXGBE_WRITE_REG(hw, vt_reg, vmdctl | vt_reg_bits); ++ for (pool = 1; pool < adapter->num_rx_pools; pool++) { ++ u32 vmolr; ++ int vmdq_pool = VMDQ_P(pool); ++ ++ /* ++ * accept untagged packets until a vlan tag ++ * is specifically set for the VMDQ queue/pool ++ */ ++ vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vmdq_pool)); ++ vmolr |= IXGBE_VMOLR_AUPE; ++ vmolr |= IXGBE_VMOLR_BAM; ++ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vmdq_pool), vmolr); ++ } ++ IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0xFFFFFFFF); ++ IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0xFFFFFFFF); ++ IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0xFFFFFFFF); ++ IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0xFFFFFFFF); ++ break; ++ default: ++ break; ++ } ++ ++ if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) ++ return; ++ ++ /* Map PF MAC address in RAR Entry 0 to first pool following VFs */ ++ hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs); ++ ++ /* ++ * Set up VF register offsets for selected VT Mode, ++ * i.e. 
32 or 64 VFs for SR-IOV ++ */ ++ gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT); ++ gcr_ext |= IXGBE_GCR_EXT_MSIX_EN; ++ gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64; ++ IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext); ++ ++ /* enable Tx loopback for VF/PF communication */ ++ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); ++} ++ ++static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ struct net_device *netdev = adapter->netdev; ++ int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; ++ int rx_buf_len; ++ struct ixgbe_ring *rx_ring; ++ int i; ++ u32 mhadd, hlreg0; ++ ++ /* Decide whether to use packet split mode or not */ ++ if (netdev->mtu > ETH_DATA_LEN) { ++ if (adapter->flags & IXGBE_FLAG_RX_PS_CAPABLE) ++ adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; ++ else ++ adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED; ++ } else { ++ if (adapter->flags & IXGBE_FLAG_RX_1BUF_CAPABLE) ++ adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED; ++ else ++ adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; ++ } ++ ++ /* Set the RX buffer length according to the mode */ ++ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { ++ rx_buf_len = IXGBE_RX_HDR_SIZE; ++ } else { ++ if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) && ++ (netdev->mtu <= ETH_DATA_LEN)) ++ rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; ++ else ++ rx_buf_len = ALIGN(max_frame + VLAN_HLEN, 1024); ++ } ++ ++#ifdef IXGBE_FCOE ++ /* adjust max frame to be able to do baby jumbo for FCoE */ ++ if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && ++ (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE)) ++ max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE; ++ ++#endif /* IXGBE_FCOE */ ++ mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); ++ if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) { ++ mhadd &= ~IXGBE_MHADD_MFS_MASK; ++ mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT; ++ ++ IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); ++ } ++ ++ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); ++ /* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */ ++ hlreg0 |= IXGBE_HLREG0_JUMBOEN; ++ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); ++ ++ /* ++ * Setup the HW Rx Head and Tail Descriptor Pointers and ++ * the Base and Length of the Rx Descriptor Ring ++ */ ++ for (i = 0; i < adapter->num_rx_queues; i++) { ++ rx_ring = adapter->rx_ring[i]; ++ rx_ring->rx_buf_len = rx_buf_len; ++ ++ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) ++ set_ring_ps_enabled(rx_ring); ++ else ++ clear_ring_ps_enabled(rx_ring); ++ ++ if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { ++ set_ring_rsc_enabled(rx_ring); ++#ifndef IXGBE_NO_LRO ++ clear_ring_lro_enabled(rx_ring); ++ } else if (adapter->flags2 & IXGBE_FLAG2_SWLRO_ENABLED) { ++ set_ring_lro_enabled(rx_ring); ++ clear_ring_rsc_enabled(rx_ring); ++ } else { ++ clear_ring_lro_enabled(rx_ring); ++#endif ++ clear_ring_rsc_enabled(rx_ring); ++ } ++ ++#ifdef IXGBE_FCOE ++ if (netdev->features & NETIF_F_FCOE_MTU) ++ { ++ struct ixgbe_ring_feature *f; ++ f = &adapter->ring_feature[RING_F_FCOE]; ++ if ((i >= f->mask) && (i < f->mask + f->indices)) { ++ clear_ring_ps_enabled(rx_ring); ++ if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE) ++ rx_ring->rx_buf_len = ++ IXGBE_FCOE_JUMBO_FRAME_SIZE; ++ } else if (!ring_is_rsc_enabled(rx_ring) && ++ !ring_is_ps_enabled(rx_ring)) { ++ rx_ring->rx_buf_len = ++ IXGBE_FCOE_JUMBO_FRAME_SIZE; ++ } ++ } ++#endif /* IXGBE_FCOE */ ++ } ++} ++ ++static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); ++ 
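++	/*
++	 * RDRXCTL needs per-MAC tuning before it is written back below:
++	 * 82598 sets MVMEN so per-queue SRRCTL settings and DCA tags are
++	 * honored, while 82599 adjusts the RSC and CRC-strip defaults.
++	 */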
++ switch (hw->mac.type) { ++ case ixgbe_mac_82598EB: ++ /* ++ * For VMDq support of different descriptor types or ++ * buffer sizes through the use of multiple SRRCTL ++ * registers, RDRXCTL.MVMEN must be set to 1 ++ * ++ * also, the manual doesn't mention it clearly but DCA hints ++ * will only use queue 0's tags unless this bit is set. Side ++ * effects of setting this bit are only that SRRCTL must be ++ * fully programmed [0..15] ++ */ ++ rdrxctl |= IXGBE_RDRXCTL_MVMEN; ++ break; ++ case ixgbe_mac_82599EB: ++ /* Disable RSC for ACK packets */ ++ IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, ++ (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU))); ++ rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; ++ /* hardware requires some bits to be set by default */ ++ rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX); ++ rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP; ++ break; ++ default: ++ /* We should do nothing since we don't know this hardware */ ++ return; ++ } ++ ++ IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); ++} ++ ++/** ++ * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset ++ * @adapter: board private structure ++ * ++ * Configure the Rx unit of the MAC after a reset. ++ **/ ++static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ int i; ++ u32 rxctrl; ++ ++ /* disable receives while setting up the descriptors */ ++ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); ++ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); ++ ++ ixgbe_setup_psrtype(adapter); ++ ixgbe_setup_rdrxctl(adapter); ++ ++ /* Program registers for the distribution of queues */ ++ ixgbe_setup_mrqc(adapter); ++ ++ ixgbe_set_uta(adapter); ++ ++ /* set_rx_buffer_len must be called before ring initialization */ ++ ixgbe_set_rx_buffer_len(adapter); ++ ++ /* ++ * Setup the HW Rx Head and Tail Descriptor Pointers and ++ * the Base and Length of the Rx Descriptor Ring ++ */ ++ for (i = 0; i < adapter->num_rx_queues; i++) ++ ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]); ++ ++ /* disable drop enable for 82598 parts */ ++ if (hw->mac.type == ixgbe_mac_82598EB) ++ rxctrl |= IXGBE_RXCTRL_DMBYPS; ++ ++ /* enable all receives */ ++ rxctrl |= IXGBE_RXCTRL_RXEN; ++ ixgbe_enable_rx_dma(hw, rxctrl); ++ ++} ++ ++#ifdef NETIF_F_HW_VLAN_TX ++static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ struct ixgbe_hw *hw = &adapter->hw; ++ int pool_ndx = adapter->num_vfs; ++#ifndef HAVE_NETDEV_VLAN_FEATURES ++ struct net_device *v_netdev; ++#endif /* HAVE_NETDEV_VLAN_FEATURES */ ++ int i; ++ ++ /* add VID to filter table */ ++ if (hw->mac.ops.set_vfta) { ++ hw->mac.ops.set_vfta(hw, vid, pool_ndx, true); ++ if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) { ++ switch (adapter->hw.mac.type) { ++ case ixgbe_mac_82599EB: ++ /* enable vlan id for all pools */ ++ for (i = 1; i < adapter->num_rx_pools; i++) { ++ hw->mac.ops.set_vfta(hw, vid, VMDQ_P(i), true); ++ } ++ break; ++ default: ++ break; ++ } ++ } ++ } ++ ++#ifndef HAVE_NETDEV_VLAN_FEATURES ++ /* ++ * Copy feature flags from netdev to the vlan netdev for this vid. ++ * This allows things like TSO to bubble down to our vlan device. ++ * Some vlans, such as VLAN 0 for DCB will not have a v_netdev so ++ * we will not have a netdev that needs updating. 
++ */ ++ if (adapter->vlgrp) { ++ v_netdev = vlan_group_get_device(adapter->vlgrp, vid); ++ if (v_netdev) { ++ v_netdev->features |= adapter->netdev->features; ++ vlan_group_set_device(adapter->vlgrp, vid, v_netdev); ++ } ++ } ++#endif /* HAVE_NETDEV_VLAN_FEATURES */ ++} ++ ++static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ struct ixgbe_hw *hw = &adapter->hw; ++ int pool_ndx = adapter->num_vfs; ++ int i; ++ ++ /* User is not allowed to remove vlan ID 0 */ ++ if (!vid) ++ return; ++ ++ if (!test_bit(__IXGBE_DOWN, &adapter->state)) ++ ixgbe_irq_disable(adapter); ++ ++ vlan_group_set_device(adapter->vlgrp, vid, NULL); ++ ++ if (!test_bit(__IXGBE_DOWN, &adapter->state)) ++ ixgbe_irq_enable(adapter, true, true); ++ ++ /* remove VID from filter table */ ++ ++ if (hw->mac.ops.set_vfta) { ++ hw->mac.ops.set_vfta(hw, vid, pool_ndx, false); ++ if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) { ++ switch (adapter->hw.mac.type) { ++ case ixgbe_mac_82599EB: ++ /* remove vlan id from all pools */ ++ for (i = 1; i < adapter->num_rx_pools; i++) { ++ hw->mac.ops.set_vfta(hw, vid, VMDQ_P(i), false); ++ } ++ break; ++ default: ++ break; ++ } ++ } ++ } ++} ++#endif ++ ++/** ++ * ixgbe_vlan_stripping_disable - helper to disable vlan tag stripping ++ * @adapter: driver data ++ */ ++static void ixgbe_vlan_stripping_disable(struct ixgbe_adapter *adapter) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ u32 vlnctrl; ++ int i; ++ ++ /* leave vlan tag stripping enabled for DCB */ ++ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) ++ return; ++ ++ switch (hw->mac.type) { ++ case ixgbe_mac_82598EB: ++ vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); ++ vlnctrl &= ~IXGBE_VLNCTRL_VME; ++ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); ++ break; ++ case ixgbe_mac_82599EB: ++ for (i = 0; i < adapter->num_rx_queues; i++) { ++ u8 reg_idx = adapter->rx_ring[i]->reg_idx; ++ vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); ++ vlnctrl &= ~IXGBE_RXDCTL_VME; ++ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), vlnctrl); ++ } ++ break; ++ default: ++ break; ++ } ++} ++ ++/** ++ * ixgbe_vlan_stripping_enable - helper to enable vlan tag stripping ++ * @adapter: driver data ++ */ ++static void ixgbe_vlan_stripping_enable(struct ixgbe_adapter *adapter) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ u32 vlnctrl; ++ int i; ++ ++ switch (hw->mac.type) { ++ case ixgbe_mac_82598EB: ++ vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); ++ vlnctrl |= IXGBE_VLNCTRL_VME; ++ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); ++ break; ++ case ixgbe_mac_82599EB: ++ for (i = 0; i < adapter->num_rx_queues; i++) { ++ u8 reg_idx = adapter->rx_ring[i]->reg_idx; ++ vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); ++ vlnctrl |= IXGBE_RXDCTL_VME; ++ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), vlnctrl); ++ } ++ break; ++ default: ++ break; ++ } ++} ++ ++#ifdef NETIF_F_HW_VLAN_TX ++static void ixgbe_vlan_rx_register(struct net_device *netdev, ++ struct vlan_group *grp) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ ++ if (!test_bit(__IXGBE_DOWN, &adapter->state)) ++ ixgbe_irq_disable(adapter); ++ adapter->vlgrp = grp; ++ ++ if (grp || (adapter->flags & IXGBE_FLAG_DCB_ENABLED)) ++ /* enable VLAN tag insert/strip */ ++ ixgbe_vlan_stripping_enable(adapter); ++ else ++ /* disable VLAN tag insert/strip */ ++ ixgbe_vlan_stripping_disable(adapter); ++ ++ if (!test_bit(__IXGBE_DOWN, &adapter->state)) ++ ixgbe_irq_enable(adapter, true, true); ++} ++ ++static void ixgbe_restore_vlan(struct 
ixgbe_adapter *adapter) ++{ ++ ixgbe_vlan_rx_register(adapter->netdev, adapter->vlgrp); ++ ++ /* add vlan ID 0 so we always accept priority-tagged traffic */ ++ ixgbe_vlan_rx_add_vid(adapter->netdev, 0); ++ ++ if (adapter->vlgrp) { ++ u16 vid; ++ for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { ++ if (!vlan_group_get_device(adapter->vlgrp, vid)) ++ continue; ++ ixgbe_vlan_rx_add_vid(adapter->netdev, vid); ++ } ++ } ++} ++ ++#endif ++static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq) ++{ ++#ifdef NETDEV_HW_ADDR_T_MULTICAST ++ struct netdev_hw_addr *mc_ptr; ++#else ++ struct dev_mc_list *mc_ptr; ++#endif ++ struct ixgbe_adapter *adapter = hw->back; ++ u8 *addr = *mc_addr_ptr; ++ ++ *vmdq = adapter->num_vfs; ++ ++#ifdef NETDEV_HW_ADDR_T_MULTICAST ++ mc_ptr = container_of(addr, struct netdev_hw_addr, addr[0]); ++ if (mc_ptr->list.next) { ++ struct netdev_hw_addr *ha; ++ ++ ha = list_entry(mc_ptr->list.next, struct netdev_hw_addr, list); ++ *mc_addr_ptr = ha->addr; ++ } ++#else ++ mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]); ++ if (mc_ptr->next) ++ *mc_addr_ptr = mc_ptr->next->dmi_addr; ++#endif ++ else ++ *mc_addr_ptr = NULL; ++ ++ return addr; ++} ++ ++/** ++ * ixgbe_write_mc_addr_list - write multicast addresses to MTA ++ * @netdev: network interface device structure ++ * ++ * Writes multicast address list to the MTA hash table. ++ * Returns: -ENOMEM on failure ++ * 0 on no addresses written ++ * X on writing X addresses to MTA ++ **/ ++static int ixgbe_write_mc_addr_list(struct net_device *netdev) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ struct ixgbe_hw *hw = &adapter->hw; ++#ifdef NETDEV_HW_ADDR_T_MULTICAST ++ struct netdev_hw_addr *ha; ++#endif ++ u8 *addr_list = NULL; ++ int addr_count; ++ ++ if (netdev_mc_empty(netdev)) { ++ /* nothing to program, so clear mc list */ ++ hw->mac.ops.update_mc_addr_list(hw, NULL, 0, ixgbe_addr_list_itr); ++ return 0; ++ } ++ ++ if (!hw->mac.ops.update_mc_addr_list) ++ return -ENOMEM; ++ ++#ifdef NETDEV_HW_ADDR_T_MULTICAST ++ ha = list_first_entry(&netdev->mc.list, struct netdev_hw_addr, list); ++ addr_list = ha->addr; ++#else ++ addr_list = netdev->mc_list->dmi_addr; ++#endif ++ addr_count = netdev_mc_count(netdev); ++ ++ hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count, ixgbe_addr_list_itr); ++ ++ return addr_count; ++} ++ ++#ifdef HAVE_SET_RX_MODE ++/** ++ * ixgbe_write_uc_addr_list - write unicast addresses to RAR table ++ * @netdev: network interface device structure ++ * ++ * Writes unicast address list to the RAR table. 
++ * Returns: -ENOMEM on failure/insufficient address space ++ * 0 on no addresses written ++ * X on writing X addresses to the RAR table ++ **/ ++static int ixgbe_write_uc_addr_list(struct net_device *netdev) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ struct ixgbe_hw *hw = &adapter->hw; ++ unsigned int vfn = adapter->num_vfs; ++ unsigned int rar_entries = hw->mac.num_rar_entries - (vfn + 1); ++ int count = 0; ++ ++ /* return ENOMEM indicating insufficient memory for addresses */ ++ if (netdev_uc_count(netdev) > rar_entries) ++ return -ENOMEM; ++ ++ if (!netdev_uc_empty(netdev) && rar_entries) { ++#ifdef NETDEV_HW_ADDR_T_UNICAST ++ struct netdev_hw_addr *ha; ++#else ++ struct dev_mc_list *ha; ++#endif ++ /* return error if we do not support writing to RAR table */ ++ if (!hw->mac.ops.set_rar) ++ return -ENOMEM; ++ ++ netdev_for_each_uc_addr(ha, netdev) { ++ if (!rar_entries) ++ break; ++#ifdef NETDEV_HW_ADDR_T_UNICAST ++ hw->mac.ops.set_rar(hw, rar_entries--, ha->addr, ++ vfn, IXGBE_RAH_AV); ++#else ++ hw->mac.ops.set_rar(hw, rar_entries--, ha->da_addr, ++ vfn, IXGBE_RAH_AV); ++#endif ++ count++; ++ } ++ } ++ /* write the addresses in reverse order to avoid write combining */ ++ for (; rar_entries > 0 ; rar_entries--) ++ hw->mac.ops.clear_rar(hw, rar_entries); ++ ++ return count; ++} ++ ++#endif ++/** ++ * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set ++ * @netdev: network interface device structure ++ * ++ * The set_rx_method entry point is called whenever the unicast/multicast ++ * address list or the network interface flags are updated. This routine is ++ * responsible for configuring the hardware for proper unicast, multicast and ++ * promiscuous mode. ++ **/ ++void ixgbe_set_rx_mode(struct net_device *netdev) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ struct ixgbe_hw *hw = &adapter->hw; ++ u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE; ++ u32 vlnctrl; ++ int count; ++ ++ /* Check for Promiscuous and All Multicast modes */ ++ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); ++ vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); ++ ++ /* set all bits that we expect to always be set */ ++ fctrl |= IXGBE_FCTRL_BAM; ++ fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */ ++ fctrl |= IXGBE_FCTRL_PMCF; ++ ++ /* clear the bits we are changing the status of */ ++ fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); ++ vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN); ++ ++ if (netdev->flags & IFF_PROMISC) { ++ hw->addr_ctrl.user_set_promisc = true; ++ fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); ++ vmolr |= IXGBE_VMOLR_MPE; ++ } else { ++ if (netdev->flags & IFF_ALLMULTI) { ++ fctrl |= IXGBE_FCTRL_MPE; ++ vmolr |= IXGBE_VMOLR_MPE; ++ } else { ++ /* ++ * Write addresses to the MTA, if the attempt fails ++ * then we should just turn on promiscous mode so ++ * that we can at least receive multicast traffic ++ */ ++ count = ixgbe_write_mc_addr_list(netdev); ++ if (count < 0) { ++ fctrl |= IXGBE_FCTRL_MPE; ++ vmolr |= IXGBE_VMOLR_MPE; ++ } else if (count) { ++ vmolr |= IXGBE_VMOLR_ROMPE; ++ } ++ } ++#ifdef NETIF_F_HW_VLAN_TX ++ /* enable hardware vlan filtering */ ++ vlnctrl |= IXGBE_VLNCTRL_VFE; ++#endif ++ hw->addr_ctrl.user_set_promisc = false; ++#ifdef HAVE_SET_RX_MODE ++ /* ++ * Write addresses to available RAR registers, if there is not ++ * sufficient space to store all the addresses then enable ++ * unicast promiscous mode ++ */ ++ count = ixgbe_write_uc_addr_list(netdev); ++ if (count < 0) { ++ fctrl |= IXGBE_FCTRL_UPE; ++ 
vmolr |= IXGBE_VMOLR_ROPE; ++ } ++#endif ++ } ++ ++#ifdef CONFIG_PCI_IOV ++ if (adapter->num_vfs) ++ ixgbe_restore_vf_multicasts(adapter); ++ ++#endif ++ if (hw->mac.type != ixgbe_mac_82598EB) { ++ vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(adapter->num_vfs)) & ++ ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE | ++ IXGBE_VMOLR_ROPE); ++ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(adapter->num_vfs), vmolr); ++ } ++ ++ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); ++ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); ++} ++ ++static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) ++{ ++#ifdef CONFIG_IXGBE_NAPI ++ int q_idx; ++ struct ixgbe_q_vector *q_vector; ++ int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; ++ ++ /* legacy and MSI only use one vector */ ++ if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) ++ q_vectors = 1; ++ ++ for (q_idx = 0; q_idx < q_vectors; q_idx++) { ++ struct napi_struct *napi; ++ q_vector = adapter->q_vector[q_idx]; ++ napi = &q_vector->napi; ++ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { ++ if (!q_vector->rxr_count || !q_vector->txr_count) { ++ if (q_vector->txr_count == 1) ++ napi->poll = &ixgbe_clean_txonly; ++ else if (q_vector->rxr_count == 1) ++ napi->poll = &ixgbe_clean_rxonly; ++ } ++ } ++ ++ napi_enable(napi); ++ } ++#endif /* CONFIG_IXGBE_NAPI */ ++} ++ ++static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter) ++{ ++#ifdef CONFIG_IXGBE_NAPI ++ int q_idx; ++ struct ixgbe_q_vector *q_vector; ++ int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; ++ ++ /* legacy and MSI only use one vector */ ++ if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) ++ q_vectors = 1; ++ ++ for (q_idx = 0; q_idx < q_vectors; q_idx++) { ++ q_vector = adapter->q_vector[q_idx]; ++ napi_disable(&q_vector->napi); ++ } ++#endif ++} ++ ++/* ++ * ixgbe_configure_dcb - Configure DCB hardware ++ * @adapter: ixgbe adapter struct ++ * ++ * This is called by the driver on open to configure the DCB hardware. ++ * This is also called by the gennetlink interface when reconfiguring ++ * the DCB state. 
++ */ ++static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ s32 err; ++ u32 mtu = adapter->netdev->mtu; ++ ++ if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) { ++ if (hw->mac.type == ixgbe_mac_82598EB) ++ netif_set_gso_max_size(adapter->netdev, 65536); ++ return; ++ } ++ ++ if (hw->mac.type == ixgbe_mac_82598EB) ++ netif_set_gso_max_size(adapter->netdev, 32768); ++ ++#ifdef IXGBE_FCOE ++ if (adapter->netdev->features & NETIF_F_FCOE_MTU) ++ mtu = max(mtu, (unsigned) IXGBE_FCOE_JUMBO_FRAME_SIZE); ++ ++#endif ++ adapter->dcb_cfg.num_tcs.pg_tcs = adapter->ring_feature[RING_F_DCB].indices; ++ err = ixgbe_dcb_check_config(&adapter->dcb_cfg); ++ if (err) ++ DPRINTK(DRV, ERR, "err in dcb_check_config\n"); ++ err = ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, mtu, ++ DCB_TX_CONFIG); ++ if (err) ++ DPRINTK(DRV, ERR, "err in dcb_calculate_tc_credits (TX)\n"); ++ err = ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, mtu, ++ DCB_RX_CONFIG); ++ if (err) ++ DPRINTK(DRV, ERR, "err in dcb_calculate_tc_credits (RX)\n"); ++ ++ /* reconfigure the hardware */ ++ ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg); ++} ++ ++#ifndef IXGBE_NO_LLI ++static void ixgbe_configure_lli_82599(struct ixgbe_adapter *adapter) ++{ ++ u16 port; ++ ++ if (adapter->lli_etype) { ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_L34T_IMIR(0), ++ (IXGBE_IMIR_LLI_EN_82599 | IXGBE_IMIR_SIZE_BP_82599 | ++ IXGBE_IMIR_CTRL_BP_82599)); ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_ETQS(0), IXGBE_ETQS_LLI); ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_ETQF(0), ++ (adapter->lli_etype | IXGBE_ETQF_FILTER_EN)); ++ } ++ ++ if (adapter->lli_port) { ++ port = ntohs((u16)adapter->lli_port); ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_L34T_IMIR(0), ++ (IXGBE_IMIR_LLI_EN_82599 | IXGBE_IMIR_SIZE_BP_82599 | ++ IXGBE_IMIR_CTRL_BP_82599)); ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_FTQF(0), ++ (IXGBE_FTQF_POOL_MASK_EN | ++ (IXGBE_FTQF_PRIORITY_MASK << ++ IXGBE_FTQF_PRIORITY_SHIFT) | ++ (IXGBE_FTQF_DEST_PORT_MASK << ++ IXGBE_FTQF_5TUPLE_MASK_SHIFT))); ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_SDPQF(0), (port << 16)); ++ } ++ ++ if (adapter->flags & IXGBE_FLAG_LLI_PUSH) { ++ switch (adapter->hw.mac.type) { ++ case ixgbe_mac_82599EB: ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_L34T_IMIR(0), ++ (IXGBE_IMIR_LLI_EN_82599 | IXGBE_IMIR_SIZE_BP_82599 | ++ IXGBE_IMIR_CTRL_PSH_82599 | IXGBE_IMIR_CTRL_SYN_82599 | ++ IXGBE_IMIR_CTRL_URG_82599 | IXGBE_IMIR_CTRL_ACK_82599 | ++ IXGBE_IMIR_CTRL_RST_82599 | IXGBE_IMIR_CTRL_FIN_82599)); ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_LLITHRESH, 0xfc000000); ++ break; ++ default: ++ break; ++ } ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_FTQF(0), ++ (IXGBE_FTQF_POOL_MASK_EN | ++ (IXGBE_FTQF_PRIORITY_MASK << ++ IXGBE_FTQF_PRIORITY_SHIFT) | ++ (IXGBE_FTQF_5TUPLE_MASK_MASK << ++ IXGBE_FTQF_5TUPLE_MASK_SHIFT))); ++ ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_SYNQF, 0x80000100); ++ } ++ ++ if (adapter->lli_size) { ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_L34T_IMIR(0), ++ (IXGBE_IMIR_LLI_EN_82599 | IXGBE_IMIR_CTRL_BP_82599)); ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_LLITHRESH, adapter->lli_size); ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_FTQF(0), ++ (IXGBE_FTQF_POOL_MASK_EN | ++ (IXGBE_FTQF_PRIORITY_MASK << ++ IXGBE_FTQF_PRIORITY_SHIFT) | ++ (IXGBE_FTQF_5TUPLE_MASK_MASK << ++ IXGBE_FTQF_5TUPLE_MASK_SHIFT))); ++ } ++ ++ if (adapter->lli_vlan_pri) { ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIRVP, ++ (IXGBE_IMIRVP_PRIORITY_EN | adapter->lli_vlan_pri)); ++ } ++} ++ ++static void ixgbe_configure_lli(struct ixgbe_adapter *adapter) ++{ 
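++	/*
++	 * Program the Low Latency Interrupt filters: an immediate interrupt
++	 * can be requested for a matching destination port, the TCP PSH
++	 * flag, a packet size threshold, or (on 82599 only) an ethertype or
++	 * VLAN priority, depending on which lli_* fields are configured.
++	 */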
++ u16 port; ++ ++ /* lli should only be enabled with MSI-X and MSI */ ++ if (!(adapter->flags & IXGBE_FLAG_MSI_ENABLED) && ++ !(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) ++ return; ++ ++ if (adapter->hw.mac.type != ixgbe_mac_82598EB) { ++ ixgbe_configure_lli_82599(adapter); ++ return; ++ } ++ ++ if (adapter->lli_port) { ++ /* use filter 0 for port */ ++ port = ntohs((u16)adapter->lli_port); ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIR(0), ++ (port | IXGBE_IMIR_PORT_IM_EN)); ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIREXT(0), ++ (IXGBE_IMIREXT_SIZE_BP | ++ IXGBE_IMIREXT_CTRL_BP)); ++ } ++ ++ if (adapter->flags & IXGBE_FLAG_LLI_PUSH) { ++ /* use filter 1 for push flag */ ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIR(1), ++ (IXGBE_IMIR_PORT_BP | IXGBE_IMIR_PORT_IM_EN)); ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIREXT(1), ++ (IXGBE_IMIREXT_SIZE_BP | ++ IXGBE_IMIREXT_CTRL_PSH)); ++ } ++ ++ if (adapter->lli_size) { ++ /* use filter 2 for size */ ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIR(2), ++ (IXGBE_IMIR_PORT_BP | IXGBE_IMIR_PORT_IM_EN)); ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIREXT(2), ++ (adapter->lli_size | IXGBE_IMIREXT_CTRL_BP)); ++ } ++} ++ ++#endif /* IXGBE_NO_LLI */ ++static void ixgbe_configure(struct ixgbe_adapter *adapter) ++{ ++ ixgbe_set_rx_mode(adapter->netdev); ++ ++#ifdef NETIF_F_HW_VLAN_TX ++ ixgbe_restore_vlan(adapter); ++#endif ++ ixgbe_configure_dcb(adapter); ++ ++#ifdef IXGBE_FCOE ++ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) ++ ixgbe_configure_fcoe(adapter); ++ ++#endif /* IXGBE_FCOE */ ++ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ++ ixgbe_init_fdir_signature_82599(&adapter->hw, ++ adapter->fdir_pballoc); ++ else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) ++ ixgbe_init_fdir_perfect_82599(&adapter->hw, ++ adapter->fdir_pballoc); ++ ixgbe_configure_virtualization(adapter); ++ ++ ixgbe_configure_tx(adapter); ++ ixgbe_configure_rx(adapter); ++} ++ ++static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw) ++{ ++ switch (hw->phy.type) { ++ case ixgbe_phy_sfp_avago: ++ case ixgbe_phy_sfp_ftl: ++ case ixgbe_phy_sfp_intel: ++ case ixgbe_phy_sfp_unknown: ++ case ixgbe_phy_sfp_passive_tyco: ++ case ixgbe_phy_sfp_passive_unknown: ++ case ixgbe_phy_sfp_active_unknown: ++ case ixgbe_phy_sfp_ftl_active: ++ return true; ++ default: ++ return false; ++ } ++} ++ ++/** ++ * ixgbe_sfp_link_config - set up SFP+ link ++ * @adapter: pointer to private adapter struct ++ **/ ++static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ ++ if (hw->phy.multispeed_fiber) { ++ /* ++ * In multispeed fiber setups, the device may not have ++ * had a physical connection when the driver loaded. ++ * If that's the case, the initial link configuration ++ * couldn't get the MAC into 10G or 1G mode, so we'll ++ * never have a link status change interrupt fire. ++ * We need to try and force an autonegotiation ++ * session, then bring up link. ++ */ ++ hw->mac.ops.setup_sfp(hw); ++ if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK)) ++ schedule_work(&adapter->multispeed_fiber_task); ++ } else { ++ /* ++ * Direct Attach Cu and non-multispeed fiber modules ++ * still need to be configured properly prior to ++ * attempting link. 
++ */ ++ if (!(adapter->flags & IXGBE_FLAG_IN_SFP_MOD_TASK)) ++ schedule_work(&adapter->sfp_config_module_task); ++ } ++} ++ ++/** ++ * ixgbe_non_sfp_link_config - set up non-SFP+ link ++ * @hw: pointer to private hardware struct ++ * ++ * Returns 0 on success, negative on failure ++ **/ ++static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw) ++{ ++ u32 autoneg; ++ bool negotiation, link_up = false; ++ u32 ret = IXGBE_ERR_LINK_SETUP; ++ ++ if (hw->mac.ops.check_link) ++ ret = hw->mac.ops.check_link(hw, &autoneg, &link_up, false); ++ ++ if (ret) ++ goto link_cfg_out; ++ ++ autoneg = hw->phy.autoneg_advertised; ++ if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) ++ ret = hw->mac.ops.get_link_capabilities(hw, &autoneg, ++ &negotiation); ++ if (ret) ++ goto link_cfg_out; ++ ++ if (hw->mac.ops.setup_link) ++ ret = hw->mac.ops.setup_link(hw, autoneg, negotiation, link_up); ++link_cfg_out: ++ return ret; ++} ++ ++/** ++ * ixgbe_clear_vf_stats_counters - Clear out VF stats after reset ++ * @adapter: board private structure ++ * ++ * On a reset we need to clear out the VF stats or accounting gets ++ * messed up because they're not clear on read. ++ **/ ++void ixgbe_clear_vf_stats_counters(struct ixgbe_adapter *adapter) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ int i; ++ ++ for(i = 0; i < adapter->num_vfs; i++) { ++ adapter->vfinfo[i].last_vfstats.gprc = ++ IXGBE_READ_REG(hw, IXGBE_PVFGPRC(i)); ++ adapter->vfinfo[i].saved_rst_vfstats.gprc += ++ adapter->vfinfo[i].vfstats.gprc; ++ adapter->vfinfo[i].vfstats.gprc = 0; ++ adapter->vfinfo[i].last_vfstats.gptc = ++ IXGBE_READ_REG(hw, IXGBE_PVFGPTC(i)); ++ adapter->vfinfo[i].saved_rst_vfstats.gptc += ++ adapter->vfinfo[i].vfstats.gptc; ++ adapter->vfinfo[i].vfstats.gptc = 0; ++ adapter->vfinfo[i].last_vfstats.gorc = ++ IXGBE_READ_REG(hw, IXGBE_PVFGORC_LSB(i)); ++ adapter->vfinfo[i].saved_rst_vfstats.gorc += ++ adapter->vfinfo[i].vfstats.gorc; ++ adapter->vfinfo[i].vfstats.gorc = 0; ++ adapter->vfinfo[i].last_vfstats.gotc = ++ IXGBE_READ_REG(hw, IXGBE_PVFGOTC_LSB(i)); ++ adapter->vfinfo[i].saved_rst_vfstats.gotc += ++ adapter->vfinfo[i].vfstats.gotc; ++ adapter->vfinfo[i].vfstats.gotc = 0; ++ adapter->vfinfo[i].last_vfstats.mprc = ++ IXGBE_READ_REG(hw, IXGBE_PVFMPRC(i)); ++ adapter->vfinfo[i].saved_rst_vfstats.mprc += ++ adapter->vfinfo[i].vfstats.mprc; ++ adapter->vfinfo[i].vfstats.mprc = 0; ++ } ++} ++ ++static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ u32 gpie = 0; ++ ++ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { ++ gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT | ++ IXGBE_GPIE_OCD; ++#ifdef CONFIG_IXGBE_NAPI ++ gpie |= IXGBE_GPIE_EIAME; ++ /* ++ * use EIAM to auto-mask when MSI-X interrupt is asserted ++ * this saves a register write for every interrupt ++ */ ++ switch (hw->mac.type) { ++ case ixgbe_mac_82598EB: ++ IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); ++ break; ++ default: ++ case ixgbe_mac_82599EB: ++ IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); ++ IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); ++ break; ++ } ++ } else { ++ /* legacy interrupts, use EIAM to auto-mask when reading EICR, ++ * specifically only auto mask tx and rx interrupts */ ++ IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); ++#endif ++ } ++ ++ /* XXX: to interrupt immediately for EICS writes, enable this */ ++ /* gpie |= IXGBE_GPIE_EIMEN; */ ++ ++ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { ++ gpie &= ~IXGBE_GPIE_VTMODE_MASK; ++ gpie |= IXGBE_GPIE_VTMODE_64; ++ } 
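++	/*
++	 * GPIE.VTMODE here mirrors the 64-pool VT mode selected via
++	 * GCR_EXT in ixgbe_configure_virtualization() when SR-IOV is on.
++	 */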
++ ++ /* Enable Thermal over heat sensor interrupt */ ++ if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) ++ gpie |= IXGBE_SDP0_GPIEN; ++ ++ /* Enable fan failure interrupt */ ++ if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) ++ gpie |= IXGBE_SDP1_GPIEN; ++ ++ if (hw->mac.type == ixgbe_mac_82599EB) ++ gpie |= IXGBE_SDP1_GPIEN; ++ gpie |= IXGBE_SDP2_GPIEN; ++ ++ IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); ++#ifdef IXGBE_TCP_TIMER ++ ++ if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) || ++ (adapter->flags & IXGBE_FLAG_MSI_ENABLED)) { ++ u32 tcp_timer = IXGBE_READ_REG(hw, IXGBE_TCPTIMER); ++ tcp_timer |= IXGBE_TCPTIMER_DURATION_MASK; ++ tcp_timer |= (IXGBE_TCPTIMER_KS | ++ IXGBE_TCPTIMER_COUNT_ENABLE | ++ IXGBE_TCPTIMER_LOOP); ++ IXGBE_WRITE_REG(hw, IXGBE_TCPTIMER, tcp_timer); ++ tcp_timer = IXGBE_READ_REG(hw, IXGBE_TCPTIMER); ++ } ++#endif ++} ++ ++static int ixgbe_up_complete(struct ixgbe_adapter *adapter) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ int err; ++ u32 ctrl_ext; ++ ++ ixgbe_get_hw_control(adapter); ++ ixgbe_setup_gpie(adapter); ++ ++ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) ++ ixgbe_configure_msix(adapter); ++ else ++ ixgbe_configure_msi_and_legacy(adapter); ++ ++#ifndef IXGBE_NO_LLI ++ ixgbe_configure_lli(adapter); ++ ++#endif ++ /* enable the optics */ ++ if (hw->phy.multispeed_fiber) ++ ixgbe_enable_tx_laser(hw); ++ ++ clear_bit(__IXGBE_DOWN, &adapter->state); ++ ixgbe_napi_enable_all(adapter); ++ ++ if (ixgbe_is_sfp(hw)) { ++ ixgbe_sfp_link_config(adapter); ++ } else { ++ err = ixgbe_non_sfp_link_config(hw); ++ if (err) ++ DPRINTK(PROBE, ERR, "link_config FAILED %d\n", err); ++ } ++ ++ /* clear any pending interrupts, may auto mask */ ++ IXGBE_READ_REG(hw, IXGBE_EICR); ++ ixgbe_irq_enable(adapter, true, true); ++ ++ /* ++ * For hot-pluggable SFP+ devices, a SFP+ module may have arrived ++ * before interrupts were enabled but after probe. Such devices ++ * wouldn't have their type indentified yet. We need to kick off ++ * the SFP+ module setup first, then try to bring up link. If we're ++ * not hot-pluggable SFP+, we just need to configure link and bring ++ * it up. ++ */ ++ if (hw->phy.type == ixgbe_phy_none) ++ schedule_work(&adapter->sfp_config_module_task); ++ ++ /* enable transmits */ ++ netif_tx_start_all_queues(adapter->netdev); ++ ++ /* bring the link up in the watchdog, this could race with our first ++ * link up interrupt but shouldn't be a problem */ ++ adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; ++ adapter->link_check_timeout = jiffies; ++ mod_timer(&adapter->watchdog_timer, jiffies); ++ ++ ixgbe_clear_vf_stats_counters(adapter); ++ /* Set PF Reset Done bit so PF/VF Mail Ops can work */ ++ ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); ++ ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; ++ IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); ++ ++ return 0; ++} ++ ++void ixgbe_reinit_locked(struct ixgbe_adapter *adapter) ++{ ++ WARN_ON(in_interrupt()); ++ /* put off any impending NetWatchDogTimeout */ ++ adapter->netdev->trans_start = jiffies; ++ ++ while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) ++ msleep(1); ++ ixgbe_down(adapter); ++ /* ++ * If SR-IOV enabled then wait a bit before bringing the adapter ++ * back up to give the VFs time to respond to the reset. The ++ * two second wait is based upon the watchdog timer cycle in ++ * the VF driver. 
++ */ ++ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) ++ msleep(2000); ++ ixgbe_up(adapter); ++ clear_bit(__IXGBE_RESETTING, &adapter->state); ++} ++ ++int ixgbe_up(struct ixgbe_adapter *adapter) ++{ ++ int err; ++ ++ ixgbe_configure(adapter); ++ ++ err = ixgbe_up_complete(adapter); ++ ++ return err; ++} ++ ++void ixgbe_reset(struct ixgbe_adapter *adapter) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ int err; ++ ++ err = hw->mac.ops.init_hw(hw); ++ switch (err) { ++ case 0: ++ case IXGBE_ERR_SFP_NOT_PRESENT: ++ break; ++ case IXGBE_ERR_MASTER_REQUESTS_PENDING: ++ DPRINTK(HW, INFO, "master disable timed out\n"); ++ break; ++ case IXGBE_ERR_EEPROM_VERSION: ++ /* We are running on a pre-production device, log a warning */ ++ DPRINTK(PROBE, INFO, "This device is a pre-production adapter/" ++ "LOM. Please be aware there may be issues associated " ++ "with your hardware. If you are experiencing problems " ++ "please contact your Intel or hardware representative " ++ "who provided you with this hardware.\n"); ++ break; ++ default: ++ DPRINTK(PROBE, ERR, "Hardware Error: %d\n", err); ++ } ++ ++ /* reprogram the RAR[0] in case user changed it. */ ++ if (hw->mac.ops.set_rar) ++ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); ++} ++ ++/** ++ * ixgbe_clean_rx_ring - Free Rx Buffers per Queue ++ * @rx_ring: ring to free buffers from ++ **/ ++void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) ++{ ++ struct device *dev = rx_ring->dev; ++ unsigned long size; ++ u16 i; ++ ++ /* ring already cleared, nothing to do */ ++ if (!rx_ring->rx_buffer_info) ++ return; ++ ++ /* Free all the Rx ring sk_buffs */ ++ for (i = 0; i < rx_ring->count; i++) { ++ struct ixgbe_rx_buffer *rx_buffer_info; ++ ++ rx_buffer_info = &rx_ring->rx_buffer_info[i]; ++ if (rx_buffer_info->dma) { ++ dma_unmap_single(dev, ++ rx_buffer_info->dma, ++ rx_ring->rx_buf_len, ++ DMA_FROM_DEVICE); ++ rx_buffer_info->dma = 0; ++ } ++ if (rx_buffer_info->skb) { ++ struct sk_buff *skb = rx_buffer_info->skb; ++ rx_buffer_info->skb = NULL; ++ do { ++ struct sk_buff *this = skb; ++ if (IXGBE_RSC_CB(this)->delay_unmap) { ++ dma_unmap_single(dev, ++ IXGBE_RSC_CB(this)->dma, ++ rx_ring->rx_buf_len, ++ DMA_FROM_DEVICE); ++ IXGBE_RSC_CB(skb)->dma = 0; ++ IXGBE_RSC_CB(skb)->delay_unmap = false; ++ } ++ skb = skb->prev; ++ dev_kfree_skb(this); ++ } while (skb); ++ } ++ if (!rx_buffer_info->page) ++ continue; ++ if (rx_buffer_info->page_dma) { ++ dma_unmap_page(dev, ++ rx_buffer_info->page_dma, ++ PAGE_SIZE / 2, ++ DMA_FROM_DEVICE); ++ } ++ ++ rx_buffer_info->page_dma = 0; ++ put_page(rx_buffer_info->page); ++ rx_buffer_info->page = NULL; ++ rx_buffer_info->page_offset = 0; ++ } ++ ++ size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; ++ memset(rx_ring->rx_buffer_info, 0, size); ++ ++ /* Zero out the descriptor ring */ ++ memset(rx_ring->desc, 0, rx_ring->size); ++ ++ rx_ring->next_to_clean = 0; ++ rx_ring->next_to_use = 0; ++} ++ ++/** ++ * ixgbe_clean_tx_ring - Free Tx Buffers ++ * @tx_ring: ring to be cleaned ++ **/ ++static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring) ++{ ++ struct ixgbe_tx_buffer *tx_buffer_info; ++ unsigned long size; ++ u16 i; ++ ++ /* ring already cleared, nothing to do */ ++ if (!tx_ring->tx_buffer_info) ++ return; ++ ++ /* Free all the Tx ring sk_buffs */ ++ for (i = 0; i < tx_ring->count; i++) { ++ tx_buffer_info = &tx_ring->tx_buffer_info[i]; ++ ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); ++ } ++ ++ size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; ++ memset(tx_ring->tx_buffer_info, 
0, size); ++ ++ /* Zero out the descriptor ring */ ++ memset(tx_ring->desc, 0, tx_ring->size); ++ ++ tx_ring->next_to_use = 0; ++ tx_ring->next_to_clean = 0; ++} ++ ++/** ++ * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues ++ * @adapter: board private structure ++ **/ ++static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter) ++{ ++ int i; ++ ++ for (i = 0; i < adapter->num_rx_queues; i++) ++ ixgbe_clean_rx_ring(adapter->rx_ring[i]); ++} ++ ++/** ++ * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues ++ * @adapter: board private structure ++ **/ ++static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter) ++{ ++ int i; ++ ++ for (i = 0; i < adapter->num_tx_queues; i++) ++ ixgbe_clean_tx_ring(adapter->tx_ring[i]); ++} ++ ++void ixgbe_down(struct ixgbe_adapter *adapter) ++{ ++ struct net_device *netdev = adapter->netdev; ++ struct ixgbe_hw *hw = &adapter->hw; ++ u32 rxctrl; ++ u32 txdctl; ++ int i; ++ ++ /* signal that we are down to the interrupt handler */ ++ set_bit(__IXGBE_DOWN, &adapter->state); ++ ++ /* disable receives */ ++ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); ++ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); ++ ++ IXGBE_WRITE_FLUSH(hw); ++ msleep(10); ++ ++ netif_tx_stop_all_queues(netdev); ++ ++ /* call carrier off first to avoid false dev_watchdog timeouts */ ++ netif_carrier_off(netdev); ++ netif_tx_disable(netdev); ++ ++ ixgbe_irq_disable(adapter); ++ ++ ixgbe_napi_disable_all(adapter); ++ ++ clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); ++ del_timer_sync(&adapter->sfp_timer); ++ del_timer_sync(&adapter->watchdog_timer); ++ ++ /* disable receive for all VFs and wait one second */ ++ if (adapter->num_vfs) { ++ /* Mark all the VFs as inactive */ ++ for (i = 0 ; i < adapter->num_vfs; i++) ++ adapter->vfinfo[i].clear_to_send = 0; ++ ++ /* ping all the active vfs to let them know we are going down */ ++ ixgbe_ping_all_vfs(adapter); ++ ++ /* Disable all VFTE/VFRE TX/RX */ ++ ixgbe_disable_tx_rx(adapter); ++ } ++ ++ /* disable transmits in the hardware now that interrupts are off */ ++ for (i = 0; i < adapter->num_tx_queues; i++) { ++ u8 reg_idx = adapter->tx_ring[i]->reg_idx; ++ txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); ++ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), ++ (txdctl & ~IXGBE_TXDCTL_ENABLE)); ++ } ++ /* Disable the Tx DMA engine on 82599 */ ++ switch (hw->mac.type) { ++ case ixgbe_mac_82599EB: ++ IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ++ (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) & ++ ~IXGBE_DMATXCTL_TE)); ++ break; ++ default: ++ break; ++ } ++ ++ /* power down the optics */ ++ if (hw->phy.multispeed_fiber) ++ ixgbe_disable_tx_laser(hw); ++ ++#ifdef NETIF_F_NTUPLE ++ ethtool_ntuple_flush(netdev); ++#endif /* NETIF_F_NTUPLE */ ++ ++#ifdef HAVE_PCI_ERS ++ if (!pci_channel_offline(adapter->pdev)) ++#endif ++ ixgbe_reset(adapter); ++ ixgbe_clean_all_tx_rings(adapter); ++ ixgbe_clean_all_rx_rings(adapter); ++ ++ /* since we reset the hardware DCA settings were cleared */ ++ ixgbe_setup_dca(adapter); ++} ++ ++#ifdef CONFIG_IXGBE_NAPI ++/** ++ * ixgbe_poll - NAPI Rx polling callback ++ * @napi: structure for representing this polling device ++ * @budget: how many packets driver is allowed to clean ++ * ++ * This function is used for legacy and MSI, NAPI mode ++ **/ ++static int ixgbe_poll(struct napi_struct *napi, int budget) ++{ ++ struct ixgbe_q_vector *q_vector = ++ container_of(napi, struct ixgbe_q_vector, napi); ++ struct ixgbe_adapter *adapter = q_vector->adapter; ++ int tx_clean_complete, work_done = 
0; ++ ++ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) ++ ixgbe_update_dca(q_vector); ++ ++ tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]); ++ ixgbe_clean_rx_irq(q_vector, adapter->rx_ring[0], &work_done, budget); ++ ++ if (!tx_clean_complete) ++ work_done = budget; ++ ++#ifndef HAVE_NETDEV_NAPI_LIST ++ if (!netif_running(adapter->netdev)) ++ work_done = 0; ++ ++#endif ++ /* If no Tx and not enough Rx work done, exit the polling mode */ ++ if (work_done < budget) { ++ napi_complete(napi); ++ if (adapter->rx_itr_setting & 1) ++ ixgbe_set_itr(adapter); ++ if (!test_bit(__IXGBE_DOWN, &adapter->state)) ++ ixgbe_irq_enable_queues(adapter, IXGBE_EIMS_RTX_QUEUE); ++ } ++ return work_done; ++} ++ ++#endif /* CONFIG_IXGBE_NAPI */ ++/** ++ * ixgbe_tx_timeout - Respond to a Tx Hang ++ * @netdev: network interface device structure ++ **/ ++static void ixgbe_tx_timeout(struct net_device *netdev) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ ++ /* Do the reset outside of interrupt context */ ++ schedule_work(&adapter->reset_task); ++} ++ ++static void ixgbe_reset_task(struct work_struct *work) ++{ ++ struct ixgbe_adapter *adapter; ++ adapter = container_of(work, struct ixgbe_adapter, reset_task); ++ ++ /* If we're already down or resetting, just bail */ ++ if (test_bit(__IXGBE_DOWN, &adapter->state) || ++ test_bit(__IXGBE_RESETTING, &adapter->state)) ++ return; ++ ++ adapter->tx_timeout_count++; ++ ++ ixgbe_reinit_locked(adapter); ++} ++ ++ ++/** ++ * ixgbe_set_dcb_queues: Allocate queues for a DCB-enabled device ++ * @adapter: board private structure to initialize ++ * ++ * When DCB (Data Center Bridging) is enabled, allocate queues for ++ * each traffic class. If multiqueue isn't availabe, then abort DCB ++ * initialization. ++ * ++ **/ ++static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) ++{ ++ bool ret = false; ++ struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_DCB]; ++ ++ if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) ++ return ret; ++ ++#ifdef HAVE_TX_MQ ++ f->mask = 0x7 << 3; ++ adapter->num_rx_queues = f->indices; ++ adapter->num_tx_queues = f->indices; ++ ret = true; ++#else ++ DPRINTK(DRV, INFO, "Kernel has no multiqueue support, disabling DCB\n"); ++ f->mask = 0; ++ f->indices = 0; ++#endif ++ ++ return ret; ++} ++ ++/** ++ * ixgbe_set_vmdq_queues: Allocate queues for VMDq devices ++ * @adapter: board private structure to initialize ++ * ++ * When VMDq (Virtual Machine Devices queue) is enabled, allocate queues ++ * and VM pools where appropriate. If RSS is available, then also try and ++ * enable RSS and map accordingly. 
++ * ++ **/ ++static inline bool ixgbe_set_vmdq_queues(struct ixgbe_adapter *adapter) ++{ ++ int vmdq_i = adapter->ring_feature[RING_F_VMDQ].indices; ++ int vmdq_m = 0; ++ int rss_i = adapter->ring_feature[RING_F_RSS].indices; ++ int rss_m = adapter->ring_feature[RING_F_RSS].mask; ++ unsigned long i; ++ int rss_shift; ++ bool ret = false; ++ ++ switch (adapter->flags & (IXGBE_FLAG_RSS_ENABLED ++ | IXGBE_FLAG_DCB_ENABLED ++ | IXGBE_FLAG_VMDQ_ENABLED)) { ++ ++ case (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_VMDQ_ENABLED): ++ switch (adapter->hw.mac.type) { ++ case ixgbe_mac_82599EB: ++ vmdq_i = min(IXGBE_MAX_VMDQ_INDICES, vmdq_i); ++ if (vmdq_i > 32) ++ rss_i = 2; ++ else ++ rss_i = 4; ++ i = rss_i; ++ rss_shift = find_first_bit(&i, sizeof(i) * 8); ++ rss_m = (rss_i - 1); ++ vmdq_m = ((IXGBE_MAX_VMDQ_INDICES - 1) << ++ rss_shift) & (MAX_RX_QUEUES - 1); ++ break; ++ default: ++ break; ++ } ++ adapter->num_rx_queues = vmdq_i * rss_i; ++ adapter->num_tx_queues = min(MAX_TX_QUEUES, vmdq_i * rss_i); ++ ret = true; ++ break; ++ ++ case (IXGBE_FLAG_VMDQ_ENABLED): ++ switch (adapter->hw.mac.type) { ++ case ixgbe_mac_82598EB: ++ vmdq_m = (IXGBE_MAX_VMDQ_INDICES - 1); ++ break; ++ case ixgbe_mac_82599EB: ++ vmdq_m = (IXGBE_MAX_VMDQ_INDICES - 1) << 1; ++ break; ++ default: ++ break; ++ } ++ adapter->num_rx_queues = vmdq_i; ++ adapter->num_tx_queues = vmdq_i; ++ ret = true; ++ break; ++ ++ default: ++ ret = false; ++ goto vmdq_queues_out; ++ } ++ ++ if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) { ++ adapter->num_rx_pools = vmdq_i; ++ adapter->num_rx_queues_per_pool = adapter->num_rx_queues / ++ vmdq_i; ++ } else { ++ adapter->num_rx_pools = adapter->num_rx_queues; ++ adapter->num_rx_queues_per_pool = 1; ++ } ++ /* save the mask for later use */ ++ adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m; ++vmdq_queues_out: ++ return ret; ++} ++ ++/** ++ * ixgbe_set_rss_queues: Allocate queues for RSS ++ * @adapter: board private structure to initialize ++ * ++ * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try ++ * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU. ++ * ++ **/ ++static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) ++{ ++ bool ret = false; ++ struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS]; ++ ++ if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { ++ f->mask = 0xF; ++ adapter->num_rx_queues = f->indices; ++#ifdef HAVE_TX_MQ ++ adapter->num_tx_queues = f->indices; ++#endif ++ ret = true; ++ } ++ ++ return ret; ++} ++ ++/** ++ * ixgbe_set_fdir_queues: Allocate queues for Flow Director ++ * @adapter: board private structure to initialize ++ * ++ * Flow Director is an advanced Rx filter, attempting to get Rx flows back ++ * to the original CPU that initiated the Tx session. This runs in addition ++ * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the ++ * Rx load across CPUs using RSS. ++ * ++ **/ ++static bool inline ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter) ++{ ++ bool ret = false; ++ struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR]; ++ ++ f_fdir->indices = min((int)num_online_cpus(), f_fdir->indices); ++ f_fdir->mask = 0; ++ ++ /* ++ * Use RSS in addition to Flow Director to ensure the best ++ * distribution of flows across cores, even when an FDIR flow ++ * isn't matched. 
++ */ ++ if (adapter->flags & IXGBE_FLAG_RSS_ENABLED && ++ ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || ++ (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)))) { ++ adapter->num_rx_queues = f_fdir->indices; ++#ifdef HAVE_TX_MQ ++ adapter->num_tx_queues = f_fdir->indices; ++#endif ++ ret = true; ++ } else { ++ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; ++ adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; ++ } ++ return ret; ++} ++ ++#ifdef IXGBE_FCOE ++/** ++ * ixgbe_set_fcoe_queues: Allocate queues for Fiber Channel over Ethernet (FCoE) ++ * @adapter: board private structure to initialize ++ * ++ * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges. ++ * The ring feature mask is not used as a mask for FCoE, as it can take any 8 ++ * rx queues out of the max numberof rx queues, instead, it is used as the ++ * index of the first rx queue. ++ * ++ **/ ++static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter) ++{ ++ bool ret = false; ++ struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; ++ ++ f->indices = min((int)num_online_cpus(), f->indices); ++ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { ++ adapter->num_rx_queues = 1; ++ adapter->num_tx_queues = 1; ++ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { ++ DPRINTK(PROBE, INFO, "FCoE enabled with DCB \n"); ++ ixgbe_set_dcb_queues(adapter); ++ } ++ if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { ++ DPRINTK(PROBE, INFO, "FCoE enabled with RSS \n"); ++ if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || ++ (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) ++ ixgbe_set_fdir_queues(adapter); ++ else ++ ixgbe_set_rss_queues(adapter); ++ } ++ /* adding FCoE queues */ ++ f->mask = adapter->num_rx_queues; ++ adapter->num_rx_queues += f->indices; ++ adapter->num_tx_queues += f->indices; ++ ++ ret = true; ++ } ++ ++ return ret; ++} ++ ++#endif /* IXGBE_FCOE */ ++ ++/** ++ * ixgbe_set_sriov_queues: Allocate queues for IOV use ++ * @adapter: board private structure to initialize ++ * ++ * IOV doesn't actually use anything, so just NAK the ++ * request for now and let the other queue routines ++ * figure out what to do. ++ */ ++static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) ++{ ++ return false; ++} ++ ++/* ++ * ixgbe_set_num_queues: Allocate queues for device, feature dependant ++ * @adapter: board private structure to initialize ++ * ++ * This is the top level queue allocation routine. The order here is very ++ * important, starting with the "most" number of features turned on at once, ++ * and ending with the smallest set of features. This way large combinations ++ * can be allocated if they're turned on, and smaller combinations are the ++ * fallthrough conditions. 
++ * ++ **/ ++static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) ++{ ++ /* Start with base case */ ++ adapter->num_rx_queues = 1; ++ adapter->num_tx_queues = 1; ++ adapter->num_rx_pools = adapter->num_rx_queues; ++ adapter->num_rx_queues_per_pool = 1; ++ ++ if (ixgbe_set_sriov_queues(adapter)) ++ return; ++ ++ if (ixgbe_set_vmdq_queues(adapter)) ++ return; ++ ++#ifdef IXGBE_FCOE ++ if (ixgbe_set_fcoe_queues(adapter)) ++ return; ++ ++#endif /* IXGBE_FCOE */ ++ if (ixgbe_set_dcb_queues(adapter)) ++ return; ++ ++ if (ixgbe_set_fdir_queues(adapter)) ++ return; ++ ++ ++ if (ixgbe_set_rss_queues(adapter)) ++ return; ++} ++ ++static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, ++ int vectors) ++{ ++ int err, vector_threshold; ++ ++ /* We'll want at least 3 (vector_threshold): ++ * 1) TxQ[0] Cleanup ++ * 2) RxQ[0] Cleanup ++ * 3) Other (Link Status Change, etc.) ++ * 4) TCP Timer (optional) ++ */ ++ vector_threshold = MIN_MSIX_COUNT; ++ ++ /* The more we get, the more we will assign to Tx/Rx Cleanup ++ * for the separate queues...where Rx Cleanup >= Tx Cleanup. ++ * Right now, we simply care about how many we'll get; we'll ++ * set them up later while requesting irq's. ++ */ ++ while (vectors >= vector_threshold) { ++ err = pci_enable_msix(adapter->pdev, adapter->msix_entries, ++ vectors); ++ if (!err) /* Success in acquiring all requested vectors. */ ++ break; ++ else if (err < 0) ++ vectors = 0; /* Nasty failure, quit now */ ++ else /* err == number of vectors we should try again with */ ++ vectors = err; ++ } ++ ++ if (vectors < vector_threshold) { ++ /* Can't allocate enough MSI-X interrupts? Oh well. ++ * This just means we'll go with either a single MSI ++ * vector or fall back to legacy interrupts. ++ */ ++ DPRINTK(HW, DEBUG, "Unable to allocate MSI-X interrupts\n"); ++ adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; ++ kfree(adapter->msix_entries); ++ adapter->msix_entries = NULL; ++ } else { ++ adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */ ++ /* ++ * Adjust for only the vectors we'll use, which is minimum ++ * of max_msix_q_vectors + NON_Q_VECTORS, or the number of ++ * vectors we were allocated. ++ */ ++ adapter->num_msix_vectors = min(vectors, ++ adapter->max_msix_q_vectors + NON_Q_VECTORS); ++ } ++} ++ ++/** ++ * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS ++ * @adapter: board private structure to initialize ++ * ++ * Cache the descriptor ring offsets for RSS to the assigned rings. ++ * ++ **/ ++static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) ++{ ++ int i; ++ ++ if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) ++ return false; ++ ++ for (i = 0; i < adapter->num_rx_queues; i++) ++ adapter->rx_ring[i]->reg_idx = i; ++ for (i = 0; i < adapter->num_tx_queues; i++) ++ adapter->tx_ring[i]->reg_idx = i; ++ ++ return true; ++} ++ ++/** ++ * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB ++ * @adapter: board private structure to initialize ++ * ++ * Cache the descriptor ring offsets for DCB to the assigned rings. 
++ * ++ **/ ++static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) ++{ ++ int i; ++ bool ret = false; ++ int dcb_i = adapter->ring_feature[RING_F_DCB].indices; ++ ++ if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) ++ return false; ++ ++ /* the number of queues is assumed to be symmetric */ ++ switch (adapter->hw.mac.type) { ++ case ixgbe_mac_82598EB: ++ for (i = 0; i < dcb_i; i++) { ++ adapter->rx_ring[i]->reg_idx = i << 3; ++ adapter->tx_ring[i]->reg_idx = i << 2; ++ } ++ ret = true; ++ break; ++ case ixgbe_mac_82599EB: ++ if (dcb_i == 8) { ++ /* ++ * Tx TC0 starts at: descriptor queue 0 ++ * Tx TC1 starts at: descriptor queue 32 ++ * Tx TC2 starts at: descriptor queue 64 ++ * Tx TC3 starts at: descriptor queue 80 ++ * Tx TC4 starts at: descriptor queue 96 ++ * Tx TC5 starts at: descriptor queue 104 ++ * Tx TC6 starts at: descriptor queue 112 ++ * Tx TC7 starts at: descriptor queue 120 ++ * ++ * Rx TC0-TC7 are offset by 16 queues each ++ */ ++ for (i = 0; i < 3; i++) { ++ adapter->tx_ring[i]->reg_idx = i << 5; ++ adapter->rx_ring[i]->reg_idx = i << 4; ++ } ++ for ( ; i < 5; i++) { ++ adapter->tx_ring[i]->reg_idx = ((i + 2) << 4); ++ adapter->rx_ring[i]->reg_idx = i << 4; ++ } ++ for ( ; i < dcb_i; i++) { ++ adapter->tx_ring[i]->reg_idx = ((i + 8) << 3); ++ adapter->rx_ring[i]->reg_idx = i << 4; ++ } ++ ret = true; ++ } else if (dcb_i == 4) { ++ /* ++ * Tx TC0 starts at: descriptor queue 0 ++ * Tx TC1 starts at: descriptor queue 64 ++ * Tx TC2 starts at: descriptor queue 96 ++ * Tx TC3 starts at: descriptor queue 112 ++ * ++ * Rx TC0-TC3 are offset by 32 queues each ++ */ ++ adapter->tx_ring[0]->reg_idx = 0; ++ adapter->tx_ring[1]->reg_idx = 64; ++ adapter->tx_ring[2]->reg_idx = 96; ++ adapter->tx_ring[3]->reg_idx = 112; ++ for (i = 0 ; i < dcb_i; i++) ++ adapter->rx_ring[i]->reg_idx = i << 5; ++ ret = true; ++ } ++ break; ++ default: ++ break; ++ } ++ return ret; ++} ++ ++/** ++ * ixgbe_cache_ring_vmdq - Descriptor ring to register mapping for VMDq ++ * @adapter: board private structure to initialize ++ * ++ * Cache the descriptor ring offsets for VMDq to the assigned rings. It ++ * will also try to cache the proper offsets if RSS is enabled along with ++ * VMDq. ++ * ++ **/ ++static inline bool ixgbe_cache_ring_vmdq(struct ixgbe_adapter *adapter) ++{ ++ int i; ++ bool ret = false; ++#ifdef IXGBE_FCOE ++ struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; ++#endif /* IXGBE_FCOE */ ++ switch (adapter->flags & (IXGBE_FLAG_RSS_ENABLED ++ | IXGBE_FLAG_DCB_ENABLED ++ | IXGBE_FLAG_VMDQ_ENABLED)) { ++ ++ case (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_VMDQ_ENABLED): ++ switch (adapter->hw.mac.type) { ++ case ixgbe_mac_82599EB: ++ /* since the # of rss queues per vmdq pool is ++ * limited to either 2 or 4, there is no index ++ * skipping and we can set them up with no ++ * funky mapping ++ */ ++ for (i = 0; i < adapter->num_rx_queues; i++) ++ adapter->rx_ring[i]->reg_idx = i; ++ for (i = 0; i < adapter->num_tx_queues; i++) ++ adapter->tx_ring[i]->reg_idx = i; ++ ret = true; ++ break; ++ default: ++ break; ++ } ++ break; ++ ++ case (IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_DCB_ENABLED): ++ if (adapter->hw.mac.type == ixgbe_mac_82599EB) { ++ for (i = 0; i < adapter->num_rx_queues; i++) { ++ adapter->rx_ring[i]->reg_idx = ++ (adapter->num_vfs + i) * ++ adapter->ring_feature[RING_F_DCB].indices; ++#ifdef IXGBE_FCOE ++ adapter->rx_ring[i]->reg_idx += ++ (i >= f->mask ? 
adapter->fcoe.tc : 0); ++#endif /* IXGBE_FCOE */ ++ } ++ ++ for (i = 0; i < adapter->num_tx_queues; i++) { ++ adapter->tx_ring[i]->reg_idx = ++ (adapter->num_vfs + i) * ++ adapter->ring_feature[RING_F_DCB].indices; ++#ifdef IXGBE_FCOE ++ adapter->tx_ring[i]->reg_idx += ++ (i >= f->mask ? adapter->fcoe.tc : 0); ++#endif /* IXGBE_FCOE */ ++ } ++ ret = true; ++ } ++ break; ++ ++ case (IXGBE_FLAG_VMDQ_ENABLED): ++ switch (adapter->hw.mac.type) { ++ case ixgbe_mac_82598EB: ++ for (i = 0; i < adapter->num_rx_queues; i++) ++ adapter->rx_ring[i]->reg_idx = i; ++ for (i = 0; i < adapter->num_tx_queues; i++) ++ adapter->tx_ring[i]->reg_idx = i; ++ ret = true; ++ break; ++ case ixgbe_mac_82599EB: ++ /* even without rss, there are 2 queues per ++ * pool, the odd numbered ones are unused. ++ */ ++ for (i = 0; i < adapter->num_rx_queues; i++) ++ adapter->rx_ring[i]->reg_idx = VMDQ_P(i) * 2; ++ for (i = 0; i < adapter->num_tx_queues; i++) ++ adapter->tx_ring[i]->reg_idx = VMDQ_P(i) * 2; ++ ret = true; ++ break; ++ default: ++ break; ++ } ++ break; ++ } ++ ++ return ret; ++} ++ ++/** ++ * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director ++ * @adapter: board private structure to initialize ++ * ++ * Cache the descriptor ring offsets for Flow Director to the assigned rings. ++ * ++ **/ ++static bool inline ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter) ++{ ++ int i; ++ bool ret = false; ++ ++ if (adapter->flags & IXGBE_FLAG_RSS_ENABLED && ++ ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || ++ (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) { ++ for (i = 0; i < adapter->num_rx_queues; i++) ++ adapter->rx_ring[i]->reg_idx = i; ++ for (i = 0; i < adapter->num_tx_queues; i++) ++ adapter->tx_ring[i]->reg_idx = i; ++ ret = true; ++ } ++ ++ return ret; ++} ++ ++#ifdef IXGBE_FCOE ++/** ++ * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE ++ * @adapter: board private structure to initialize ++ * ++ * Cache the descriptor ring offsets for FCoE mode to the assigned rings. ++ * ++ */ ++static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter) ++{ ++ struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; ++ int i; ++ u8 fcoe_rx_i = 0, fcoe_tx_i = 0; ++ ++ if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) ++ return false; ++ ++ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { ++ struct ixgbe_fcoe *fcoe = &adapter->fcoe; ++ ++ ixgbe_cache_ring_dcb(adapter); ++ /* find out queues in TC for FCoE */ ++ fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1; ++ fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1; ++ /* ++ * In 82599, the number of Tx queues for each traffic ++ * class for both 8-TC and 4-TC modes are: ++ * TCs : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7 ++ * 8 TCs: 32 32 16 16 8 8 8 8 ++ * 4 TCs: 64 64 32 32 ++ * We have max 8 queues for FCoE, where 8 the is ++ * FCoE redirection table size. If TC for FCoE is ++ * less than or equal to TC3, we have enough queues ++ * to add max of 8 queues for FCoE, so we start FCoE ++ * tx queue from the next one, i.e., reg_idx + 1. ++ * If TC for FCoE is above TC3, implying 8 TC mode, ++ * and we need 8 for FCoE, we have to take all queues ++ * in that traffic class for FCoE. 
++ */ ++ if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3)) ++ fcoe_tx_i--; ++ } ++ if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { ++ if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || ++ (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) ++ ixgbe_cache_ring_fdir(adapter); ++ else ++ ixgbe_cache_ring_rss(adapter); ++ ++ fcoe_rx_i = f->mask; ++ fcoe_tx_i = f->mask; ++ } ++ for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) { ++ adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i; ++ adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i; ++ } ++ return true; ++} ++ ++#endif /* IXGBE_FCOE */ ++/** ++ * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov ++ * @adapter: board private structure to initialize ++ * ++ * SR-IOV doesn't use any descriptor rings but changes the default if ++ * no other mapping is used. ++ * ++ */ ++static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter) ++{ ++ adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2; ++ adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2; ++ return false; ++} ++ ++/** ++ * ixgbe_cache_ring_register - Descriptor ring to register mapping ++ * @adapter: board private structure to initialize ++ * ++ * Once we know the feature-set enabled for the device, we'll cache ++ * the register offset the descriptor ring is assigned to. ++ * ++ * Note, the order the various feature calls is important. It must start with ++ * the "most" features enabled at the same time, then trickle down to the ++ * least amount of features turned on at once. ++ **/ ++static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) ++{ ++ /* start with default case */ ++ adapter->rx_ring[0]->reg_idx = 0; ++ adapter->tx_ring[0]->reg_idx = 0; ++ ++ if (ixgbe_cache_ring_sriov(adapter)) ++ return; ++ ++ if (ixgbe_cache_ring_vmdq(adapter)) ++ return; ++ ++#ifdef IXGBE_FCOE ++ if (ixgbe_cache_ring_fcoe(adapter)) ++ return; ++ ++#endif /* IXGBE_FCOE */ ++ if (ixgbe_cache_ring_dcb(adapter)) ++ return; ++ ++ if (ixgbe_cache_ring_fdir(adapter)) ++ return; ++ ++ if (ixgbe_cache_ring_rss(adapter)) ++ return; ++ ++} ++ ++/** ++ * ixgbe_alloc_queues - Allocate memory for all rings ++ * @adapter: board private structure to initialize ++ * ++ * We allocate one ring per queue at run-time since we don't know the ++ * number of queues at compile-time. The polling_netdev array is ++ * intended for Multiqueue, but should work fine with a single queue. 
++ **/ ++static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter) ++{ ++ int i; ++ int rx_count; ++#ifdef HAVE_DEVICE_NUMA_NODE ++ int orig_node = adapter->node; ++ ++ WARN_ON(orig_node != -1 && !node_online(orig_node)); ++#endif /* HAVE_DEVICE_NUMA_NODE */ ++ ++ for (i = 0; i < adapter->num_tx_queues; i++) { ++ struct ixgbe_ring *ring = adapter->tx_ring[i]; ++#ifdef HAVE_DEVICE_NUMA_NODE ++ if (orig_node == -1) { ++ int cur_node = next_online_node(adapter->node); ++ if (cur_node == MAX_NUMNODES) ++ cur_node = first_online_node; ++ adapter->node = cur_node; ++ } ++#endif /* HAVE_DEVICE_NUMA_NODE */ ++ ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL, ++ adapter->node); ++ if (!ring) ++ ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL); ++ if (!ring) ++ goto err_tx_ring_allocation; ++ ring->count = adapter->tx_ring_count; ++ ring->queue_index = i; ++ ring->dev = pci_dev_to_dev(adapter->pdev); ++ ring->netdev = adapter->netdev; ++ ring->numa_node = adapter->node; ++ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ++ ring->atr_sample_rate = adapter->atr_sample_rate; ++ ring->atr_count = 0; ++ ++ adapter->tx_ring[i] = ring; ++ } ++ ++#ifdef HAVE_DEVICE_NUMA_NODE ++ /* Restore the adapter's original node */ ++ adapter->node = orig_node; ++ ++#endif /* HAVE_DEVICE_NUMA_NODE */ ++ rx_count = adapter->rx_ring_count; ++ for (i = 0; i < adapter->num_rx_queues; i++) { ++ struct ixgbe_ring *ring = adapter->rx_ring[i]; ++#ifdef HAVE_DEVICE_NUMA_NODE ++ if (orig_node == -1) { ++ int cur_node = next_online_node(adapter->node); ++ if (cur_node == MAX_NUMNODES) ++ cur_node = first_online_node; ++ adapter->node = cur_node; ++ } ++#endif /* HAVE_DEVICE_NUMA_NODE */ ++ ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL, ++ adapter->node); ++ if (!ring) ++ ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL); ++ if (!ring) ++ goto err_rx_ring_allocation; ++ ring->count = rx_count; ++ ring->queue_index = i; ++ ring->dev = pci_dev_to_dev(adapter->pdev); ++ ring->netdev = adapter->netdev; ++ ring->numa_node = adapter->node; ++ ++ adapter->rx_ring[i] = ring; ++ } ++ ++#ifdef HAVE_DEVICE_NUMA_NODE ++ /* Restore the adapter's original node */ ++ adapter->node = orig_node; ++ ++#endif /* HAVE_DEVICE_NUMA_NODE */ ++ ixgbe_cache_ring_register(adapter); ++ ++ return 0; ++ ++err_rx_ring_allocation: ++ for (i = 0; i < adapter->num_tx_queues; i++) ++ kfree(adapter->tx_ring[i]); ++err_tx_ring_allocation: ++ return -ENOMEM; ++} ++ ++/** ++ * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported ++ * @adapter: board private structure to initialize ++ * ++ * Attempt to configure the interrupts using the best available ++ * capabilities of the hardware and the kernel. ++ **/ ++static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ int err = 0; ++ int vector, v_budget; ++ ++ if (!(adapter->flags & IXGBE_FLAG_MSIX_CAPABLE)) ++ goto try_msi; ++ ++ /* ++ * It's easy to be greedy for MSI-X vectors, but it really ++ * doesn't do us much good if we have a lot more vectors ++ * than CPU's. So let's be conservative and only ask for ++ * (roughly) the same number of vectors as there are CPU's. ++ */ ++ v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues, ++ (int)num_online_cpus()) + NON_Q_VECTORS; ++ ++ /* ++ * At the same time, hardware can only support a maximum of ++ * hw.mac->max_msix_vectors vectors. 
With features ++ * such as RSS and VMDq, we can easily surpass the number of Rx and Tx ++ * descriptor queues supported by our device. Thus, we cap it off in ++ * those rare cases where the cpu count also exceeds our vector limit. ++ */ ++ v_budget = min(v_budget, (int)hw->mac.max_msix_vectors); ++ ++ /* A failure in MSI-X entry allocation isn't fatal, but it does ++ * mean we disable MSI-X capabilities of the adapter. */ ++ adapter->msix_entries = kcalloc(v_budget, ++ sizeof(struct msix_entry), GFP_KERNEL); ++ if (adapter->msix_entries) { ++ for (vector = 0; vector < v_budget; vector++) ++ adapter->msix_entries[vector].entry = vector; ++ ++ ixgbe_acquire_msix_vectors(adapter, v_budget); ++ ++ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) ++ goto out; ++ } ++ ++ adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; ++ adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE; ++ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; ++ adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; ++ adapter->atr_sample_rate = 0; ++ adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED; ++ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { ++ DPRINTK(PROBE, ERR, "MSIX interrupt not available - " ++ "disabling SR-IOV\n"); ++ ixgbe_disable_sriov(adapter); ++ } ++ ++ adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; ++ ixgbe_set_num_queues(adapter); ++ ++try_msi: ++ if (!(adapter->flags & IXGBE_FLAG_MSI_CAPABLE)) ++ goto out; ++ ++ err = pci_enable_msi(adapter->pdev); ++ if (!err) { ++ adapter->flags |= IXGBE_FLAG_MSI_ENABLED; ++ } else { ++ DPRINTK(HW, DEBUG, "Unable to allocate MSI interrupt, " ++ "falling back to legacy. Error: %d\n", err); ++ /* reset err */ ++ err = 0; ++ } ++ ++out: ++#ifdef HAVE_TX_MQ ++ /* Notify the stack of the (possibly) reduced Tx Queue count. */ ++#ifdef CONFIG_NETDEVICES_MULTIQUEUE ++ adapter->netdev->egress_subqueue_count = adapter->num_tx_queues; ++#else ++ adapter->netdev->real_num_tx_queues = adapter->num_tx_queues; ++#endif ++#endif /* HAVE_TX_MQ */ ++ return err; ++} ++ ++/** ++ * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors ++ * @adapter: board private structure to initialize ++ * ++ * We allocate one q_vector per queue interrupt. If allocation fails we ++ * return -ENOMEM. 
++ **/ ++static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) ++{ ++ int v_idx, num_q_vectors; ++ struct ixgbe_q_vector *q_vector; ++ int rx_vectors; ++#ifdef CONFIG_IXGBE_NAPI ++ int (*poll)(struct napi_struct *, int); ++#endif ++ ++ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { ++ num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; ++ rx_vectors = adapter->num_rx_queues; ++#ifdef CONFIG_IXGBE_NAPI ++ poll = &ixgbe_clean_rxtx_many; ++#endif ++ } else { ++ num_q_vectors = 1; ++ rx_vectors = 1; ++#ifdef CONFIG_IXGBE_NAPI ++ poll = &ixgbe_poll; ++#endif ++ } ++ ++ for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { ++ q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector), ++ GFP_KERNEL, adapter->node); ++ if (!q_vector) ++ q_vector = kzalloc(sizeof(struct ixgbe_q_vector), ++ GFP_KERNEL); ++ if (!q_vector) ++ goto err_out; ++ q_vector->adapter = adapter; ++ if (q_vector->txr_count && !q_vector->rxr_count) ++ q_vector->eitr = adapter->tx_eitr_param; ++ else ++ q_vector->eitr = adapter->rx_eitr_param; ++ q_vector->v_idx = v_idx; ++#ifndef IXGBE_NO_LRO ++ if (v_idx < rx_vectors) { ++ int size = sizeof(struct ixgbe_lro_list); ++ q_vector->lrolist = vmalloc_node(size, adapter->node); ++ if (!q_vector->lrolist) ++ q_vector->lrolist = vmalloc(size); ++ if (!q_vector->lrolist) { ++ kfree(q_vector); ++ goto err_out; ++ } ++ memset(q_vector->lrolist, 0, size); ++ ixgbe_lro_ring_init(q_vector->lrolist); ++ } ++#endif ++#ifdef CONFIG_IXGBE_NAPI ++ netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64); ++#endif ++ adapter->q_vector[v_idx] = q_vector; ++ } ++ ++ return 0; ++ ++err_out: ++ while (v_idx) { ++ v_idx--; ++ q_vector = adapter->q_vector[v_idx]; ++#ifdef CONFIG_IXGBE_NAPI ++ netif_napi_del(&q_vector->napi); ++#endif ++#ifndef IXGBE_NO_LRO ++ if (q_vector->lrolist) { ++ ixgbe_lro_ring_exit(q_vector->lrolist); ++ vfree(q_vector->lrolist); ++ q_vector->lrolist = NULL; ++ } ++#endif ++ kfree(q_vector); ++ adapter->q_vector[v_idx] = NULL; ++ } ++ return -ENOMEM; ++} ++ ++/** ++ * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors ++ * @adapter: board private structure to initialize ++ * ++ * This function frees the memory allocated to the q_vectors. In addition if ++ * NAPI is enabled it will delete any references to the NAPI struct prior ++ * to freeing the q_vector. 
++ **/ ++static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter) ++{ ++ int v_idx, num_q_vectors; ++ ++ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { ++ num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; ++ } else { ++ num_q_vectors = 1; ++ } ++ ++ for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { ++ struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx]; ++ ++ adapter->q_vector[v_idx] = NULL; ++#ifdef CONFIG_IXGBE_NAPI ++ netif_napi_del(&q_vector->napi); ++#endif ++#ifndef IXGBE_NO_LRO ++ if (q_vector->lrolist) { ++ ixgbe_lro_ring_exit(q_vector->lrolist); ++ vfree(q_vector->lrolist); ++ q_vector->lrolist = NULL; ++ } ++#endif ++ kfree(q_vector); ++ } ++} ++ ++static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter) ++{ ++ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { ++ adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; ++ pci_disable_msix(adapter->pdev); ++ kfree(adapter->msix_entries); ++ adapter->msix_entries = NULL; ++ } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { ++ adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED; ++ pci_disable_msi(adapter->pdev); ++ } ++ return; ++} ++ ++/** ++ * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme ++ * @adapter: board private structure to initialize ++ * ++ * We determine which interrupt scheme to use based on... ++ * - Kernel support (MSI, MSI-X) ++ * - which can be user-defined (via MODULE_PARAM) ++ * - Hardware queue count (num_*_queues) ++ * - defined by miscellaneous hardware support/features (RSS, etc.) ++ **/ ++int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter) ++{ ++ int err; ++ ++ /* Number of supported queues */ ++ ixgbe_set_num_queues(adapter); ++ ++ err = ixgbe_set_interrupt_capability(adapter); ++ if (err) { ++ DPRINTK(PROBE, ERR, "Unable to setup interrupt capabilities\n"); ++ goto err_set_interrupt; ++ } ++ ++ err = ixgbe_alloc_q_vectors(adapter); ++ if (err) { ++ DPRINTK(PROBE, ERR, "Unable to allocate memory for queue " ++ "vectors\n"); ++ goto err_alloc_q_vectors; ++ } ++ ++ err = ixgbe_alloc_queues(adapter); ++ if (err) { ++ DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n"); ++ goto err_alloc_queues; ++ } ++ ++ DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, " ++ "Tx Queue count = %u\n", ++ (adapter->num_rx_queues > 1) ? 
"Enabled" : ++ "Disabled", adapter->num_rx_queues, adapter->num_tx_queues); ++ ++ set_bit(__IXGBE_DOWN, &adapter->state); ++ ++ return 0; ++err_alloc_queues: ++ ixgbe_free_q_vectors(adapter); ++err_alloc_q_vectors: ++ ixgbe_reset_interrupt_capability(adapter); ++err_set_interrupt: ++ return err; ++} ++ ++/** ++ * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings ++ * @adapter: board private structure to clear interrupt scheme on ++ * ++ * We go through and clear interrupt specific resources and reset the structure ++ * to pre-load conditions ++ **/ ++void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter) ++{ ++ int i; ++ ++ for (i = 0; i < adapter->num_tx_queues; i++) { ++ kfree(adapter->tx_ring[i]); ++ adapter->tx_ring[i] = NULL; ++ } ++ for (i = 0; i < adapter->num_rx_queues; i++) { ++ kfree(adapter->rx_ring[i]); ++ adapter->rx_ring[i] = NULL; ++ } ++ ++ ixgbe_free_q_vectors(adapter); ++ ixgbe_reset_interrupt_capability(adapter); ++} ++ ++/** ++ * ixgbe_sfp_timer - worker thread to find a missing module ++ * @data: pointer to our adapter struct ++ **/ ++static void ixgbe_sfp_timer(unsigned long data) ++{ ++ struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; ++ ++ /* Do the sfp_timer outside of interrupt context due to the ++ * delays that sfp+ detection requires */ ++ schedule_work(&adapter->sfp_task); ++} ++ ++/** ++ * ixgbe_sfp_task - worker thread to find a missing module ++ * @work: pointer to work_struct containing our data ++ **/ ++static void ixgbe_sfp_task(struct work_struct *work) ++{ ++ struct ixgbe_adapter *adapter = container_of(work, ++ struct ixgbe_adapter, ++ sfp_task); ++ struct ixgbe_hw *hw = &adapter->hw; ++ ++ if ((hw->phy.type == ixgbe_phy_nl) && ++ (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) { ++ s32 ret = hw->phy.ops.identify_sfp(hw); ++ if (ret && ret != IXGBE_ERR_SFP_NOT_SUPPORTED) ++ goto reschedule; ++ ret = hw->phy.ops.reset(hw); ++ if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) { ++ DPRINTK(PROBE, ERR, "failed to initialize because an " ++ "unsupported SFP+ module type was detected.\n" ++ "Reload the driver after installing a " ++ "supported module.\n"); ++ unregister_netdev(adapter->netdev); ++ adapter->netdev_registered = false; ++ } else { ++ DPRINTK(PROBE, INFO, "detected SFP+: %d\n", ++ hw->phy.sfp_type); ++ } ++ /* don't need this routine any more */ ++ clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); ++ } ++ return; ++reschedule: ++ if (test_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state)) ++ mod_timer(&adapter->sfp_timer, ++ round_jiffies(jiffies + (2 * HZ))); ++} ++ ++/** ++ * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter) ++ * @adapter: board private structure to initialize ++ * ++ * ixgbe_sw_init initializes the Adapter private data structure. ++ * Fields are initialized based on PCI device information and ++ * OS network device settings (MTU size). 
++ **/ ++static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ struct pci_dev *pdev = adapter->pdev; ++ int err; ++ ++ /* PCI config space info */ ++ ++ hw->vendor_id = pdev->vendor; ++ hw->device_id = pdev->device; ++ pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); ++ hw->subsystem_vendor_id = pdev->subsystem_vendor; ++ hw->subsystem_device_id = pdev->subsystem_device; ++ ++ err = ixgbe_init_shared_code(hw); ++ if (err) { ++ DPRINTK(PROBE, ERR, "init_shared_code failed: %d\n", err); ++ goto out; ++ } ++ ++ /* Set capability flags */ ++ switch (hw->mac.type) { ++ case ixgbe_mac_82598EB: ++ if (hw->device_id == IXGBE_DEV_ID_82598AT) ++ adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; ++#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) ++ adapter->flags |= IXGBE_FLAG_DCA_CAPABLE; ++#endif ++ adapter->flags |= IXGBE_FLAG_MSI_CAPABLE; ++ adapter->flags |= IXGBE_FLAG_MSIX_CAPABLE; ++ if (adapter->flags & IXGBE_FLAG_MSIX_CAPABLE) ++ adapter->flags |= IXGBE_FLAG_MQ_CAPABLE; ++ if (adapter->flags & IXGBE_FLAG_MQ_CAPABLE) ++ adapter->flags |= IXGBE_FLAG_DCB_CAPABLE; ++ if (adapter->flags & IXGBE_FLAG_MQ_CAPABLE) ++ adapter->flags |= IXGBE_FLAG_RSS_CAPABLE; ++ if (adapter->flags & IXGBE_FLAG_MQ_CAPABLE) ++ adapter->flags |= IXGBE_FLAG_VMDQ_CAPABLE; ++ adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE; ++ adapter->flags &= ~IXGBE_FLAG_SRIOV_CAPABLE; ++ adapter->max_msix_q_vectors = IXGBE_MAX_MSIX_Q_VECTORS_82598; ++ break; ++ case ixgbe_mac_82599EB: ++#ifndef IXGBE_NO_SMART_SPEED ++ hw->phy.smart_speed = ixgbe_smart_speed_on; ++#else ++ hw->phy.smart_speed = ixgbe_smart_speed_off; ++#endif ++ adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; ++#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) ++ adapter->flags |= IXGBE_FLAG_DCA_CAPABLE; ++#endif ++ adapter->flags |= IXGBE_FLAG_MSI_CAPABLE; ++ adapter->flags |= IXGBE_FLAG_MSIX_CAPABLE; ++ if (adapter->flags & IXGBE_FLAG_MSIX_CAPABLE) ++ adapter->flags |= IXGBE_FLAG_MQ_CAPABLE; ++ if (adapter->flags & IXGBE_FLAG_MQ_CAPABLE) ++ adapter->flags |= IXGBE_FLAG_DCB_CAPABLE; ++ if (adapter->flags & IXGBE_FLAG_MQ_CAPABLE) ++ adapter->flags |= IXGBE_FLAG_RSS_CAPABLE; ++ if (adapter->flags & IXGBE_FLAG_MQ_CAPABLE) ++ adapter->flags |= IXGBE_FLAG_VMDQ_CAPABLE; ++#ifdef IXGBE_FCOE ++ if (adapter->flags & IXGBE_FLAG_MQ_CAPABLE) { ++ adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE; ++ adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; ++ adapter->ring_feature[RING_F_FCOE].indices = 0; ++#ifdef CONFIG_DCB ++ /* Default traffic class to use for FCoE */ ++ adapter->fcoe.tc = IXGBE_FCOE_DEFTC; ++ adapter->fcoe.up = IXGBE_FCOE_DEFTC; ++#endif ++ } ++#endif ++ if (adapter->flags & IXGBE_FLAG_MQ_CAPABLE) ++ adapter->flags |= IXGBE_FLAG_SRIOV_CAPABLE; ++ if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) ++ adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; ++#ifdef NETIF_F_NTUPLE ++ /* n-tuple support exists, always init our spinlock */ ++ spin_lock_init(&adapter->fdir_perfect_lock); ++#endif /* NETIF_F_NTUPLE */ ++ adapter->max_msix_q_vectors = IXGBE_MAX_MSIX_Q_VECTORS_82599; ++ break; ++ default: ++ break; ++ } ++ ++ /* Default DCB settings, if applicable */ ++ adapter->ring_feature[RING_F_DCB].indices = 8; ++ ++ if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE) { ++ int j, dcb_i; ++ struct tc_configuration *tc; ++ dcb_i = adapter->ring_feature[RING_F_DCB].indices; ++ adapter->dcb_cfg.num_tcs.pg_tcs = dcb_i; ++ adapter->dcb_cfg.num_tcs.pfc_tcs = dcb_i; ++ for (j = 0; j < dcb_i; j++) { ++ tc = &adapter->dcb_cfg.tc_config[j]; 
++ tc->path[DCB_TX_CONFIG].bwg_id = 0; ++ tc->path[DCB_TX_CONFIG].bwg_percent = 100 / dcb_i; ++ tc->path[DCB_RX_CONFIG].bwg_id = 0; ++ tc->path[DCB_RX_CONFIG].bwg_percent = 100 / dcb_i; ++ tc->dcb_pfc = pfc_disabled; ++ if (j == 0) { ++ /* total of all TCs bandwidth needs to be 100 */ ++ tc->path[DCB_TX_CONFIG].bwg_percent += 100 % dcb_i; ++ tc->path[DCB_RX_CONFIG].bwg_percent += 100 % dcb_i; ++ } ++ } ++ adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100; ++ adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100; ++ adapter->dcb_cfg.rx_pba_cfg = pba_equal; ++ adapter->dcb_cfg.pfc_mode_enable = false; ++ ++ adapter->dcb_cfg.round_robin_enable = false; ++ adapter->dcb_set_bitmap = 0x00; ++ ++ } ++ /* XXX does this need to be initialized even w/o DCB? */ ++ ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg, ++ adapter->ring_feature[RING_F_DCB].indices); ++ ++ if (hw->mac.type == ixgbe_mac_82599EB) ++ hw->mbx.ops.init_params(hw); ++ ++ /* default flow control settings */ ++ hw->fc.requested_mode = ixgbe_fc_full; ++ hw->fc.current_mode = ixgbe_fc_full; /* init for ethtool output */ ++ ++ adapter->last_lfc_mode = hw->fc.current_mode; ++ hw->fc.high_water = IXGBE_DEFAULT_FCRTH; ++ hw->fc.low_water = IXGBE_DEFAULT_FCRTL; ++ hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE; ++ hw->fc.send_xon = true; ++ hw->fc.disable_fc_autoneg = false; ++ ++ /* set defaults for eitr in MegaBytes */ ++ adapter->eitr_low = 10; ++ adapter->eitr_high = 20; ++ ++ /* set default ring sizes */ ++ adapter->tx_ring_count = IXGBE_DEFAULT_TXD; ++ adapter->rx_ring_count = IXGBE_DEFAULT_RXD; ++ ++ /* enable rx csum by default */ ++ adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED; ++ ++ set_bit(__IXGBE_DOWN, &adapter->state); ++out: ++ return err; ++} ++ ++/** ++ * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors) ++ * @tx_ring: tx descriptor ring (for a specific queue) to setup ++ * ++ * Return 0 on success, negative on failure ++ **/ ++int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring) ++{ ++ struct device *dev = tx_ring->dev; ++ int size; ++ ++ size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; ++ tx_ring->tx_buffer_info = vmalloc_node(size, tx_ring->numa_node); ++ if (!tx_ring->tx_buffer_info) ++ tx_ring->tx_buffer_info = vmalloc(size); ++ if (!tx_ring->tx_buffer_info) ++ goto err; ++ memset(tx_ring->tx_buffer_info, 0, size); ++ ++ /* round up to nearest 4K */ ++ tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); ++ tx_ring->size = ALIGN(tx_ring->size, 4096); ++ ++ tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, ++ &tx_ring->dma, GFP_KERNEL); ++ if (!tx_ring->desc) ++ goto err; ++ ++ tx_ring->next_to_use = 0; ++ tx_ring->next_to_clean = 0; ++ tx_ring->work_limit = tx_ring->count; ++ return 0; ++ ++err: ++ vfree(tx_ring->tx_buffer_info); ++ tx_ring->tx_buffer_info = NULL; ++ dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n"); ++ return -ENOMEM; ++} ++ ++/** ++ * ixgbe_setup_all_tx_resources - allocate all queues Tx resources ++ * @adapter: board private structure ++ * ++ * If this function returns with an error, then it's possible one or ++ * more of the rings is populated (while the rest are not). It is the ++ * callers duty to clean those orphaned rings. 
++ * ++ * Return 0 on success, negative on failure ++ **/ ++static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter) ++{ ++ int i, err = 0; ++ ++ for (i = 0; i < adapter->num_tx_queues; i++) { ++#ifdef HAVE_DEVICE_NUMA_NODE ++ DPRINTK(TX_ERR, INFO, "tx[%02d] bd: %d - assigning node %d\n", ++ i, adapter->bd_number, adapter->tx_ring[i]->numa_node); ++#endif /* HAVE_DEVICE_NUMA_NODE */ ++ err = ixgbe_setup_tx_resources(adapter->tx_ring[i]); ++ if (!err) ++ continue; ++ DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i); ++ break; ++ } ++ ++ return err; ++} ++ ++/** ++ * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors) ++ * @rx_ring: rx descriptor ring (for a specific queue) to setup ++ * ++ * Returns 0 on success, negative on failure ++ **/ ++int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring) ++{ ++ struct device *dev = rx_ring->dev; ++ int size; ++ ++ size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; ++ rx_ring->rx_buffer_info = vmalloc_node(size, rx_ring->numa_node); ++ if (!rx_ring->rx_buffer_info) ++ rx_ring->rx_buffer_info = vmalloc(size); ++ if (!rx_ring->rx_buffer_info) ++ goto err; ++ memset(rx_ring->rx_buffer_info, 0, size); ++ ++ /* Round up to nearest 4K */ ++ rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); ++ rx_ring->size = ALIGN(rx_ring->size, 4096); ++ ++ rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, ++ &rx_ring->dma, GFP_KERNEL); ++ ++ if (!rx_ring->desc) ++ goto err; ++ ++ rx_ring->next_to_clean = 0; ++ rx_ring->next_to_use = 0; ++#ifndef CONFIG_IXGBE_NAPI ++ rx_ring->work_limit = rx_ring->count / 2; ++#endif ++ ++ return 0; ++err: ++ vfree(rx_ring->rx_buffer_info); ++ rx_ring->rx_buffer_info = NULL; ++ dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n"); ++ return -ENOMEM; ++} ++ ++/** ++ * ixgbe_setup_all_rx_resources - allocate all queues Rx resources ++ * @adapter: board private structure ++ * ++ * If this function returns with an error, then it's possible one or ++ * more of the rings is populated (while the rest are not). It is the ++ * callers duty to clean those orphaned rings. 
++ * ++ * Return 0 on success, negative on failure ++ **/ ++static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter) ++{ ++ int i, err = 0; ++ ++ for (i = 0; i < adapter->num_rx_queues; i++) { ++#ifdef HAVE_DEVICE_NUMA_NODE ++ DPRINTK(RX_ERR, INFO, "rx[%02d] bd: %d - assigning node %d\n", ++ i, adapter->bd_number, adapter->rx_ring[i]->numa_node); ++#endif /* HAVE_DEVICE_NUMA_NODE */ ++ err = ixgbe_setup_rx_resources(adapter->rx_ring[i]); ++ if (!err) ++ continue; ++ DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i); ++ break; ++ } ++ ++ return err; ++} ++ ++/** ++ * ixgbe_free_tx_resources - Free Tx Resources per Queue ++ * @tx_ring: Tx descriptor ring for a specific queue ++ * ++ * Free all transmit software resources ++ **/ ++void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring) ++{ ++ ixgbe_clean_tx_ring(tx_ring); ++ ++ vfree(tx_ring->tx_buffer_info); ++ tx_ring->tx_buffer_info = NULL; ++ ++ /* if not set, then don't free */ ++ if (!tx_ring->desc) ++ return; ++ ++ dma_free_coherent(tx_ring->dev, tx_ring->size, ++ tx_ring->desc, tx_ring->dma); ++ ++ tx_ring->desc = NULL; ++} ++ ++/** ++ * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues ++ * @adapter: board private structure ++ * ++ * Free all transmit software resources ++ **/ ++static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter) ++{ ++ int i; ++ ++ for (i = 0; i < adapter->num_tx_queues; i++) ++ if (adapter->tx_ring[i]->desc) ++ ixgbe_free_tx_resources(adapter->tx_ring[i]); ++} ++ ++/** ++ * ixgbe_free_rx_resources - Free Rx Resources ++ * @rx_ring: ring to clean the resources from ++ * ++ * Free all receive software resources ++ **/ ++void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring) ++{ ++ ixgbe_clean_rx_ring(rx_ring); ++ ++ vfree(rx_ring->rx_buffer_info); ++ rx_ring->rx_buffer_info = NULL; ++ ++ /* if not set, then don't free */ ++ if (!rx_ring->desc) ++ return; ++ ++ dma_free_coherent(rx_ring->dev, rx_ring->size, ++ rx_ring->desc, rx_ring->dma); ++ ++ rx_ring->desc = NULL; ++} ++ ++/** ++ * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues ++ * @adapter: board private structure ++ * ++ * Free all receive software resources ++ **/ ++static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter) ++{ ++ int i; ++ ++ for (i = 0; i < adapter->num_rx_queues; i++) ++ if (adapter->rx_ring[i]->desc) ++ ixgbe_free_rx_resources(adapter->rx_ring[i]); ++} ++ ++/** ++ * ixgbe_change_mtu - Change the Maximum Transfer Unit ++ * @netdev: network interface device structure ++ * @new_mtu: new value for maximum frame size ++ * ++ * Returns 0 on success, negative on failure ++ **/ ++static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; ++ ++ /* MTU < 68 is an error and causes problems on some kernels */ ++ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { ++ if ((new_mtu < 68) || (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE)) ++ return -EINVAL; ++ } else { ++ if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) ++ return -EINVAL; ++ } ++ ++ DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n", ++ netdev->mtu, new_mtu); ++ /* must set new MTU before calling down or up */ ++ netdev->mtu = new_mtu; ++ ++ if (netif_running(netdev)) ++ ixgbe_reinit_locked(adapter); ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_open - Called when a network interface is made active ++ * @netdev: network interface device structure ++ * ++ * Returns 0 on success, 
negative value on failure ++ * ++ * The open entry point is called when a network interface is made ++ * active by the system (IFF_UP). At this point all resources needed ++ * for transmit and receive operations are allocated, the interrupt ++ * handler is registered with the OS, the watchdog timer is started, ++ * and the stack is notified that the interface is ready. ++ **/ ++static int ixgbe_open(struct net_device *netdev) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ struct ixgbe_hw *hw = &adapter->hw; ++ int err; ++ ++ /* disallow open during test */ ++ if (test_bit(__IXGBE_TESTING, &adapter->state)) ++ return -EBUSY; ++ ++ netif_carrier_off(netdev); ++ ++ /* allocate transmit descriptors */ ++ err = ixgbe_setup_all_tx_resources(adapter); ++ if (err) ++ goto err_setup_tx; ++ ++ /* allocate receive descriptors */ ++ err = ixgbe_setup_all_rx_resources(adapter); ++ if (err) ++ goto err_setup_rx; ++ ++ ixgbe_configure(adapter); ++ ++ /* ++ * Map the Tx/Rx rings to the vectors we were allotted. ++ * if request_irq will be called in this function map_rings ++ * must be called *before* up_complete ++ */ ++ ixgbe_map_rings_to_vectors(adapter); ++ ++ err = ixgbe_up_complete(adapter); ++ if (err) ++ goto err_setup_rx; ++ ++ /* clear any pending interrupts, may auto mask */ ++ IXGBE_READ_REG(hw, IXGBE_EICR); ++ ++ err = ixgbe_request_irq(adapter); ++ if (err) ++ goto err_req_irq; ++ ++ ixgbe_irq_enable(adapter, true, true); ++ ++ /* ++ * If this adapter has a fan, check to see if we had a failure ++ * before we enabled the interrupt. ++ */ ++ if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { ++ u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); ++ if (esdp & IXGBE_ESDP_SDP1) ++ DPRINTK(DRV, CRIT, ++ "Fan has stopped, replace the adapter\n"); ++ } ++ ++ return 0; ++ ++err_req_irq: ++ ixgbe_down(adapter); ++ ixgbe_release_hw_control(adapter); ++ ixgbe_free_irq(adapter); ++err_setup_rx: ++ ixgbe_free_all_rx_resources(adapter); ++err_setup_tx: ++ ixgbe_free_all_tx_resources(adapter); ++ ixgbe_reset(adapter); ++ ++ return err; ++} ++ ++/** ++ * ixgbe_close - Disables a network interface ++ * @netdev: network interface device structure ++ * ++ * Returns 0, this is not allowed to fail ++ * ++ * The close entry point is called when an interface is de-activated ++ * by the OS. The hardware is still under the drivers control, but ++ * needs to be disabled. A global MAC reset is issued to stop the ++ * hardware, and all transmit and receive resources are freed. ++ **/ ++static int ixgbe_close(struct net_device *netdev) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ ++ ixgbe_down(adapter); ++ ixgbe_free_irq(adapter); ++ ++ ixgbe_free_all_tx_resources(adapter); ++ ixgbe_free_all_rx_resources(adapter); ++ ++ ixgbe_release_hw_control(adapter); ++ ++ return 0; ++} ++ ++#ifdef CONFIG_PM ++static int ixgbe_resume(struct pci_dev *pdev) ++{ ++ struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); ++ struct net_device *netdev = adapter->netdev; ++ u32 err; ++ ++ pci_set_power_state(pdev, PCI_D0); ++ pci_restore_state(pdev); ++ /* ++ * pci_restore_state clears dev->state_saved so call ++ * pci_save_state to restore it. 
++ */ ++ pci_save_state(pdev); ++ ++ err = pci_enable_device(pdev); ++ if (err) { ++ printk(KERN_ERR "ixgbe: Cannot enable PCI device from " ++ "suspend\n"); ++ return err; ++ } ++ pci_set_master(pdev); ++ ++ pci_wake_from_d3(pdev, false); ++ ++ err = ixgbe_init_interrupt_scheme(adapter); ++ if (err) { ++ printk(KERN_ERR "ixgbe: Cannot initialize interrupts for " ++ "device\n"); ++ return err; ++ } ++ ++ ixgbe_reset(adapter); ++ ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); ++ ++ if (netif_running(netdev)) { ++ err = ixgbe_open(netdev); ++ if (err) ++ return err; ++ } ++ ++ netif_device_attach(netdev); ++ ++ return 0; ++} ++#endif /* CONFIG_PM */ ++ ++/* ++ * __ixgbe_shutdown is not used when power management ++ * is disabled on older kernels (<2.6.12). causes a compile ++ * warning/error, because it is defined and not used. ++ */ ++#if defined(CONFIG_PM) || !defined(USE_REBOOT_NOTIFIER) ++static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) ++{ ++ struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); ++ struct net_device *netdev = adapter->netdev; ++ struct ixgbe_hw *hw = &adapter->hw; ++ u32 ctrl, fctrl; ++ u32 wufc = adapter->wol; ++#ifdef CONFIG_PM ++ int retval = 0; ++#endif ++ ++ netif_device_detach(netdev); ++ ++ if (netif_running(netdev)) { ++ ixgbe_down(adapter); ++ ixgbe_free_irq(adapter); ++ ixgbe_free_all_tx_resources(adapter); ++ ixgbe_free_all_rx_resources(adapter); ++ } ++ ++ ixgbe_clear_interrupt_scheme(adapter); ++ ++#ifdef CONFIG_PM ++ retval = pci_save_state(pdev); ++ if (retval) ++ return retval; ++ ++#endif ++ if (wufc) { ++ ixgbe_set_rx_mode(netdev); ++ ++ /* turn on all-multi mode if wake on multicast is enabled */ ++ if (wufc & IXGBE_WUFC_MC) { ++ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); ++ fctrl |= IXGBE_FCTRL_MPE; ++ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); ++ } ++ ++ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); ++ ctrl |= IXGBE_CTRL_GIO_DIS; ++ IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); ++ ++ IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc); ++ } else { ++ IXGBE_WRITE_REG(hw, IXGBE_WUC, 0); ++ IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); ++ } ++ ++ switch (hw->mac.type) { ++ case ixgbe_mac_82598EB: ++ pci_wake_from_d3(pdev, false); ++ break; ++ case ixgbe_mac_82599EB: ++ if (wufc) ++ pci_wake_from_d3(pdev, true); ++ else ++ pci_wake_from_d3(pdev, false); ++ break; ++ default: ++ break; ++ } ++ ++ *enable_wake = !!wufc; ++ ++ ixgbe_release_hw_control(adapter); ++ ++ pci_disable_device(pdev); ++ ++ return 0; ++} ++#endif /* defined(CONFIG_PM) || !defined(USE_REBOOT_NOTIFIER) */ ++ ++#ifdef CONFIG_PM ++static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state) ++{ ++ int retval; ++ bool wake; ++ ++ retval = __ixgbe_shutdown(pdev, &wake); ++ if (retval) ++ return retval; ++ ++ if (wake) { ++ pci_prepare_to_sleep(pdev); ++ } else { ++ pci_wake_from_d3(pdev, false); ++ pci_set_power_state(pdev, PCI_D3hot); ++ } ++ ++ return 0; ++} ++#endif /* CONFIG_PM */ ++ ++#ifndef USE_REBOOT_NOTIFIER ++static void ixgbe_shutdown(struct pci_dev *pdev) ++{ ++ bool wake; ++ ++ __ixgbe_shutdown(pdev, &wake); ++ ++ if (system_state == SYSTEM_POWER_OFF) { ++ pci_wake_from_d3(pdev, wake); ++ pci_set_power_state(pdev, PCI_D3hot); ++ } ++} ++ ++#endif ++/** ++ * ixgbe_update_stats - Update the board statistics counters. 
++ * @adapter: board private structure ++ **/ ++void ixgbe_update_stats(struct ixgbe_adapter *adapter) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ u64 total_mpc = 0; ++ u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot; ++ u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0; ++ u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0; ++ u64 bytes = 0, packets = 0; ++#ifndef IXGBE_NO_LRO ++ u32 flushed = 0, coal = 0, recycled = 0; ++ int num_q_vectors = 1; ++#endif ++ ++ if (test_bit(__IXGBE_DOWN, &adapter->state) || ++ test_bit(__IXGBE_RESETTING, &adapter->state)) ++ return; ++ ++#ifndef IXGBE_NO_LRO ++ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) ++ num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; ++ ++#endif ++ if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { ++ u64 rsc_count = 0; ++ u64 rsc_flush = 0; ++ for (i = 0; i < 16; i++) ++ adapter->hw_rx_no_dma_resources += IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); ++ for (i = 0; i < adapter->num_rx_queues; i++) { ++ rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count; ++ rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush; ++ } ++ adapter->rsc_total_count = rsc_count; ++ adapter->rsc_total_flush = rsc_flush; ++ } ++ ++#ifndef IXGBE_NO_LRO ++ for (i = 0; i < num_q_vectors; i++) { ++ struct ixgbe_q_vector *q_vector = adapter->q_vector[i]; ++ if (!q_vector || !q_vector->lrolist) ++ continue; ++ flushed += q_vector->lrolist->stats.flushed; ++ coal += q_vector->lrolist->stats.coal; ++ recycled += q_vector->lrolist->stats.recycled; ++ } ++ adapter->lro_stats.flushed = flushed; ++ adapter->lro_stats.coal = coal; ++ adapter->lro_stats.recycled = recycled; ++ ++#endif ++ for (i = 0; i < adapter->num_rx_queues; i++) { ++ struct ixgbe_ring *rx_ring = adapter->rx_ring[i]; ++ non_eop_descs += rx_ring->rx_stats.non_eop_descs; ++ alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed; ++ alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed; ++ bytes += rx_ring->stats.bytes; ++ packets += rx_ring->stats.packets; ++ ++ } ++ adapter->non_eop_descs = non_eop_descs; ++ adapter->alloc_rx_page_failed = alloc_rx_page_failed; ++ adapter->alloc_rx_buff_failed = alloc_rx_buff_failed; ++ adapter->net_stats.rx_bytes = bytes; ++ adapter->net_stats.rx_packets = packets; ++ ++ bytes = 0; ++ packets = 0; ++ /* gather some stats to the adapter struct that are per queue */ ++ for (i = 0; i < adapter->num_tx_queues; i++) { ++ struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; ++ restart_queue += tx_ring->tx_stats.restart_queue; ++ tx_busy += tx_ring->tx_stats.tx_busy; ++ bytes += tx_ring->stats.bytes; ++ packets += tx_ring->stats.packets; ++ } ++ adapter->restart_queue = restart_queue; ++ adapter->tx_busy = tx_busy; ++ adapter->net_stats.tx_bytes = bytes; ++ adapter->net_stats.tx_packets = packets; ++ ++ adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); ++ for (i = 0; i < 8; i++) { ++ /* for packet buffers not used, the register should read 0 */ ++ mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i)); ++ missed_rx += mpc; ++ adapter->stats.mpc[i] += mpc; ++ total_mpc += adapter->stats.mpc[i]; ++ if (hw->mac.type == ixgbe_mac_82598EB) ++ adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i)); ++ adapter->stats.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i)); ++ adapter->stats.qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i)); ++ adapter->stats.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); ++ adapter->stats.qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i)); ++ switch (hw->mac.type) { ++ case ixgbe_mac_82598EB: ++ adapter->stats.pxonrxc[i] += 
IXGBE_READ_REG(hw, ++ IXGBE_PXONRXC(i)); ++ adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw, ++ IXGBE_PXOFFRXC(i)); ++ break; ++ case ixgbe_mac_82599EB: ++ adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw, ++ IXGBE_PXONRXCNT(i)); ++ adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw, ++ IXGBE_PXOFFRXCNT(i)); ++ break; ++ default: ++ break; ++ } ++ adapter->stats.pxontxc[i] += IXGBE_READ_REG(hw, ++ IXGBE_PXONTXC(i)); ++ adapter->stats.pxofftxc[i] += IXGBE_READ_REG(hw, ++ IXGBE_PXOFFTXC(i)); ++ } ++ adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC); ++ /* work around hardware counting issue */ ++ adapter->stats.gprc -= missed_rx; ++ ++ /* 82598 hardware only has a 32 bit counter in the high register */ ++ switch (hw->mac.type) { ++ case ixgbe_mac_82598EB: ++ adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); ++ adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); ++ adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); ++ adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); ++ adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH); ++ break; ++ case ixgbe_mac_82599EB: ++ adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); ++ IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */ ++ adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); ++ IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */ ++ adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL); ++ IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */ ++ adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); ++ adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); ++#ifdef HAVE_TX_MQ ++ adapter->stats.fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); ++ adapter->stats.fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); ++#endif /* HAVE_TX_MQ */ ++#ifdef IXGBE_FCOE ++ adapter->stats.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); ++ adapter->stats.fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST); ++ adapter->stats.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); ++ adapter->stats.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); ++ adapter->stats.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); ++ adapter->stats.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); ++ adapter->stats.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); ++#endif /* IXGBE_FCOE */ ++ break; ++ default: ++ break; ++ } ++ bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); ++ adapter->stats.bprc += bprc; ++ adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); ++ if (hw->mac.type == ixgbe_mac_82598EB) ++ adapter->stats.mprc -= bprc; ++ adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC); ++ adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); ++ adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); ++ adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); ++ adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); ++ adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); ++ adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); ++ adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); ++ lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); ++ adapter->stats.lxontxc += lxon; ++ lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); ++ adapter->stats.lxofftxc += lxoff; ++ adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC); ++ adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC); ++ adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); ++ /* ++ * 82598 errata - tx of flow control packets is included in tx counters ++ */ ++ xon_off_tot = lxon + lxoff; ++ adapter->stats.gptc -= xon_off_tot; ++ adapter->stats.mptc -= xon_off_tot; ++ adapter->stats.gotc -= (xon_off_tot * (ETH_ZLEN + 
ETH_FCS_LEN)); ++ adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC); ++ adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC); ++ adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC); ++ adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR); ++ adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); ++ adapter->stats.ptc64 -= xon_off_tot; ++ adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); ++ adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); ++ adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); ++ adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); ++ adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); ++ adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); ++ ++ /* Fill out the OS statistics structure */ ++ adapter->net_stats.multicast = adapter->stats.mprc; ++ ++ /* Rx Errors */ ++ adapter->net_stats.rx_errors = adapter->stats.crcerrs + ++ adapter->stats.rlec; ++ adapter->net_stats.rx_dropped = 0; ++ adapter->net_stats.rx_length_errors = adapter->stats.rlec; ++ adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs; ++ adapter->net_stats.rx_missed_errors = total_mpc; ++ ++ /* ++ * VF Stats Collection - skip while resetting because these ++ * are not clear on read and otherwise you'll sometimes get ++ * crazy values. ++ */ ++ if (!test_bit(__IXGBE_RESETTING, &adapter->state)) { ++ for(i = 0; i < adapter->num_vfs; i++) { ++ UPDATE_VF_COUNTER_32bit(IXGBE_PVFGPRC(i), \ ++ adapter->vfinfo[i].last_vfstats.gprc, \ ++ adapter->vfinfo[i].vfstats.gprc); ++ UPDATE_VF_COUNTER_32bit(IXGBE_PVFGPTC(i), \ ++ adapter->vfinfo[i].last_vfstats.gptc, \ ++ adapter->vfinfo[i].vfstats.gptc); ++ UPDATE_VF_COUNTER_36bit(IXGBE_PVFGORC_LSB(i), \ ++ IXGBE_PVFGORC_MSB(i), \ ++ adapter->vfinfo[i].last_vfstats.gorc, \ ++ adapter->vfinfo[i].vfstats.gorc); ++ UPDATE_VF_COUNTER_36bit(IXGBE_PVFGOTC_LSB(i), \ ++ IXGBE_PVFGOTC_MSB(i), \ ++ adapter->vfinfo[i].last_vfstats.gotc, \ ++ adapter->vfinfo[i].vfstats.gotc); ++ UPDATE_VF_COUNTER_32bit(IXGBE_PVFMPRC(i), \ ++ adapter->vfinfo[i].last_vfstats.mprc, \ ++ adapter->vfinfo[i].vfstats.mprc); ++ } ++ } ++} ++ ++/** ++ * ixgbe_watchdog - Timer Call-back ++ * @data: pointer to adapter cast into an unsigned long ++ **/ ++static void ixgbe_watchdog(unsigned long data) ++{ ++ struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; ++ struct ixgbe_hw *hw = &adapter->hw; ++ u64 eics = 0; ++ int i; ++ ++ /* if interface is down do nothing */ ++ if (test_bit(__IXGBE_DOWN, &adapter->state)) ++ return; ++ ++ if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { ++ /* ++ * for legacy and MSI interrupts don't set any bits ++ * that are enabled for EIAM, because this operation ++ * would set *both* EIMS and EICS for any bit in EIAM ++ */ ++ IXGBE_WRITE_REG(hw, IXGBE_EICS, ++ (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER)); ++ } else { ++ /* get one bit for every active tx/rx interrupt vector */ ++ for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { ++ struct ixgbe_q_vector *qv = adapter->q_vector[i]; ++ if (qv->rxr_count || qv->txr_count) ++ eics |= ((u64)1 << i); ++ } ++ } ++ ++ /* Cause software interrupt to ensure rings are cleaned */ ++ ixgbe_irq_rearm_queues(adapter, eics); ++ ++ /* Reset the timer */ ++ mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ)); ++ ++ schedule_work(&adapter->watchdog_task); ++} ++ ++/** ++ * ixgbe_multispeed_fiber_task - worker thread to configure multispeed fiber ++ * @work: pointer to work_struct containing our data ++ **/ ++static void ixgbe_multispeed_fiber_task(struct 
work_struct *work) ++{ ++ struct ixgbe_adapter *adapter = container_of(work, ++ struct ixgbe_adapter, ++ multispeed_fiber_task); ++ struct ixgbe_hw *hw = &adapter->hw; ++ u32 autoneg; ++ bool negotiation; ++ ++ adapter->flags |= IXGBE_FLAG_IN_SFP_LINK_TASK; ++ autoneg = hw->phy.autoneg_advertised; ++ if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) ++ hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation); ++ hw->mac.autotry_restart = false; ++ if (hw->mac.ops.setup_link) ++ hw->mac.ops.setup_link(hw, autoneg, negotiation, true); ++ adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; ++ adapter->link_check_timeout = jiffies; ++ adapter->flags &= ~IXGBE_FLAG_IN_SFP_LINK_TASK; ++} ++ ++/** ++ * ixgbe_sfp_config_module_task - worker thread to configure a new SFP+ module ++ * @work: pointer to work_struct containing our data ++ **/ ++static void ixgbe_sfp_config_module_task(struct work_struct *work) ++{ ++ struct ixgbe_adapter *adapter = container_of(work, ++ struct ixgbe_adapter, ++ sfp_config_module_task); ++ struct ixgbe_hw *hw = &adapter->hw; ++ u32 err; ++ ++ adapter->flags |= IXGBE_FLAG_IN_SFP_MOD_TASK; ++ err = hw->phy.ops.identify_sfp(hw); ++ if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { ++ DPRINTK(PROBE, ERR, "failed to load because an " ++ "unsupported SFP+ module type was detected.\n"); ++ unregister_netdev(adapter->netdev); ++ adapter->netdev_registered = false; ++ return; ++ } ++ /* ++ * A module may be identified correctly, but the EEPROM may not have ++ * support for that module. setup_sfp() will fail in that case, so ++ * we should not allow that module to load. ++ */ ++ err = hw->mac.ops.setup_sfp(hw); ++ if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { ++ DPRINTK(PROBE, ERR, "failed to load because an " ++ "unsupported SFP+ module type was detected.\n"); ++ unregister_netdev(adapter->netdev); ++ adapter->netdev_registered = false; ++ return; ++ } ++ ++ if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK)) ++ /* This will also work for DA Twinax connections */ ++ schedule_work(&adapter->multispeed_fiber_task); ++ adapter->flags &= ~IXGBE_FLAG_IN_SFP_MOD_TASK; ++} ++ ++#ifdef HAVE_TX_MQ ++/** ++ * ixgbe_fdir_reinit_task - worker thread to reinit FDIR filter table ++ * @work: pointer to work_struct containing our data ++ **/ ++static void ixgbe_fdir_reinit_task(struct work_struct *work) ++{ ++ struct ixgbe_adapter *adapter = container_of(work, ++ struct ixgbe_adapter, ++ fdir_reinit_task); ++ struct ixgbe_hw *hw = &adapter->hw; ++ int i; ++ ++ /* if interface is down do nothing */ ++ if (test_bit(__IXGBE_DOWN, &adapter->state)) ++ return; ++ ++ if (ixgbe_reinit_fdir_tables_82599(hw) == 0) { ++ for (i = 0; i < adapter->num_tx_queues; i++) ++ set_bit(__IXGBE_TX_FDIR_INIT_DONE, ++ &(adapter->tx_ring[i]->state)); ++ } else { ++ DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, " ++ "ignored adding FDIR ATR filters \n"); ++ } ++ /* Done FDIR Re-initialization, enable transmits */ ++ netif_tx_start_all_queues(adapter->netdev); ++} ++ ++#endif /* HAVE_TX_MQ */ ++/** ++ * ixgbe_watchdog_task - worker thread to bring link up ++ * @work: pointer to work_struct containing our data ++ **/ ++static void ixgbe_watchdog_task(struct work_struct *work) ++{ ++ struct ixgbe_adapter *adapter = container_of(work, ++ struct ixgbe_adapter, ++ watchdog_task); ++ struct net_device *netdev = adapter->netdev; ++ struct ixgbe_hw *hw = &adapter->hw; ++ u32 link_speed = adapter->link_speed; ++ bool link_up = adapter->link_up; ++ int i; ++ struct ixgbe_ring *tx_ring; ++ int some_tx_pending = 0; ++ ++ 
/* if interface is down do nothing */ ++ if (test_bit(__IXGBE_DOWN, &adapter->state)) ++ return; ++ ++ if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) { ++ if (hw->mac.ops.check_link) { ++ hw->mac.ops.check_link(hw, &link_speed, &link_up, false); ++ } else { ++ /* always assume link is up, if no check link function */ ++ link_speed = IXGBE_LINK_SPEED_10GB_FULL; ++ link_up = true; ++ } ++ if (link_up) { ++ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { ++ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) ++ hw->mac.ops.fc_enable(hw, i); ++ } else { ++ hw->mac.ops.fc_enable(hw, 0); ++ } ++ } ++ ++ if (link_up || ++ time_after(jiffies, (adapter->link_check_timeout + ++ IXGBE_TRY_LINK_TIMEOUT))) { ++ adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; ++ IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC); ++ IXGBE_WRITE_FLUSH(hw); ++ } ++ adapter->link_up = link_up; ++ adapter->link_speed = link_speed; ++ } ++ ++ if (link_up) { ++ if (!netif_carrier_ok(netdev)) { ++ bool flow_rx, flow_tx; ++ ++ switch (hw->mac.type) { ++ case ixgbe_mac_82598EB: { ++ u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL); ++ u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS); ++ flow_rx = !!(frctl & IXGBE_FCTRL_RFCE); ++ flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X); ++ } ++ break; ++ case ixgbe_mac_82599EB: { ++ u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); ++ u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG); ++ flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE); ++ flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X); ++ } ++ break; ++ default: ++ flow_tx = false; ++ flow_rx = false; ++ break; ++ } ++ DPRINTK(LINK, INFO, "NIC Link is Up %s, " ++ "Flow Control: %s\n", ++ (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? ++ "10 Gbps" : ++ (link_speed == IXGBE_LINK_SPEED_1GB_FULL ? ++ "1 Gbps" : "unknown speed")), ++ ((flow_rx && flow_tx) ? "RX/TX" : ++ (flow_rx ? "RX" : ++ (flow_tx ? "TX" : "None")))); ++ ++ netif_carrier_on(netdev); ++ netif_tx_wake_all_queues(netdev); ++ } else { ++ /* Force detection of hung controller */ ++ for (i = 0; i < adapter->num_tx_queues; i++) { ++ tx_ring = adapter->tx_ring[i]; ++ set_check_for_tx_hang(tx_ring); ++ } ++ } ++ } else { ++ adapter->link_up = false; ++ adapter->link_speed = 0; ++ if (netif_carrier_ok(netdev)) { ++ DPRINTK(LINK, INFO, "NIC Link is Down\n"); ++ netif_carrier_off(netdev); ++ netif_tx_stop_all_queues(netdev); ++ } ++ } ++ ++ if (!netif_carrier_ok(netdev)) { ++ for (i = 0; i < adapter->num_tx_queues; i++) { ++ tx_ring = adapter->tx_ring[i]; ++ if (tx_ring->next_to_use != tx_ring->next_to_clean) { ++ some_tx_pending = 1; ++ break; ++ } ++ } ++ ++ if (some_tx_pending) { ++ /* We've lost link, so the controller stops DMA, ++ * but we've got queued Tx work that's never going ++ * to get done, so reset controller to flush Tx. ++ * (Do the reset outside of interrupt context). 
++ */ ++ schedule_work(&adapter->reset_task); ++ } ++ } ++ ++ ixgbe_update_stats(adapter); ++ ++ if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) { ++ /* poll faster when waiting for link */ ++ mod_timer(&adapter->watchdog_timer, jiffies + (HZ/10)); ++ } ++ ++} ++ ++void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens, ++ u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx) ++{ ++ struct ixgbe_adv_tx_context_desc *context_desc; ++ struct ixgbe_tx_buffer *tx_buffer_info; ++ u16 i = tx_ring->next_to_use; ++ ++ tx_buffer_info = &tx_ring->tx_buffer_info[i]; ++ tx_buffer_info->time_stamp = jiffies; ++ tx_buffer_info->next_to_watch = i; ++ ++ context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i); ++ ++ i++; ++ if (i == tx_ring->count) ++ tx_ring->next_to_use = 0; ++ else ++ tx_ring->next_to_use = i; ++ ++ /* set bits to identify this as an advanced context descriptor */ ++ type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; ++ ++ context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); ++ context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof); ++ context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); ++ context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); ++} ++ ++static int ixgbe_tso(struct ixgbe_ring *tx_ring, struct sk_buff *skb, ++ u32 tx_flags, u8 *hdr_len) ++{ ++#ifdef NETIF_F_TSO ++ int err; ++ u32 vlan_macip_lens, type_tucmd; ++ u32 mss_l4len_idx, l4len; ++ ++ if (!skb_is_gso(skb)) ++#endif /* NETIF_F_TSO */ ++ return 0; ++#ifdef NETIF_F_TSO ++ ++ if (skb_header_cloned(skb)) { ++ err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); ++ if (err) ++ return err; ++ } ++ ++ /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ ++ type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; ++ ++ if (skb->protocol == __constant_htons(ETH_P_IP)) { ++ struct iphdr *iph = ip_hdr(skb); ++ iph->tot_len = 0; ++ iph->check = 0; ++ tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, ++ iph->daddr, 0, ++ IPPROTO_TCP, ++ 0); ++ type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; ++#ifdef NETIF_F_TSO6 ++ } else if (skb_is_gso_v6(skb)) { ++ ipv6_hdr(skb)->payload_len = 0; ++ tcp_hdr(skb)->check = ++ ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, ++ &ipv6_hdr(skb)->daddr, ++ 0, IPPROTO_TCP, 0); ++#endif ++ } ++ ++ l4len = tcp_hdrlen(skb); ++ *hdr_len = skb_transport_offset(skb) + l4len; ++ ++ /* mss_l4len_id: use 1 as index for TSO */ ++ mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT; ++ mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; ++ mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT); ++ ++ /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ ++ vlan_macip_lens = skb_network_header_len(skb); ++ vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; ++ vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; ++ ++ ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, ++ mss_l4len_idx); ++ ++ return 1; ++#endif ++} ++ ++static bool ixgbe_tx_csum(struct ixgbe_ring *tx_ring, ++ struct sk_buff *skb, u32 tx_flags) ++{ ++ u32 vlan_macip_lens = 0, type_tucmd = 0; ++ ++ if (skb->ip_summed != CHECKSUM_PARTIAL) { ++ if (!(tx_flags & IXGBE_TX_FLAGS_VLAN)) ++ return false; ++ } else { ++ __be16 protocol = skb->protocol; ++ ++#ifdef NETIF_F_HW_VLAN_TX ++ if (skb->protocol == __constant_htons(ETH_P_8021Q)) { ++ const struct vlan_ethhdr *vhdr = ++ (const struct vlan_ethhdr *)skb->data; ++ ++ protocol = vhdr->h_vlan_encapsulated_proto; ++ } else { ++ protocol = skb->protocol; ++ } ++ ++#endif ++ switch (protocol) { ++ case __constant_htons(ETH_P_IP): ++ if (ip_hdr(skb)->protocol == IPPROTO_TCP) ++ 
type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP | ++ IXGBE_ADVTXD_TUCMD_IPV4; ++ else ++ type_tucmd = IXGBE_ADVTXD_TUCMD_IPV4; ++ break; ++#ifdef NETIF_F_IPV6_CSUM ++ case __constant_htons(ETH_P_IPV6): ++ /* XXX what about other V6 headers?? */ ++ if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) ++ type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; ++ break; ++#endif ++ default: ++ if (unlikely(net_ratelimit())) { ++ dev_warn(tx_ring->dev, ++ "partial checksum but proto=%x!\n", ++ skb->protocol); ++ } ++ break; ++ } ++ vlan_macip_lens = skb_network_header_len(skb); ++ } ++ ++ vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; ++ vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; ++ ++ ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, 0); ++ ++ return (skb->ip_summed == CHECKSUM_PARTIAL); ++} ++ ++static u16 ixgbe_tx_map(struct ixgbe_ring *tx_ring, ++ struct sk_buff *skb, u32 tx_flags, ++ unsigned int first, const u8 hdr_len) ++{ ++ struct device *dev = tx_ring->dev; ++ struct ixgbe_tx_buffer *tx_buffer_info; ++#ifdef MAX_SKB_FRAGS ++ unsigned int nr_frags = skb_shinfo(skb)->nr_frags; ++ unsigned int f = 0; ++ unsigned int data_len = skb->data_len; ++#endif ++ unsigned int len = skb_headlen(skb), bytecount = skb->len; ++ u32 offset = 0, size; ++ u16 gso_segs = 1; ++ u16 i = tx_ring->next_to_use; ++ u16 count = 0; ++ ++#ifdef IXGBE_FCOE ++ if (tx_flags & IXGBE_TX_FLAGS_FCOE) { ++ if (data_len >= sizeof(struct fcoe_crc_eof)) { ++ data_len -= sizeof(struct fcoe_crc_eof); ++ } else { ++ len -= sizeof(struct fcoe_crc_eof) - data_len; ++ data_len = 0; ++ } ++ } ++ ++#endif ++ while (len) { ++ tx_buffer_info = &tx_ring->tx_buffer_info[i]; ++ size = min_t(unsigned int, len, IXGBE_MAX_DATA_PER_TXD); ++ ++ tx_buffer_info->length = size; ++ tx_buffer_info->mapped_as_page = false; ++ tx_buffer_info->dma = dma_map_single(dev, ++ skb->data + offset, ++ size, DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, tx_buffer_info->dma)) ++ goto dma_error; ++ tx_buffer_info->time_stamp = jiffies; ++ tx_buffer_info->next_to_watch = i; ++ ++ len -= size; ++ offset += size; ++ count++; ++ i++; ++ if (i == tx_ring->count) ++ i = 0; ++ } ++ ++#ifdef MAX_SKB_FRAGS ++ while (data_len && (f < nr_frags)) { ++ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[f]; ++ len = min_t(unsigned int, data_len, frag->size); ++ ++ offset = frag->page_offset; ++ ++ while (len) { ++ tx_buffer_info = &tx_ring->tx_buffer_info[i]; ++ size = min_t(unsigned int, len, IXGBE_MAX_DATA_PER_TXD); ++ ++ tx_buffer_info->length = size; ++ tx_buffer_info->mapped_as_page = true; ++ tx_buffer_info->dma = dma_map_page(dev, ++ frag->page, ++ offset, size, ++ DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, tx_buffer_info->dma)) ++ goto dma_error; ++ tx_buffer_info->time_stamp = jiffies; ++ tx_buffer_info->next_to_watch = i; ++ ++ len -= size; ++ data_len -= size; ++ offset += size; ++ count++; ++ i++; ++ if (i == tx_ring->count) ++ i = 0; ++ } ++ f++; ++ } ++ ++#endif ++ if (i == 0) ++ i = tx_ring->count; ++ i--; ++ ++#ifdef NETIF_F_TSO ++ if (tx_flags & IXGBE_TX_FLAGS_TSO) ++ gso_segs = skb_shinfo(skb)->gso_segs; ++#ifdef IXGBE_FCOE ++ /* adjust for FCoE Sequence Offload */ ++ else if (tx_flags & IXGBE_TX_FLAGS_FSO) ++ gso_segs = DIV_ROUND_UP(skb->len - hdr_len, ++ skb_shinfo(skb)->gso_size); ++#endif /* IXGBE_FCOE */ ++#endif ++ bytecount += ((gso_segs - 1) * hdr_len); ++ ++ /* multiply data chunks by size of headers */ ++ tx_ring->tx_buffer_info[i].bytecount = bytecount; ++ tx_ring->tx_buffer_info[i].gso_segs = gso_segs; ++ 
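The context descriptor written by ixgbe_tso() and ixgbe_tx_csum() above carries the whole offload description in two packed words: vlan_macip_lens (VLAN tag, MAC header length, IP header length) and mss_l4len_idx (MSS, L4 header length, context index). A standalone illustration of that packing follows; the shift values here are placeholders for the sketch, not the driver's IXGBE_ADVTXD_* constants:

#include <stdint.h>
#include <stdio.h>

/* Placeholder shift values for this sketch only; the real ones are the
 * IXGBE_ADVTXD_*_SHIFT constants defined in ixgbe_type.h. */
#define L4LEN_SHIFT     8
#define MSS_SHIFT       16
#define IDX_SHIFT       4
#define MACLEN_SHIFT    9

int main(void)
{
        uint32_t l4len = 20;            /* TCP header bytes */
        uint32_t mss = 1448;
        uint32_t ip_hlen = 20;          /* network header length */
        uint32_t mac_hlen = 14;         /* network header offset */

        /* Same packing order as ixgbe_tso() above; the VLAN tag would be
         * or'd into the remaining high bits of vlan_macip_lens (omitted). */
        uint32_t mss_l4len_idx = (l4len << L4LEN_SHIFT) |
                                 (mss << MSS_SHIFT) |
                                 (1 << IDX_SHIFT);
        uint32_t vlan_macip_lens = ip_hlen | (mac_hlen << MACLEN_SHIFT);

        printf("mss_l4len_idx   = 0x%08x\n", mss_l4len_idx);
        printf("vlan_macip_lens = 0x%08x\n", vlan_macip_lens);
        return 0;
}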
tx_ring->tx_buffer_info[i].skb = skb; ++ tx_ring->tx_buffer_info[first].next_to_watch = i; ++ ++ return count; ++ ++dma_error: ++ dev_err(dev, "TX DMA map failed\n"); ++ ++ /* clear timestamp and dma mappings for failed tx_buffer_info map */ ++ tx_buffer_info->dma = 0; ++ tx_buffer_info->time_stamp = 0; ++ tx_buffer_info->next_to_watch = 0; ++ ++ /* clear timestamp and dma mappings for remaining portion of packet */ ++ for (; count > 0; count--) { ++ if (i == 0) ++ i = tx_ring->count; ++ i--; ++ tx_buffer_info = &tx_ring->tx_buffer_info[i]; ++ ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); ++ } ++ ++ return 0; ++} ++ ++#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \ ++ IXGBE_TXD_CMD_RS | \ ++ IXGBE_TXD_CMD_IFCS) ++ ++static void ixgbe_tx_queue(struct ixgbe_ring *tx_ring, int tx_flags, ++ int count, u32 paylen, const u8 hdr_len) ++{ ++ union ixgbe_adv_tx_desc *tx_desc = NULL; ++ struct ixgbe_tx_buffer *tx_buffer_info; ++ u32 olinfo_status = 0, cmd_type_len; ++ u16 i; ++ ++ cmd_type_len = IXGBE_ADVTXD_DTYP_DATA | ++ IXGBE_ADVTXD_DCMD_IFCS | ++ IXGBE_ADVTXD_DCMD_DEXT; ++ ++ if (tx_flags & IXGBE_TX_FLAGS_VLAN) ++ cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE; ++ ++ if (tx_flags & IXGBE_TX_FLAGS_TSO) { ++ cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; ++ ++ olinfo_status |= IXGBE_TXD_POPTS_TXSM << ++ IXGBE_ADVTXD_POPTS_SHIFT; ++ ++ /* use index 1 context for tso */ ++ olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT); ++ if (tx_flags & IXGBE_TX_FLAGS_IPV4) ++ olinfo_status |= IXGBE_TXD_POPTS_IXSM << ++ IXGBE_ADVTXD_POPTS_SHIFT; ++ ++ } else if (tx_flags & IXGBE_TX_FLAGS_CSUM) { ++ olinfo_status |= IXGBE_TXD_POPTS_TXSM << ++ IXGBE_ADVTXD_POPTS_SHIFT; ++ ++#ifdef IXGBE_FCOE ++ } else if (tx_flags & IXGBE_TX_FLAGS_FCOE) { ++ olinfo_status |= IXGBE_ADVTXD_CC; ++ olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT); ++ if (tx_flags & IXGBE_TX_FLAGS_FSO) ++ cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; ++#endif /* IXGBE_FCOE */ ++ } ++ ++ olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT); ++ ++ i = tx_ring->next_to_use; ++ while (count--) { ++ tx_buffer_info = &tx_ring->tx_buffer_info[i]; ++ tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i); ++ tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma); ++ tx_desc->read.cmd_type_len = ++ cpu_to_le32(cmd_type_len | tx_buffer_info->length); ++ tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); ++ i++; ++ if (i == tx_ring->count) ++ i = 0; ++ } ++ tx_ring->next_to_use = i; ++ ++ tx_desc->read.cmd_type_len |= cpu_to_le32(IXGBE_TXD_CMD); ++ ++ /* ++ * Force memory writes to complete before letting h/w ++ * know there are new descriptors to fetch. (Only ++ * applicable for weak-ordered memory model archs, ++ * such as IA-64). 
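The comment above makes the key ordering point for ixgbe_tx_queue(): the descriptor writes must be visible in memory before the tail register write that follows, which is what the barrier is for. A minimal, self-contained sketch of that producer pattern; fake_wmb() and fake_writel() stand in for the kernel's wmb() and writel() purely so the example compiles on its own:

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8

struct fake_desc {
        uint64_t buffer_addr;
        uint32_t cmd_type_len;
        uint32_t olinfo_status;
};

/* Stand-ins for the kernel's wmb() and writel(); illustration only. */
static void fake_wmb(void)          { __sync_synchronize(); }
static void fake_writel(uint32_t v) { printf("tail <- %u\n", v); }

static void queue_descriptors(struct fake_desc *ring, uint16_t *next_to_use,
                              const uint64_t *dma, int count)
{
        uint16_t i = *next_to_use;
        int n;

        for (n = 0; n < count; n++) {
                ring[i].buffer_addr = dma[n];
                ring[i].cmd_type_len = 0;       /* command flags omitted */
                ring[i].olinfo_status = 0;
                if (++i == RING_SIZE)
                        i = 0;
        }
        *next_to_use = i;

        /* Descriptor writes must be globally visible before the tail
         * register write tells the hardware to fetch them. */
        fake_wmb();
        fake_writel(i);
}

int main(void)
{
        struct fake_desc ring[RING_SIZE] = { {0} };
        uint16_t next_to_use = 0;
        uint64_t dma[2] = { 0x1000, 0x2000 };

        queue_descriptors(ring, &next_to_use, dma, 2);
        return 0;
}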
++ */ ++ wmb(); ++ ++ writel(i, tx_ring->tail); ++} ++ ++static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb, ++ u8 queue, u32 tx_flags) ++{ ++ struct ethhdr *eth = (struct ethhdr *)skb->data; ++ struct iphdr *iph = ip_hdr(skb); ++ struct ixgbe_atr_input atr_input; ++ u16 vlan_id, src_port, dst_port; ++ u8 l4type = 0; ++ ++ /* Right now, we support IPv4 only */ ++ if (skb->protocol != htons(ETH_P_IP)) ++ return; ++ ++ /* check if we're UDP or TCP */ ++ if (iph->protocol == IPPROTO_TCP) { ++ struct tcphdr *th = tcp_hdr(skb); ++ src_port = th->source; ++ dst_port = th->dest; ++ l4type |= IXGBE_ATR_L4TYPE_TCP; ++ /* l4type IPv4 type is 0, no need to assign */ ++ } else { ++ /* Unsupported L4 header, just bail here */ ++ return; ++ } ++ ++ memset(&atr_input, 0, sizeof(struct ixgbe_atr_input)); ++ ++ vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >> ++ IXGBE_TX_FLAGS_VLAN_SHIFT; ++ ++ ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id); ++ ixgbe_atr_set_src_port_82599(&atr_input, dst_port); ++ ixgbe_atr_set_dst_port_82599(&atr_input, src_port); ++ ixgbe_atr_set_flex_byte_82599(&atr_input, eth->h_proto); ++ ixgbe_atr_set_l4type_82599(&atr_input, l4type); ++ /* src and dst are inverted, think how the receiver sees them */ ++ ixgbe_atr_set_src_ipv4_82599(&atr_input, iph->daddr); ++ ixgbe_atr_set_dst_ipv4_82599(&atr_input, iph->saddr); ++ ++ /* This assumes the Rx queue and Tx queue are bound to the same CPU */ ++ ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue); ++} ++ ++static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size) ++{ ++ netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); ++ /* Herbert's original patch had: ++ * smp_mb__after_netif_stop_queue(); ++ * but since that doesn't exist yet, just open code it. */ ++ smp_mb(); ++ ++ /* We need to check again in a case another CPU has just ++ * made room available. */ ++ if (likely(IXGBE_DESC_UNUSED(tx_ring) < size)) ++ return -EBUSY; ++ ++ /* A reprieve! 
- use start_queue because it doesn't call schedule */ ++ netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); ++ ++tx_ring->tx_stats.restart_queue; ++ return 0; ++} ++ ++static int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size) ++{ ++ if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size)) ++ return 0; ++ return __ixgbe_maybe_stop_tx(tx_ring, size); ++} ++ ++netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, ++ struct ixgbe_adapter *adapter, ++ struct ixgbe_ring *tx_ring) ++{ ++ struct net_device *netdev = tx_ring->netdev; ++ int tso; ++ int count = 0, tx_map_count = 0; ++#ifdef MAX_SKB_FRAGS ++#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD ++ unsigned int f; ++#endif ++#endif ++ u32 tx_flags = 0; ++ u16 first; ++ u8 hdr_len = 0; ++ ++ /* ++ * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD, ++ * + 1 desc for skb_head_len/IXGBE_MAX_DATA_PER_TXD, ++ * + 2 desc gap to keep tail from touching head, ++ * + 1 desc for context descriptor, ++ * otherwise try next time ++ */ ++#ifdef MAX_SKB_FRAGS ++#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD ++ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) ++ count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); ++#else ++ count += skb_shinfo(skb)->nr_frags; ++#endif ++#endif ++ count += TXD_USE_COUNT(skb_headlen(skb)); ++ if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) { ++ tx_ring->tx_stats.tx_busy++; ++ return NETDEV_TX_BUSY; ++ } ++ ++#ifdef NETIF_F_HW_VLAN_TX ++ if (adapter->vlgrp && vlan_tx_tag_present(skb)) { ++ tx_flags |= vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT; ++ tx_flags |= IXGBE_TX_FLAGS_VLAN; ++ } ++ ++#endif ++#ifdef HAVE_TX_MQ ++ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { ++ tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK; ++#ifdef IXGBE_FCOE ++ /* for FCoE with DCB, we force the priority to what ++ * was specified by the switch */ ++ if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && ++ ((skb->protocol == htons(ETH_P_FCOE)) || ++ (skb->protocol == htons(ETH_P_FIP)))) ++ tx_flags |= adapter->fcoe.up << ++ IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT; ++ else ++#endif /* IXGBE_FCOE */ ++ tx_flags |= skb->queue_mapping << ++ IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT; ++ tx_flags |= IXGBE_TX_FLAGS_VLAN; ++ } ++ ++#endif /* HAVE_TX_MQ */ ++ first = tx_ring->next_to_use; ++ ++ if (skb->protocol == htons(ETH_P_IP)) ++ tx_flags |= IXGBE_TX_FLAGS_IPV4; ++ ++#ifdef IXGBE_FCOE ++ /* setup tx offload for FCoE */ ++ else if (skb->protocol == htons(ETH_P_FCOE)) { ++ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { ++ tso = ixgbe_fso(tx_ring, skb, tx_flags, &hdr_len); ++ if (tso < 0) { ++ dev_kfree_skb_any(skb); ++ return NETDEV_TX_OK; ++ } ++ if (tso) ++ tx_flags |= IXGBE_TX_FLAGS_FSO | ++ IXGBE_TX_FLAGS_FCOE; ++ else ++ tx_flags |= IXGBE_TX_FLAGS_FCOE; ++ ++ goto xmit_fcoe; ++ } ++ } ++ ++#endif /* IXGBE_FCOE */ ++ if ((tso = ixgbe_tso(tx_ring, skb, tx_flags, &hdr_len))) ++ tx_flags |= IXGBE_TX_FLAGS_TSO; ++ else if (ixgbe_tx_csum(tx_ring, skb, tx_flags)) ++ tx_flags |= IXGBE_TX_FLAGS_CSUM; ++ ++ if (tso < 0) { ++ dev_kfree_skb_any(skb); ++ return NETDEV_TX_OK; ++ } ++ ++ /* add the ATR filter if ATR is on */ ++ if (tx_ring->atr_sample_rate) { ++ ++tx_ring->atr_count; ++ if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) && ++ test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state)) { ++ ixgbe_atr(adapter, skb, tx_ring->queue_index, tx_flags); ++ tx_ring->atr_count = 0; ++ } ++ } ++ ++#ifdef IXGBE_FCOE ++xmit_fcoe: ++#endif /* IXGBE_FCOE */ ++ tx_map_count = ixgbe_tx_map(tx_ring, skb, tx_flags, first, hdr_len); ++ if (!tx_map_count) { ++ /* handle dma mapping 
errors in ixgbe_tx_map */ ++ dev_kfree_skb_any(skb); ++ tx_ring->next_to_use = first; ++ return NETDEV_TX_OK; ++ } ++ ++ ixgbe_tx_queue(tx_ring, tx_flags, tx_map_count, skb->len, hdr_len); ++ ++ netdev->trans_start = jiffies; ++ ++ ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED); ++ ++ return NETDEV_TX_OK; ++} ++ ++static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ struct ixgbe_ring *tx_ring; ++ ++#ifdef HAVE_TX_MQ ++ tx_ring = adapter->tx_ring[skb->queue_mapping]; ++#else ++ tx_ring = adapter->tx_ring[0]; ++#endif ++ return ixgbe_xmit_frame_ring(skb, adapter, tx_ring); ++} ++ ++/** ++ * ixgbe_get_stats - Get System Network Statistics ++ * @netdev: network interface device structure ++ * ++ * Returns the address of the device statistics structure. ++ * The statistics are actually updated from the timer callback. ++ **/ ++static struct net_device_stats *ixgbe_get_stats(struct net_device *netdev) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ ++ /* only return the current stats */ ++ return &adapter->net_stats; ++} ++ ++/** ++ * ixgbe_set_mac - Change the Ethernet Address of the NIC ++ * @netdev: network interface device structure ++ * @p: pointer to an address structure ++ * ++ * Returns 0 on success, negative on failure ++ **/ ++static int ixgbe_set_mac(struct net_device *netdev, void *p) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ struct ixgbe_hw *hw = &adapter->hw; ++ struct sockaddr *addr = p; ++ ++ if (!is_valid_ether_addr(addr->sa_data)) ++ return -EADDRNOTAVAIL; ++ ++ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); ++ memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); ++ ++ if (hw->mac.ops.set_rar) ++ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); ++ ++ return 0; ++} ++ ++#if defined(HAVE_NETDEV_STORAGE_ADDRESS) && defined(NETDEV_HW_ADDR_T_SAN) ++/** ++ * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding ++ * netdev->dev_addr_list ++ * @netdev: network interface device structure ++ * ++ * Returns non-zero on failure ++ **/ ++static int ixgbe_add_sanmac_netdev(struct net_device *dev) ++{ ++ int err = 0; ++ struct ixgbe_adapter *adapter = netdev_priv(dev); ++ struct ixgbe_mac_info *mac = &adapter->hw.mac; ++ ++ if (is_valid_ether_addr(mac->san_addr)) { ++ rtnl_lock(); ++ err = dev_addr_add(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN); ++ rtnl_unlock(); ++ } ++ return err; ++} ++ ++/** ++ * ixgbe_del_sanmac_netdev - Removes the SAN MAC address to the corresponding ++ * netdev->dev_addr_list ++ * @netdev: network interface device structure ++ * ++ * Returns non-zero on failure ++ **/ ++static int ixgbe_del_sanmac_netdev(struct net_device *dev) ++{ ++ int err = 0; ++ struct ixgbe_adapter *adapter = netdev_priv(dev); ++ struct ixgbe_mac_info *mac = &adapter->hw.mac; ++ ++ if (is_valid_ether_addr(mac->san_addr)) { ++ rtnl_lock(); ++ err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN); ++ rtnl_unlock(); ++ } ++ return err; ++} ++ ++#endif /* (HAVE_NETDEV_STORAGE_ADDRESS) && defined(NETDEV_HW_ADDR_T_SAN) */ ++#ifdef ETHTOOL_OPS_COMPAT ++/** ++ * ixgbe_ioctl - ++ * @netdev: ++ * @ifreq: ++ * @cmd: ++ **/ ++static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) ++{ ++ switch (cmd) { ++ case SIOCETHTOOL: ++ return ethtool_ioctl(ifr); ++ default: ++ return -EOPNOTSUPP; ++ } ++} ++ ++#endif ++#ifdef CONFIG_NET_POLL_CONTROLLER ++/* ++ * Polling 'interrupt' - used by things like netconsole to send 
skbs ++ * without having to re-enable interrupts. It's not called while ++ * the interrupt routine is executing. ++ */ ++static void ixgbe_netpoll(struct net_device *netdev) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ int i; ++ ++ /* if interface is down do nothing */ ++ if (test_bit(__IXGBE_DOWN, &adapter->state)) ++ return; ++ ++#ifndef CONFIG_IXGBE_NAPI ++ ixgbe_irq_disable(adapter); ++#endif ++ adapter->flags |= IXGBE_FLAG_IN_NETPOLL; ++ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { ++ int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; ++ for (i = 0; i < num_q_vectors; i++) { ++ struct ixgbe_q_vector *q_vector = adapter->q_vector[i]; ++ ixgbe_msix_clean_many(0, q_vector); ++ } ++ } else { ++ ixgbe_intr(adapter->pdev->irq, netdev); ++ } ++ adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL; ++#ifndef CONFIG_IXGBE_NAPI ++ ixgbe_irq_enable(adapter, true, true); ++#endif ++} ++ ++#endif ++#ifdef HAVE_NETDEV_SELECT_QUEUE ++static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(dev); ++ int txq = smp_processor_id(); ++ ++#ifdef IXGBE_FCOE ++ if ((skb->protocol == htons(ETH_P_FCOE)) || ++ (skb->protocol == htons(ETH_P_FIP))) { ++ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { ++ txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1); ++ txq += adapter->ring_feature[RING_F_FCOE].mask; ++ return txq; ++ } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { ++ txq = adapter->fcoe.up; ++ return txq; ++ } ++ } ++ ++#endif /* IXGBE_FCOE */ ++ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { ++ while (unlikely(txq >= dev->real_num_tx_queues)) ++ txq -= dev->real_num_tx_queues; ++ return txq; ++ } ++ ++ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { ++ if (skb->priority == TC_PRIO_CONTROL) ++ txq = adapter->ring_feature[RING_F_DCB].indices - 1; ++ else ++ txq = (skb->vlan_tci & IXGBE_TX_FLAGS_VLAN_PRIO_MASK) ++ >> 13; ++ return txq; ++ } ++ return skb_tx_hash(dev, skb); ++} ++ ++#endif /* HAVE_NETDEV_SELECT_QUEUE */ ++#ifdef HAVE_NET_DEVICE_OPS ++static const struct net_device_ops ixgbe_netdev_ops = { ++ .ndo_open = &ixgbe_open, ++ .ndo_stop = &ixgbe_close, ++ .ndo_start_xmit = &ixgbe_xmit_frame, ++ .ndo_get_stats = &ixgbe_get_stats, ++ .ndo_set_rx_mode = &ixgbe_set_rx_mode, ++ .ndo_set_multicast_list = &ixgbe_set_rx_mode, ++ .ndo_validate_addr = eth_validate_addr, ++ .ndo_set_mac_address = &ixgbe_set_mac, ++ .ndo_change_mtu = &ixgbe_change_mtu, ++#ifdef ETHTOOL_OPS_COMPAT ++ .ndo_do_ioctl = &ixgbe_ioctl, ++#endif ++ .ndo_tx_timeout = &ixgbe_tx_timeout, ++ .ndo_vlan_rx_register = &ixgbe_vlan_rx_register, ++ .ndo_vlan_rx_add_vid = &ixgbe_vlan_rx_add_vid, ++ .ndo_vlan_rx_kill_vid = &ixgbe_vlan_rx_kill_vid, ++#ifdef HAVE_IPLINK_VF_CONFIG ++ .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac, ++ .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan, ++ .ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw, ++ .ndo_get_vf_config = ixgbe_ndo_get_vf_config, ++#endif ++#ifdef CONFIG_NET_POLL_CONTROLLER ++ .ndo_poll_controller = &ixgbe_netpoll, ++#endif ++ .ndo_select_queue = &ixgbe_select_queue, ++#ifdef IXGBE_FCOE ++ .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get, ++ .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put, ++#ifdef HAVE_NETDEV_OPS_FCOE_ENABLE ++ .ndo_fcoe_enable = ixgbe_fcoe_enable, ++ .ndo_fcoe_disable = ixgbe_fcoe_disable, ++#endif ++#ifdef HAVE_NETDEV_OPS_FCOE_GETWWN ++ .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn, ++#endif ++#endif /* IXGBE_FCOE */ ++}; ++ ++#endif /* HAVE_NET_DEVICE_OPS */ ++ ++ ++ ++void ixgbe_assign_netdev_ops(struct net_device 
*dev) ++{ ++ struct ixgbe_adapter *adapter; ++ adapter = netdev_priv(dev); ++#ifdef HAVE_NET_DEVICE_OPS ++ dev->netdev_ops = &ixgbe_netdev_ops; ++#else /* HAVE_NET_DEVICE_OPS */ ++ dev->open = &ixgbe_open; ++ dev->stop = &ixgbe_close; ++ dev->hard_start_xmit = &ixgbe_xmit_frame; ++ dev->get_stats = &ixgbe_get_stats; ++#ifdef HAVE_SET_RX_MODE ++ dev->set_rx_mode = &ixgbe_set_rx_mode; ++#endif ++ dev->set_multicast_list = &ixgbe_set_rx_mode; ++ dev->set_mac_address = &ixgbe_set_mac; ++ dev->change_mtu = &ixgbe_change_mtu; ++#ifdef ETHTOOL_OPS_COMPAT ++ dev->do_ioctl = &ixgbe_ioctl; ++#endif ++#ifdef HAVE_TX_TIMEOUT ++ dev->tx_timeout = &ixgbe_tx_timeout; ++#endif ++#ifdef NETIF_F_HW_VLAN_TX ++ dev->vlan_rx_register = &ixgbe_vlan_rx_register; ++ dev->vlan_rx_add_vid = &ixgbe_vlan_rx_add_vid; ++ dev->vlan_rx_kill_vid = &ixgbe_vlan_rx_kill_vid; ++#endif ++#ifdef CONFIG_NET_POLL_CONTROLLER ++ dev->poll_controller = &ixgbe_netpoll; ++#endif ++#ifdef HAVE_NETDEV_SELECT_QUEUE ++ dev->select_queue = &ixgbe_select_queue; ++#endif /* HAVE_NETDEV_SELECT_QUEUE */ ++#endif /* HAVE_NET_DEVICE_OPS */ ++ ixgbe_set_ethtool_ops(dev); ++ dev->watchdog_timeo = 5 * HZ; ++} ++ ++static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter) ++{ ++#ifdef CONFIG_PCI_IOV ++ int err; ++ ++ err = pci_enable_sriov(adapter->pdev, adapter->num_vfs); ++ if (err) { ++ DPRINTK(PROBE, ERR, ++ "Failed to enable PCI sriov: %d\n", err); ++ goto err_novfs; ++ } ++ /* If call to enable VFs succeeded then allocate memory ++ * for per VF control structures. ++ */ ++ adapter->vfinfo = ++ kcalloc(adapter->num_vfs, ++ sizeof(struct vf_data_storage), GFP_KERNEL); ++ if (adapter->vfinfo) { ++ adapter->l2switch_enable = true; ++ adapter->repl_enable = true; ++ ++ /* RSS not compatible with SR-IOV operation */ ++ adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; ++ ++ /* Disable RSC when in SR-IOV mode */ ++ adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE | ++ IXGBE_FLAG2_RSC_ENABLED); ++ ++ adapter->flags &= ~(IXGBE_FLAG_RX_PS_ENABLED | ++ IXGBE_FLAG_RX_PS_CAPABLE); ++ ++ return; ++ } ++ ++ /* Oh oh */ ++ DPRINTK(PROBE, ERR, ++ "Unable to allocate memory for VF " ++ "Data Storage - SRIOV disabled\n"); ++ pci_disable_sriov(adapter->pdev); ++ ++err_novfs: ++ adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; ++ adapter->num_vfs = 0; ++#endif /* CONFIG_PCI_IOV */ ++} ++ ++/** ++ * ixgbe_probe - Device Initialization Routine ++ * @pdev: PCI device information struct ++ * @ent: entry in ixgbe_pci_tbl ++ * ++ * Returns 0 on success, negative on failure ++ * ++ * ixgbe_probe initializes an adapter identified by a pci_dev structure. ++ * The OS initialization, configuring of the adapter private structure, ++ * and a hardware reset occur. 
++ **/ ++static int __devinit ixgbe_probe(struct pci_dev *pdev, ++ const struct pci_device_id *ent) ++{ ++ struct net_device *netdev; ++ struct ixgbe_adapter *adapter = NULL; ++ struct ixgbe_hw *hw = NULL; ++ static int cards_found; ++ int i, err, pci_using_dac; ++#ifdef HAVE_TX_MQ ++ unsigned int indices; ++#endif ++ u32 part_num; ++ u8 part_str[IXGBE_PBA_LEN]; ++ u32 part_str_size = IXGBE_PBA_LEN; ++ enum ixgbe_mac_type mac_type = ixgbe_mac_unknown; ++#ifdef IXGBE_FCOE ++ u16 device_caps; ++#endif ++ ++ err = pci_enable_device(pdev); ++ if (err) ++ return err; ++ ++ if (!dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(64)) && ++ !dma_set_coherent_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(64))) { ++ pci_using_dac = 1; ++ } else { ++ err = dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(32)); ++ if (err) { ++ err = dma_set_coherent_mask(pci_dev_to_dev(pdev), ++ DMA_BIT_MASK(32)); ++ if (err) { ++ dev_err(pci_dev_to_dev(pdev), "No usable DMA " ++ "configuration, aborting\n"); ++ goto err_dma; ++ } ++ } ++ pci_using_dac = 0; ++ } ++ ++ err = pci_request_regions(pdev, ixgbe_driver_name); ++ if (err) { ++ dev_err(pci_dev_to_dev(pdev), ++ "pci_request_regions failed 0x%x\n", err); ++ goto err_pci_reg; ++ } ++ ++ /* ++ * The mac_type is needed before we have the adapter is set up ++ * so rather than maintain two devID -> MAC tables we dummy up ++ * an ixgbe_hw stuct and use ixgbe_set_mac_type. ++ */ ++ hw = vmalloc(sizeof(struct ixgbe_hw)); ++ if (!hw) { ++ printk(KERN_INFO "Unable to allocate memory for early mac " ++ "check\n"); ++ } else { ++ hw->vendor_id = pdev->vendor; ++ hw->device_id = pdev->device; ++ ixgbe_set_mac_type(hw); ++ mac_type = hw->mac.type; ++ vfree(hw); ++ } ++ ++ /* ++ * Workaround of Silicon errata on 82598. Disable LOs in the PCI switch ++ * port to which the 82598 is connected to prevent duplicate ++ * completions caused by LOs. We need the mac type so that we only ++ * do this on 82598 devices, ixgbe_set_mac_type does this for us if ++ * we set it's device ID. 
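ixgbe_probe() below negotiates DMA addressing by first requesting a 64-bit mask and, only if that fails, retrying with 32 bits; pci_using_dac records which one succeeded so NETIF_F_HIGHDMA can be set later. A compact standalone sketch of the same fallback, with a hypothetical try_set_mask() in place of dma_set_mask()/dma_set_coherent_mask():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for dma_set_mask()/dma_set_coherent_mask();
 * returns 0 on success like the kernel helpers do. Here it pretends
 * the 64-bit request fails so the fallback path is exercised. */
static int try_set_mask(uint64_t mask)
{
        return (mask == UINT64_MAX) ? -1 : 0;
}

int main(void)
{
        bool pci_using_dac;

        if (!try_set_mask(UINT64_MAX)) {
                pci_using_dac = true;           /* 64-bit DMA available */
        } else if (!try_set_mask(UINT32_MAX)) {
                pci_using_dac = false;          /* fall back to 32-bit DMA */
        } else {
                fprintf(stderr, "No usable DMA configuration, aborting\n");
                return 1;
        }

        printf("pci_using_dac = %d\n", pci_using_dac);
        return 0;
}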
++ */ ++ if (mac_type == ixgbe_mac_82598EB) ++ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S); ++ ++ pci_enable_pcie_error_reporting(pdev); ++ ++ pci_set_master(pdev); ++ ++#ifdef HAVE_TX_MQ ++ indices = num_possible_cpus(); ++ if (mac_type == ixgbe_mac_unknown) ++ indices = max_t(unsigned int, IXGBE_MAX_RSS_INDICES, ++ IXGBE_MAX_FDIR_INDICES); ++ else if (mac_type == ixgbe_mac_82598EB) ++ indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES); ++ else ++ indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES); ++ indices = max_t(unsigned int, indices, IXGBE_MAX_DCB_INDICES); ++#ifdef IXGBE_FCOE ++ indices += min_t(unsigned int, num_possible_cpus(), ++ IXGBE_MAX_FCOE_INDICES); ++#endif ++ netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices); ++#else ++ netdev = alloc_etherdev(sizeof(struct ixgbe_adapter)); ++#endif ++ if (!netdev) { ++ err = -ENOMEM; ++ goto err_alloc_etherdev; ++ } ++ ++ SET_NETDEV_DEV(netdev, &pdev->dev); ++ ++ adapter = netdev_priv(netdev); ++ pci_set_drvdata(pdev, adapter); ++ ++ adapter->netdev = netdev; ++ adapter->pdev = pdev; ++ hw = &adapter->hw; ++ hw->back = adapter; ++ adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; ++ ++#ifdef HAVE_DEVICE_NUMA_NODE ++ DPRINTK(TX_ERR, INFO, "my (original) node was: %d\n", ++ dev_to_node(&pdev->dev)); ++#endif /* HAVE_DEVICE_NUMA_NODE */ ++ ++#ifdef HAVE_PCI_ERS ++ /* ++ * call save state here in standalone driver because it relies on ++ * adapter struct to exist, and needs to call netdev_priv ++ */ ++ pci_save_state(pdev); ++ ++#endif ++ hw->hw_addr = ioremap(pci_resource_start(pdev, 0), ++ pci_resource_len(pdev, 0)); ++ if (!hw->hw_addr) { ++ err = -EIO; ++ goto err_ioremap; ++ } ++ ++ ixgbe_assign_netdev_ops(netdev); ++ ++ strcpy(netdev->name, pci_name(pdev)); ++ ++ adapter->bd_number = cards_found; ++ ++#ifdef IXGBE_TCP_TIMER ++ adapter->msix_addr = ioremap(pci_resource_start(pdev, 3), ++ pci_resource_len(pdev, 3)); ++ if (!adapter->msix_addr) { ++ err = -EIO; ++ printk("Error in ioremap of BAR3\n"); ++ goto err_map_msix; ++ } ++ ++#endif ++ /* set up this timer and work struct before calling get_invariants ++ * which might start the timer ++ */ ++ setup_timer(&adapter->sfp_timer, &ixgbe_sfp_timer, ++ (unsigned long) adapter); ++ INIT_WORK(&adapter->sfp_task, ixgbe_sfp_task); ++ ++ /* multispeed fiber has its own tasklet, called from GPI SDP1 context */ ++ INIT_WORK(&adapter->multispeed_fiber_task, ixgbe_multispeed_fiber_task); ++ ++ /* a new SFP+ module arrival, called from GPI SDP2 context */ ++ INIT_WORK(&adapter->sfp_config_module_task, ++ ixgbe_sfp_config_module_task); ++ ++ /* setup the private structure */ ++ err = ixgbe_sw_init(adapter); ++ if (err) ++ goto err_sw_init; ++ ++ /* Make it possible the adapter to be woken up via WOL */ ++ if (adapter->hw.mac.type == ixgbe_mac_82599EB) ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); ++ ++ /* ++ * If we have a fan, this is as early we know, warn if we ++ * have had a failure. ++ */ ++ if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { ++ u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); ++ if (esdp & IXGBE_ESDP_SDP1) ++ DPRINTK(PROBE, CRIT, ++ "Fan has stopped, replace the adapter\n"); ++ } ++ ++ /* reset_hw fills in the perm_addr as well */ ++ hw->phy.reset_if_overtemp = true; ++ err = hw->mac.ops.reset_hw(hw); ++ hw->phy.reset_if_overtemp = false; ++ if (err == IXGBE_ERR_SFP_NOT_PRESENT && ++ hw->mac.type == ixgbe_mac_82598EB) { ++ /* ++ * Start a kernel thread to watch for a module to arrive. 
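The HAVE_TX_MQ block above sizes the multiqueue net_device: start from the CPU count, clamp it to the per-MAC limit (RSS on 82598, Flow Director on 82599), then raise it to the DCB minimum (the FCoE addition is left out here). A standalone version of that calculation; the three MAX_* limits below are illustrative numbers, not the driver's ixgbe.h values:

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

/* Illustrative limits only; the real values come from ixgbe.h. */
#define MAX_RSS_INDICES   16
#define MAX_FDIR_INDICES  64
#define MAX_DCB_INDICES    8

/* Mirrors the sizing above: CPU count, clamped per MAC type,
 * then raised so DCB still has enough queues. */
static unsigned int tx_queue_indices(unsigned int ncpus, int is_82598)
{
        unsigned int indices = ncpus;

        if (is_82598)
                indices = MIN(indices, MAX_RSS_INDICES);
        else
                indices = MIN(indices, MAX_FDIR_INDICES);

        return MAX(indices, MAX_DCB_INDICES);
}

int main(void)
{
        printf("4-core 82598  -> %u queues\n", tx_queue_indices(4, 1));
        printf("32-core 82599 -> %u queues\n", tx_queue_indices(32, 0));
        return 0;
}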
++ * Only do this for 82598, since 82599 will generate interrupts ++ * on module arrival. ++ */ ++ set_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); ++ mod_timer(&adapter->sfp_timer, ++ round_jiffies(jiffies + (2 * HZ))); ++ err = 0; ++ } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { ++ DPRINTK(PROBE, ERR, "failed to load because an " ++ "unsupported SFP+ module type was detected.\n"); ++ goto err_sw_init; ++ } else if (err) { ++ DPRINTK(PROBE, ERR, "HW Init failed: %d\n", err); ++ goto err_sw_init; ++ } ++ ++ /* ++ * check_options must be called before setup_link to set up ++ * hw->fc completely ++ */ ++ ixgbe_check_options(adapter); ++ ++ DPRINTK(TX_ERR, INFO, "my (preferred) node is: %d\n", adapter->node); ++ ++ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) ++ ixgbe_probe_vf(adapter); ++ ++#ifdef MAX_SKB_FRAGS ++#ifdef NETIF_F_HW_VLAN_TX ++ netdev->features = NETIF_F_SG | ++ NETIF_F_IP_CSUM | ++ NETIF_F_HW_VLAN_TX | ++ NETIF_F_HW_VLAN_RX | ++ NETIF_F_HW_VLAN_FILTER; ++ ++#else ++ netdev->features = NETIF_F_SG | NETIF_F_IP_CSUM; ++ ++#endif ++#ifdef NETIF_F_IPV6_CSUM ++ netdev->features |= NETIF_F_IPV6_CSUM; ++#endif ++#ifdef NETIF_F_TSO ++ netdev->features |= NETIF_F_TSO; ++#ifdef NETIF_F_TSO6 ++ netdev->features |= NETIF_F_TSO6; ++#endif /* NETIF_F_TSO6 */ ++#endif /* NETIF_F_TSO */ ++#ifdef NETIF_F_GRO ++ netdev->features |= NETIF_F_GRO; ++#endif /* NETIF_F_GRO */ ++#ifdef NETIF_F_NTUPLE ++ /* ++ * If perfect filters were enabled in check_options(), enable them ++ * on the netdevice too. ++ */ ++ if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) ++ netdev->features |= NETIF_F_NTUPLE; ++#endif /* NETIF_F_NTUPLE */ ++ if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) ++ adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; ++ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) ++ adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; ++ if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) { ++ adapter->flags &= ~(IXGBE_FLAG_FDIR_HASH_CAPABLE ++ | IXGBE_FLAG_FDIR_PERFECT_CAPABLE); ++#ifdef NETIF_F_NTUPLE ++ /* clear n-tuple support in the netdev unconditionally */ ++ netdev->features &= ~NETIF_F_NTUPLE; ++#endif /* NETIF_F_NTUPLE */ ++ } ++ if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) { ++ netdev->features |= NETIF_F_LRO; ++ adapter->flags2 &= ~IXGBE_FLAG2_SWLRO_ENABLED; ++#ifndef IXGBE_NO_HW_RSC ++ adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; ++#else ++ netdev->features |= NETIF_F_LRO; ++ adapter->flags2 |= IXGBE_FLAG2_SWLRO_ENABLED; ++#endif ++ } else { ++#ifndef IXGBE_NO_LRO ++ netdev->features |= NETIF_F_LRO; ++ adapter->flags2 |= IXGBE_FLAG2_SWLRO_ENABLED; ++#endif ++ adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; ++ } ++#ifdef HAVE_NETDEV_VLAN_FEATURES ++#ifdef NETIF_F_TSO ++ netdev->vlan_features |= NETIF_F_TSO; ++#ifdef NETIF_F_TSO6 ++ netdev->vlan_features |= NETIF_F_TSO6; ++#endif /* NETIF_F_TSO6 */ ++#endif /* NETIF_F_TSO */ ++ netdev->vlan_features |= NETIF_F_IP_CSUM; ++#ifdef NETIF_F_IPV6_CSUM ++ netdev->vlan_features |= NETIF_F_IPV6_CSUM; ++#endif ++ netdev->vlan_features |= NETIF_F_SG; ++ ++#endif /* HAVE_NETDEV_VLAN_FEATURES */ ++#ifdef CONFIG_DCB ++ netdev->dcbnl_ops = &dcbnl_ops; ++#endif ++ ++#ifdef IXGBE_FCOE ++ switch (adapter->hw.mac.type) { ++ case ixgbe_mac_82599EB: ++#ifdef NETIF_F_FSO ++ if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) { ++ ixgbe_get_device_caps(hw, &device_caps); ++ if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS) { ++ adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; ++ adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; ++ DPRINTK(PROBE, INFO, "FCoE offload feature " ++ "is not 
available. Disabling FCoE " ++ "offload feature\n"); ++ } ++#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE ++ else { ++ adapter->flags |= IXGBE_FLAG_FCOE_ENABLED; ++ adapter->ring_feature[RING_F_FCOE].indices = ++ IXGBE_FCRETA_SIZE; ++ netdev->features |= NETIF_F_FSO; ++ netdev->features |= NETIF_F_FCOE_CRC; ++ netdev->features |= NETIF_F_FCOE_MTU; ++ netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1; ++ DPRINTK(PROBE, INFO, "Enabling FCoE offload " ++ "feature\n"); ++ } ++#endif /* HAVE_NETDEV_OPS_FCOE_ENABLE */ ++ } ++#ifdef HAVE_NETDEV_VLAN_FEATURES ++ if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) { ++ netdev->vlan_features |= NETIF_F_FSO; ++ netdev->vlan_features |= NETIF_F_FCOE_CRC; ++ netdev->vlan_features |= NETIF_F_FCOE_MTU; ++ } ++#endif /* HAVE_NETDEV_VLAN_FEATURES */ ++#endif /* NETIF_F_FSO */ ++ break; ++ default: ++ break; ++ } ++#endif /* IXGBE_FCOE */ ++ if (pci_using_dac) ++ netdev->features |= NETIF_F_HIGHDMA; ++ ++#endif /* MAX_SKB_FRAGS */ ++ /* make sure the EEPROM is good */ ++ if (hw->eeprom.ops.validate_checksum && ++ (hw->eeprom.ops.validate_checksum(hw, NULL) < 0)) { ++ DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n"); ++ err = -EIO; ++ goto err_sw_init; ++ } ++ ++ memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len); ++#ifdef ETHTOOL_GPERMADDR ++ memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len); ++ ++ if (ixgbe_validate_mac_addr(netdev->perm_addr)) { ++ DPRINTK(PROBE, INFO, "invalid MAC address\n"); ++ err = -EIO; ++ goto err_sw_init; ++ } ++#else ++ if (ixgbe_validate_mac_addr(netdev->dev_addr)) { ++ DPRINTK(PROBE, INFO, "invalid MAC address\n"); ++ err = -EIO; ++ goto err_sw_init; ++ } ++#endif ++ ++ /* power down the optics */ ++ if (hw->phy.multispeed_fiber) ++ ixgbe_disable_tx_laser(hw); ++ ++ setup_timer(&adapter->watchdog_timer, &ixgbe_watchdog, ++ (unsigned long) adapter); ++ ++ INIT_WORK(&adapter->reset_task, ixgbe_reset_task); ++ INIT_WORK(&adapter->watchdog_task, ixgbe_watchdog_task); ++ ++ err = ixgbe_init_interrupt_scheme(adapter); ++ if (err) ++ goto err_sw_init; ++ ++ switch (pdev->device) { ++ case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: ++ /* All except this subdevice support WOL */ ++ if (pdev->subsystem_device == ++ IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) { ++ adapter->wol = 0; ++ break; ++ } ++ case IXGBE_DEV_ID_82599_KX4: ++ adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX | ++ IXGBE_WUFC_MC | IXGBE_WUFC_BC); ++ break; ++ default: ++ adapter->wol = 0; ++ break; ++ } ++ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); ++ ++ /* save off EEPROM version number */ ++ ixgbe_read_eeprom(hw, 0x29, &adapter->eeprom_version); ++ ++ /* reset the hardware with the new settings */ ++ err = hw->mac.ops.start_hw(hw); ++ if (err == IXGBE_ERR_EEPROM_VERSION) { ++ /* We are running on a pre-production device, log a warning */ ++ DPRINTK(PROBE, INFO, "This device is a pre-production adapter/" ++ "LOM. Please be aware there may be issues associated " ++ "with your hardware. 
If you are experiencing problems " ++ "please contact your Intel or hardware representative " ++ "who provided you with this hardware.\n"); ++ } ++ /* pick up the PCI bus settings for reporting later */ ++ if (hw->mac.ops.get_bus_info) ++ hw->mac.ops.get_bus_info(hw); ++ ++ ++ strcpy(netdev->name, "eth%d"); ++ err = register_netdev(netdev); ++ if (err) ++ goto err_register; ++ ++ adapter->netdev_registered = true; ++ /* carrier off reporting is important to ethtool even BEFORE open */ ++ netif_carrier_off(netdev); ++ /* keep stopping all the transmit queues for older kernels */ ++ netif_tx_stop_all_queues(netdev); ++ ++#ifdef HAVE_TX_MQ ++ INIT_WORK(&adapter->fdir_reinit_task, ixgbe_fdir_reinit_task); ++#endif /* HAVE_TX_MQ */ ++ INIT_WORK(&adapter->check_overtemp_task, ixgbe_check_overtemp_task); ++ if (adapter->flags & IXGBE_FLAG_DCA_CAPABLE) { ++ err = dca_add_requester(&pdev->dev); ++ switch (err) { ++ case 0: ++ adapter->flags |= IXGBE_FLAG_DCA_ENABLED; ++ ixgbe_setup_dca(adapter); ++ break; ++ /* -19 is returned from the kernel when no provider is found */ ++ case -19: ++ DPRINTK(PROBE, INFO, "No DCA provider found. Please " ++ "start ioatdma for DCA functionality.\n"); ++ break; ++ default: ++ DPRINTK(PROBE, INFO, "DCA registration failed: %d\n", ++ err); ++ break; ++ } ++ } ++ ++ /* print all messages at the end so that we use our eth%d name */ ++ /* print bus type/speed/width info */ ++ DPRINTK(PROBE, INFO, "(PCI Express:%s:%s) ", ++ ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s": ++ (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s":"Unknown"), ++ (hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" : ++ (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" : ++ (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" : ++ ("Unknown")); ++ ++ /* print the MAC address */ ++ for (i = 0; i < 6; i++) ++ printk("%2.2x%c", netdev->dev_addr[i], i == 5 ? 
'\n' : ':'); ++ ++ /* Frist try to read PBA as a string */ ++ err = ixgbe_read_pba_string(hw, part_str, &part_str_size); ++ switch (err) { ++ case 0: ++ break; ++ case IXGBE_NOT_IMPLEMENTED: ++ /* old style PBA number */ ++ ixgbe_read_pba_num(hw, &part_num); ++ sprintf(part_str, "%06x-%03x\n", (part_num >> 8), ++ (part_num & 0xff)); ++ break; ++ default: ++ strcpy(part_str, "Unknown"); ++ break; ++ } ++ if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) ++ DPRINTK(PROBE, INFO, "MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n", ++ hw->mac.type, hw->phy.type, hw->phy.sfp_type, ++ part_str); ++ else ++ DPRINTK(PROBE, INFO, "MAC: %d, PHY: %d, PBA No: %s\n", ++ hw->mac.type, hw->phy.type, part_str); ++ ++ if (((hw->bus.speed == ixgbe_bus_speed_2500) && ++ (hw->bus.width <= ixgbe_bus_width_pcie_x4)) || ++ (hw->bus.width <= ixgbe_bus_width_pcie_x2)) { ++ DPRINTK(PROBE, WARNING, "PCI-Express bandwidth available for " ++ "this card is not sufficient for optimal " ++ "performance.\n"); ++ DPRINTK(PROBE, WARNING, "For optimal performance a x8 " ++ "PCI-Express slot is required.\n"); ++ } ++ ++#ifdef NETIF_F_GRO ++ if (adapter->netdev->features & NETIF_F_GRO) ++ DPRINTK(PROBE, INFO, "GRO is enabled\n"); ++ else if (adapter->flags2 & IXGBE_FLAG2_SWLRO_ENABLED) ++#else ++ if (adapter->flags2 & IXGBE_FLAG2_SWLRO_ENABLED) ++#endif ++ DPRINTK(PROBE, INFO, "Internal LRO is enabled \n"); ++ else ++ DPRINTK(PROBE, INFO, "LRO is disabled \n"); ++ ++ if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) ++ DPRINTK(PROBE, INFO, "HW RSC is enabled \n"); ++#ifdef CONFIG_PCI_IOV ++ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { ++ DPRINTK(PROBE, INFO, "IOV is enabled with %d VFs\n", ++ adapter->num_vfs); ++ for (i = 0; i < adapter->num_vfs; i++) ++ ixgbe_vf_configuration(pdev, (i | 0x10000000)); ++ } ++#endif ++ ++#if defined(HAVE_NETDEV_STORAGE_ADDRESS) && defined(NETDEV_HW_ADDR_T_SAN) ++ /* add san mac addr to netdev */ ++ ixgbe_add_sanmac_netdev(netdev); ++ ++#endif /* (HAVE_NETDEV_STORAGE_ADDRESS) && (NETDEV_HW_ADDR_T_SAN) */ ++ DPRINTK(PROBE, INFO, "Intel(R) 10 Gigabit Network Connection\n"); ++ cards_found++; ++ return 0; ++ ++err_register: ++ ixgbe_clear_interrupt_scheme(adapter); ++ ixgbe_release_hw_control(adapter); ++err_sw_init: ++ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) ++ ixgbe_disable_sriov(adapter); ++ clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); ++ del_timer_sync(&adapter->sfp_timer); ++ cancel_work_sync(&adapter->sfp_task); ++ cancel_work_sync(&adapter->multispeed_fiber_task); ++ cancel_work_sync(&adapter->sfp_config_module_task); ++#ifdef IXGBE_TCP_TIMER ++ iounmap(adapter->msix_addr); ++err_map_msix: ++#endif ++ iounmap(hw->hw_addr); ++err_ioremap: ++ free_netdev(netdev); ++err_alloc_etherdev: ++ pci_release_regions(pdev); ++err_pci_reg: ++err_dma: ++ ++ pci_disable_device(pdev); ++ return err; ++} ++ ++/** ++ * ixgbe_remove - Device Removal Routine ++ * @pdev: PCI device information struct ++ * ++ * ixgbe_remove is called by the PCI subsystem to alert the driver ++ * that it should release a PCI device. The could be caused by a ++ * Hot-Plug event, or because the driver is going to be removed from ++ * memory. 
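The PCI-Express warning above triggers when the negotiated link cannot carry 10 GbE line rate: a 2.5 GT/s (gen1) link at x4 or narrower, or any link at x2 or narrower. The same predicate in isolation, with small integer encodings standing in for the ixgbe_bus_speed/ixgbe_bus_width enums:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative encodings; the driver compares the enum values directly. */
enum { BUS_SPEED_2500 = 2500, BUS_SPEED_5000 = 5000 };
enum { WIDTH_X1 = 1, WIDTH_X2 = 2, WIDTH_X4 = 4, WIDTH_X8 = 8 };

/* Same condition as the DPRINTK(PROBE, WARNING, ...) check above. */
static bool pcie_bw_insufficient(int speed, int width)
{
        return (speed == BUS_SPEED_2500 && width <= WIDTH_X4) ||
               (width <= WIDTH_X2);
}

int main(void)
{
        printf("gen1 x4: %s\n",
               pcie_bw_insufficient(BUS_SPEED_2500, WIDTH_X4) ? "warn" : "ok");
        printf("gen2 x8: %s\n",
               pcie_bw_insufficient(BUS_SPEED_5000, WIDTH_X8) ? "warn" : "ok");
        return 0;
}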
++ **/ ++static void __devexit ixgbe_remove(struct pci_dev *pdev) ++{ ++ struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); ++ struct net_device *netdev = adapter->netdev; ++ ++ set_bit(__IXGBE_DOWN, &adapter->state); ++ /* ++ * clear the module not found bit to make sure the worker won't ++ * reschedule ++ */ ++ clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); ++ del_timer_sync(&adapter->watchdog_timer); ++ del_timer_sync(&adapter->sfp_timer); ++ cancel_work_sync(&adapter->reset_task); ++ cancel_work_sync(&adapter->watchdog_task); ++ cancel_work_sync(&adapter->sfp_task); ++#ifdef HAVE_TX_MQ ++ cancel_work_sync(&adapter->fdir_reinit_task); ++#endif ++ cancel_work_sync(&adapter->check_overtemp_task); ++ cancel_work_sync(&adapter->multispeed_fiber_task); ++ cancel_work_sync(&adapter->sfp_config_module_task); ++ flush_scheduled_work(); ++ ++ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { ++ adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED; ++ dca_remove_requester(&pdev->dev); ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1); ++ } ++ ++#ifdef IXGBE_FCOE ++ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) ++ ixgbe_cleanup_fcoe(adapter); ++ ++#endif /* IXGBE_FCOE */ ++#if defined(HAVE_NETDEV_STORAGE_ADDRESS) && defined(NETDEV_HW_ADDR_T_SAN) ++ /* remove the added san mac */ ++ ixgbe_del_sanmac_netdev(netdev); ++ ++#endif /* (HAVE_NETDEV_STORAGE_ADDRESS) && (NETDEV_HW_ADDR_T_SAN) */ ++ if (adapter->netdev_registered) { ++ unregister_netdev(netdev); ++ adapter->netdev_registered = false; ++ } ++ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) ++ ixgbe_disable_sriov(adapter); ++ ++ ixgbe_clear_interrupt_scheme(adapter); ++ ixgbe_release_hw_control(adapter); ++ ++#ifdef IXGBE_TCP_TIMER ++ iounmap(adapter->msix_addr); ++#endif ++ iounmap(adapter->hw.hw_addr); ++ pci_release_regions(pdev); ++ ++ DPRINTK(PROBE, INFO, "complete\n"); ++ free_netdev(netdev); ++ ++ pci_disable_pcie_error_reporting(pdev); ++ ++ pci_disable_device(pdev); ++} ++ ++u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg) ++{ ++ u16 value; ++ struct ixgbe_adapter *adapter = hw->back; ++ ++ pci_read_config_word(adapter->pdev, reg, &value); ++ return value; ++} ++ ++void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value) ++{ ++ struct ixgbe_adapter *adapter = hw->back; ++ ++ pci_write_config_word(adapter->pdev, reg, value); ++} ++ ++#ifdef HAVE_PCI_ERS ++/** ++ * ixgbe_io_error_detected - called when PCI error is detected ++ * @pdev: Pointer to PCI device ++ * @state: The current pci connection state ++ * ++ * This function is called after a PCI bus error affecting ++ * this device has been detected. ++ */ ++static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, ++ pci_channel_state_t state) ++{ ++ struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); ++ struct net_device *netdev = adapter->netdev; ++ ++ netif_device_detach(netdev); ++ ++ if (state == pci_channel_io_perm_failure) ++ return PCI_ERS_RESULT_DISCONNECT; ++ ++ if (netif_running(netdev)) ++ ixgbe_down(adapter); ++ pci_disable_device(pdev); ++ ++ /* Request a slot reset. */ ++ return PCI_ERS_RESULT_NEED_RESET; ++} ++ ++/** ++ * ixgbe_io_slot_reset - called after the pci bus has been reset. ++ * @pdev: Pointer to PCI device ++ * ++ * Restart the card from scratch, as if from a cold-boot. 
++ */ ++static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev) ++{ ++ struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); ++ pci_ers_result_t result; ++ ++ if (pci_enable_device(pdev)) { ++ DPRINTK(PROBE, ERR, ++ "Cannot re-enable PCI device after reset.\n"); ++ result = PCI_ERS_RESULT_DISCONNECT; ++ } else { ++ pci_set_master(pdev); ++ pci_restore_state(pdev); ++ /* ++ * After second error pci->state_saved is false, this ++ * resets it so EEH doesn't break. ++ */ ++ pci_save_state(pdev); ++ ++ pci_wake_from_d3(pdev, false); ++ ++ ixgbe_reset(adapter); ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); ++ result = PCI_ERS_RESULT_RECOVERED; ++ } ++ ++ pci_cleanup_aer_uncorrect_error_status(pdev); ++ ++ return result; ++} ++ ++/** ++ * ixgbe_io_resume - called when traffic can start flowing again. ++ * @pdev: Pointer to PCI device ++ * ++ * This callback is called when the error recovery driver tells us that ++ * its OK to resume normal operation. ++ */ ++static void ixgbe_io_resume(struct pci_dev *pdev) ++{ ++ struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); ++ struct net_device *netdev = adapter->netdev; ++ ++ if (netif_running(netdev)) { ++ if (ixgbe_up(adapter)) { ++ DPRINTK(PROBE, INFO, "ixgbe_up failed after reset\n"); ++ return; ++ } ++ } ++ ++ netif_device_attach(netdev); ++} ++ ++static struct pci_error_handlers ixgbe_err_handler = { ++ .error_detected = ixgbe_io_error_detected, ++ .slot_reset = ixgbe_io_slot_reset, ++ .resume = ixgbe_io_resume, ++}; ++ ++#endif ++static struct pci_driver ixgbe_driver = { ++ .name = ixgbe_driver_name, ++ .id_table = ixgbe_pci_tbl, ++ .probe = ixgbe_probe, ++ .remove = __devexit_p(ixgbe_remove), ++#ifdef CONFIG_PM ++ .suspend = ixgbe_suspend, ++ .resume = ixgbe_resume, ++#endif ++#ifndef USE_REBOOT_NOTIFIER ++ .shutdown = ixgbe_shutdown, ++#endif ++#ifdef HAVE_PCI_ERS ++ .err_handler = &ixgbe_err_handler ++#endif ++}; ++ ++bool ixgbe_is_ixgbe(struct pci_dev *pcidev) ++{ ++ if (pci_dev_driver(pcidev) != &ixgbe_driver) ++ return false; ++ else ++ return true; ++} ++ ++/** ++ * ixgbe_init_module - Driver Registration Routine ++ * ++ * ixgbe_init_module is the first routine called when the driver is ++ * loaded. All it does is register with the PCI subsystem. ++ **/ ++static int __init ixgbe_init_module(void) ++{ ++ int ret; ++ printk(KERN_INFO "ixgbe: %s - version %s\n", ixgbe_driver_string, ++ ixgbe_driver_version); ++ ++ printk(KERN_INFO "%s\n", ixgbe_copyright); ++ ++#ifndef CONFIG_DCB ++ ixgbe_dcb_netlink_register(); ++#endif ++#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) ++ dca_register_notify(&dca_notifier); ++ ++#endif ++ ret = pci_register_driver(&ixgbe_driver); ++ return ret; ++} ++ ++module_init(ixgbe_init_module); ++ ++/** ++ * ixgbe_exit_module - Driver Exit Cleanup Routine ++ * ++ * ixgbe_exit_module is called just before the driver is removed ++ * from memory. ++ **/ ++static void __exit ixgbe_exit_module(void) ++{ ++#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) ++ dca_unregister_notify(&dca_notifier); ++#endif ++#ifndef CONFIG_DCB ++ ixgbe_dcb_netlink_unregister(); ++#endif ++ pci_unregister_driver(&ixgbe_driver); ++} ++ ++#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) ++static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event, ++ void *p) ++{ ++ int ret_val; ++ ++ ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event, ++ __ixgbe_notify_dca); ++ ++ return ret_val ? 
NOTIFY_BAD : NOTIFY_DONE; ++} ++#endif ++module_exit(ixgbe_exit_module); ++ ++/* ixgbe_main.c */ ++ +diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_mbx.c linux-2.6.22-50/drivers/net/ixgbe/ixgbe_mbx.c +--- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_mbx.c 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_mbx.c 2010-08-25 17:56:26.000000000 -0400 +@@ -0,0 +1,468 @@ ++/******************************************************************************* ++ ++ Intel 10 Gigabit PCI Express Linux driver ++ Copyright(c) 1999 - 2010 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ You should have received a copy of the GNU General Public License along with ++ this program; if not, write to the Free Software Foundation, Inc., ++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#include "ixgbe_type.h" ++#include "ixgbe_mbx.h" ++ ++/** ++ * ixgbe_read_mbx - Reads a message from the mailbox ++ * @hw: pointer to the HW structure ++ * @msg: The message buffer ++ * @size: Length of buffer ++ * @mbx_id: id of mailbox to read ++ * ++ * returns SUCCESS if it successfuly read message from buffer ++ **/ ++s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) ++{ ++ struct ixgbe_mbx_info *mbx = &hw->mbx; ++ s32 ret_val = IXGBE_ERR_MBX; ++ ++ /* limit read to size of mailbox */ ++ if (size > mbx->size) ++ size = mbx->size; ++ ++ if (mbx->ops.read) ++ ret_val = mbx->ops.read(hw, msg, size, mbx_id); ++ ++ return ret_val; ++} ++ ++/** ++ * ixgbe_write_mbx - Write a message to the mailbox ++ * @hw: pointer to the HW structure ++ * @msg: The message buffer ++ * @size: Length of buffer ++ * @mbx_id: id of mailbox to write ++ * ++ * returns SUCCESS if it successfully copied message into the buffer ++ **/ ++s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) ++{ ++ struct ixgbe_mbx_info *mbx = &hw->mbx; ++ s32 ret_val = 0; ++ ++ if (size > mbx->size) ++ ret_val = IXGBE_ERR_MBX; ++ ++ else if (mbx->ops.write) ++ ret_val = mbx->ops.write(hw, msg, size, mbx_id); ++ ++ return ret_val; ++} ++ ++/** ++ * ixgbe_check_for_msg - checks to see if someone sent us mail ++ * @hw: pointer to the HW structure ++ * @mbx_id: id of mailbox to check ++ * ++ * returns SUCCESS if the Status bit was found or else ERR_MBX ++ **/ ++s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id) ++{ ++ struct ixgbe_mbx_info *mbx = &hw->mbx; ++ s32 ret_val = IXGBE_ERR_MBX; ++ ++ if (mbx->ops.check_for_msg) ++ ret_val = mbx->ops.check_for_msg(hw, mbx_id); ++ ++ return ret_val; ++} ++ ++/** ++ * ixgbe_check_for_ack - checks to see if someone sent us ACK ++ * @hw: pointer to the HW structure ++ * @mbx_id: id of mailbox to check ++ * ++ * returns SUCCESS if the Status bit was found or else ERR_MBX ++ **/ ++s32 ixgbe_check_for_ack(struct 
ixgbe_hw *hw, u16 mbx_id) ++{ ++ struct ixgbe_mbx_info *mbx = &hw->mbx; ++ s32 ret_val = IXGBE_ERR_MBX; ++ ++ if (mbx->ops.check_for_ack) ++ ret_val = mbx->ops.check_for_ack(hw, mbx_id); ++ ++ return ret_val; ++} ++ ++/** ++ * ixgbe_check_for_rst - checks to see if other side has reset ++ * @hw: pointer to the HW structure ++ * @mbx_id: id of mailbox to check ++ * ++ * returns SUCCESS if the Status bit was found or else ERR_MBX ++ **/ ++s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id) ++{ ++ struct ixgbe_mbx_info *mbx = &hw->mbx; ++ s32 ret_val = IXGBE_ERR_MBX; ++ ++ if (mbx->ops.check_for_rst) ++ ret_val = mbx->ops.check_for_rst(hw, mbx_id); ++ ++ return ret_val; ++} ++ ++/** ++ * ixgbe_poll_for_msg - Wait for message notification ++ * @hw: pointer to the HW structure ++ * @mbx_id: id of mailbox to write ++ * ++ * returns SUCCESS if it successfully received a message notification ++ **/ ++static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id) ++{ ++ struct ixgbe_mbx_info *mbx = &hw->mbx; ++ int countdown = mbx->timeout; ++ ++ if (!countdown || !mbx->ops.check_for_msg) ++ goto out; ++ ++ while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) { ++ countdown--; ++ if (!countdown) ++ break; ++ udelay(mbx->udelay); ++ } ++ ++out: ++ return countdown ? 0 : IXGBE_ERR_MBX; ++} ++ ++/** ++ * ixgbe_poll_for_ack - Wait for message acknowledgement ++ * @hw: pointer to the HW structure ++ * @mbx_id: id of mailbox to write ++ * ++ * returns SUCCESS if it successfully received a message acknowledgement ++ **/ ++static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id) ++{ ++ struct ixgbe_mbx_info *mbx = &hw->mbx; ++ int countdown = mbx->timeout; ++ ++ if (!countdown || !mbx->ops.check_for_ack) ++ goto out; ++ ++ while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) { ++ countdown--; ++ if (!countdown) ++ break; ++ udelay(mbx->udelay); ++ } ++ ++out: ++ return countdown ? 0 : IXGBE_ERR_MBX; ++} ++ ++/** ++ * ixgbe_read_posted_mbx - Wait for message notification and receive message ++ * @hw: pointer to the HW structure ++ * @msg: The message buffer ++ * @size: Length of buffer ++ * @mbx_id: id of mailbox to write ++ * ++ * returns SUCCESS if it successfully received a message notification and ++ * copied it into the receive buffer. 
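ixgbe_poll_for_msg() and ixgbe_poll_for_ack() above share one shape: spin for at most mbx->timeout iterations, sleeping mbx->udelay microseconds between hardware checks, and report IXGBE_ERR_MBX if the countdown runs out. A zero timeout means "not configured", which is also why the posted write path below refuses to send without one. A standalone sketch of that countdown loop, with the mailbox check replaced by a caller-supplied predicate:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* check() returns true once the awaited condition (message or ack pending)
 * is met, mirroring ops.check_for_msg()/check_for_ack() reporting success. */
static int poll_with_timeout(bool (*check)(void *), void *arg,
                             int countdown, unsigned int delay_us)
{
        if (!countdown)
                return -1;                      /* no timeout configured */

        while (countdown && !check(arg)) {
                if (!--countdown)
                        break;
                usleep(delay_us);
        }
        return countdown ? 0 : -1;              /* 0 = success, -1 = timed out */
}

static bool ready_after_three(void *arg)
{
        int *calls = arg;
        return ++(*calls) >= 3;
}

int main(void)
{
        int calls = 0;

        printf("poll result: %d (checks made: %d)\n",
               poll_with_timeout(ready_after_three, &calls, 10, 100), calls);
        return 0;
}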
++ **/ ++s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) ++{ ++ struct ixgbe_mbx_info *mbx = &hw->mbx; ++ s32 ret_val = IXGBE_ERR_MBX; ++ ++ if (!mbx->ops.read) ++ goto out; ++ ++ ret_val = ixgbe_poll_for_msg(hw, mbx_id); ++ ++ /* if ack received read message, otherwise we timed out */ ++ if (!ret_val) ++ ret_val = mbx->ops.read(hw, msg, size, mbx_id); ++out: ++ return ret_val; ++} ++ ++/** ++ * ixgbe_write_posted_mbx - Write a message to the mailbox, wait for ack ++ * @hw: pointer to the HW structure ++ * @msg: The message buffer ++ * @size: Length of buffer ++ * @mbx_id: id of mailbox to write ++ * ++ * returns SUCCESS if it successfully copied message into the buffer and ++ * received an ack to that message within delay * timeout period ++ **/ ++s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, ++ u16 mbx_id) ++{ ++ struct ixgbe_mbx_info *mbx = &hw->mbx; ++ s32 ret_val = IXGBE_ERR_MBX; ++ ++ /* exit if either we can't write or there isn't a defined timeout */ ++ if (!mbx->ops.write || !mbx->timeout) ++ goto out; ++ ++ /* send msg */ ++ ret_val = mbx->ops.write(hw, msg, size, mbx_id); ++ ++ /* if msg sent wait until we receive an ack */ ++ if (!ret_val) ++ ret_val = ixgbe_poll_for_ack(hw, mbx_id); ++out: ++ return ret_val; ++} ++ ++/** ++ * ixgbe_init_mbx_ops_generic - Initialize MB function pointers ++ * @hw: pointer to the HW structure ++ * ++ * Setups up the mailbox read and write message function pointers ++ **/ ++void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw) ++{ ++ struct ixgbe_mbx_info *mbx = &hw->mbx; ++ ++ mbx->ops.read_posted = ixgbe_read_posted_mbx; ++ mbx->ops.write_posted = ixgbe_write_posted_mbx; ++} ++ ++static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index) ++{ ++ u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index)); ++ s32 ret_val = IXGBE_ERR_MBX; ++ ++ if (mbvficr & mask) { ++ ret_val = 0; ++ IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask); ++ } ++ ++ return ret_val; ++} ++ ++/** ++ * ixgbe_check_for_msg_pf - checks to see if the VF has sent mail ++ * @hw: pointer to the HW structure ++ * @vf_number: the VF index ++ * ++ * returns SUCCESS if the VF has set the Status bit or else ERR_MBX ++ **/ ++static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number) ++{ ++ s32 ret_val = IXGBE_ERR_MBX; ++ s32 index = IXGBE_MBVFICR_INDEX(vf_number); ++ u32 vf_bit = vf_number % 16; ++ ++ if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit, ++ index)) { ++ ret_val = 0; ++ hw->mbx.stats.reqs++; ++ } ++ ++ return ret_val; ++} ++ ++/** ++ * ixgbe_check_for_ack_pf - checks to see if the VF has ACKed ++ * @hw: pointer to the HW structure ++ * @vf_number: the VF index ++ * ++ * returns SUCCESS if the VF has set the Status bit or else ERR_MBX ++ **/ ++static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number) ++{ ++ s32 ret_val = IXGBE_ERR_MBX; ++ s32 index = IXGBE_MBVFICR_INDEX(vf_number); ++ u32 vf_bit = vf_number % 16; ++ ++ if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit, ++ index)) { ++ ret_val = 0; ++ hw->mbx.stats.acks++; ++ } ++ ++ return ret_val; ++} ++ ++/** ++ * ixgbe_check_for_rst_pf - checks to see if the VF has reset ++ * @hw: pointer to the HW structure ++ * @vf_number: the VF index ++ * ++ * returns SUCCESS if the VF has set the Status bit or else ERR_MBX ++ **/ ++static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number) ++{ ++ u32 reg_offset = (vf_number < 32) ? 
0 : 1; ++ u32 vf_shift = vf_number % 32; ++ u32 vflre = 0; ++ s32 ret_val = IXGBE_ERR_MBX; ++ ++ if (hw->mac.type == ixgbe_mac_82599EB) ++ vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset)); ++ ++ if (vflre & (1 << vf_shift)) { ++ ret_val = 0; ++ IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift)); ++ hw->mbx.stats.rsts++; ++ } ++ ++ return ret_val; ++} ++ ++/** ++ * ixgbe_obtain_mbx_lock_pf - obtain mailbox lock ++ * @hw: pointer to the HW structure ++ * @vf_number: the VF index ++ * ++ * return SUCCESS if we obtained the mailbox lock ++ **/ ++static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number) ++{ ++ s32 ret_val = IXGBE_ERR_MBX; ++ u32 p2v_mailbox; ++ ++ /* Take ownership of the buffer */ ++ IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_PFU); ++ ++ /* reserve mailbox for vf use */ ++ p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number)); ++ if (p2v_mailbox & IXGBE_PFMAILBOX_PFU) ++ ret_val = 0; ++ ++ return ret_val; ++} ++ ++/** ++ * ixgbe_write_mbx_pf - Places a message in the mailbox ++ * @hw: pointer to the HW structure ++ * @msg: The message buffer ++ * @size: Length of buffer ++ * @vf_number: the VF index ++ * ++ * returns SUCCESS if it successfully copied message into the buffer ++ **/ ++static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, ++ u16 vf_number) ++{ ++ s32 ret_val; ++ u16 i; ++ ++ /* lock the mailbox to prevent pf/vf race condition */ ++ ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number); ++ if (ret_val) ++ goto out_no_write; ++ ++ /* flush msg and acks as we are overwriting the message buffer */ ++ ixgbe_check_for_msg_pf(hw, vf_number); ++ ixgbe_check_for_ack_pf(hw, vf_number); ++ ++ /* copy the caller specified message to the mailbox memory buffer */ ++ for (i = 0; i < size; i++) ++ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, msg[i]); ++ ++ /* Interrupt VF to tell it a message has been sent and release buffer*/ ++ IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS); ++ ++ /* update stats */ ++ hw->mbx.stats.msgs_tx++; ++ ++out_no_write: ++ return ret_val; ++ ++} ++ ++/** ++ * ixgbe_read_mbx_pf - Read a message from the mailbox ++ * @hw: pointer to the HW structure ++ * @msg: The message buffer ++ * @size: Length of buffer ++ * @vf_number: the VF index ++ * ++ * This function copies a message from the mailbox buffer to the caller's ++ * memory buffer. The presumption is that the caller knows that there was ++ * a message due to a VF request so no polling for message is needed. 
++ **/ ++static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, ++ u16 vf_number) ++{ ++ s32 ret_val; ++ u16 i; ++ ++ /* lock the mailbox to prevent pf/vf race condition */ ++ ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number); ++ if (ret_val) ++ goto out_no_read; ++ ++ /* copy the message to the mailbox memory buffer */ ++ for (i = 0; i < size; i++) ++ msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i); ++ ++ /* Acknowledge the message and release buffer */ ++ IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_ACK); ++ ++ /* update stats */ ++ hw->mbx.stats.msgs_rx++; ++ ++out_no_read: ++ return ret_val; ++} ++ ++/** ++ * ixgbe_init_mbx_params_pf - set initial values for pf mailbox ++ * @hw: pointer to the HW structure ++ * ++ * Initializes the hw->mbx struct to correct values for pf mailbox ++ */ ++void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw) ++{ ++ struct ixgbe_mbx_info *mbx = &hw->mbx; ++ ++ if (hw->mac.type != ixgbe_mac_82599EB) ++ return; ++ ++ mbx->timeout = 0; ++ mbx->udelay = 0; ++ ++ mbx->size = IXGBE_VFMAILBOX_SIZE; ++ ++ mbx->ops.read = ixgbe_read_mbx_pf; ++ mbx->ops.write = ixgbe_write_mbx_pf; ++ mbx->ops.read_posted = ixgbe_read_posted_mbx; ++ mbx->ops.write_posted = ixgbe_write_posted_mbx; ++ mbx->ops.check_for_msg = ixgbe_check_for_msg_pf; ++ mbx->ops.check_for_ack = ixgbe_check_for_ack_pf; ++ mbx->ops.check_for_rst = ixgbe_check_for_rst_pf; ++ ++ mbx->stats.msgs_tx = 0; ++ mbx->stats.msgs_rx = 0; ++ mbx->stats.reqs = 0; ++ mbx->stats.acks = 0; ++ mbx->stats.rsts = 0; ++} ++ +diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_mbx.h linux-2.6.22-50/drivers/net/ixgbe/ixgbe_mbx.h +--- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_mbx.h 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_mbx.h 2010-08-25 17:56:26.000000000 -0400 +@@ -0,0 +1,94 @@ ++/******************************************************************************* ++ ++ Intel 10 Gigabit PCI Express Linux driver ++ Copyright(c) 1999 - 2010 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ You should have received a copy of the GNU General Public License along with ++ this program; if not, write to the Free Software Foundation, Inc., ++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#ifndef _IXGBE_MBX_H_ ++#define _IXGBE_MBX_H_ ++ ++#include "ixgbe_type.h" ++ ++#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ ++#define IXGBE_ERR_MBX -100 ++ ++#define IXGBE_VFMAILBOX 0x002FC ++#define IXGBE_VFMBMEM 0x00200 ++ ++#define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * x)) ++#define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * vfn)) ++ ++#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */ ++#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ ++#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ ++#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ ++#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ ++ ++#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */ ++#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ ++#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */ ++#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ ++ ++ ++/* If it's a IXGBE_VF_* msg then it originates in the VF and is sent to the ++ * PF. The reverse is true if it is IXGBE_PF_*. ++ * Message ACK's are the value or'd with 0xF0000000 ++ */ ++#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with ++ * this are the ACK */ ++#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with ++ * this are the NACK */ ++#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still ++ clear to send requests */ ++#define IXGBE_VT_MSGINFO_SHIFT 16 ++/* bits 23:16 are used for exra info for certain messages */ ++#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT) ++ ++#define IXGBE_VF_RESET 0x01 /* VF requests reset */ ++#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ ++#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ ++#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ ++#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ ++ ++/* length of permanent address message returned from PF */ ++#define IXGBE_VF_PERMADDR_MSG_LEN 4 ++/* word in permanent address message with the current multicast type */ ++#define IXGBE_VF_MC_TYPE_WORD 3 ++ ++#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */ ++ ++#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ ++#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */ ++ ++s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16); ++s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16); ++s32 ixgbe_read_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16); ++s32 ixgbe_write_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16); ++s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16); ++s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16); ++s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16); ++void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw); ++void ixgbe_init_mbx_params_pf(struct ixgbe_hw *); ++ ++#endif /* _IXGBE_MBX_H_ */ +diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_osdep.h linux-2.6.22-50/drivers/net/ixgbe/ixgbe_osdep.h +--- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_osdep.h 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_osdep.h 2010-08-25 17:56:26.000000000 -0400 +@@ -0,0 +1,107 @@ ++/******************************************************************************* ++ ++ Intel 10 Gigabit PCI Express Linux driver ++ 
Copyright(c) 1999 - 2010 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ You should have received a copy of the GNU General Public License along with ++ this program; if not, write to the Free Software Foundation, Inc., ++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++ ++/* glue for the OS independent part of ixgbe ++ * includes register access macros ++ */ ++ ++#ifndef _IXGBE_OSDEP_H_ ++#define _IXGBE_OSDEP_H_ ++ ++#include ++#include ++#include ++#include ++#include ++#include "kcompat.h" ++ ++ ++#ifndef msleep ++#define msleep(x) do { if(in_interrupt()) { \ ++ /* Don't mdelay in interrupt context! */ \ ++ BUG(); \ ++ } else { \ ++ msleep(x); \ ++ } } while (0) ++ ++#endif ++ ++#undef ASSERT ++ ++#ifdef DBG ++#define hw_dbg(hw, S, A...) printk(KERN_DEBUG S, ## A) ++#else ++#define hw_dbg(hw, S, A...) do {} while (0) ++#endif ++ ++#ifdef DBG ++#define IXGBE_WRITE_REG(a, reg, value) do {\ ++ switch (reg) { \ ++ case IXGBE_EIMS: \ ++ case IXGBE_EIMC: \ ++ case IXGBE_EIAM: \ ++ case IXGBE_EIAC: \ ++ case IXGBE_EICR: \ ++ case IXGBE_EICS: \ ++ printk("%s: Reg - 0x%05X, value - 0x%08X\n", __FUNCTION__, \ ++ reg, (u32)(value)); \ ++ default: \ ++ break; \ ++ } \ ++ writel((value), ((a)->hw_addr + (reg))); \ ++} while (0) ++#else ++#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg))) ++#endif ++ ++#define IXGBE_READ_REG(a, reg) readl((a)->hw_addr + (reg)) ++ ++#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) ( \ ++ writel((value), ((a)->hw_addr + (reg) + ((offset) << 2)))) ++ ++#define IXGBE_READ_REG_ARRAY(a, reg, offset) ( \ ++ readl((a)->hw_addr + (reg) + ((offset) << 2))) ++ ++#ifndef writeq ++#define writeq(val, addr) writel((u32) (val), addr); \ ++ writel((u32) (val >> 32), (addr + 4)); ++#endif ++ ++#define IXGBE_WRITE_REG64(a, reg, value) writeq((value), ((a)->hw_addr + (reg))) ++ ++#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS) ++struct ixgbe_hw; ++extern u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg); ++extern void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value); ++#define IXGBE_READ_PCIE_WORD ixgbe_read_pci_cfg_word ++#define IXGBE_WRITE_PCIE_WORD ixgbe_write_pci_cfg_word ++#define IXGBE_EEPROM_GRANT_ATTEMPS 100 ++#define IXGBE_HTONL(_i) htonl(_i) ++#define IXGBE_HTONS(_i) htons(_i) ++ ++#endif /* _IXGBE_OSDEP_H_ */ +diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_param.c linux-2.6.22-50/drivers/net/ixgbe/ixgbe_param.c +--- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_param.c 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_param.c 2010-08-25 17:56:26.000000000 -0400 +@@ -0,0 +1,1232 @@ ++/******************************************************************************* ++ ++ Intel 10 Gigabit PCI Express Linux driver ++ Copyright(c) 
1999 - 2010 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ You should have received a copy of the GNU General Public License along with ++ this program; if not, write to the Free Software Foundation, Inc., ++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#include ++#include ++ ++#include "ixgbe.h" ++ ++/* This is the only thing that needs to be changed to adjust the ++ * maximum number of ports that the driver can manage. ++ */ ++ ++#define IXGBE_MAX_NIC 32 ++ ++#define OPTION_UNSET -1 ++#define OPTION_DISABLED 0 ++#define OPTION_ENABLED 1 ++ ++#define STRINGIFY(foo) #foo /* magic for getting defines into strings */ ++#define XSTRINGIFY(bar) STRINGIFY(bar) ++ ++/* All parameters are treated the same, as an integer array of values. ++ * This macro just reduces the need to repeat the same declaration code ++ * over and over (plus this helps to avoid typo bugs). ++ */ ++ ++#define IXGBE_PARAM_INIT { [0 ... IXGBE_MAX_NIC] = OPTION_UNSET } ++#ifndef module_param_array ++/* Module Parameters are always initialized to -1, so that the driver ++ * can tell the difference between no user specified value or the ++ * user asking for the default value. ++ * The true default values are loaded in when ixgbe_check_options is called. ++ * ++ * This is a GCC extension to ANSI C. ++ * See the item "Labeled Elements in Initializers" in the section ++ * "Extensions to the C Language Family" of the GCC documentation. 
++ */ ++ ++#define IXGBE_PARAM(X, desc) \ ++ static const int __devinitdata X[IXGBE_MAX_NIC+1] = IXGBE_PARAM_INIT; \ ++ MODULE_PARM(X, "1-" __MODULE_STRING(IXGBE_MAX_NIC) "i"); \ ++ MODULE_PARM_DESC(X, desc); ++#else ++#define IXGBE_PARAM(X, desc) \ ++ static int __devinitdata X[IXGBE_MAX_NIC+1] = IXGBE_PARAM_INIT; \ ++ static unsigned int num_##X; \ ++ module_param_array_named(X, X, int, &num_##X, 0); \ ++ MODULE_PARM_DESC(X, desc); ++#endif ++ ++/* IntMode (Interrupt Mode) ++ * ++ * Valid Range: 0-2 ++ * - 0 - Legacy Interrupt ++ * - 1 - MSI Interrupt ++ * - 2 - MSI-X Interrupt(s) ++ * ++ * Default Value: 2 ++ */ ++IXGBE_PARAM(InterruptType, "Change Interrupt Mode (0=Legacy, 1=MSI, 2=MSI-X), default IntMode (deprecated)"); ++IXGBE_PARAM(IntMode, "Change Interrupt Mode (0=Legacy, 1=MSI, 2=MSI-X), default 2"); ++#define IXGBE_INT_LEGACY 0 ++#define IXGBE_INT_MSI 1 ++#define IXGBE_INT_MSIX 2 ++#define IXGBE_DEFAULT_INT IXGBE_INT_MSIX ++ ++IXGBE_PARAM(Node, "set the starting node to allocate memory on, default -1"); ++ ++/* MQ - Multiple Queue enable/disable ++ * ++ * Valid Range: 0, 1 ++ * - 0 - disables MQ ++ * - 1 - enables MQ ++ * ++ * Default Value: 1 ++ */ ++ ++IXGBE_PARAM(MQ, "Disable or enable Multiple Queues, default 1"); ++ ++#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) ++/* DCA - Direct Cache Access (DCA) Control ++ * ++ * This option allows the device to hint to DCA enabled processors ++ * which CPU should have its cache warmed with the data being ++ * transferred over PCIe. This can increase performance by reducing ++ * cache misses. ixgbe hardware supports DCA for: ++ * tx descriptor writeback ++ * rx descriptor writeback ++ * rx data ++ * rx data header only (in packet split mode) ++ * ++ * enabling option 2 can cause cache thrash in some tests, particularly ++ * if the CPU is completely utilized ++ * ++ * Valid Range: 0 - 2 ++ * - 0 - disables DCA ++ * - 1 - enables DCA ++ * - 2 - enables DCA with rx data included ++ * ++ * Default Value: 2 ++ */ ++ ++#define IXGBE_MAX_DCA 2 ++ ++IXGBE_PARAM(DCA, "Disable or enable Direct Cache Access, 0=disabled, 1=descriptor only, 2=descriptor and data"); ++ ++#endif ++/* RSS - Receive-Side Scaling (RSS) Descriptor Queues ++ * ++ * Valid Range: 0-16 ++ * - 0 - disables RSS ++ * - 1 - enables RSS and sets the Desc. Q's to min(16, num_online_cpus()). ++ * - 2-16 - enables RSS and sets the Desc. Q's to the specified value. ++ * ++ * Default Value: 1 ++ */ ++ ++IXGBE_PARAM(RSS, "Number of Receive-Side Scaling Descriptor Queues, default 1=number of cpus"); ++ ++/* VMDQ - Virtual Machine Device Queues (VMDQ) ++ * ++ * Valid Range: 1-16 ++ * - 1 Disables VMDQ by allocating only a single queue. ++ * - 2-16 - enables VMDQ and sets the Desc. Q's to the specified value. 
++ * ++ * Default Value: 1 ++ */ ++ ++#define IXGBE_DEFAULT_NUM_VMDQ 8 ++ ++IXGBE_PARAM(VMDQ, "Number of Virtual Machine Device Queues: 0/1 = disable, 2-16 enable (default=" XSTRINGIFY(IXGBE_DEFAULT_NUM_VMDQ) ")"); ++ ++#ifdef CONFIG_PCI_IOV ++/* max_vfs - SR I/O Virtualization ++ * ++ * Valid Range: 0-63 ++ * - 0 Disables SR-IOV ++ * - 1 Enables SR-IOV to default number of VFs enabled ++ * - 2-63 - enables SR-IOV and sets the number of VFs enabled ++ * ++ * Default Value: 0 ++ */ ++ ++#define MAX_SRIOV_VFS 63 ++ ++IXGBE_PARAM(max_vfs, "Number of Virtual Functions: 0 = disable (default), 1 = default settings, 2-" XSTRINGIFY(MAX_SRIOV_VFS) " = enable this many VFs"); ++#endif ++ ++/* Interrupt Throttle Rate (interrupts/sec) ++ * ++ * Valid Range: 956-488281 (0=off, 1=dynamic) ++ * ++ * Default Value: 8000 ++ */ ++#define DEFAULT_ITR 8000 ++IXGBE_PARAM(InterruptThrottleRate, "Maximum interrupts per second, per vector, (956-488281), default 8000"); ++#define MAX_ITR IXGBE_MAX_INT_RATE ++#define MIN_ITR IXGBE_MIN_INT_RATE ++ ++#ifndef IXGBE_NO_LLI ++/* LLIPort (Low Latency Interrupt TCP Port) ++ * ++ * Valid Range: 0 - 65535 ++ * ++ * Default Value: 0 (disabled) ++ */ ++IXGBE_PARAM(LLIPort, "Low Latency Interrupt TCP Port (0-65535)"); ++ ++#define DEFAULT_LLIPORT 0 ++#define MAX_LLIPORT 0xFFFF ++#define MIN_LLIPORT 0 ++ ++/* LLIPush (Low Latency Interrupt on TCP Push flag) ++ * ++ * Valid Range: 0,1 ++ * ++ * Default Value: 0 (disabled) ++ */ ++IXGBE_PARAM(LLIPush, "Low Latency Interrupt on TCP Push flag (0,1)"); ++ ++#define DEFAULT_LLIPUSH 0 ++#define MAX_LLIPUSH 1 ++#define MIN_LLIPUSH 0 ++ ++/* LLISize (Low Latency Interrupt on Packet Size) ++ * ++ * Valid Range: 0 - 1500 ++ * ++ * Default Value: 0 (disabled) ++ */ ++IXGBE_PARAM(LLISize, "Low Latency Interrupt on Packet Size (0-1500)"); ++ ++#define DEFAULT_LLISIZE 0 ++#define MAX_LLISIZE 1500 ++#define MIN_LLISIZE 0 ++ ++/* LLIEType (Low Latency Interrupt Ethernet Type) ++ * ++ * Valid Range: 0 - 0x8fff ++ * ++ * Default Value: 0 (disabled) ++ */ ++IXGBE_PARAM(LLIEType, "Low Latency Interrupt Ethernet Protocol Type"); ++ ++#define DEFAULT_LLIETYPE 0 ++#define MAX_LLIETYPE 0x8fff ++#define MIN_LLIETYPE 0 ++ ++/* LLIVLANP (Low Latency Interrupt on VLAN priority threshold) ++ * ++ * Valid Range: 0 - 7 ++ * ++ * Default Value: 0 (disabled) ++ */ ++IXGBE_PARAM(LLIVLANP, "Low Latency Interrupt on VLAN priority threshold"); ++ ++#define DEFAULT_LLIVLANP 0 ++#define MAX_LLIVLANP 7 ++#define MIN_LLIVLANP 0 ++ ++#endif /* IXGBE_NO_LLI */ ++/* Rx buffer mode ++ * ++ * Valid Range: 0-2 0 = 1buf_mode_always, 1 = ps_mode_always and 2 = optimal ++ * ++ * Default Value: 2 ++ */ ++IXGBE_PARAM(RxBufferMode, "0=1 descriptor per packet,\n" ++ "\t\t\t1=use packet split, multiple descriptors per jumbo frame\n" ++ "\t\t\t2 (default)=use 1buf mode for 1500 mtu, packet split for jumbo"); ++ ++#define IXGBE_RXBUFMODE_1BUF_ALWAYS 0 ++#define IXGBE_RXBUFMODE_PS_ALWAYS 1 ++#define IXGBE_RXBUFMODE_OPTIMAL 2 ++#define IXGBE_DEFAULT_RXBUFMODE IXGBE_RXBUFMODE_OPTIMAL ++ ++#ifdef HAVE_TX_MQ ++/* Flow Director filtering mode ++ * ++ * Valid Range: 0-2 0 = off, 1 = Hashing (ATR), and 2 = perfect filters ++ * ++ * Default Value: 1 (ATR) ++ */ ++IXGBE_PARAM(FdirMode, "Flow Director filtering modes:\n" ++ "\t\t\t0 = Filtering off\n" ++ "\t\t\t1 = Signature Hashing filters (SW ATR)\n" ++ "\t\t\t2 = Perfect Filters"); ++ ++#define IXGBE_FDIR_FILTER_OFF 0 ++#define IXGBE_FDIR_FILTER_HASH 1 ++#define IXGBE_FDIR_FILTER_PERFECT 2 ++#define IXGBE_DEFAULT_FDIR_FILTER 
IXGBE_FDIR_FILTER_HASH ++ ++/* Flow Director packet buffer allocation level ++ * ++ * Valid Range: 0-2 0 = 8k hash/2k perfect, 1 = 16k hash/4k perfect, ++ * 2 = 32k hash/8k perfect ++ * ++ * Default Value: 0 ++ */ ++IXGBE_PARAM(FdirPballoc, "Flow Director packet buffer allocation level:\n" ++ "\t\t\t0 = 8k hash filters or 2k perfect filters\n" ++ "\t\t\t1 = 16k hash filters or 4k perfect filters\n" ++ "\t\t\t2 = 32k hash filters or 8k perfect filters"); ++ ++#define IXGBE_FDIR_PBALLOC_64K 0 ++#define IXGBE_FDIR_PBALLOC_128K 1 ++#define IXGBE_FDIR_PBALLOC_256K 2 ++#define IXGBE_DEFAULT_FDIR_PBALLOC IXGBE_FDIR_PBALLOC_64K ++ ++/* Software ATR packet sample rate ++ * ++ * Valid Range: 0-100 0 = off, 1-100 = rate of Tx packet inspection ++ * ++ * Default Value: 20 ++ */ ++IXGBE_PARAM(AtrSampleRate, "Software ATR Tx packet sample rate"); ++ ++#define IXGBE_MAX_ATR_SAMPLE_RATE 100 ++#define IXGBE_MIN_ATR_SAMPLE_RATE 1 ++#define IXGBE_ATR_SAMPLE_RATE_OFF 0 ++#define IXGBE_DEFAULT_ATR_SAMPLE_RATE 20 ++#endif /* HAVE_TX_MQ */ ++#ifdef IXGBE_FCOE ++/* FCoE - Fibre Channel over Ethernet Offload Enable/Disable ++ * ++ * Valid Range: 0, 1 ++ * - 0 - disables FCoE Offload ++ * - 1 - enables FCoE Offload ++ * ++ * Default Value: 1 ++ */ ++IXGBE_PARAM(FCoE, "Disable or enable FCoE Offload, default 1"); ++#endif /* IXGBE_FCOE */ ++struct ixgbe_option { ++ enum { enable_option, range_option, list_option } type; ++ const char *name; ++ const char *err; ++ int def; ++ union { ++ struct { /* range_option info */ ++ int min; ++ int max; ++ } r; ++ struct { /* list_option info */ ++ int nr; ++ const struct ixgbe_opt_list { ++ int i; ++ char *str; ++ } *p; ++ } l; ++ } arg; ++}; ++ ++static int __devinit ixgbe_validate_option(unsigned int *value, ++ struct ixgbe_option *opt) ++{ ++ if (*value == OPTION_UNSET) { ++ *value = opt->def; ++ return 0; ++ } ++ ++ switch (opt->type) { ++ case enable_option: ++ switch (*value) { ++ case OPTION_ENABLED: ++ printk(KERN_INFO "ixgbe: %s Enabled\n", opt->name); ++ return 0; ++ case OPTION_DISABLED: ++ printk(KERN_INFO "ixgbe: %s Disabled\n", opt->name); ++ return 0; ++ } ++ break; ++ case range_option: ++ if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { ++ printk(KERN_INFO "ixgbe: %s set to %d\n", opt->name, *value); ++ return 0; ++ } ++ break; ++ case list_option: { ++ int i; ++ const struct ixgbe_opt_list *ent; ++ ++ for (i = 0; i < opt->arg.l.nr; i++) { ++ ent = &opt->arg.l.p[i]; ++ if (*value == ent->i) { ++ if (ent->str[0] != '\0') ++ printk(KERN_INFO "%s\n", ent->str); ++ return 0; ++ } ++ } ++ } ++ break; ++ default: ++ BUG(); ++ } ++ ++ printk(KERN_INFO "ixgbe: Invalid %s specified (%d), %s\n", ++ opt->name, *value, opt->err); ++ *value = opt->def; ++ return -1; ++} ++ ++#define LIST_LEN(l) (sizeof(l) / sizeof(l[0])) ++ ++/** ++ * ixgbe_check_options - Range Checking for Command Line Parameters ++ * @adapter: board private structure ++ * ++ * This routine checks all command line parameters for valid user ++ * input. If an invalid value is given, or if no user specified ++ * value exists, a default value is used. The final value is stored ++ * in a variable in the adapter structure. 
++ **/ ++void __devinit ixgbe_check_options(struct ixgbe_adapter *adapter) ++{ ++ int bd = adapter->bd_number; ++ u32 *aflags = &adapter->flags; ++ struct ixgbe_ring_feature *feature = adapter->ring_feature; ++ ++ if (bd >= IXGBE_MAX_NIC) { ++ printk(KERN_NOTICE ++ "Warning: no configuration for board #%d\n", bd); ++ printk(KERN_NOTICE "Using defaults for all values\n"); ++#ifndef module_param_array ++ bd = IXGBE_MAX_NIC; ++#endif ++ } ++ ++ { /* Interrupt Mode */ ++ unsigned int int_mode; ++ static struct ixgbe_option opt = { ++ .type = range_option, ++ .name = "Interrupt Mode", ++ .err = ++ "using default of "__MODULE_STRING(IXGBE_DEFAULT_INT), ++ .def = IXGBE_DEFAULT_INT, ++ .arg = { .r = { .min = IXGBE_INT_LEGACY, ++ .max = IXGBE_INT_MSIX}} ++ }; ++ ++#ifdef module_param_array ++ if (num_IntMode > bd || num_InterruptType > bd) { ++#endif ++ int_mode = IntMode[bd]; ++ if (int_mode == OPTION_UNSET) ++ int_mode = InterruptType[bd]; ++ ixgbe_validate_option(&int_mode, &opt); ++ switch (int_mode) { ++ case IXGBE_INT_MSIX: ++ if (!(*aflags & IXGBE_FLAG_MSIX_CAPABLE)) ++ printk(KERN_INFO ++ "Ignoring MSI-X setting; " ++ "support unavailable\n"); ++ break; ++ case IXGBE_INT_MSI: ++ if (!(*aflags & IXGBE_FLAG_MSI_CAPABLE)) { ++ printk(KERN_INFO ++ "Ignoring MSI setting; " ++ "support unavailable\n"); ++ } else { ++ *aflags &= ~IXGBE_FLAG_MSIX_CAPABLE; ++ *aflags &= ~IXGBE_FLAG_DCB_CAPABLE; ++ } ++ break; ++ case IXGBE_INT_LEGACY: ++ default: ++ *aflags &= ~IXGBE_FLAG_MSIX_CAPABLE; ++ *aflags &= ~IXGBE_FLAG_MSI_CAPABLE; ++ *aflags &= ~IXGBE_FLAG_DCB_CAPABLE; ++ break; ++ } ++#ifdef module_param_array ++ } else { ++ /* default settings */ ++ if (opt.def == IXGBE_INT_MSIX && ++ *aflags & IXGBE_FLAG_MSIX_CAPABLE) { ++ *aflags |= IXGBE_FLAG_MSIX_CAPABLE; ++ *aflags |= IXGBE_FLAG_MSI_CAPABLE; ++ } else if (opt.def == IXGBE_INT_MSI && ++ *aflags & IXGBE_FLAG_MSI_CAPABLE) { ++ *aflags &= ~IXGBE_FLAG_MSIX_CAPABLE; ++ *aflags |= IXGBE_FLAG_MSI_CAPABLE; ++ *aflags &= ~IXGBE_FLAG_DCB_CAPABLE; ++ } else { ++ *aflags &= ~IXGBE_FLAG_MSIX_CAPABLE; ++ *aflags &= ~IXGBE_FLAG_MSI_CAPABLE; ++ *aflags &= ~IXGBE_FLAG_DCB_CAPABLE; ++ } ++ } ++#endif ++ } ++ { /* Multiple Queue Support */ ++ static struct ixgbe_option opt = { ++ .type = enable_option, ++ .name = "Multiple Queue Support", ++ .err = "defaulting to Enabled", ++ .def = OPTION_ENABLED ++ }; ++ ++#ifdef module_param_array ++ if (num_MQ > bd) { ++#endif ++ unsigned int mq = MQ[bd]; ++ ixgbe_validate_option(&mq, &opt); ++ if (mq) ++ *aflags |= IXGBE_FLAG_MQ_CAPABLE; ++ else ++ *aflags &= ~IXGBE_FLAG_MQ_CAPABLE; ++#ifdef module_param_array ++ } else { ++ if (opt.def == OPTION_ENABLED) ++ *aflags |= IXGBE_FLAG_MQ_CAPABLE; ++ else ++ *aflags &= ~IXGBE_FLAG_MQ_CAPABLE; ++ } ++#endif ++ /* Check Interoperability */ ++ if ((*aflags & IXGBE_FLAG_MQ_CAPABLE) && ++ !(*aflags & IXGBE_FLAG_MSIX_CAPABLE)) { ++ DPRINTK(PROBE, INFO, ++ "Multiple queues are not supported while MSI-X " ++ "is disabled. 
Disabling Multiple Queues.\n"); ++ *aflags &= ~IXGBE_FLAG_MQ_CAPABLE; ++ } ++ } ++#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) ++ { /* Direct Cache Access (DCA) */ ++ static struct ixgbe_option opt = { ++ .type = range_option, ++ .name = "Direct Cache Access (DCA)", ++ .err = "defaulting to Enabled", ++ .def = IXGBE_MAX_DCA, ++ .arg = { .r = { .min = OPTION_DISABLED, ++ .max = IXGBE_MAX_DCA}} ++ }; ++ unsigned int dca = opt.def; ++ ++#ifdef module_param_array ++ if (num_DCA > bd) { ++#endif ++ dca = DCA[bd]; ++ ixgbe_validate_option(&dca, &opt); ++ if (!dca) ++ *aflags &= ~IXGBE_FLAG_DCA_CAPABLE; ++ ++ /* Check Interoperability */ ++ if (!(*aflags & IXGBE_FLAG_DCA_CAPABLE)) { ++ DPRINTK(PROBE, INFO, "DCA is disabled\n"); ++ *aflags &= ~IXGBE_FLAG_DCA_ENABLED; ++ } ++ ++ if (dca == IXGBE_MAX_DCA) { ++ DPRINTK(PROBE, INFO, ++ "DCA enabled for rx data\n"); ++ adapter->flags |= IXGBE_FLAG_DCA_ENABLED_DATA; ++ } ++#ifdef module_param_array ++ } else { ++ /* make sure to clear the capability flag if the ++ * option is disabled by default above */ ++ if (opt.def == OPTION_DISABLED) ++ *aflags &= ~IXGBE_FLAG_DCA_CAPABLE; ++ } ++#endif ++ if (dca == IXGBE_MAX_DCA) ++ adapter->flags |= IXGBE_FLAG_DCA_ENABLED_DATA; ++ } ++#endif /* CONFIG_DCA or CONFIG_DCA_MODULE */ ++ { /* Receive-Side Scaling (RSS) */ ++ static struct ixgbe_option opt = { ++ .type = range_option, ++ .name = "Receive-Side Scaling (RSS)", ++ .err = "using default.", ++ .def = OPTION_ENABLED, ++ .arg = { .r = { .min = OPTION_DISABLED, ++ .max = IXGBE_MAX_RSS_INDICES}} ++ }; ++ unsigned int rss = RSS[bd]; ++ ++#ifdef module_param_array ++ if (num_RSS > bd) { ++#endif ++ if (rss != OPTION_ENABLED) ++ ixgbe_validate_option(&rss, &opt); ++ /* ++ * we cannot use an else since validate option may ++ * have changed the state of RSS ++ */ ++ if (rss == OPTION_ENABLED) { ++ /* ++ * Base it off num_online_cpus() with ++ * a hardware limit cap. ++ */ ++ rss = min(IXGBE_MAX_RSS_INDICES, ++ (int)num_online_cpus()); ++ } ++ feature[RING_F_RSS].indices = rss; ++ if (rss) ++ *aflags |= IXGBE_FLAG_RSS_ENABLED; ++ else ++ *aflags &= ~IXGBE_FLAG_RSS_ENABLED; ++#ifdef module_param_array ++ } else { ++ if (opt.def == OPTION_DISABLED) { ++ *aflags &= ~IXGBE_FLAG_RSS_ENABLED; ++ } else { ++ rss = min(IXGBE_MAX_RSS_INDICES, ++ (int)num_online_cpus()); ++ feature[RING_F_RSS].indices = rss; ++ if (rss) ++ *aflags |= IXGBE_FLAG_RSS_ENABLED; ++ else ++ *aflags &= ~IXGBE_FLAG_RSS_ENABLED; ++ } ++ } ++#endif ++ /* Check Interoperability */ ++ if (*aflags & IXGBE_FLAG_RSS_ENABLED) { ++ if (!(*aflags & IXGBE_FLAG_RSS_CAPABLE)) { ++ DPRINTK(PROBE, INFO, ++ "RSS is not supported on this " ++ "hardware. Disabling RSS.\n"); ++ *aflags &= ~IXGBE_FLAG_RSS_ENABLED; ++ feature[RING_F_RSS].indices = 0; ++ } else if (!(*aflags & IXGBE_FLAG_MQ_CAPABLE)) { ++ DPRINTK(PROBE, INFO, ++ "RSS is not supported while multiple " ++ "queues are disabled. 
" ++ "Disabling RSS.\n"); ++ *aflags &= ~IXGBE_FLAG_RSS_ENABLED; ++ *aflags &= ~IXGBE_FLAG_DCB_CAPABLE; ++ feature[RING_F_RSS].indices = 0; ++ } ++ } ++ } ++ { /* Virtual Machine Device Queues (VMDQ) */ ++ static struct ixgbe_option opt = { ++ .type = range_option, ++ .name = "Virtual Machine Device Queues (VMDQ)", ++ .err = "defaulting to Disabled", ++ .def = OPTION_DISABLED, ++ .arg = { .r = { .min = OPTION_DISABLED, ++ .max = IXGBE_MAX_VMDQ_INDICES ++ }} ++ }; ++ ++#ifdef module_param_array ++ if (num_VMDQ > bd) { ++#endif ++ unsigned int vmdq = VMDQ[bd]; ++ ixgbe_validate_option(&vmdq, &opt); ++ feature[RING_F_VMDQ].indices = vmdq; ++ adapter->flags2 |= IXGBE_FLAG2_VMDQ_DEFAULT_OVERRIDE; ++ /* zero or one both mean disabled from our driver's ++ * perspective */ ++ if (vmdq > 1) ++ *aflags |= IXGBE_FLAG_VMDQ_ENABLED; ++ else ++ *aflags &= ~IXGBE_FLAG_VMDQ_ENABLED; ++#ifdef module_param_array ++ } else { ++ if (opt.def == OPTION_DISABLED) { ++ *aflags &= ~IXGBE_FLAG_VMDQ_ENABLED; ++ } else { ++ feature[RING_F_VMDQ].indices = IXGBE_DEFAULT_NUM_VMDQ; ++ *aflags |= IXGBE_FLAG_VMDQ_ENABLED; ++ } ++ } ++#endif ++ /* Check Interoperability */ ++ if (*aflags & IXGBE_FLAG_VMDQ_ENABLED) { ++ if (!(*aflags & IXGBE_FLAG_VMDQ_CAPABLE)) { ++ DPRINTK(PROBE, INFO, ++ "VMDQ is not supported on this " ++ "hardware. Disabling VMDQ.\n"); ++ *aflags &= ~IXGBE_FLAG_VMDQ_ENABLED; ++ feature[RING_F_VMDQ].indices = 0; ++ } else if (!(*aflags & IXGBE_FLAG_MQ_CAPABLE)) { ++ DPRINTK(PROBE, INFO, ++ "VMDQ is not supported while multiple " ++ "queues are disabled. " ++ "Disabling VMDQ.\n"); ++ *aflags &= ~IXGBE_FLAG_VMDQ_ENABLED; ++ feature[RING_F_VMDQ].indices = 0; ++ } ++ ++ if (adapter->hw.mac.type == ixgbe_mac_82598EB) ++ feature[RING_F_VMDQ].indices = ++ min(feature[RING_F_VMDQ].indices, 16); ++ ++ /* Disable RSS when using VMDQ mode */ ++ *aflags &= ~IXGBE_FLAG_RSS_CAPABLE; ++ *aflags &= ~IXGBE_FLAG_RSS_ENABLED; ++ } ++ } ++#ifdef CONFIG_PCI_IOV ++ { /* Single Root I/O Virtualization (SR-IOV) */ ++ static struct ixgbe_option opt = { ++ .type = range_option, ++ .name = "I/O Virtualization (IOV)", ++ .err = "defaulting to Disabled", ++ .def = OPTION_DISABLED, ++ .arg = { .r = { .min = OPTION_DISABLED, ++ .max = IXGBE_MAX_VF_FUNCTIONS}} ++ }; ++ ++#ifdef module_param_array ++ if (num_max_vfs > bd) { ++#endif ++ unsigned int vfs = max_vfs[bd]; ++ ixgbe_validate_option(&vfs, &opt); ++ adapter->num_vfs = vfs; ++ if (vfs) ++ *aflags |= IXGBE_FLAG_SRIOV_ENABLED; ++ else ++ *aflags &= ~IXGBE_FLAG_SRIOV_ENABLED; ++#ifdef module_param_array ++ } else { ++ if (opt.def == OPTION_DISABLED) { ++ adapter->num_vfs = 0; ++ *aflags &= ~IXGBE_FLAG_SRIOV_ENABLED; ++ } else { ++ adapter->num_vfs = opt.def; ++ *aflags |= IXGBE_FLAG_SRIOV_ENABLED; ++ } ++ } ++#endif ++ ++ /* Check Interoperability */ ++ if (*aflags & IXGBE_FLAG_SRIOV_ENABLED) { ++ if (!(*aflags & IXGBE_FLAG_SRIOV_CAPABLE)) { ++ DPRINTK(PROBE, INFO, ++ "IOV is not supported on this " ++ "hardware. Disabling IOV.\n"); ++ *aflags &= ~IXGBE_FLAG_SRIOV_ENABLED; ++ adapter->num_vfs = 0; ++ } else if (!(*aflags & IXGBE_FLAG_MQ_CAPABLE)) { ++ DPRINTK(PROBE, INFO, ++ "IOV is not supported while multiple " ++ "queues are disabled. 
" ++ "Disabling IOV.\n"); ++ *aflags &= ~IXGBE_FLAG_SRIOV_ENABLED; ++ adapter->num_vfs = 0; ++ } else { ++ *aflags &= ~IXGBE_FLAG_RSS_CAPABLE; ++ adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE; ++ } ++ } ++ } ++#endif /* CONFIG_PCI_IOV */ ++ { /* Interrupt Throttling Rate */ ++ static struct ixgbe_option opt = { ++ .type = range_option, ++ .name = "Interrupt Throttling Rate (ints/sec)", ++ .err = "using default of "__MODULE_STRING(DEFAULT_ITR), ++ .def = DEFAULT_ITR, ++ .arg = { .r = { .min = MIN_ITR, ++ .max = MAX_ITR }} ++ }; ++ ++#ifdef module_param_array ++ if (num_InterruptThrottleRate > bd) { ++#endif ++ u32 eitr = InterruptThrottleRate[bd]; ++ switch (eitr) { ++ case 0: ++ DPRINTK(PROBE, INFO, "%s turned off\n", ++ opt.name); ++ /* ++ * zero is a special value, we don't want to ++ * turn off ITR completely, just set it to an ++ * insane interrupt rate ++ */ ++ adapter->rx_eitr_param = IXGBE_MAX_INT_RATE; ++ adapter->rx_itr_setting = 0; ++ adapter->tx_itr_setting = 0; ++ break; ++ case 1: ++ DPRINTK(PROBE, INFO, "dynamic interrupt " ++ "throttling enabled\n"); ++ adapter->rx_eitr_param = 20000; ++ adapter->tx_eitr_param = ++ adapter->rx_eitr_param >> 1; ++ adapter->rx_itr_setting = 1; ++ adapter->tx_itr_setting = 1; ++ break; ++ default: ++ ixgbe_validate_option(&eitr, &opt); ++ adapter->rx_eitr_param = eitr; ++ adapter->tx_eitr_param = (eitr >> 1); ++ /* the first bit is used as control */ ++ adapter->rx_itr_setting = eitr & ~1; ++ adapter->tx_itr_setting = (eitr >> 1) & ~1; ++ break; ++ } ++#ifdef module_param_array ++ } else { ++ adapter->rx_eitr_param = DEFAULT_ITR; ++ adapter->rx_itr_setting = DEFAULT_ITR & ~1; ++ adapter->tx_eitr_param = (DEFAULT_ITR >> 1); ++ adapter->tx_itr_setting = (DEFAULT_ITR >> 1) & ~1; ++ } ++#endif ++ /* Check Interoperability */ ++ if (adapter->rx_itr_setting == 0 && ++ adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) { ++ /* itr ==0 and RSC are mutually exclusive */ ++ adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE; ++ adapter->netdev->features &= ~NETIF_F_LRO; ++ DPRINTK(PROBE, INFO, ++ "InterruptThrottleRate set to 0, disabling RSC\n"); ++ } ++ } ++#ifndef IXGBE_NO_LLI ++ { /* Low Latency Interrupt TCP Port*/ ++ static struct ixgbe_option opt = { ++ .type = range_option, ++ .name = "Low Latency Interrupt TCP Port", ++ .err = "using default of " ++ __MODULE_STRING(DEFAULT_LLIPORT), ++ .def = DEFAULT_LLIPORT, ++ .arg = { .r = { .min = MIN_LLIPORT, ++ .max = MAX_LLIPORT }} ++ }; ++ ++#ifdef module_param_array ++ if (num_LLIPort > bd) { ++#endif ++ adapter->lli_port = LLIPort[bd]; ++ if (adapter->lli_port) { ++ ixgbe_validate_option(&adapter->lli_port, &opt); ++ } else { ++ DPRINTK(PROBE, INFO, "%s turned off\n", ++ opt.name); ++ } ++#ifdef module_param_array ++ } else { ++ adapter->lli_port = opt.def; ++ } ++#endif ++ } ++ { /* Low Latency Interrupt on Packet Size */ ++ static struct ixgbe_option opt = { ++ .type = range_option, ++ .name = "Low Latency Interrupt on Packet Size", ++ .err = "using default of " ++ __MODULE_STRING(DEFAULT_LLISIZE), ++ .def = DEFAULT_LLISIZE, ++ .arg = { .r = { .min = MIN_LLISIZE, ++ .max = MAX_LLISIZE }} ++ }; ++ ++#ifdef module_param_array ++ if (num_LLISize > bd) { ++#endif ++ adapter->lli_size = LLISize[bd]; ++ if (adapter->lli_size) { ++ ixgbe_validate_option(&adapter->lli_size, &opt); ++ } else { ++ DPRINTK(PROBE, INFO, "%s turned off\n", ++ opt.name); ++ } ++#ifdef module_param_array ++ } else { ++ adapter->lli_size = opt.def; ++ } ++#endif ++ } ++ { /*Low Latency Interrupt on TCP Push flag*/ ++ static struct ixgbe_option opt = 
{ ++ .type = enable_option, ++ .name = "Low Latency Interrupt on TCP Push flag", ++ .err = "defaulting to Disabled", ++ .def = OPTION_DISABLED ++ }; ++ ++#ifdef module_param_array ++ if (num_LLIPush > bd) { ++#endif ++ unsigned int lli_push = LLIPush[bd]; ++ ixgbe_validate_option(&lli_push, &opt); ++ if (lli_push) ++ *aflags |= IXGBE_FLAG_LLI_PUSH; ++ else ++ *aflags &= ~IXGBE_FLAG_LLI_PUSH; ++#ifdef module_param_array ++ } else { ++ if (opt.def == OPTION_ENABLED) ++ *aflags |= IXGBE_FLAG_LLI_PUSH; ++ else ++ *aflags &= ~IXGBE_FLAG_LLI_PUSH; ++ } ++#endif ++ } ++ { /* Low Latency Interrupt EtherType*/ ++ static struct ixgbe_option opt = { ++ .type = range_option, ++ .name = "Low Latency Interrupt on Ethernet Protocol Type", ++ .err = "using default of " ++ __MODULE_STRING(DEFAULT_LLIETYPE), ++ .def = DEFAULT_LLIETYPE, ++ .arg = { .r = { .min = MIN_LLIETYPE, ++ .max = MAX_LLIETYPE }} ++ }; ++ ++#ifdef module_param_array ++ if (num_LLIEType > bd) { ++#endif ++ adapter->lli_etype = LLIEType[bd]; ++ if (adapter->lli_etype) { ++ ixgbe_validate_option(&adapter->lli_etype, &opt); ++ } else { ++ DPRINTK(PROBE, INFO, "%s turned off\n", ++ opt.name); ++ } ++#ifdef module_param_array ++ } else { ++ adapter->lli_etype = opt.def; ++ } ++#endif ++ } ++ { /* LLI VLAN Priority */ ++ static struct ixgbe_option opt = { ++ .type = range_option, ++ .name = "Low Latency Interrupt on VLAN priority threashold", ++ .err = "using default of " ++ __MODULE_STRING(DEFAULT_LLIVLANP), ++ .def = DEFAULT_LLIVLANP, ++ .arg = { .r = { .min = MIN_LLIVLANP, ++ .max = MAX_LLIVLANP }} ++ }; ++ ++#ifdef module_param_array ++ if (num_LLIVLANP > bd) { ++#endif ++ adapter->lli_vlan_pri = LLIVLANP[bd]; ++ if (adapter->lli_vlan_pri) { ++ ixgbe_validate_option(&adapter->lli_vlan_pri, &opt); ++ } else { ++ DPRINTK(PROBE, INFO, "%s turned off\n", ++ opt.name); ++ } ++#ifdef module_param_array ++ } else { ++ adapter->lli_vlan_pri = opt.def; ++ } ++#endif ++ } ++#endif /* IXGBE_NO_LLI */ ++ { /* Rx buffer mode */ ++ unsigned int rx_buf_mode; ++ static struct ixgbe_option opt = { ++ .type = range_option, ++ .name = "Rx buffer mode", ++ .err = "using default of " ++ __MODULE_STRING(IXGBE_DEFAULT_RXBUFMODE), ++ .def = IXGBE_DEFAULT_RXBUFMODE, ++ .arg = {.r = {.min = IXGBE_RXBUFMODE_1BUF_ALWAYS, ++ .max = IXGBE_RXBUFMODE_OPTIMAL}} ++ }; ++ ++#ifdef module_param_array ++ if (num_RxBufferMode > bd) { ++#endif ++ rx_buf_mode = RxBufferMode[bd]; ++ ixgbe_validate_option(&rx_buf_mode, &opt); ++ switch (rx_buf_mode) { ++ case IXGBE_RXBUFMODE_OPTIMAL: ++ *aflags |= IXGBE_FLAG_RX_1BUF_CAPABLE; ++ *aflags |= IXGBE_FLAG_RX_PS_CAPABLE; ++ break; ++ case IXGBE_RXBUFMODE_PS_ALWAYS: ++ *aflags |= IXGBE_FLAG_RX_PS_CAPABLE; ++ break; ++ case IXGBE_RXBUFMODE_1BUF_ALWAYS: ++ *aflags |= IXGBE_FLAG_RX_1BUF_CAPABLE; ++ break; ++ default: ++ break; ++ } ++#ifdef module_param_array ++ } else { ++ *aflags |= IXGBE_FLAG_RX_1BUF_CAPABLE; ++ *aflags |= IXGBE_FLAG_RX_PS_CAPABLE; ++ } ++#endif ++ } ++#ifdef HAVE_TX_MQ ++ { /* Flow Director filtering mode */ ++ unsigned int fdir_filter_mode; ++ static struct ixgbe_option opt = { ++ .type = range_option, ++ .name = "Flow Director filtering mode", ++ .err = "using default of " ++ __MODULE_STRING(IXGBE_DEFAULT_FDIR_FILTER), ++ .def = IXGBE_DEFAULT_FDIR_FILTER, ++ .arg = {.r = {.min = IXGBE_FDIR_FILTER_OFF, ++ .max = IXGBE_FDIR_FILTER_PERFECT}} ++ }; ++ ++ *aflags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; ++ *aflags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; ++ if (adapter->hw.mac.type == ixgbe_mac_82598EB) ++ goto no_flow_director; ++ if 
(num_FdirMode > bd) { ++ fdir_filter_mode = FdirMode[bd]; ++ ixgbe_validate_option(&fdir_filter_mode, &opt); ++ ++ switch (fdir_filter_mode) { ++ case IXGBE_FDIR_FILTER_OFF: ++ DPRINTK(PROBE, INFO, "Flow Director disabled\n"); ++ break; ++ case IXGBE_FDIR_FILTER_HASH: ++ *aflags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; ++ *aflags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; ++ feature[RING_F_FDIR].indices = ++ IXGBE_MAX_FDIR_INDICES; ++ DPRINTK(PROBE, INFO, ++ "Flow Director hash filtering enabled\n"); ++ break; ++ case IXGBE_FDIR_FILTER_PERFECT: ++#ifdef NETIF_F_NTUPLE ++ *aflags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; ++ *aflags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; ++ feature[RING_F_FDIR].indices = ++ IXGBE_MAX_FDIR_INDICES; ++ DPRINTK(PROBE, INFO, ++ "Flow Director perfect filtering enabled\n"); ++#else /* NETIF_F_NTUPLE */ ++ DPRINTK(PROBE, INFO, "No ethtool support for " ++ "Flow Director perfect filtering. " ++ "Defaulting to hash filtering.\n"); ++ *aflags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; ++ *aflags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; ++ feature[RING_F_FDIR].indices = ++ IXGBE_MAX_FDIR_INDICES; ++#endif /* NETIF_F_NTUPLE */ ++ break; ++ default: ++ break; ++ } ++ } else { ++ if (opt.def == IXGBE_FDIR_FILTER_OFF) { ++ *aflags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; ++ *aflags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; ++ feature[RING_F_FDIR].indices = 0; ++ DPRINTK(PROBE, INFO, ++ "Flow Director hash filtering disabled\n"); ++ } else { ++ *aflags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; ++ feature[RING_F_FDIR].indices = IXGBE_MAX_FDIR_INDICES; ++ DPRINTK(PROBE, INFO, ++ "Flow Director hash filtering enabled\n"); ++ } ++ } ++ /* Check interoperability */ ++ if ((*aflags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || ++ (*aflags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) { ++ if (!(*aflags & IXGBE_FLAG_MQ_CAPABLE)) { ++ DPRINTK(PROBE, INFO, ++ "Flow Director is not supported " ++ "while multiple queues are disabled. 
" ++ "Disabling Flow Director\n"); ++ *aflags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; ++ *aflags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; ++ } ++ } ++no_flow_director: ++ /* empty code line with semi-colon */ ; ++ } ++ { /* Flow Director packet buffer allocation */ ++ unsigned int fdir_pballoc_mode; ++ static struct ixgbe_option opt = { ++ .type = range_option, ++ .name = "Flow Director packet buffer allocation", ++ .err = "using default of " ++ __MODULE_STRING(IXGBE_DEFAULT_FDIR_PBALLOC), ++ .def = IXGBE_DEFAULT_FDIR_PBALLOC, ++ .arg = {.r = {.min = IXGBE_FDIR_PBALLOC_64K, ++ .max = IXGBE_FDIR_PBALLOC_256K}} ++ }; ++ char pstring[10]; ++ ++ if ((adapter->hw.mac.type == ixgbe_mac_82598EB) || ++ (!(*aflags & (IXGBE_FLAG_FDIR_HASH_CAPABLE | ++ IXGBE_FLAG_FDIR_PERFECT_CAPABLE)))) ++ goto no_fdir_pballoc; ++ if (num_FdirPballoc > bd) { ++ fdir_pballoc_mode = FdirPballoc[bd]; ++ ixgbe_validate_option(&fdir_pballoc_mode, &opt); ++ switch (fdir_pballoc_mode) { ++ case IXGBE_FDIR_PBALLOC_64K: ++ adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K; ++ sprintf(pstring, "64kB"); ++ break; ++ case IXGBE_FDIR_PBALLOC_128K: ++ adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_128K; ++ sprintf(pstring, "128kB"); ++ break; ++ case IXGBE_FDIR_PBALLOC_256K: ++ adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_256K; ++ sprintf(pstring, "256kB"); ++ break; ++ default: ++ break; ++ } ++ DPRINTK(PROBE, INFO, ++ "Flow Director allocated %s of packet buffer\n", ++ pstring); ++ } else { ++ adapter->fdir_pballoc = opt.def; ++ DPRINTK(PROBE, INFO, ++ "Flow Director allocated 64kB of packet buffer\n"); ++ } ++no_fdir_pballoc: ++ /* empty code line with semi-colon */ ; ++ } ++ { /* Flow Director ATR Tx sample packet rate */ ++ static struct ixgbe_option opt = { ++ .type = range_option, ++ .name = "Software ATR Tx packet sample rate", ++ .err = "using default of " ++ __MODULE_STRING(IXGBE_DEFAULT_ATR_SAMPLE_RATE), ++ .def = IXGBE_DEFAULT_ATR_SAMPLE_RATE, ++ .arg = {.r = {.min = IXGBE_ATR_SAMPLE_RATE_OFF, ++ .max = IXGBE_MAX_ATR_SAMPLE_RATE}} ++ }; ++ static const char atr_string[] = ++ "ATR Tx Packet sample rate set to"; ++ ++ adapter->atr_sample_rate = IXGBE_ATR_SAMPLE_RATE_OFF; ++ if (adapter->hw.mac.type == ixgbe_mac_82598EB) ++ goto no_fdir_sample; ++ ++ /* no sample rate for perfect filtering */ ++ if (*aflags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) ++ goto no_fdir_sample; ++ if (num_AtrSampleRate > bd) { ++ /* Only enable the sample rate if hashing (ATR) is on */ ++ if (*aflags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ++ adapter->atr_sample_rate = AtrSampleRate[bd]; ++ ++ if (adapter->atr_sample_rate) { ++ ixgbe_validate_option(&adapter->atr_sample_rate, ++ &opt); ++ DPRINTK(PROBE, INFO, "%s %d\n", atr_string, ++ adapter->atr_sample_rate); ++ } ++ } else { ++ /* Only enable the sample rate if hashing (ATR) is on */ ++ if (*aflags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ++ adapter->atr_sample_rate = opt.def; ++ ++ DPRINTK(PROBE, INFO, "%s default of %d\n", atr_string, ++ adapter->atr_sample_rate); ++ } ++no_fdir_sample: ++ /* empty code line with semi-colon */ ; ++ } ++#endif /* HAVE_TX_MQ */ ++#ifdef IXGBE_FCOE ++ { ++ *aflags &= ~IXGBE_FLAG_FCOE_CAPABLE; ++ ++ switch (adapter->hw.mac.type) { ++ case ixgbe_mac_82599EB: { ++ struct ixgbe_option opt = { ++ .type = enable_option, ++ .name = "Enabled/Disable FCoE offload", ++ .err = "defaulting to Enabled", ++ .def = OPTION_ENABLED ++ }; ++#ifdef module_param_array ++ if (num_FCoE > bd) { ++#endif ++ unsigned int fcoe = FCoE[bd]; ++ ++ ixgbe_validate_option(&fcoe, &opt); ++ if (fcoe) ++ *aflags |= 
IXGBE_FLAG_FCOE_CAPABLE; ++#ifdef module_param_array ++ } else { ++ if (opt.def == OPTION_ENABLED) ++ *aflags |= IXGBE_FLAG_FCOE_CAPABLE; ++ } ++#endif ++#ifdef CONFIG_PCI_IOV ++ if (*aflags & IXGBE_FLAG_SRIOV_ENABLED) ++ *aflags &= ~IXGBE_FLAG_FCOE_CAPABLE; ++#endif ++ DPRINTK(PROBE, INFO, "FCoE Offload feature %sabled\n", ++ (*aflags & IXGBE_FLAG_FCOE_CAPABLE) ? ++ "en" : "dis"); ++ } ++ break; ++ default: ++ break; ++ } ++ } ++#endif /* IXGBE_FCOE */ ++ { /* Node assignment */ ++ static struct ixgbe_option opt = { ++ .type = range_option, ++ .name = "Node to start on", ++#ifdef HAVE_EARLY_VMALLOC_NODE ++ .err = "defaulting to 0", ++ .def = 0, ++#else ++ .err = "defaulting to -1", ++ .def = -1, ++#endif ++ .arg = { .r = { .min = 0, ++ .max = (MAX_NUMNODES - 1)}} ++ }; ++ int node_param = opt.def; ++ ++ /* if the default was zero then we need to set the ++ * default value to an online node, which is not ++ * necessarily zero, and the constant initializer ++ * above can't take first_online_node */ ++ if (node_param == 0) ++ /* must set opt.def for validate */ ++ opt.def = node_param = first_online_node; ++#ifdef module_param_array ++ if (num_Node > bd) { ++#endif ++ node_param = Node[bd]; ++ ixgbe_validate_option((uint *)&node_param, &opt); ++ ++ if (node_param != OPTION_UNSET) { ++ DPRINTK(PROBE, INFO, "node set to %d\n", node_param); ++ } ++#ifdef module_param_array ++ } ++#endif ++ /* check sanity of the value */ ++ if (node_param != -1 && !node_online(node_param)) { ++ DPRINTK(PROBE, INFO, ++ "ignoring node set to invalid value %d\n", ++ node_param); ++ node_param = opt.def; ++ } ++ ++ adapter->node = node_param; ++ } ++} ++ +diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_phy.c linux-2.6.22-50/drivers/net/ixgbe/ixgbe_phy.c +--- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_phy.c 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_phy.c 2010-08-25 17:56:26.000000000 -0400 +@@ -0,0 +1,1734 @@ ++/******************************************************************************* ++ ++ Intel 10 Gigabit PCI Express Linux driver ++ Copyright(c) 1999 - 2010 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ You should have received a copy of the GNU General Public License along with ++ this program; if not, write to the Free Software Foundation, Inc., ++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#include "ixgbe_api.h" ++#include "ixgbe_common.h" ++#include "ixgbe_phy.h" ++ ++static void ixgbe_i2c_start(struct ixgbe_hw *hw); ++static void ixgbe_i2c_stop(struct ixgbe_hw *hw); ++static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data); ++static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data); ++static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw); ++static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data); ++static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data); ++static s32 ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl); ++static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl); ++static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data); ++static bool ixgbe_get_i2c_data(u32 *i2cctl); ++void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw); ++ ++/** ++ * ixgbe_init_phy_ops_generic - Inits PHY function ptrs ++ * @hw: pointer to the hardware structure ++ * ++ * Initialize the function pointers. ++ **/ ++s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw) ++{ ++ struct ixgbe_phy_info *phy = &hw->phy; ++ ++ /* PHY */ ++ phy->ops.identify = &ixgbe_identify_phy_generic; ++ phy->ops.reset = &ixgbe_reset_phy_generic; ++ phy->ops.read_reg = &ixgbe_read_phy_reg_generic; ++ phy->ops.write_reg = &ixgbe_write_phy_reg_generic; ++ phy->ops.setup_link = &ixgbe_setup_phy_link_generic; ++ phy->ops.setup_link_speed = &ixgbe_setup_phy_link_speed_generic; ++ phy->ops.check_link = NULL; ++ phy->ops.get_firmware_version = ixgbe_get_phy_firmware_version_generic; ++ phy->ops.read_i2c_byte = &ixgbe_read_i2c_byte_generic; ++ phy->ops.write_i2c_byte = &ixgbe_write_i2c_byte_generic; ++ phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic; ++ phy->ops.write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic; ++ phy->ops.i2c_bus_clear = &ixgbe_i2c_bus_clear; ++ phy->ops.identify_sfp = &ixgbe_identify_sfp_module_generic; ++ phy->sfp_type = ixgbe_sfp_type_unknown; ++ phy->ops.check_overtemp = &ixgbe_tn_check_overtemp; ++ return 0; ++} ++ ++/** ++ * ixgbe_identify_phy_generic - Get physical layer module ++ * @hw: pointer to hardware structure ++ * ++ * Determines the physical layer module found on the current adapter. 
++ **/ ++s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw) ++{ ++ s32 status = IXGBE_ERR_PHY_ADDR_INVALID; ++ u32 phy_addr; ++ u16 ext_ability = 0; ++ ++ if (hw->phy.type == ixgbe_phy_unknown) { ++ for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) { ++ if (ixgbe_validate_phy_addr(hw, phy_addr)) { ++ hw->phy.addr = phy_addr; ++ ixgbe_get_phy_id(hw); ++ hw->phy.type = ++ ixgbe_get_phy_type_from_id(hw->phy.id); ++ ++ if (hw->phy.type == ixgbe_phy_unknown) { ++ hw->phy.ops.read_reg(hw, ++ IXGBE_MDIO_PHY_EXT_ABILITY, ++ IXGBE_MDIO_PMA_PMD_DEV_TYPE, ++ &ext_ability); ++ if (ext_ability & ++ IXGBE_MDIO_PHY_10GBASET_ABILITY || ++ ext_ability & ++ IXGBE_MDIO_PHY_1000BASET_ABILITY) ++ hw->phy.type = ++ ixgbe_phy_cu_unknown; ++ else ++ hw->phy.type = ++ ixgbe_phy_generic; ++ } ++ ++ status = 0; ++ break; ++ } ++ } ++ if (status != 0) ++ hw->phy.addr = 0; ++ } else { ++ status = 0; ++ } ++ ++ return status; ++} ++ ++/** ++ * ixgbe_validate_phy_addr - Determines phy address is valid ++ * @hw: pointer to hardware structure ++ * ++ **/ ++bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr) ++{ ++ u16 phy_id = 0; ++ bool valid = false; ++ ++ hw->phy.addr = phy_addr; ++ hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH, ++ IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_id); ++ ++ if (phy_id != 0xFFFF && phy_id != 0x0) ++ valid = true; ++ ++ return valid; ++} ++ ++/** ++ * ixgbe_get_phy_id - Get the phy type ++ * @hw: pointer to hardware structure ++ * ++ **/ ++s32 ixgbe_get_phy_id(struct ixgbe_hw *hw) ++{ ++ u32 status; ++ u16 phy_id_high = 0; ++ u16 phy_id_low = 0; ++ ++ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH, ++ IXGBE_MDIO_PMA_PMD_DEV_TYPE, ++ &phy_id_high); ++ ++ if (status == 0) { ++ hw->phy.id = (u32)(phy_id_high << 16); ++ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_LOW, ++ IXGBE_MDIO_PMA_PMD_DEV_TYPE, ++ &phy_id_low); ++ hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK); ++ hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK); ++ } ++ return status; ++} ++ ++/** ++ * ixgbe_get_phy_type_from_id - Get the phy type ++ * @hw: pointer to hardware structure ++ * ++ **/ ++enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) ++{ ++ enum ixgbe_phy_type phy_type; ++ ++ switch (phy_id) { ++ case TN1010_PHY_ID: ++ phy_type = ixgbe_phy_tn; ++ break; ++ case AQ1002_PHY_ID: ++ phy_type = ixgbe_phy_aq; ++ break; ++ case QT2022_PHY_ID: ++ phy_type = ixgbe_phy_qt; ++ break; ++ case ATH_PHY_ID: ++ phy_type = ixgbe_phy_nl; ++ break; ++ default: ++ phy_type = ixgbe_phy_unknown; ++ break; ++ } ++ ++ hw_dbg(hw, "phy type found is %d\n", phy_type); ++ return phy_type; ++} ++ ++/** ++ * ixgbe_reset_phy_generic - Performs a PHY reset ++ * @hw: pointer to hardware structure ++ **/ ++s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) ++{ ++ u32 i; ++ u16 ctrl = 0; ++ s32 status = 0; ++ ++ if (hw->phy.type == ixgbe_phy_unknown) ++ status = ixgbe_identify_phy_generic(hw); ++ ++ if (status != 0 || hw->phy.type == ixgbe_phy_none) ++ goto out; ++ ++ if (!hw->phy.reset_if_overtemp && ++ (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw))) { ++ /* Don't reset PHY if it's shut down due to overtemp. */ ++ goto out; ++ } ++ ++ /* ++ * Perform soft PHY reset to the PHY_XS. ++ * This will cause a soft reset to the PHY ++ */ ++ hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, ++ IXGBE_MDIO_PHY_XS_DEV_TYPE, ++ IXGBE_MDIO_PHY_XS_RESET); ++ ++ /* ++ * Poll for reset bit to self-clear indicating reset is complete. 
++ * Some PHYs could take up to 3 seconds to complete and need about ++ * 1.7 usec delay after the reset is complete. ++ */ ++ for (i = 0; i < 30; i++) { ++ msleep(100); ++ hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, ++ IXGBE_MDIO_PHY_XS_DEV_TYPE, &ctrl); ++ if (!(ctrl & IXGBE_MDIO_PHY_XS_RESET)) { ++ udelay(2); ++ break; ++ } ++ } ++ ++ if (ctrl & IXGBE_MDIO_PHY_XS_RESET) { ++ status = IXGBE_ERR_RESET_FAILED; ++ hw_dbg(hw, "PHY reset polling failed to complete.\n"); ++ } ++ ++out: ++ return status; ++} ++ ++/** ++ * ixgbe_read_phy_reg_generic - Reads a value from a specified PHY register ++ * @hw: pointer to hardware structure ++ * @reg_addr: 32 bit address of PHY register to read ++ * @phy_data: Pointer to read data from PHY register ++ **/ ++s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, ++ u32 device_type, u16 *phy_data) ++{ ++ u32 command; ++ u32 i; ++ u32 data; ++ s32 status = 0; ++ u16 gssr; ++ ++ if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) ++ gssr = IXGBE_GSSR_PHY1_SM; ++ else ++ gssr = IXGBE_GSSR_PHY0_SM; ++ ++ if (ixgbe_acquire_swfw_sync(hw, gssr) != 0) ++ status = IXGBE_ERR_SWFW_SYNC; ++ ++ if (status == 0) { ++ /* Setup and write the address cycle command */ ++ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | ++ (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | ++ (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | ++ (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); ++ ++ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); ++ ++ /* ++ * Check every 10 usec to see if the address cycle completed. ++ * The MDI Command bit will clear when the operation is ++ * complete ++ */ ++ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { ++ udelay(10); ++ ++ command = IXGBE_READ_REG(hw, IXGBE_MSCA); ++ ++ if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) ++ break; ++ } ++ ++ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { ++ hw_dbg(hw, "PHY address command did not complete.\n"); ++ status = IXGBE_ERR_PHY; ++ } ++ ++ if (status == 0) { ++ /* ++ * Address cycle complete, setup and write the read ++ * command ++ */ ++ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | ++ (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | ++ (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | ++ (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND)); ++ ++ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); ++ ++ /* ++ * Check every 10 usec to see if the address cycle ++ * completed. The MDI Command bit will clear when the ++ * operation is complete ++ */ ++ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { ++ udelay(10); ++ ++ command = IXGBE_READ_REG(hw, IXGBE_MSCA); ++ ++ if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) ++ break; ++ } ++ ++ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { ++ hw_dbg(hw, "PHY read command didn't complete\n"); ++ status = IXGBE_ERR_PHY; ++ } else { ++ /* ++ * Read operation is complete. 
Get the data ++ * from MSRWD ++ */ ++ data = IXGBE_READ_REG(hw, IXGBE_MSRWD); ++ data >>= IXGBE_MSRWD_READ_DATA_SHIFT; ++ *phy_data = (u16)(data); ++ } ++ } ++ ++ ixgbe_release_swfw_sync(hw, gssr); ++ } ++ ++ return status; ++} ++ ++/** ++ * ixgbe_write_phy_reg_generic - Writes a value to specified PHY register ++ * @hw: pointer to hardware structure ++ * @reg_addr: 32 bit PHY register to write ++ * @device_type: 5 bit device type ++ * @phy_data: Data to write to the PHY register ++ **/ ++s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, ++ u32 device_type, u16 phy_data) ++{ ++ u32 command; ++ u32 i; ++ s32 status = 0; ++ u16 gssr; ++ ++ if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) ++ gssr = IXGBE_GSSR_PHY1_SM; ++ else ++ gssr = IXGBE_GSSR_PHY0_SM; ++ ++ if (ixgbe_acquire_swfw_sync(hw, gssr) != 0) ++ status = IXGBE_ERR_SWFW_SYNC; ++ ++ if (status == 0) { ++ /* Put the data in the MDI single read and write data register*/ ++ IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data); ++ ++ /* Setup and write the address cycle command */ ++ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | ++ (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | ++ (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | ++ (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); ++ ++ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); ++ ++ /* ++ * Check every 10 usec to see if the address cycle completed. ++ * The MDI Command bit will clear when the operation is ++ * complete ++ */ ++ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { ++ udelay(10); ++ ++ command = IXGBE_READ_REG(hw, IXGBE_MSCA); ++ ++ if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) ++ break; ++ } ++ ++ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { ++ hw_dbg(hw, "PHY address cmd didn't complete\n"); ++ status = IXGBE_ERR_PHY; ++ } ++ ++ if (status == 0) { ++ /* ++ * Address cycle complete, setup and write the write ++ * command ++ */ ++ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | ++ (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | ++ (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | ++ (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND)); ++ ++ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); ++ ++ /* ++ * Check every 10 usec to see if the address cycle ++ * completed. The MDI Command bit will clear when the ++ * operation is complete ++ */ ++ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { ++ udelay(10); ++ ++ command = IXGBE_READ_REG(hw, IXGBE_MSCA); ++ ++ if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) ++ break; ++ } ++ ++ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { ++ hw_dbg(hw, "PHY address cmd didn't complete\n"); ++ status = IXGBE_ERR_PHY; ++ } ++ } ++ ++ ixgbe_release_swfw_sync(hw, gssr); ++ } ++ ++ return status; ++} ++ ++/** ++ * ixgbe_setup_phy_link_generic - Set and restart autoneg ++ * @hw: pointer to hardware structure ++ * ++ * Restart autonegotiation and PHY and waits for completion. 
++ **/ ++s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw) ++{ ++ s32 status = 0; ++ u32 time_out; ++ u32 max_time_out = 10; ++ u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; ++ bool autoneg = false; ++ ixgbe_link_speed speed; ++ ++ ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg); ++ ++ if (speed & IXGBE_LINK_SPEED_10GB_FULL) { ++ /* Set or unset auto-negotiation 10G advertisement */ ++ hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ++ &autoneg_reg); ++ ++ autoneg_reg &= ~IXGBE_MII_10GBASE_T_ADVERTISE; ++ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) ++ autoneg_reg |= IXGBE_MII_10GBASE_T_ADVERTISE; ++ ++ hw->phy.ops.write_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ++ autoneg_reg); ++ } ++ ++ if (speed & IXGBE_LINK_SPEED_1GB_FULL) { ++ /* Set or unset auto-negotiation 1G advertisement */ ++ hw->phy.ops.read_reg(hw, ++ IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ++ &autoneg_reg); ++ ++ autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE; ++ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) ++ autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE; ++ ++ hw->phy.ops.write_reg(hw, ++ IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ++ autoneg_reg); ++ } ++ ++ if (speed & IXGBE_LINK_SPEED_100_FULL) { ++ /* Set or unset auto-negotiation 100M advertisement */ ++ hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ++ &autoneg_reg); ++ ++ autoneg_reg &= ~IXGBE_MII_100BASE_T_ADVERTISE; ++ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) ++ autoneg_reg |= IXGBE_MII_100BASE_T_ADVERTISE; ++ ++ hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ++ autoneg_reg); ++ } ++ ++ /* Restart PHY autonegotiation and wait for completion */ ++ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg); ++ ++ autoneg_reg |= IXGBE_MII_RESTART; ++ ++ hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg); ++ ++ /* Wait for autonegotiation to finish */ ++ for (time_out = 0; time_out < max_time_out; time_out++) { ++ udelay(10); ++ /* Restart PHY autonegotiation and wait for completion */ ++ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ++ &autoneg_reg); ++ ++ autoneg_reg &= IXGBE_MII_AUTONEG_COMPLETE; ++ if (autoneg_reg == IXGBE_MII_AUTONEG_COMPLETE) { ++ break; ++ } ++ } ++ ++ if (time_out == max_time_out) { ++ status = IXGBE_ERR_LINK_SETUP; ++ hw_dbg(hw, "ixgbe_setup_phy_link_generic: time out"); ++ } ++ ++ return status; ++} ++ ++/** ++ * ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities ++ * @hw: pointer to hardware structure ++ * @speed: new link speed ++ * @autoneg: true if autonegotiation enabled ++ **/ ++s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, ++ ixgbe_link_speed speed, ++ bool autoneg, ++ bool autoneg_wait_to_complete) ++{ ++ ++ /* ++ * Clear autoneg_advertised and set new values based on input link ++ * speed. 
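++ * Only the 10GB_FULL, 1GB_FULL and 100_FULL bits of the requested
++ * speed are carried over; phy.ops.setup_link() then restarts
++ * auto-negotiation with the updated advertisement mask.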
++ */ ++ hw->phy.autoneg_advertised = 0; ++ ++ if (speed & IXGBE_LINK_SPEED_10GB_FULL) ++ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; ++ ++ if (speed & IXGBE_LINK_SPEED_1GB_FULL) ++ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; ++ ++ if (speed & IXGBE_LINK_SPEED_100_FULL) ++ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL; ++ ++ /* Setup link based on the new speed settings */ ++ hw->phy.ops.setup_link(hw); ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_get_copper_link_capabilities_generic - Determines link capabilities ++ * @hw: pointer to hardware structure ++ * @speed: pointer to link speed ++ * @autoneg: boolean auto-negotiation value ++ * ++ * Determines the link capabilities by reading the AUTOC register. ++ **/ ++s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, ++ ixgbe_link_speed *speed, ++ bool *autoneg) ++{ ++ s32 status = IXGBE_ERR_LINK_SETUP; ++ u16 speed_ability; ++ ++ *speed = 0; ++ *autoneg = true; ++ ++ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY, ++ IXGBE_MDIO_PMA_PMD_DEV_TYPE, ++ &speed_ability); ++ ++ if (status == 0) { ++ if (speed_ability & IXGBE_MDIO_PHY_SPEED_10G) ++ *speed |= IXGBE_LINK_SPEED_10GB_FULL; ++ if (speed_ability & IXGBE_MDIO_PHY_SPEED_1G) ++ *speed |= IXGBE_LINK_SPEED_1GB_FULL; ++ if (speed_ability & IXGBE_MDIO_PHY_SPEED_100M) ++ *speed |= IXGBE_LINK_SPEED_100_FULL; ++ } ++ ++ return status; ++} ++ ++/** ++ * ixgbe_check_phy_link_tnx - Determine link and speed status ++ * @hw: pointer to hardware structure ++ * ++ * Reads the VS1 register to determine if link is up and the current speed for ++ * the PHY. ++ **/ ++s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed, ++ bool *link_up) ++{ ++ s32 status = 0; ++ u32 time_out; ++ u32 max_time_out = 10; ++ u16 phy_link = 0; ++ u16 phy_speed = 0; ++ u16 phy_data = 0; ++ ++ /* Initialize speed and link to default case */ ++ *link_up = false; ++ *speed = IXGBE_LINK_SPEED_10GB_FULL; ++ ++ /* ++ * Check current speed and link status of the PHY register. ++ * This is a vendor specific register and may have to ++ * be changed for other copper PHYs. ++ */ ++ for (time_out = 0; time_out < max_time_out; time_out++) { ++ udelay(10); ++ status = hw->phy.ops.read_reg(hw, ++ IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS, ++ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ++ &phy_data); ++ phy_link = phy_data & ++ IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS; ++ phy_speed = phy_data & ++ IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS; ++ if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) { ++ *link_up = true; ++ if (phy_speed == ++ IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS) ++ *speed = IXGBE_LINK_SPEED_1GB_FULL; ++ break; ++ } ++ } ++ ++ return status; ++} ++ ++/** ++ * ixgbe_setup_phy_link_tnx - Set and restart autoneg ++ * @hw: pointer to hardware structure ++ * ++ * Restart autonegotiation and PHY and waits for completion. 
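++ *
++ * Same flow as ixgbe_setup_phy_link_generic, except the 1G
++ * advertisement for this PHY is programmed through the XNP transmit
++ * register (IXGBE_MII_AUTONEG_XNP_TX_REG) instead of the vendor
++ * provisioning register.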
++ **/ ++s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) ++{ ++ s32 status = 0; ++ u32 time_out; ++ u32 max_time_out = 10; ++ u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; ++ bool autoneg = false; ++ ixgbe_link_speed speed; ++ ++ ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg); ++ ++ if (speed & IXGBE_LINK_SPEED_10GB_FULL) { ++ /* Set or unset auto-negotiation 10G advertisement */ ++ hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ++ &autoneg_reg); ++ ++ autoneg_reg &= ~IXGBE_MII_10GBASE_T_ADVERTISE; ++ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) ++ autoneg_reg |= IXGBE_MII_10GBASE_T_ADVERTISE; ++ ++ hw->phy.ops.write_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ++ autoneg_reg); ++ } ++ ++ if (speed & IXGBE_LINK_SPEED_1GB_FULL) { ++ /* Set or unset auto-negotiation 1G advertisement */ ++ hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ++ &autoneg_reg); ++ ++ autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX; ++ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) ++ autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX; ++ ++ hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ++ autoneg_reg); ++ } ++ ++ if (speed & IXGBE_LINK_SPEED_100_FULL) { ++ /* Set or unset auto-negotiation 100M advertisement */ ++ hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ++ &autoneg_reg); ++ ++ autoneg_reg &= ~IXGBE_MII_100BASE_T_ADVERTISE; ++ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) ++ autoneg_reg |= IXGBE_MII_100BASE_T_ADVERTISE; ++ ++ hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ++ autoneg_reg); ++ } ++ ++ /* Restart PHY autonegotiation and wait for completion */ ++ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg); ++ ++ autoneg_reg |= IXGBE_MII_RESTART; ++ ++ hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg); ++ ++ /* Wait for autonegotiation to finish */ ++ for (time_out = 0; time_out < max_time_out; time_out++) { ++ udelay(10); ++ /* Restart PHY autonegotiation and wait for completion */ ++ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ++ &autoneg_reg); ++ ++ autoneg_reg &= IXGBE_MII_AUTONEG_COMPLETE; ++ if (autoneg_reg == IXGBE_MII_AUTONEG_COMPLETE) { ++ break; ++ } ++ } ++ ++ if (time_out == max_time_out) { ++ status = IXGBE_ERR_LINK_SETUP; ++ hw_dbg(hw, "ixgbe_setup_phy_link_tnx: time out"); ++ } ++ ++ return status; ++} ++ ++ ++/** ++ * ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version ++ * @hw: pointer to hardware structure ++ * @firmware_version: pointer to the PHY Firmware Version ++ **/ ++s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, ++ u16 *firmware_version) ++{ ++ s32 status = 0; ++ ++ status = hw->phy.ops.read_reg(hw, TNX_FW_REV, ++ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ++ firmware_version); ++ ++ return status; ++} ++ ++ ++/** ++ * ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version ++ * @hw: pointer to hardware structure ++ * @firmware_version: pointer to the PHY Firmware Version ++ **/ ++s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, ++ u16 *firmware_version) ++{ ++ s32 status = 0; ++ ++ status = hw->phy.ops.read_reg(hw, AQ_FW_REV, ++ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, 
++ firmware_version); ++ ++ return status; ++} ++ ++/** ++ * ixgbe_reset_phy_nl - Performs a PHY reset ++ * @hw: pointer to hardware structure ++ **/ ++s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw) ++{ ++ u16 phy_offset, control, eword, edata, block_crc; ++ bool end_data = false; ++ u16 list_offset, data_offset; ++ u16 phy_data = 0; ++ s32 ret_val = 0; ++ u32 i; ++ ++ hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, ++ IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data); ++ ++ /* reset the PHY and poll for completion */ ++ hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, ++ IXGBE_MDIO_PHY_XS_DEV_TYPE, ++ (phy_data | IXGBE_MDIO_PHY_XS_RESET)); ++ ++ for (i = 0; i < 100; i++) { ++ hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, ++ IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data); ++ if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) == 0) ++ break; ++ msleep(10); ++ } ++ ++ if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) != 0) { ++ hw_dbg(hw, "PHY reset did not complete.\n"); ++ ret_val = IXGBE_ERR_PHY; ++ goto out; ++ } ++ ++ /* Get init offsets */ ++ ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, ++ &data_offset); ++ if (ret_val != 0) ++ goto out; ++ ++ ret_val = hw->eeprom.ops.read(hw, data_offset, &block_crc); ++ data_offset++; ++ while (!end_data) { ++ /* ++ * Read control word from PHY init contents offset ++ */ ++ ret_val = hw->eeprom.ops.read(hw, data_offset, &eword); ++ control = (eword & IXGBE_CONTROL_MASK_NL) >> ++ IXGBE_CONTROL_SHIFT_NL; ++ edata = eword & IXGBE_DATA_MASK_NL; ++ switch (control) { ++ case IXGBE_DELAY_NL: ++ data_offset++; ++ hw_dbg(hw, "DELAY: %d MS\n", edata); ++ msleep(edata); ++ break; ++ case IXGBE_DATA_NL: ++ hw_dbg(hw, "DATA: \n"); ++ data_offset++; ++ hw->eeprom.ops.read(hw, data_offset++, ++ &phy_offset); ++ for (i = 0; i < edata; i++) { ++ hw->eeprom.ops.read(hw, data_offset, &eword); ++ hw->phy.ops.write_reg(hw, phy_offset, ++ IXGBE_TWINAX_DEV, eword); ++ hw_dbg(hw, "Wrote %4.4x to %4.4x\n", eword, ++ phy_offset); ++ data_offset++; ++ phy_offset++; ++ } ++ break; ++ case IXGBE_CONTROL_NL: ++ data_offset++; ++ hw_dbg(hw, "CONTROL: \n"); ++ if (edata == IXGBE_CONTROL_EOL_NL) { ++ hw_dbg(hw, "EOL\n"); ++ end_data = true; ++ } else if (edata == IXGBE_CONTROL_SOL_NL) { ++ hw_dbg(hw, "SOL\n"); ++ } else { ++ hw_dbg(hw, "Bad control value\n"); ++ ret_val = IXGBE_ERR_PHY; ++ goto out; ++ } ++ break; ++ default: ++ hw_dbg(hw, "Bad control type\n"); ++ ret_val = IXGBE_ERR_PHY; ++ goto out; ++ } ++ } ++ ++out: ++ return ret_val; ++} ++ ++/** ++ * ixgbe_identify_sfp_module_generic - Identifies SFP modules ++ * @hw: pointer to hardware structure ++ * ++ * Searches for and identifies the SFP module and assigns appropriate PHY type. 
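++ *
++ * The identifier byte, 1G/10G compliance codes and cable technology
++ * byte are read from the module EEPROM over I2C and mapped to an
++ * ixgbe_sfp_type (82598 and 82599 use different per-port encodings);
++ * the vendor OUI bytes are then used to pick the PHY type.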
++ **/ ++s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) ++{ ++ s32 status = IXGBE_ERR_PHY_ADDR_INVALID; ++ u32 vendor_oui = 0; ++ enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type; ++ u8 identifier = 0; ++ u8 comp_codes_1g = 0; ++ u8 comp_codes_10g = 0; ++ u8 oui_bytes[3] = {0, 0, 0}; ++ u8 cable_tech = 0; ++ u8 cable_spec = 0; ++ u16 enforce_sfp = 0; ++ ++ if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) { ++ hw->phy.sfp_type = ixgbe_sfp_type_not_present; ++ status = IXGBE_ERR_SFP_NOT_PRESENT; ++ goto out; ++ } ++ ++ status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER, ++ &identifier); ++ ++ if (status == IXGBE_ERR_SFP_NOT_PRESENT || status == IXGBE_ERR_I2C) { ++ status = IXGBE_ERR_SFP_NOT_PRESENT; ++ hw->phy.sfp_type = ixgbe_sfp_type_not_present; ++ if (hw->phy.type != ixgbe_phy_nl) { ++ hw->phy.id = 0; ++ hw->phy.type = ixgbe_phy_unknown; ++ } ++ goto out; ++ } ++ ++ /* LAN ID is needed for sfp_type determination */ ++ hw->mac.ops.set_lan_id(hw); ++ ++ if (identifier != IXGBE_SFF_IDENTIFIER_SFP) { ++ hw->phy.type = ixgbe_phy_sfp_unsupported; ++ status = IXGBE_ERR_SFP_NOT_SUPPORTED; ++ } else { ++ hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_1GBE_COMP_CODES, ++ &comp_codes_1g); ++ hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_10GBE_COMP_CODES, ++ &comp_codes_10g); ++ hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_CABLE_TECHNOLOGY, ++ &cable_tech); ++ ++ /* ID Module ++ * ========= ++ * 0 SFP_DA_CU ++ * 1 SFP_SR ++ * 2 SFP_LR ++ * 3 SFP_DA_CORE0 - 82599-specific ++ * 4 SFP_DA_CORE1 - 82599-specific ++ * 5 SFP_SR/LR_CORE0 - 82599-specific ++ * 6 SFP_SR/LR_CORE1 - 82599-specific ++ * 7 SFP_act_lmt_DA_CORE0 - 82599-specific ++ * 8 SFP_act_lmt_DA_CORE1 - 82599-specific ++ * 9 SFP_1g_cu_CORE0 - 82599-specific ++ * 10 SFP_1g_cu_CORE1 - 82599-specific ++ */ ++ if (hw->mac.type == ixgbe_mac_82598EB) { ++ if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) ++ hw->phy.sfp_type = ixgbe_sfp_type_da_cu; ++ else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) ++ hw->phy.sfp_type = ixgbe_sfp_type_sr; ++ else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) ++ hw->phy.sfp_type = ixgbe_sfp_type_lr; ++ else ++ hw->phy.sfp_type = ixgbe_sfp_type_unknown; ++ } else if (hw->mac.type == ixgbe_mac_82599EB) { ++ if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) { ++ if (hw->bus.lan_id == 0) ++ hw->phy.sfp_type = ++ ixgbe_sfp_type_da_cu_core0; ++ else ++ hw->phy.sfp_type = ++ ixgbe_sfp_type_da_cu_core1; ++ } else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) { ++ hw->phy.ops.read_i2c_eeprom( ++ hw, IXGBE_SFF_CABLE_SPEC_COMP, ++ &cable_spec); ++ if (cable_spec & ++ IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) { ++ if (hw->bus.lan_id == 0) ++ hw->phy.sfp_type = ++ ixgbe_sfp_type_da_act_lmt_core0; ++ else ++ hw->phy.sfp_type = ++ ixgbe_sfp_type_da_act_lmt_core1; ++ } else ++ hw->phy.sfp_type = ++ ixgbe_sfp_type_unknown; ++ } else if (comp_codes_10g & ++ (IXGBE_SFF_10GBASESR_CAPABLE | ++ IXGBE_SFF_10GBASELR_CAPABLE)) { ++ if (hw->bus.lan_id == 0) ++ hw->phy.sfp_type = ++ ixgbe_sfp_type_srlr_core0; ++ else ++ hw->phy.sfp_type = ++ ixgbe_sfp_type_srlr_core1; ++ } else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) { ++ if (hw->bus.lan_id == 0) ++ hw->phy.sfp_type = ++ ixgbe_sfp_type_1g_cu_core0; ++ else ++ hw->phy.sfp_type = ++ ixgbe_sfp_type_1g_cu_core1; ++ } else { ++ hw->phy.sfp_type = ixgbe_sfp_type_unknown; ++ } ++ } ++ ++ if (hw->phy.sfp_type != stored_sfp_type) ++ hw->phy.sfp_setup_needed = true; ++ ++ /* Determine if the SFP+ PHY is dual speed or not. 
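++ * A module advertising both a 1G (SX/LX) and the matching 10G (SR/LR)
++ * compliance code is treated as multispeed fiber.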
*/ ++ hw->phy.multispeed_fiber = false; ++ if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) && ++ (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) || ++ ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) && ++ (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE))) ++ hw->phy.multispeed_fiber = true; ++ ++ /* Determine PHY vendor */ ++ if (hw->phy.type != ixgbe_phy_nl) { ++ hw->phy.id = identifier; ++ hw->phy.ops.read_i2c_eeprom(hw, ++ IXGBE_SFF_VENDOR_OUI_BYTE0, ++ &oui_bytes[0]); ++ hw->phy.ops.read_i2c_eeprom(hw, ++ IXGBE_SFF_VENDOR_OUI_BYTE1, ++ &oui_bytes[1]); ++ hw->phy.ops.read_i2c_eeprom(hw, ++ IXGBE_SFF_VENDOR_OUI_BYTE2, ++ &oui_bytes[2]); ++ ++ vendor_oui = ++ ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) | ++ (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) | ++ (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT)); ++ ++ switch (vendor_oui) { ++ case IXGBE_SFF_VENDOR_OUI_TYCO: ++ if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) ++ hw->phy.type = ++ ixgbe_phy_sfp_passive_tyco; ++ break; ++ case IXGBE_SFF_VENDOR_OUI_FTL: ++ if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) ++ hw->phy.type = ixgbe_phy_sfp_ftl_active; ++ else ++ hw->phy.type = ixgbe_phy_sfp_ftl; ++ break; ++ case IXGBE_SFF_VENDOR_OUI_AVAGO: ++ hw->phy.type = ixgbe_phy_sfp_avago; ++ break; ++ case IXGBE_SFF_VENDOR_OUI_INTEL: ++ hw->phy.type = ixgbe_phy_sfp_intel; ++ break; ++ default: ++ if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) ++ hw->phy.type = ++ ixgbe_phy_sfp_passive_unknown; ++ else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) ++ hw->phy.type = ++ ixgbe_phy_sfp_active_unknown; ++ else ++ hw->phy.type = ixgbe_phy_sfp_unknown; ++ break; ++ } ++ } ++ ++ /* Allow any DA cable vendor */ ++ if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE | ++ IXGBE_SFF_DA_ACTIVE_CABLE)) { ++ status = 0; ++ goto out; ++ } ++ ++ /* Verify supporteed 1G SFP modules */ ++ if (comp_codes_10g == 0 && ++ !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || ++ hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0)) { ++ hw->phy.type = ixgbe_phy_sfp_unsupported; ++ status = IXGBE_ERR_SFP_NOT_SUPPORTED; ++ goto out; ++ } ++ ++ /* Anything else 82598-based is supported */ ++ if (hw->mac.type == ixgbe_mac_82598EB) { ++ status = 0; ++ goto out; ++ } ++ ++ ixgbe_get_device_caps(hw, &enforce_sfp); ++ if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) && ++ !((hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0) || ++ (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1))) { ++ /* Make sure we're a supported PHY type */ ++ if (hw->phy.type == ixgbe_phy_sfp_intel) { ++ status = 0; ++ } else { ++ hw_dbg(hw, "SFP+ module not supported\n"); ++ hw->phy.type = ixgbe_phy_sfp_unsupported; ++ status = IXGBE_ERR_SFP_NOT_SUPPORTED; ++ } ++ } else { ++ status = 0; ++ } ++ } ++ ++out: ++ return status; ++} ++ ++/** ++ * ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence ++ * @hw: pointer to hardware structure ++ * @list_offset: offset to the SFP ID list ++ * @data_offset: offset to the SFP data block ++ * ++ * Checks the MAC's EEPROM to see if it supports a given SFP+ module type, if ++ * so it returns the offsets to the phy init sequence block. 
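++ *
++ * The init area starts at IXGBE_PHY_INIT_OFFSET_NL and holds
++ * (sfp_id, data_offset) pairs terminated by IXGBE_PHY_INIT_END_NL;
++ * active-limiting DA cables and 1G copper modules are looked up as
++ * SR/LR entries.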
++ **/ ++s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, ++ u16 *list_offset, ++ u16 *data_offset) ++{ ++ u16 sfp_id; ++ u16 sfp_type = hw->phy.sfp_type; ++ ++ if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) ++ return IXGBE_ERR_SFP_NOT_SUPPORTED; ++ ++ if (hw->phy.sfp_type == ixgbe_sfp_type_not_present) ++ return IXGBE_ERR_SFP_NOT_PRESENT; ++ ++ if ((hw->device_id == IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) && ++ (hw->phy.sfp_type == ixgbe_sfp_type_da_cu)) ++ return IXGBE_ERR_SFP_NOT_SUPPORTED; ++ ++ /* ++ * Limiting active cables and 1G Phys must be initialized as ++ * SR modules ++ */ ++ if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 || ++ sfp_type == ixgbe_sfp_type_1g_cu_core0) ++ sfp_type = ixgbe_sfp_type_srlr_core0; ++ else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 || ++ sfp_type == ixgbe_sfp_type_1g_cu_core1) ++ sfp_type = ixgbe_sfp_type_srlr_core1; ++ ++ /* Read offset to PHY init contents */ ++ hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset); ++ ++ if ((!*list_offset) || (*list_offset == 0xFFFF)) ++ return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT; ++ ++ /* Shift offset to first ID word */ ++ (*list_offset)++; ++ ++ /* ++ * Find the matching SFP ID in the EEPROM ++ * and program the init sequence ++ */ ++ hw->eeprom.ops.read(hw, *list_offset, &sfp_id); ++ ++ while (sfp_id != IXGBE_PHY_INIT_END_NL) { ++ if (sfp_id == sfp_type) { ++ (*list_offset)++; ++ hw->eeprom.ops.read(hw, *list_offset, data_offset); ++ if ((!*data_offset) || (*data_offset == 0xFFFF)) { ++ hw_dbg(hw, "SFP+ module not supported\n"); ++ return IXGBE_ERR_SFP_NOT_SUPPORTED; ++ } else { ++ break; ++ } ++ } else { ++ (*list_offset) += 2; ++ if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id)) ++ return IXGBE_ERR_PHY; ++ } ++ } ++ ++ if (sfp_id == IXGBE_PHY_INIT_END_NL) { ++ hw_dbg(hw, "No matching SFP+ module found\n"); ++ return IXGBE_ERR_SFP_NOT_SUPPORTED; ++ } ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_read_i2c_eeprom_generic - Reads 8 bit EEPROM word over I2C interface ++ * @hw: pointer to hardware structure ++ * @byte_offset: EEPROM byte offset to read ++ * @eeprom_data: value read ++ * ++ * Performs byte read operation to SFP module's EEPROM over I2C interface. ++ **/ ++s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, ++ u8 *eeprom_data) ++{ ++ return hw->phy.ops.read_i2c_byte(hw, byte_offset, ++ IXGBE_I2C_EEPROM_DEV_ADDR, ++ eeprom_data); ++} ++ ++/** ++ * ixgbe_write_i2c_eeprom_generic - Writes 8 bit EEPROM word over I2C interface ++ * @hw: pointer to hardware structure ++ * @byte_offset: EEPROM byte offset to write ++ * @eeprom_data: value to write ++ * ++ * Performs byte write operation to SFP module's EEPROM over I2C interface. ++ **/ ++s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, ++ u8 eeprom_data) ++{ ++ return hw->phy.ops.write_i2c_byte(hw, byte_offset, ++ IXGBE_I2C_EEPROM_DEV_ADDR, ++ eeprom_data); ++} ++ ++/** ++ * ixgbe_read_i2c_byte_generic - Reads 8 bit word over I2C ++ * @hw: pointer to hardware structure ++ * @byte_offset: byte offset to read ++ * @data: value read ++ * ++ * Performs byte read operation to SFP module's EEPROM over I2C interface at ++ * a specified deivce address. 
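++ *
++ * The transaction is bit-banged through I2CCTL: START, device address
++ * plus write, byte offset, repeated START, device address plus read,
++ * clock in the data byte, NACK, STOP. On failure the SWFW semaphore
++ * is released, the bus is cleared and the sequence is retried up to
++ * max_retry times.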
++ **/ ++s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, ++ u8 dev_addr, u8 *data) ++{ ++ s32 status = 0; ++ u32 max_retry = 10; ++ u32 retry = 0; ++ u16 swfw_mask = 0; ++ bool nack = 1; ++ ++ if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) ++ swfw_mask = IXGBE_GSSR_PHY1_SM; ++ else ++ swfw_mask = IXGBE_GSSR_PHY0_SM; ++ ++ ++ do { ++ if (ixgbe_acquire_swfw_sync(hw, swfw_mask) != 0) { ++ status = IXGBE_ERR_SWFW_SYNC; ++ goto read_byte_out; ++ } ++ ++ ixgbe_i2c_start(hw); ++ ++ /* Device Address and write indication */ ++ status = ixgbe_clock_out_i2c_byte(hw, dev_addr); ++ if (status != 0) ++ goto fail; ++ ++ status = ixgbe_get_i2c_ack(hw); ++ if (status != 0) ++ goto fail; ++ ++ status = ixgbe_clock_out_i2c_byte(hw, byte_offset); ++ if (status != 0) ++ goto fail; ++ ++ status = ixgbe_get_i2c_ack(hw); ++ if (status != 0) ++ goto fail; ++ ++ ixgbe_i2c_start(hw); ++ ++ /* Device Address and read indication */ ++ status = ixgbe_clock_out_i2c_byte(hw, (dev_addr | 0x1)); ++ if (status != 0) ++ goto fail; ++ ++ status = ixgbe_get_i2c_ack(hw); ++ if (status != 0) ++ goto fail; ++ ++ status = ixgbe_clock_in_i2c_byte(hw, data); ++ if (status != 0) ++ goto fail; ++ ++ status = ixgbe_clock_out_i2c_bit(hw, nack); ++ if (status != 0) ++ goto fail; ++ ++ ixgbe_i2c_stop(hw); ++ break; ++ ++fail: ++ ixgbe_release_swfw_sync(hw, swfw_mask); ++ msleep(100); ++ ixgbe_i2c_bus_clear(hw); ++ retry++; ++ if (retry < max_retry) ++ hw_dbg(hw, "I2C byte read error - Retrying.\n"); ++ else ++ hw_dbg(hw, "I2C byte read error.\n"); ++ ++ } while (retry < max_retry); ++ ++ ixgbe_release_swfw_sync(hw, swfw_mask); ++ ++read_byte_out: ++ return status; ++} ++ ++/** ++ * ixgbe_write_i2c_byte_generic - Writes 8 bit word over I2C ++ * @hw: pointer to hardware structure ++ * @byte_offset: byte offset to write ++ * @data: value to write ++ * ++ * Performs byte write operation to SFP module's EEPROM over I2C interface at ++ * a specified device address. 
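++ *
++ * Write sequence: START, device address plus write, byte offset, data
++ * byte, with an ACK check after each byte, then STOP. max_retry is 1
++ * here, so a failed write is not retried.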
++ **/ ++s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, ++ u8 dev_addr, u8 data) ++{ ++ s32 status = 0; ++ u32 max_retry = 1; ++ u32 retry = 0; ++ u16 swfw_mask = 0; ++ ++ if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) ++ swfw_mask = IXGBE_GSSR_PHY1_SM; ++ else ++ swfw_mask = IXGBE_GSSR_PHY0_SM; ++ ++ if (ixgbe_acquire_swfw_sync(hw, swfw_mask) != 0) { ++ status = IXGBE_ERR_SWFW_SYNC; ++ goto write_byte_out; ++ } ++ ++ do { ++ ixgbe_i2c_start(hw); ++ ++ status = ixgbe_clock_out_i2c_byte(hw, dev_addr); ++ if (status != 0) ++ goto fail; ++ ++ status = ixgbe_get_i2c_ack(hw); ++ if (status != 0) ++ goto fail; ++ ++ status = ixgbe_clock_out_i2c_byte(hw, byte_offset); ++ if (status != 0) ++ goto fail; ++ ++ status = ixgbe_get_i2c_ack(hw); ++ if (status != 0) ++ goto fail; ++ ++ status = ixgbe_clock_out_i2c_byte(hw, data); ++ if (status != 0) ++ goto fail; ++ ++ status = ixgbe_get_i2c_ack(hw); ++ if (status != 0) ++ goto fail; ++ ++ ixgbe_i2c_stop(hw); ++ break; ++ ++fail: ++ ixgbe_i2c_bus_clear(hw); ++ retry++; ++ if (retry < max_retry) ++ hw_dbg(hw, "I2C byte write error - Retrying.\n"); ++ else ++ hw_dbg(hw, "I2C byte write error.\n"); ++ } while (retry < max_retry); ++ ++ ixgbe_release_swfw_sync(hw, swfw_mask); ++ ++write_byte_out: ++ return status; ++} ++ ++/** ++ * ixgbe_i2c_start - Sets I2C start condition ++ * @hw: pointer to hardware structure ++ * ++ * Sets I2C start condition (High -> Low on SDA while SCL is High) ++ **/ ++static void ixgbe_i2c_start(struct ixgbe_hw *hw) ++{ ++ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); ++ ++ /* Start condition must begin with data and clock high */ ++ ixgbe_set_i2c_data(hw, &i2cctl, 1); ++ ixgbe_raise_i2c_clk(hw, &i2cctl); ++ ++ /* Setup time for start condition (4.7us) */ ++ udelay(IXGBE_I2C_T_SU_STA); ++ ++ ixgbe_set_i2c_data(hw, &i2cctl, 0); ++ ++ /* Hold time for start condition (4us) */ ++ udelay(IXGBE_I2C_T_HD_STA); ++ ++ ixgbe_lower_i2c_clk(hw, &i2cctl); ++ ++ /* Minimum low period of clock is 4.7 us */ ++ udelay(IXGBE_I2C_T_LOW); ++ ++} ++ ++/** ++ * ixgbe_i2c_stop - Sets I2C stop condition ++ * @hw: pointer to hardware structure ++ * ++ * Sets I2C stop condition (Low -> High on SDA while SCL is High) ++ **/ ++static void ixgbe_i2c_stop(struct ixgbe_hw *hw) ++{ ++ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); ++ ++ /* Stop condition must begin with data low and clock high */ ++ ixgbe_set_i2c_data(hw, &i2cctl, 0); ++ ixgbe_raise_i2c_clk(hw, &i2cctl); ++ ++ /* Setup time for stop condition (4us) */ ++ udelay(IXGBE_I2C_T_SU_STO); ++ ++ ixgbe_set_i2c_data(hw, &i2cctl, 1); ++ ++ /* bus free time between stop and start (4.7us)*/ ++ udelay(IXGBE_I2C_T_BUF); ++} ++ ++/** ++ * ixgbe_clock_in_i2c_byte - Clocks in one byte via I2C ++ * @hw: pointer to hardware structure ++ * @data: data byte to clock in ++ * ++ * Clocks in one byte data via I2C data/clock ++ **/ ++static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data) ++{ ++ s32 status = 0; ++ s32 i; ++ bool bit = 0; ++ ++ for (i = 7; i >= 0; i--) { ++ status = ixgbe_clock_in_i2c_bit(hw, &bit); ++ *data |= bit<= 0; i--) { ++ bit = (data >> i) & 0x1; ++ status = ixgbe_clock_out_i2c_bit(hw, bit); ++ ++ if (status != 0) ++ break; ++ } ++ ++ /* Release SDA line (set high) */ ++ i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); ++ i2cctl |= IXGBE_I2C_DATA_OUT; ++ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, i2cctl); ++ ++ return status; ++} ++ ++/** ++ * ixgbe_get_i2c_ack - Polls for I2C ACK ++ * @hw: pointer to hardware structure ++ * ++ * Clocks in/out one bit via I2C 
data/clock ++ **/ ++static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw) ++{ ++ s32 status; ++ u32 i = 0; ++ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); ++ u32 timeout = 10; ++ bool ack = 1; ++ ++ status = ixgbe_raise_i2c_clk(hw, &i2cctl); ++ ++ if (status != 0) ++ goto out; ++ ++ /* Minimum high period of clock is 4us */ ++ udelay(IXGBE_I2C_T_HIGH); ++ ++ /* Poll for ACK. Note that ACK in I2C spec is ++ * transition from 1 to 0 */ ++ for (i = 0; i < timeout; i++) { ++ i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); ++ ack = ixgbe_get_i2c_data(&i2cctl); ++ ++ udelay(1); ++ if (ack == 0) ++ break; ++ } ++ ++ if (ack == 1) { ++ hw_dbg(hw, "I2C ack was not received.\n"); ++ status = IXGBE_ERR_I2C; ++ } ++ ++ ixgbe_lower_i2c_clk(hw, &i2cctl); ++ ++ /* Minimum low period of clock is 4.7 us */ ++ udelay(IXGBE_I2C_T_LOW); ++ ++out: ++ return status; ++} ++ ++/** ++ * ixgbe_clock_in_i2c_bit - Clocks in one bit via I2C data/clock ++ * @hw: pointer to hardware structure ++ * @data: read data value ++ * ++ * Clocks in one bit via I2C data/clock ++ **/ ++static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data) ++{ ++ s32 status; ++ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); ++ ++ status = ixgbe_raise_i2c_clk(hw, &i2cctl); ++ ++ /* Minimum high period of clock is 4us */ ++ udelay(IXGBE_I2C_T_HIGH); ++ ++ i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); ++ *data = ixgbe_get_i2c_data(&i2cctl); ++ ++ ixgbe_lower_i2c_clk(hw, &i2cctl); ++ ++ /* Minimum low period of clock is 4.7 us */ ++ udelay(IXGBE_I2C_T_LOW); ++ ++ return status; ++} ++ ++/** ++ * ixgbe_clock_out_i2c_bit - Clocks in/out one bit via I2C data/clock ++ * @hw: pointer to hardware structure ++ * @data: data value to write ++ * ++ * Clocks out one bit via I2C data/clock ++ **/ ++static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data) ++{ ++ s32 status; ++ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); ++ ++ status = ixgbe_set_i2c_data(hw, &i2cctl, data); ++ if (status == 0) { ++ status = ixgbe_raise_i2c_clk(hw, &i2cctl); ++ ++ /* Minimum high period of clock is 4us */ ++ udelay(IXGBE_I2C_T_HIGH); ++ ++ ixgbe_lower_i2c_clk(hw, &i2cctl); ++ ++ /* Minimum low period of clock is 4.7 us. ++ * This also takes care of the data hold time. 
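++ * No separate IXGBE_I2C_T_HD_DATA delay is issued here for that
++ * reason.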
++ */ ++ udelay(IXGBE_I2C_T_LOW); ++ } else { ++ status = IXGBE_ERR_I2C; ++ hw_dbg(hw, "I2C data was not set to %X\n", data); ++ } ++ ++ return status; ++} ++/** ++ * ixgbe_raise_i2c_clk - Raises the I2C SCL clock ++ * @hw: pointer to hardware structure ++ * @i2cctl: Current value of I2CCTL register ++ * ++ * Raises the I2C clock line '0'->'1' ++ **/ ++static s32 ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) ++{ ++ s32 status = 0; ++ ++ *i2cctl |= IXGBE_I2C_CLK_OUT; ++ ++ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl); ++ ++ /* SCL rise time (1000ns) */ ++ udelay(IXGBE_I2C_T_RISE); ++ ++ return status; ++} ++ ++/** ++ * ixgbe_lower_i2c_clk - Lowers the I2C SCL clock ++ * @hw: pointer to hardware structure ++ * @i2cctl: Current value of I2CCTL register ++ * ++ * Lowers the I2C clock line '1'->'0' ++ **/ ++static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) ++{ ++ ++ *i2cctl &= ~IXGBE_I2C_CLK_OUT; ++ ++ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl); ++ ++ /* SCL fall time (300ns) */ ++ udelay(IXGBE_I2C_T_FALL); ++} ++ ++/** ++ * ixgbe_set_i2c_data - Sets the I2C data bit ++ * @hw: pointer to hardware structure ++ * @i2cctl: Current value of I2CCTL register ++ * @data: I2C data value (0 or 1) to set ++ * ++ * Sets the I2C data bit ++ **/ ++static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data) ++{ ++ s32 status = 0; ++ ++ if (data) ++ *i2cctl |= IXGBE_I2C_DATA_OUT; ++ else ++ *i2cctl &= ~IXGBE_I2C_DATA_OUT; ++ ++ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl); ++ ++ /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */ ++ udelay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA); ++ ++ /* Verify data was set correctly */ ++ *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); ++ if (data != ixgbe_get_i2c_data(i2cctl)) { ++ status = IXGBE_ERR_I2C; ++ hw_dbg(hw, "Error - I2C data was not set to %X.\n", data); ++ } ++ ++ return status; ++} ++ ++/** ++ * ixgbe_get_i2c_data - Reads the I2C SDA data bit ++ * @hw: pointer to hardware structure ++ * @i2cctl: Current value of I2CCTL register ++ * ++ * Returns the I2C data bit value ++ **/ ++static bool ixgbe_get_i2c_data(u32 *i2cctl) ++{ ++ bool data; ++ ++ if (*i2cctl & IXGBE_I2C_DATA_IN) ++ data = 1; ++ else ++ data = 0; ++ ++ return data; ++} ++ ++/** ++ * ixgbe_i2c_bus_clear - Clears the I2C bus ++ * @hw: pointer to hardware structure ++ * ++ * Clears the I2C bus by sending nine clock pulses. ++ * Used when data line is stuck low. ++ **/ ++void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw) ++{ ++ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); ++ u32 i; ++ ++ ixgbe_i2c_start(hw); ++ ++ ixgbe_set_i2c_data(hw, &i2cctl, 1); ++ ++ for (i = 0; i < 9; i++) { ++ ixgbe_raise_i2c_clk(hw, &i2cctl); ++ ++ /* Min high period of clock is 4us */ ++ udelay(IXGBE_I2C_T_HIGH); ++ ++ ixgbe_lower_i2c_clk(hw, &i2cctl); ++ ++ /* Min low period of clock is 4.7us*/ ++ udelay(IXGBE_I2C_T_LOW); ++ } ++ ++ ixgbe_i2c_start(hw); ++ ++ /* Put the i2c bus back to default state */ ++ ixgbe_i2c_stop(hw); ++} ++ ++/** ++ * ixgbe_check_overtemp - Checks if an overtemp occured. 
++ * @hw: pointer to hardware structure ++ * ++ * Checks if the LASI temp alarm status was triggered due to overtemp ++ **/ ++s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw) ++{ ++ s32 status = 0; ++ u16 phy_data = 0; ++ ++ if (hw->device_id != IXGBE_DEV_ID_82599_T3_LOM) ++ goto out; ++ ++ /* Check that the LASI temp alarm status was triggered */ ++ hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG, ++ IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_data); ++ ++ if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM)) ++ goto out; ++ ++ status = IXGBE_ERR_OVERTEMP; ++out: ++ return status; ++} ++ +diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_phy.h linux-2.6.22-50/drivers/net/ixgbe/ixgbe_phy.h +--- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_phy.h 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_phy.h 2010-08-25 17:56:26.000000000 -0400 +@@ -0,0 +1,135 @@ ++/******************************************************************************* ++ ++ Intel 10 Gigabit PCI Express Linux driver ++ Copyright(c) 1999 - 2010 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ You should have received a copy of the GNU General Public License along with ++ this program; if not, write to the Free Software Foundation, Inc., ++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#ifndef _IXGBE_PHY_H_ ++#define _IXGBE_PHY_H_ ++ ++#include "ixgbe_type.h" ++#define IXGBE_I2C_EEPROM_DEV_ADDR 0xA0 ++ ++/* EEPROM byte offsets */ ++#define IXGBE_SFF_IDENTIFIER 0x0 ++#define IXGBE_SFF_IDENTIFIER_SFP 0x3 ++#define IXGBE_SFF_VENDOR_OUI_BYTE0 0x25 ++#define IXGBE_SFF_VENDOR_OUI_BYTE1 0x26 ++#define IXGBE_SFF_VENDOR_OUI_BYTE2 0x27 ++#define IXGBE_SFF_1GBE_COMP_CODES 0x6 ++#define IXGBE_SFF_10GBE_COMP_CODES 0x3 ++#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8 ++#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C ++ ++/* Bitmasks */ ++#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4 ++#define IXGBE_SFF_DA_ACTIVE_CABLE 0x8 ++#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4 ++#define IXGBE_SFF_1GBASESX_CAPABLE 0x1 ++#define IXGBE_SFF_1GBASELX_CAPABLE 0x2 ++#define IXGBE_SFF_1GBASET_CAPABLE 0x8 ++#define IXGBE_SFF_10GBASESR_CAPABLE 0x10 ++#define IXGBE_SFF_10GBASELR_CAPABLE 0x20 ++#define IXGBE_I2C_EEPROM_READ_MASK 0x100 ++#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3 ++#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0 ++#define IXGBE_I2C_EEPROM_STATUS_PASS 0x1 ++#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2 ++#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3 ++ ++/* Flow control defines */ ++#define IXGBE_TAF_SYM_PAUSE 0x400 ++#define IXGBE_TAF_ASM_PAUSE 0x800 ++ ++/* Bit-shift macros */ ++#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 24 ++#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 16 ++#define IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT 8 ++ ++/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */ ++#define IXGBE_SFF_VENDOR_OUI_TYCO 0x00407600 ++#define IXGBE_SFF_VENDOR_OUI_FTL 0x00906500 ++#define IXGBE_SFF_VENDOR_OUI_AVAGO 0x00176A00 ++#define IXGBE_SFF_VENDOR_OUI_INTEL 0x001B2100 ++ ++/* I2C SDA and SCL timing parameters for standard mode */ ++#define IXGBE_I2C_T_HD_STA 4 ++#define IXGBE_I2C_T_LOW 5 ++#define IXGBE_I2C_T_HIGH 4 ++#define IXGBE_I2C_T_SU_STA 5 ++#define IXGBE_I2C_T_HD_DATA 5 ++#define IXGBE_I2C_T_SU_DATA 1 ++#define IXGBE_I2C_T_RISE 1 ++#define IXGBE_I2C_T_FALL 1 ++#define IXGBE_I2C_T_SU_STO 4 ++#define IXGBE_I2C_T_BUF 5 ++ ++#define IXGBE_TN_LASI_STATUS_REG 0x9005 ++#define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008 ++ ++ ++s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw); ++bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr); ++enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id); ++s32 ixgbe_get_phy_id(struct ixgbe_hw *hw); ++s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw); ++s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw); ++s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, ++ u32 device_type, u16 *phy_data); ++s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, ++ u32 device_type, u16 phy_data); ++s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw); ++s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, ++ ixgbe_link_speed speed, ++ bool autoneg, ++ bool autoneg_wait_to_complete); ++s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, ++ ixgbe_link_speed *speed, ++ bool *autoneg); ++ ++/* PHY specific */ ++s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ++ ixgbe_link_speed *speed, ++ bool *link_up); ++s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw); ++s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, ++ u16 *firmware_version); ++s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, ++ u16 *firmware_version); ++ ++s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw); ++s32 
ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw); ++s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, ++ u16 *list_offset, ++ u16 *data_offset); ++s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw); ++s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, ++ u8 dev_addr, u8 *data); ++s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, ++ u8 dev_addr, u8 data); ++s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, ++ u8 *eeprom_data); ++s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, ++ u8 eeprom_data); ++#endif /* _IXGBE_PHY_H_ */ +diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_sriov.c linux-2.6.22-50/drivers/net/ixgbe/ixgbe_sriov.c +--- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_sriov.c 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_sriov.c 2010-08-25 17:56:26.000000000 -0400 +@@ -0,0 +1,461 @@ ++/******************************************************************************* ++ ++ Intel 10 Gigabit PCI Express Linux driver ++ Copyright(c) 1999 - 2010 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ You should have received a copy of the GNU General Public License along with ++ this program; if not, write to the Free Software Foundation, Inc., ++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#ifdef NETIF_F_HW_VLAN_TX ++#include ++#endif ++ ++#include "ixgbe.h" ++ ++#include "ixgbe_sriov.h" ++ ++int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter, ++ int entries, u16 *hash_list, u32 vf) ++{ ++ struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; ++ struct ixgbe_hw *hw = &adapter->hw; ++ int i; ++ u32 vector_bit; ++ u32 vector_reg; ++ u32 mta_reg; ++ ++ /* only so many hash values supported */ ++ entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES); ++ ++ /* salt away the number of multi cast addresses assigned ++ * to this VF for later use to restore when the PF multi cast ++ * list changes ++ */ ++ vfinfo->num_vf_mc_hashes = entries; ++ ++ /* VFs are limited to using the MTA hash table for their multicast ++ * addresses */ ++ for (i = 0; i < entries; i++) { ++ vfinfo->vf_mc_hashes[i] = hash_list[i];; ++ } ++ ++ for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) { ++ vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F; ++ vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F; ++ mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg)); ++ mta_reg |= (1 << vector_bit); ++ IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg); ++ } ++ ++ return 0; ++} ++ ++void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ struct vf_data_storage *vfinfo; ++ int i, j; ++ u32 vector_bit; ++ u32 vector_reg; ++ u32 mta_reg; ++ ++ for (i = 0; i < adapter->num_vfs; i++) { ++ u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(i)); ++ vfinfo = &adapter->vfinfo[i]; ++ for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) { ++ hw->addr_ctrl.mta_in_use++; ++ vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F; ++ vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F; ++ mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg)); ++ mta_reg |= (1 << vector_bit); ++ IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg); ++ } ++ if (vfinfo->num_vf_mc_hashes) ++ vmolr |= IXGBE_VMOLR_ROMPE; ++ else ++ vmolr &= ~IXGBE_VMOLR_ROMPE; ++ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr); ++ } ++} ++ ++int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf) ++{ ++ return ixgbe_set_vfta(&adapter->hw, vid, vf, (bool)add); ++} ++ ++void ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf) ++{ ++} ++ ++void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe) ++{ ++ u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); ++ vmolr |= IXGBE_VMOLR_BAM; ++ if (aupe) ++ vmolr |= IXGBE_VMOLR_AUPE; ++ else ++ vmolr &= ~IXGBE_VMOLR_AUPE; ++ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr); ++} ++ ++static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter, u32 vid, u32 vf) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ ++ if (vid) ++ IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), ++ (vid | IXGBE_VMVIR_VLANA_DEFAULT)); ++ else ++ IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0); ++} ++ ++inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ int rar_entry = hw->mac.num_rar_entries - (vf + 1); ++ ++ /* reset offloads to defaults */ ++ if (adapter->vfinfo[vf].pf_vlan) { ++ ixgbe_set_vf_vlan(adapter, true, ++ adapter->vfinfo[vf].pf_vlan, vf); ++ ixgbe_set_vmvir(adapter, ++ (adapter->vfinfo[vf].pf_vlan | ++ (adapter->vfinfo[vf].pf_qos << 13)), vf); ++ ixgbe_set_vmolr(hw, vf, false); ++ } else { ++ ixgbe_set_vmvir(adapter, 0, 
vf); ++ ixgbe_set_vmolr(hw, vf, true); ++ } ++ ++ ++ /* reset multicast table array for vf */ ++ adapter->vfinfo[vf].num_vf_mc_hashes = 0; ++ ++ /* Flush and reset the mta with the new values */ ++ ixgbe_set_rx_mode(adapter->netdev); ++ ++ hw->mac.ops.clear_rar(hw, rar_entry); ++} ++ ++int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, ++ int vf, unsigned char *mac_addr) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ int rar_entry = hw->mac.num_rar_entries - (vf + 1); ++ ++ memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, 6); ++ hw->mac.ops.set_rar(hw, rar_entry, mac_addr, vf, IXGBE_RAH_AV); ++ ++ return 0; ++} ++ ++int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) ++{ ++ unsigned char vf_mac_addr[6]; ++ struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); ++ unsigned int vfn = (event_mask & 0x3f); ++ ++ bool enable = ((event_mask & 0x10000000U) != 0); ++ ++ if (enable) { ++ random_ether_addr(vf_mac_addr); ++ DPRINTK(PROBE, INFO, "IOV: VF %d is enabled " ++ "mac %02X:%02X:%02X:%02X:%02X:%02X\n", ++ vfn, ++ vf_mac_addr[0], vf_mac_addr[1], vf_mac_addr[2], ++ vf_mac_addr[3], vf_mac_addr[4], vf_mac_addr[5]); ++ /* Store away the VF "permananet" MAC address, it will ask ++ * for it later. ++ */ ++ memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6); ++ } ++ ++ return 0; ++} ++ ++inline void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ u32 reg; ++ u32 reg_offset, vf_shift; ++ ++ vf_shift = vf % 32; ++ reg_offset = vf / 32; ++ ++ /* enable transmit and receive for vf */ ++ reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset)); ++ reg |= (reg | (1 << vf_shift)); ++ IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg); ++ ++ reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset)); ++ reg |= (reg | (1 << vf_shift)); ++ IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg); ++ ++ ixgbe_vf_reset_event(adapter, vf); ++} ++ ++static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) ++{ ++ u32 mbx_size = IXGBE_VFMAILBOX_SIZE; ++ u32 msgbuf[mbx_size]; ++ struct ixgbe_hw *hw = &adapter->hw; ++ s32 retval; ++ int entries; ++ u16 *hash_list; ++ int add, vid; ++ ++ retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf); ++ ++ if (retval) ++ printk(KERN_ERR "Error receiving message from VF\n"); ++ ++ /* this is a message we already processed, do nothing */ ++ if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK)) ++ return retval; ++ ++ /* ++ * until the vf completes a virtual function reset it should not be ++ * allowed to start any configuration. 
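++ * The IXGBE_VF_RESET message handled first is the only message
++ * accepted before clear_to_send is set; anything else is NACKed until
++ * the reset handshake (ACK plus permanent MAC and MC filter type) has
++ * completed.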
++ */ ++ ++ if (msgbuf[0] == IXGBE_VF_RESET) { ++ unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses; ++ u8 *new_mac = (u8 *)(&msgbuf[1]); ++ adapter->vfinfo[vf].clear_to_send = false; ++ ixgbe_vf_reset_msg(adapter, vf); ++ adapter->vfinfo[vf].clear_to_send = true; ++ ++ if (is_valid_ether_addr(new_mac) && ++ !adapter->vfinfo[vf].pf_set_mac) ++ ixgbe_set_vf_mac(adapter, vf, vf_mac); ++ else ++ ixgbe_set_vf_mac(adapter, ++ vf, adapter->vfinfo[vf].vf_mac_addresses); ++ ++ /* reply to reset with ack and vf mac address */ ++ msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK; ++ memcpy(new_mac, vf_mac, IXGBE_ETH_LENGTH_OF_ADDRESS); ++ /* Piggyback the multicast filter type so VF can compute the ++ * correct vectors */ ++ msgbuf[3] = hw->mac.mc_filter_type; ++ ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf); ++ ++ return retval; ++ } ++ ++ if (!adapter->vfinfo[vf].clear_to_send) { ++ msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK; ++ ixgbe_write_mbx(hw, msgbuf, 1, vf); ++ return retval; ++ } ++ ++ switch ((msgbuf[0] & 0xFFFF)) { ++ case IXGBE_VF_SET_MAC_ADDR: ++ DPRINTK(PROBE, INFO, "Set MAC msg received from vf %d\n", vf); ++ { ++ u8 *new_mac = ((u8 *)(&msgbuf[1])); ++ if (is_valid_ether_addr(new_mac)) ++ ixgbe_set_vf_mac(adapter, vf, new_mac); ++ else ++ retval = -1; ++ } ++ break; ++ case IXGBE_VF_SET_MULTICAST: ++ entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) ++ >> IXGBE_VT_MSGINFO_SHIFT; ++ hash_list = (u16 *)&msgbuf[1]; ++ retval = ixgbe_set_vf_multicasts(adapter, entries, ++ hash_list, vf); ++ break; ++ case IXGBE_VF_SET_LPE: ++ DPRINTK(PROBE, INFO, "Set LPE msg received from vf %d\n", vf); ++ ixgbe_set_vf_lpe(adapter, msgbuf); ++ break; ++ case IXGBE_VF_SET_VLAN: ++ add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) ++ >> IXGBE_VT_MSGINFO_SHIFT; ++ vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK); ++ retval = ixgbe_set_vf_vlan(adapter, add, vid, vf); ++ break; ++ default: ++ DPRINTK(PROBE, ERR, "Unhandled Msg %8.8x\n", msgbuf[0]); ++ retval = IXGBE_ERR_MBX; ++ break; ++ } ++ ++ /* notify the VF of the results of what it sent us */ ++ if (retval) ++ msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK; ++ else ++ msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK; ++ ++ msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS; ++ ++ ixgbe_write_mbx(hw, msgbuf, 1, vf); ++ ++ return retval; ++} ++ ++static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ u32 msg = IXGBE_VT_MSGTYPE_NACK; ++ ++ /* if device isn't clear to send it shouldn't be reading either */ ++ if (!adapter->vfinfo[vf].clear_to_send) ++ ixgbe_write_mbx(hw, &msg, 1, vf); ++} ++ ++void ixgbe_msg_task(struct ixgbe_adapter *adapter) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ u32 vf; ++ ++ for (vf = 0; vf < adapter->num_vfs; vf++) { ++ /* process any reset requests */ ++ if (!ixgbe_check_for_rst(hw, vf)) ++ ixgbe_vf_reset_event(adapter, vf); ++ ++ /* process any messages pending */ ++ if (!ixgbe_check_for_msg(hw, vf)) ++ ixgbe_rcv_msg_from_vf(adapter, vf); ++ ++ /* process any acks */ ++ if (!ixgbe_check_for_ack(hw, vf)) ++ ixgbe_rcv_ack_from_vf(adapter, vf); ++ } ++} ++ ++void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ ++ /* disable transmit and receive for all vfs */ ++ IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0); ++ IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0); ++ ++ IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0); ++ IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0); ++} ++ ++void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ u32 ping; ++ int i; ++ ++ for 
(i = 0 ; i < adapter->num_vfs; i++) { ++ ping = IXGBE_PF_CONTROL_MSG; ++ if (adapter->vfinfo[i].clear_to_send) ++ ping |= IXGBE_VT_MSGTYPE_CTS; ++ ixgbe_write_mbx(hw, &ping, 1, i); ++ } ++} ++ ++ ++#ifdef HAVE_IPLINK_VF_CONFIG ++int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ if (!is_valid_ether_addr(mac) || (vf >= adapter->num_vfs)) ++ return -EINVAL; ++ adapter->vfinfo[vf].pf_set_mac = true; ++ dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf); ++ dev_info(&adapter->pdev->dev, "Reload the VF driver to make this" ++ " change effective."); ++ if (test_bit(__IXGBE_DOWN, &adapter->state)) { ++ dev_warn(&adapter->pdev->dev, "The VF MAC address has been set," ++ " but the PF device is not up.\n"); ++ dev_warn(&adapter->pdev->dev, "Bring the PF device up before" ++ " attempting to use the VF device.\n"); ++ } ++ return ixgbe_set_vf_mac(adapter, vf, mac); ++} ++ ++int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) ++{ ++ int err = 0; ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ ++ if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7)) ++ return -EINVAL; ++ if (vlan || qos) { ++ err = ixgbe_set_vf_vlan(adapter, true, vlan, vf); ++ if (err) ++ goto out; ++ ixgbe_set_vmvir(adapter, vlan | (qos << 13), vf); ++ ixgbe_set_vmolr(&adapter->hw, vf, false); ++ adapter->vfinfo[vf].pf_vlan = vlan; ++ adapter->vfinfo[vf].pf_qos = qos; ++ dev_info(&adapter->pdev->dev, ++ "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); ++ if (test_bit(__IXGBE_DOWN, &adapter->state)) { ++ dev_warn(&adapter->pdev->dev, ++ "The VF VLAN has been set," ++ " but the PF device is not up.\n"); ++ dev_warn(&adapter->pdev->dev, ++ "Bring the PF device up before" ++ " attempting to use the VF device.\n"); ++ } ++ } else { ++ err = ixgbe_set_vf_vlan(adapter, false, ++ adapter->vfinfo[vf].pf_vlan, vf); ++ ixgbe_set_vmvir(adapter, vlan, vf); ++ ixgbe_set_vmolr(&adapter->hw, vf, true); ++ adapter->vfinfo[vf].pf_vlan = 0; ++ adapter->vfinfo[vf].pf_qos = 0; ++ } ++out: ++ return err; ++} ++ ++int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate) ++{ ++ return -EOPNOTSUPP; ++} ++ ++int ixgbe_ndo_get_vf_config(struct net_device *netdev, ++ int vf, struct ifla_vf_info *ivi) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ if (vf >= adapter->num_vfs) ++ return -EINVAL; ++ ivi->vf = vf; ++ memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN); ++ ivi->tx_rate = 0; ++ ivi->vlan = adapter->vfinfo[vf].pf_vlan; ++ ivi->qos = adapter->vfinfo[vf].pf_qos; ++ return 0; ++} ++#endif ++ +diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_sriov.h linux-2.6.22-50/drivers/net/ixgbe/ixgbe_sriov.h +--- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_sriov.h 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_sriov.h 2010-08-25 17:56:26.000000000 -0400 +@@ -0,0 +1,56 @@ ++/******************************************************************************* ++ ++ Intel 10 Gigabit PCI Express Linux driver ++ Copyright(c) 1999 - 2010 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for ++ more details. ++ ++ You should have received a copy of the GNU General Public License along with ++ this program; if not, write to the Free Software Foundation, Inc., ++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++ ++#ifndef _IXGBE_SRIOV_H_ ++#define _IXGBE_SRIOV_H_ ++ ++int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter, ++ int entries, u16 *hash_list, u32 vf); ++void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter); ++int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf); ++void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe); ++void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf); ++void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf); ++void ixgbe_msg_task(struct ixgbe_adapter *adapter); ++int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, ++ int vf, unsigned char *mac_addr); ++int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask); ++void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter); ++void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter); ++#ifdef HAVE_IPLINK_VF_CONFIG ++int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac); ++int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan, ++ u8 qos); ++int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate); ++int ixgbe_ndo_get_vf_config(struct net_device *netdev, ++ int vf, struct ifla_vf_info *ivi); ++#endif ++void ixgbe_dump_registers(struct ixgbe_adapter *adapter); ++ ++#endif /* _IXGBE_SRIOV_H_ */ ++ +diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_sysfs.c linux-2.6.22-50/drivers/net/ixgbe/ixgbe_sysfs.c +--- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_sysfs.c 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_sysfs.c 2010-08-25 17:56:26.000000000 -0400 +@@ -0,0 +1,80 @@ ++/******************************************************************************* ++ ++ Intel 10 Gigabit PCI Express Linux driver ++ Copyright(c) 1999 - 2010 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ You should have received a copy of the GNU General Public License along with ++ this program; if not, write to the Free Software Foundation, Inc., ++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#include "ixgbe.h" ++ ++#ifdef IXGBE_FCOE ++#include ++#include ++#include ++ ++/* Ethernet payload size for FCoE to be able to carry full sized FC Frames ++ * 14 byte FCoE header + 24 byte FC header + 2112 max payload + 4 byte CRC ++ * + 4 byte FCoE trailing encapsulation = 2158 ++ * This is the Ethernet payload, replacing the default of 1500, and does ++ * not include Ethernet headers, VLAN tags, or Ethernet CRC. ++ */ ++#define IXGBE_FCOE_MTU 2158 ++ ++static ssize_t ixgbe_show_fcoe_mtu(struct device *dev, struct device_attribute *attr, char *buf) ++{ ++ return sprintf(buf, "%d\n", IXGBE_FCOE_MTU); ++} ++ ++static struct device_attribute ixgbe_attrs[] = { ++ __ATTR(fcoe-mtu, S_IRUGO, ixgbe_show_fcoe_mtu, NULL), ++}; ++ ++int ixgbe_sysfs_create(struct ixgbe_adapter *adapter) ++{ ++ struct net_device *netdev = adapter->netdev; ++ int err; ++ int i; ++ ++ for (i = 0 ; i < ARRAY_SIZE(ixgbe_attrs); i++) { ++ err = device_create_file(&netdev->dev, &ixgbe_attrs[i]); ++ if (err) ++ goto fail; ++ } ++ return 0; ++ ++fail: ++ while (i-- >= 0) ++ device_remove_file(&netdev->dev, &ixgbe_attrs[i]); ++ return err; ++} ++ ++void ixgbe_sysfs_remove(struct ixgbe_adapter *adapter) ++{ ++ struct net_device *netdev = adapter->netdev; ++ int i; ++ ++ for (i = 0 ; i < ARRAY_SIZE(ixgbe_attrs); i++) ++ device_remove_file(&netdev->dev, &ixgbe_attrs[i]); ++} ++#endif /* IXGBE_FCOE */ ++ +diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/ixgbe_type.h linux-2.6.22-50/drivers/net/ixgbe/ixgbe_type.h +--- linux-2.6.22-40/drivers/net/ixgbe/ixgbe_type.h 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.22-50/drivers/net/ixgbe/ixgbe_type.h 2010-08-25 17:56:26.000000000 -0400 +@@ -0,0 +1,2829 @@ ++/******************************************************************************* ++ ++ Intel 10 Gigabit PCI Express Linux driver ++ Copyright(c) 1999 - 2010 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ You should have received a copy of the GNU General Public License along with ++ this program; if not, write to the Free Software Foundation, Inc., ++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#ifndef _IXGBE_TYPE_H_ ++#define _IXGBE_TYPE_H_ ++ ++#include "ixgbe_osdep.h" ++ ++ ++/* Vendor ID */ ++#define IXGBE_INTEL_VENDOR_ID 0x8086 ++ ++/* Device IDs */ ++#define IXGBE_DEV_ID_82598 0x10B6 ++#define IXGBE_DEV_ID_82598_BX 0x1508 ++#define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6 ++#define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7 ++#define IXGBE_DEV_ID_82598AT 0x10C8 ++#define IXGBE_DEV_ID_82598AT2 0x150B ++#define IXGBE_DEV_ID_82598EB_SFP_LOM 0x10DB ++#define IXGBE_DEV_ID_82598EB_CX4 0x10DD ++#define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC ++#define IXGBE_DEV_ID_82598_DA_DUAL_PORT 0x10F1 ++#define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM 0x10E1 ++#define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4 ++#define IXGBE_DEV_ID_82599_KX4 0x10F7 ++#define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514 ++#define IXGBE_DEV_ID_82599_KR 0x1517 ++#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8 ++#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C ++#define IXGBE_DEV_ID_82599_CX4 0x10F9 ++#define IXGBE_DEV_ID_82599_SFP 0x10FB ++#define IXGBE_DEV_ID_82599_SFP_EM 0x1507 ++#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC ++#define IXGBE_DEV_ID_82599_T3_LOM 0x151C ++ ++/* General Registers */ ++#define IXGBE_CTRL 0x00000 ++#define IXGBE_STATUS 0x00008 ++#define IXGBE_CTRL_EXT 0x00018 ++#define IXGBE_ESDP 0x00020 ++#define IXGBE_EODSDP 0x00028 ++#define IXGBE_I2CCTL 0x00028 ++#define IXGBE_LEDCTL 0x00200 ++#define IXGBE_FRTIMER 0x00048 ++#define IXGBE_TCPTIMER 0x0004C ++#define IXGBE_CORESPARE 0x00600 ++#define IXGBE_EXVET 0x05078 ++ ++/* NVM Registers */ ++#define IXGBE_EEC 0x10010 ++#define IXGBE_EERD 0x10014 ++#define IXGBE_EEWR 0x10018 ++#define IXGBE_FLA 0x1001C ++#define IXGBE_EEMNGCTL 0x10110 ++#define IXGBE_EEMNGDATA 0x10114 ++#define IXGBE_FLMNGCTL 0x10118 ++#define IXGBE_FLMNGDATA 0x1011C ++#define IXGBE_FLMNGCNT 0x10120 ++#define IXGBE_FLOP 0x1013C ++#define IXGBE_GRC 0x10200 ++ ++/* General Receive Control */ ++#define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */ ++#define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM */ ++ ++#define IXGBE_VPDDIAG0 0x10204 ++#define IXGBE_VPDDIAG1 0x10208 ++ ++/* I2CCTL Bit Masks */ ++#define IXGBE_I2C_CLK_IN 0x00000001 ++#define IXGBE_I2C_CLK_OUT 0x00000002 ++#define IXGBE_I2C_DATA_IN 0x00000004 ++#define IXGBE_I2C_DATA_OUT 0x00000008 ++ ++/* Interrupt Registers */ ++#define IXGBE_EICR 0x00800 ++#define IXGBE_EICS 0x00808 ++#define IXGBE_EIMS 0x00880 ++#define IXGBE_EIMC 0x00888 ++#define IXGBE_EIAC 0x00810 ++#define IXGBE_EIAM 0x00890 ++#define IXGBE_EICS_EX(_i) (0x00A90 + (_i) * 4) ++#define IXGBE_EIMS_EX(_i) (0x00AA0 + (_i) * 4) ++#define IXGBE_EIMC_EX(_i) (0x00AB0 + (_i) * 4) ++#define IXGBE_EIAM_EX(_i) (0x00AD0 + (_i) * 4) ++/* 82599 EITR is only 12 bits, with the lower 3 always zero */ ++/* ++ * 82598 EITR is 16 bits but set the limits based on the max ++ * supported by all ixgbe hardware ++ */ ++#define IXGBE_MAX_INT_RATE 488281 ++#define IXGBE_MIN_INT_RATE 956 ++#define IXGBE_MAX_EITR 0x00000FF8 ++#define IXGBE_MIN_EITR 8 ++#define IXGBE_EITR(_i) (((_i) <= 23) ? 
(0x00820 + ((_i) * 4)) : \ ++ (0x012300 + (((_i) - 24) * 4))) ++#define IXGBE_EITR_ITR_INT_MASK 0x00000FF8 ++#define IXGBE_EITR_LLI_MOD 0x00008000 ++#define IXGBE_EITR_CNT_WDIS 0x80000000 ++#define IXGBE_IVAR(_i) (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */ ++#define IXGBE_IVAR_MISC 0x00A00 /* misc MSI-X interrupt causes */ ++#define IXGBE_EITRSEL 0x00894 ++#define IXGBE_MSIXT 0x00000 /* MSI-X Table. 0x0000 - 0x01C */ ++#define IXGBE_MSIXPBA 0x02000 /* MSI-X Pending bit array */ ++#define IXGBE_PBACL(_i) (((_i) == 0) ? (0x11068) : (0x110C0 + ((_i) * 4))) ++#define IXGBE_GPIE 0x00898 ++ ++/* Flow Control Registers */ ++#define IXGBE_FCADBUL 0x03210 ++#define IXGBE_FCADBUH 0x03214 ++#define IXGBE_FCAMACL 0x04328 ++#define IXGBE_FCAMACH 0x0432C ++#define IXGBE_FCRTH_82599(_i) (0x03260 + ((_i) * 4)) /* 8 of these (0-7) */ ++#define IXGBE_FCRTL_82599(_i) (0x03220 + ((_i) * 4)) /* 8 of these (0-7) */ ++#define IXGBE_PFCTOP 0x03008 ++#define IXGBE_FCTTV(_i) (0x03200 + ((_i) * 4)) /* 4 of these (0-3) */ ++#define IXGBE_FCRTL(_i) (0x03220 + ((_i) * 8)) /* 8 of these (0-7) */ ++#define IXGBE_FCRTH(_i) (0x03260 + ((_i) * 8)) /* 8 of these (0-7) */ ++#define IXGBE_FCRTV 0x032A0 ++#define IXGBE_FCCFG 0x03D00 ++#define IXGBE_TFCS 0x0CE00 ++ ++/* Receive DMA Registers */ ++#define IXGBE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : \ ++ (0x0D000 + ((_i - 64) * 0x40))) ++#define IXGBE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : \ ++ (0x0D004 + ((_i - 64) * 0x40))) ++#define IXGBE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : \ ++ (0x0D008 + ((_i - 64) * 0x40))) ++#define IXGBE_RDH(_i) (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : \ ++ (0x0D010 + ((_i - 64) * 0x40))) ++#define IXGBE_RDT(_i) (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : \ ++ (0x0D018 + ((_i - 64) * 0x40))) ++#define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \ ++ (0x0D028 + ((_i - 64) * 0x40))) ++#define IXGBE_RSCCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \ ++ (0x0D02C + ((_i - 64) * 0x40))) ++#define IXGBE_RSCDBU 0x03028 ++#define IXGBE_RDDCC 0x02F20 ++#define IXGBE_RXMEMWRAP 0x03190 ++#define IXGBE_STARCTRL 0x03024 ++/* ++ * Split and Replication Receive Control Registers ++ * 00-15 : 0x02100 + n*4 ++ * 16-64 : 0x01014 + n*0x40 ++ * 64-127: 0x0D014 + (n-64)*0x40 ++ */ ++#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \ ++ (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \ ++ (0x0D014 + ((_i - 64) * 0x40)))) ++/* ++ * Rx DCA Control Register: ++ * 00-15 : 0x02200 + n*4 ++ * 16-64 : 0x0100C + n*0x40 ++ * 64-127: 0x0D00C + (n-64)*0x40 ++ */ ++#define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \ ++ (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \ ++ (0x0D00C + ((_i - 64) * 0x40)))) ++#define IXGBE_RDRXCTL 0x02F00 ++#define IXGBE_RDRXCTL_RSC_PUSH 0x80 ++#define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4)) ++ /* 8 of these 0x03C00 - 0x03C1C */ ++#define IXGBE_RXCTRL 0x03000 ++#define IXGBE_DROPEN 0x03D04 ++#define IXGBE_RXPBSIZE_SHIFT 10 ++ ++/* Receive Registers */ ++#define IXGBE_RXCSUM 0x05000 ++#define IXGBE_RFCTL 0x05008 ++#define IXGBE_DRECCCTL 0x02F08 ++#define IXGBE_DRECCCTL_DISABLE 0 ++ ++/* Multicast Table Array - 128 entries */ ++#define IXGBE_MTA(_i) (0x05200 + ((_i) * 4)) ++#define IXGBE_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ ++ (0x0A200 + ((_i) * 8))) ++#define IXGBE_RAH(_i) (((_i) <= 15) ? 
(0x05404 + ((_i) * 8)) : \ ++ (0x0A204 + ((_i) * 8))) ++#define IXGBE_MPSAR_LO(_i) (0x0A600 + ((_i) * 8)) ++#define IXGBE_MPSAR_HI(_i) (0x0A604 + ((_i) * 8)) ++/* Packet split receive type */ ++#define IXGBE_PSRTYPE(_i) (((_i) <= 15) ? (0x05480 + ((_i) * 4)) : \ ++ (0x0EA00 + ((_i) * 4))) ++/* array of 4096 1-bit vlan filters */ ++#define IXGBE_VFTA(_i) (0x0A000 + ((_i) * 4)) ++/*array of 4096 4-bit vlan vmdq indices */ ++#define IXGBE_VFTAVIND(_j, _i) (0x0A200 + ((_j) * 0x200) + ((_i) * 4)) ++#define IXGBE_FCTRL 0x05080 ++#define IXGBE_VLNCTRL 0x05088 ++#define IXGBE_MCSTCTRL 0x05090 ++#define IXGBE_MRQC 0x05818 ++#define IXGBE_SAQF(_i) (0x0E000 + ((_i) * 4)) /* Source Address Queue Filter */ ++#define IXGBE_DAQF(_i) (0x0E200 + ((_i) * 4)) /* Dest. Address Queue Filter */ ++#define IXGBE_SDPQF(_i) (0x0E400 + ((_i) * 4)) /* Src Dest. Addr Queue Filter */ ++#define IXGBE_FTQF(_i) (0x0E600 + ((_i) * 4)) /* Five Tuple Queue Filter */ ++#define IXGBE_ETQF(_i) (0x05128 + ((_i) * 4)) /* EType Queue Filter */ ++#define IXGBE_ETQS(_i) (0x0EC00 + ((_i) * 4)) /* EType Queue Select */ ++#define IXGBE_SYNQF 0x0EC30 /* SYN Packet Queue Filter */ ++#define IXGBE_RQTC 0x0EC70 ++#define IXGBE_MTQC 0x08120 ++#define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4)) /* 64 of these (0-63) */ ++#define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4)) /* 128 of these (0-127) */ ++#define IXGBE_VMVIR(_i) (0x08000 + ((_i) * 4)) /* 64 of these (0-63) */ ++#define IXGBE_VT_CTL 0x051B0 ++#define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4)) ++#define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4)) ++#define IXGBE_QDE 0x2F04 ++#define IXGBE_VMOLR(_i) (0x0F000 + ((_i) * 4)) /* 64 total */ ++#define IXGBE_UTA(_i) (0x0F400 + ((_i) * 4)) ++#define IXGBE_VMRCTL(_i) (0x0F600 + ((_i) * 4)) ++#define IXGBE_VMRVLAN(_i) (0x0F610 + ((_i) * 4)) ++#define IXGBE_VMRVM(_i) (0x0F630 + ((_i) * 4)) ++#define IXGBE_L34T_IMIR(_i) (0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/ ++#define IXGBE_LLITHRESH 0x0EC90 ++#define IXGBE_IMIR(_i) (0x05A80 + ((_i) * 4)) /* 8 of these (0-7) */ ++#define IXGBE_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* 8 of these (0-7) */ ++#define IXGBE_IMIRVP 0x05AC0 ++#define IXGBE_VMD_CTL 0x0581C ++#define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */ ++#define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */ ++ ++/* Flow Director registers */ ++#define IXGBE_FDIRCTRL 0x0EE00 ++#define IXGBE_FDIRHKEY 0x0EE68 ++#define IXGBE_FDIRSKEY 0x0EE6C ++#define IXGBE_FDIRDIP4M 0x0EE3C ++#define IXGBE_FDIRSIP4M 0x0EE40 ++#define IXGBE_FDIRTCPM 0x0EE44 ++#define IXGBE_FDIRUDPM 0x0EE48 ++#define IXGBE_FDIRIP6M 0x0EE74 ++#define IXGBE_FDIRM 0x0EE70 ++ ++/* Flow Director Stats registers */ ++#define IXGBE_FDIRFREE 0x0EE38 ++#define IXGBE_FDIRLEN 0x0EE4C ++#define IXGBE_FDIRUSTAT 0x0EE50 ++#define IXGBE_FDIRFSTAT 0x0EE54 ++#define IXGBE_FDIRMATCH 0x0EE58 ++#define IXGBE_FDIRMISS 0x0EE5C ++ ++/* Flow Director Programming registers */ ++#define IXGBE_FDIRSIPv6(_i) (0x0EE0C + ((_i) * 4)) /* 3 of these (0-2) */ ++#define IXGBE_FDIRIPSA 0x0EE18 ++#define IXGBE_FDIRIPDA 0x0EE1C ++#define IXGBE_FDIRPORT 0x0EE20 ++#define IXGBE_FDIRVLAN 0x0EE24 ++#define IXGBE_FDIRHASH 0x0EE28 ++#define IXGBE_FDIRCMD 0x0EE2C ++ ++/* Transmit DMA registers */ ++#define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40)) /* 32 of these (0-31)*/ ++#define IXGBE_TDBAH(_i) (0x06004 + ((_i) * 0x40)) ++#define IXGBE_TDLEN(_i) (0x06008 + ((_i) * 0x40)) ++#define IXGBE_TDH(_i) (0x06010 + ((_i) * 0x40)) ++#define IXGBE_TDT(_i) (0x06018 + ((_i) * 0x40)) ++#define IXGBE_TXDCTL(_i) (0x06028 
+ ((_i) * 0x40)) ++#define IXGBE_TDWBAL(_i) (0x06038 + ((_i) * 0x40)) ++#define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40)) ++#define IXGBE_DTXCTL 0x07E00 ++ ++#define IXGBE_DMATXCTL 0x04A80 ++#define IXGBE_PFDTXGSWC 0x08220 ++#define IXGBE_DTXMXSZRQ 0x08100 ++#define IXGBE_DTXTCPFLGL 0x04A88 ++#define IXGBE_DTXTCPFLGH 0x04A8C ++#define IXGBE_LBDRPEN 0x0CA00 ++#define IXGBE_TXPBTHRESH(_i) (0x04950 + ((_i) * 4)) /* 8 of these 0 - 7 */ ++ ++#define IXGBE_DMATXCTL_TE 0x1 /* Transmit Enable */ ++#define IXGBE_DMATXCTL_NS 0x2 /* No Snoop LSO hdr buffer */ ++#define IXGBE_DMATXCTL_GDV 0x8 /* Global Double VLAN */ ++#define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */ ++ ++#define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */ ++#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */ ++/* Tx DCA Control register : 128 of these (0-127) */ ++#define IXGBE_DCA_TXCTRL_82599(_i) (0x0600C + ((_i) * 0x40)) ++#define IXGBE_TIPG 0x0CB00 ++#define IXGBE_TXPBSIZE(_i) (0x0CC00 + ((_i) * 4)) /* 8 of these */ ++#define IXGBE_MNGTXMAP 0x0CD10 ++#define IXGBE_TIPG_FIBER_DEFAULT 3 ++#define IXGBE_TXPBSIZE_SHIFT 10 ++ ++/* Wake up registers */ ++#define IXGBE_WUC 0x05800 ++#define IXGBE_WUFC 0x05808 ++#define IXGBE_WUS 0x05810 ++#define IXGBE_IPAV 0x05838 ++#define IXGBE_IP4AT 0x05840 /* IPv4 table 0x5840-0x5858 */ ++#define IXGBE_IP6AT 0x05880 /* IPv6 table 0x5880-0x588F */ ++ ++#define IXGBE_WUPL 0x05900 ++#define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */ ++#define IXGBE_FHFT(_n) (0x09000 + (_n * 0x100)) /* Flex host filter table */ ++#define IXGBE_FHFT_EXT(_n) (0x09800 + (_n * 0x100)) /* Ext Flexible Host ++ * Filter Table */ ++ ++#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX 4 ++#define IXGBE_EXT_FLEXIBLE_FILTER_COUNT_MAX 2 ++ ++/* Each Flexible Filter is at most 128 (0x80) bytes in length */ ++#define IXGBE_FLEXIBLE_FILTER_SIZE_MAX 128 ++#define IXGBE_FHFT_LENGTH_OFFSET 0xFC /* Length byte in FHFT */ ++#define IXGBE_FHFT_LENGTH_MASK 0x0FF /* Length in lower byte */ ++ ++/* Definitions for power management and wakeup registers */ ++/* Wake Up Control */ ++#define IXGBE_WUC_PME_EN 0x00000002 /* PME Enable */ ++#define IXGBE_WUC_PME_STATUS 0x00000004 /* PME Status */ ++#define IXGBE_WUC_WKEN 0x00000010 /* Enable PE_WAKE_N pin assertion */ ++ ++/* Wake Up Filter Control */ ++#define IXGBE_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ ++#define IXGBE_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ ++#define IXGBE_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ ++#define IXGBE_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ ++#define IXGBE_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ ++#define IXGBE_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ ++#define IXGBE_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */ ++#define IXGBE_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */ ++#define IXGBE_WUFC_MNG 0x00000100 /* Directed Mgmt Packet Wakeup Enable */ ++ ++#define IXGBE_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */ ++#define IXGBE_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */ ++#define IXGBE_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */ ++#define IXGBE_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */ ++#define IXGBE_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */ ++#define IXGBE_WUFC_FLX4 0x00100000 /* Flexible Filter 4 Enable */ ++#define IXGBE_WUFC_FLX5 0x00200000 /* Flexible Filter 5 Enable */ ++#define IXGBE_WUFC_FLX_FILTERS 0x000F0000 /* Mask for 4 flex filters 
*/ ++#define IXGBE_WUFC_EXT_FLX_FILTERS 0x00300000 /* Mask for Ext. flex filters */ ++#define IXGBE_WUFC_ALL_FILTERS 0x003F00FF /* Mask for all wakeup filters */ ++#define IXGBE_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */ ++ ++/* Wake Up Status */ ++#define IXGBE_WUS_LNKC IXGBE_WUFC_LNKC ++#define IXGBE_WUS_MAG IXGBE_WUFC_MAG ++#define IXGBE_WUS_EX IXGBE_WUFC_EX ++#define IXGBE_WUS_MC IXGBE_WUFC_MC ++#define IXGBE_WUS_BC IXGBE_WUFC_BC ++#define IXGBE_WUS_ARP IXGBE_WUFC_ARP ++#define IXGBE_WUS_IPV4 IXGBE_WUFC_IPV4 ++#define IXGBE_WUS_IPV6 IXGBE_WUFC_IPV6 ++#define IXGBE_WUS_MNG IXGBE_WUFC_MNG ++#define IXGBE_WUS_FLX0 IXGBE_WUFC_FLX0 ++#define IXGBE_WUS_FLX1 IXGBE_WUFC_FLX1 ++#define IXGBE_WUS_FLX2 IXGBE_WUFC_FLX2 ++#define IXGBE_WUS_FLX3 IXGBE_WUFC_FLX3 ++#define IXGBE_WUS_FLX4 IXGBE_WUFC_FLX4 ++#define IXGBE_WUS_FLX5 IXGBE_WUFC_FLX5 ++#define IXGBE_WUS_FLX_FILTERS IXGBE_WUFC_FLX_FILTERS ++ ++/* Wake Up Packet Length */ ++#define IXGBE_WUPL_LENGTH_MASK 0xFFFF ++ ++/* DCB registers */ ++#define IXGBE_RMCS 0x03D00 ++#define IXGBE_DPMCS 0x07F40 ++#define IXGBE_PDPMCS 0x0CD00 ++#define IXGBE_RUPPBMR 0x050A0 ++#define IXGBE_RT2CR(_i) (0x03C20 + ((_i) * 4)) /* 8 of these (0-7) */ ++#define IXGBE_RT2SR(_i) (0x03C40 + ((_i) * 4)) /* 8 of these (0-7) */ ++#define IXGBE_TDTQ2TCCR(_i) (0x0602C + ((_i) * 0x40)) /* 8 of these (0-7) */ ++#define IXGBE_TDTQ2TCSR(_i) (0x0622C + ((_i) * 0x40)) /* 8 of these (0-7) */ ++#define IXGBE_TDPT2TCCR(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */ ++#define IXGBE_TDPT2TCSR(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */ ++ ++ ++/* Security Control Registers */ ++#define IXGBE_SECTXCTRL 0x08800 ++#define IXGBE_SECTXSTAT 0x08804 ++#define IXGBE_SECTXBUFFAF 0x08808 ++#define IXGBE_SECTXMINIFG 0x08810 ++#define IXGBE_SECTXSTAT 0x08804 ++#define IXGBE_SECRXCTRL 0x08D00 ++#define IXGBE_SECRXSTAT 0x08D04 ++ ++/* Security Bit Fields and Masks */ ++#define IXGBE_SECTXCTRL_SECTX_DIS 0x00000001 ++#define IXGBE_SECTXCTRL_TX_DIS 0x00000002 ++#define IXGBE_SECTXCTRL_STORE_FORWARD 0x00000004 ++ ++#define IXGBE_SECTXSTAT_SECTX_RDY 0x00000001 ++#define IXGBE_SECTXSTAT_ECC_TXERR 0x00000002 ++ ++#define IXGBE_SECRXCTRL_SECRX_DIS 0x00000001 ++#define IXGBE_SECRXCTRL_RX_DIS 0x00000002 ++ ++#define IXGBE_SECRXSTAT_SECRX_RDY 0x00000001 ++#define IXGBE_SECRXSTAT_ECC_RXERR 0x00000002 ++ ++/* LinkSec (MacSec) Registers */ ++#define IXGBE_LSECTXCAP 0x08A00 ++#define IXGBE_LSECRXCAP 0x08F00 ++#define IXGBE_LSECTXCTRL 0x08A04 ++#define IXGBE_LSECTXSCL 0x08A08 /* SCI Low */ ++#define IXGBE_LSECTXSCH 0x08A0C /* SCI High */ ++#define IXGBE_LSECTXSA 0x08A10 ++#define IXGBE_LSECTXPN0 0x08A14 ++#define IXGBE_LSECTXPN1 0x08A18 ++#define IXGBE_LSECTXKEY0(_n) (0x08A1C + (4 * (_n))) /* 4 of these (0-3) */ ++#define IXGBE_LSECTXKEY1(_n) (0x08A2C + (4 * (_n))) /* 4 of these (0-3) */ ++#define IXGBE_LSECRXCTRL 0x08F04 ++#define IXGBE_LSECRXSCL 0x08F08 ++#define IXGBE_LSECRXSCH 0x08F0C ++#define IXGBE_LSECRXSA(_i) (0x08F10 + (4 * (_i))) /* 2 of these (0-1) */ ++#define IXGBE_LSECRXPN(_i) (0x08F18 + (4 * (_i))) /* 2 of these (0-1) */ ++#define IXGBE_LSECRXKEY(_n, _m) (0x08F20 + ((0x10 * (_n)) + (4 * (_m)))) ++#define IXGBE_LSECTXUT 0x08A3C /* OutPktsUntagged */ ++#define IXGBE_LSECTXPKTE 0x08A40 /* OutPktsEncrypted */ ++#define IXGBE_LSECTXPKTP 0x08A44 /* OutPktsProtected */ ++#define IXGBE_LSECTXOCTE 0x08A48 /* OutOctetsEncrypted */ ++#define IXGBE_LSECTXOCTP 0x08A4C /* OutOctetsProtected */ ++#define IXGBE_LSECRXUT 0x08F40 /* InPktsUntagged/InPktsNoTag */ ++#define IXGBE_LSECRXOCTD 
0x08F44 /* InOctetsDecrypted */ ++#define IXGBE_LSECRXOCTV 0x08F48 /* InOctetsValidated */ ++#define IXGBE_LSECRXBAD 0x08F4C /* InPktsBadTag */ ++#define IXGBE_LSECRXNOSCI 0x08F50 /* InPktsNoSci */ ++#define IXGBE_LSECRXUNSCI 0x08F54 /* InPktsUnknownSci */ ++#define IXGBE_LSECRXUNCH 0x08F58 /* InPktsUnchecked */ ++#define IXGBE_LSECRXDELAY 0x08F5C /* InPktsDelayed */ ++#define IXGBE_LSECRXLATE 0x08F60 /* InPktsLate */ ++#define IXGBE_LSECRXOK(_n) (0x08F64 + (0x04 * (_n))) /* InPktsOk */ ++#define IXGBE_LSECRXINV(_n) (0x08F6C + (0x04 * (_n))) /* InPktsInvalid */ ++#define IXGBE_LSECRXNV(_n) (0x08F74 + (0x04 * (_n))) /* InPktsNotValid */ ++#define IXGBE_LSECRXUNSA 0x08F7C /* InPktsUnusedSa */ ++#define IXGBE_LSECRXNUSA 0x08F80 /* InPktsNotUsingSa */ ++ ++/* LinkSec (MacSec) Bit Fields and Masks */ ++#define IXGBE_LSECTXCAP_SUM_MASK 0x00FF0000 ++#define IXGBE_LSECTXCAP_SUM_SHIFT 16 ++#define IXGBE_LSECRXCAP_SUM_MASK 0x00FF0000 ++#define IXGBE_LSECRXCAP_SUM_SHIFT 16 ++ ++#define IXGBE_LSECTXCTRL_EN_MASK 0x00000003 ++#define IXGBE_LSECTXCTRL_DISABLE 0x0 ++#define IXGBE_LSECTXCTRL_AUTH 0x1 ++#define IXGBE_LSECTXCTRL_AUTH_ENCRYPT 0x2 ++#define IXGBE_LSECTXCTRL_AISCI 0x00000020 ++#define IXGBE_LSECTXCTRL_PNTHRSH_MASK 0xFFFFFF00 ++#define IXGBE_LSECTXCTRL_RSV_MASK 0x000000D8 ++ ++#define IXGBE_LSECRXCTRL_EN_MASK 0x0000000C ++#define IXGBE_LSECRXCTRL_EN_SHIFT 2 ++#define IXGBE_LSECRXCTRL_DISABLE 0x0 ++#define IXGBE_LSECRXCTRL_CHECK 0x1 ++#define IXGBE_LSECRXCTRL_STRICT 0x2 ++#define IXGBE_LSECRXCTRL_DROP 0x3 ++#define IXGBE_LSECRXCTRL_PLSH 0x00000040 ++#define IXGBE_LSECRXCTRL_RP 0x00000080 ++#define IXGBE_LSECRXCTRL_RSV_MASK 0xFFFFFF33 ++ ++/* IpSec Registers */ ++#define IXGBE_IPSTXIDX 0x08900 ++#define IXGBE_IPSTXSALT 0x08904 ++#define IXGBE_IPSTXKEY(_i) (0x08908 + (4 * (_i))) /* 4 of these (0-3) */ ++#define IXGBE_IPSRXIDX 0x08E00 ++#define IXGBE_IPSRXIPADDR(_i) (0x08E04 + (4 * (_i))) /* 4 of these (0-3) */ ++#define IXGBE_IPSRXSPI 0x08E14 ++#define IXGBE_IPSRXIPIDX 0x08E18 ++#define IXGBE_IPSRXKEY(_i) (0x08E1C + (4 * (_i))) /* 4 of these (0-3) */ ++#define IXGBE_IPSRXSALT 0x08E2C ++#define IXGBE_IPSRXMOD 0x08E30 ++ ++#define IXGBE_SECTXCTRL_STORE_FORWARD_ENABLE 0x4 ++ ++/* DCB registers */ ++#define IXGBE_RTRPCS 0x02430 ++#define IXGBE_RTTDCS 0x04900 ++#define IXGBE_RTTDCS_ARBDIS 0x00000040 /* DCB arbiter disable */ ++#define IXGBE_RTTPCS 0x0CD00 ++#define IXGBE_RTRUP2TC 0x03020 ++#define IXGBE_RTTUP2TC 0x0C800 ++#define IXGBE_RTRPT4C(_i) (0x02140 + ((_i) * 4)) /* 8 of these (0-7) */ ++#define IXGBE_RTRPT4S(_i) (0x02160 + ((_i) * 4)) /* 8 of these (0-7) */ ++#define IXGBE_RTTDT2C(_i) (0x04910 + ((_i) * 4)) /* 8 of these (0-7) */ ++#define IXGBE_RTTDT2S(_i) (0x04930 + ((_i) * 4)) /* 8 of these (0-7) */ ++#define IXGBE_RTTPT2C(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */ ++#define IXGBE_RTTPT2S(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */ ++#define IXGBE_RTTDQSEL 0x04904 ++#define IXGBE_RTTDT1C 0x04908 ++#define IXGBE_RTTDT1S 0x0490C ++#define IXGBE_RTTDTECC 0x04990 ++#define IXGBE_RTTDTECC_NO_BCN 0x00000100 ++ ++#define IXGBE_RTTBCNRC 0x04984 ++ ++ ++/* FCoE DMA Context Registers */ ++#define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */ ++#define IXGBE_FCPTRH 0x02414 /* FC USer Desc. 
PTR High */ ++#define IXGBE_FCBUFF 0x02418 /* FC Buffer Control */ ++#define IXGBE_FCDMARW 0x02420 /* FC Receive DMA RW */ ++#define IXGBE_FCINVST0 0x03FC0 /* FC Invalid DMA Context Status Reg 0 */ ++#define IXGBE_FCINVST(_i) (IXGBE_FCINVST0 + ((_i) * 4)) ++#define IXGBE_FCBUFF_VALID (1 << 0) /* DMA Context Valid */ ++#define IXGBE_FCBUFF_BUFFSIZE (3 << 3) /* User Buffer Size */ ++#define IXGBE_FCBUFF_WRCONTX (1 << 7) /* 0: Initiator, 1: Target */ ++#define IXGBE_FCBUFF_BUFFCNT 0x0000ff00 /* Number of User Buffers */ ++#define IXGBE_FCBUFF_OFFSET 0xffff0000 /* User Buffer Offset */ ++#define IXGBE_FCBUFF_BUFFSIZE_SHIFT 3 ++#define IXGBE_FCBUFF_BUFFCNT_SHIFT 8 ++#define IXGBE_FCBUFF_OFFSET_SHIFT 16 ++#define IXGBE_FCDMARW_WE (1 << 14) /* Write enable */ ++#define IXGBE_FCDMARW_RE (1 << 15) /* Read enable */ ++#define IXGBE_FCDMARW_FCOESEL 0x000001ff /* FC X_ID: 11 bits */ ++#define IXGBE_FCDMARW_LASTSIZE 0xffff0000 /* Last User Buffer Size */ ++#define IXGBE_FCDMARW_LASTSIZE_SHIFT 16 ++/* FCoE SOF/EOF */ ++#define IXGBE_TEOFF 0x04A94 /* Tx FC EOF */ ++#define IXGBE_TSOFF 0x04A98 /* Tx FC SOF */ ++#define IXGBE_REOFF 0x05158 /* Rx FC EOF */ ++#define IXGBE_RSOFF 0x051F8 /* Rx FC SOF */ ++/* FCoE Filter Context Registers */ ++#define IXGBE_FCFLT 0x05108 /* FC FLT Context */ ++#define IXGBE_FCFLTRW 0x05110 /* FC Filter RW Control */ ++#define IXGBE_FCPARAM 0x051d8 /* FC Offset Parameter */ ++#define IXGBE_FCFLT_VALID (1 << 0) /* Filter Context Valid */ ++#define IXGBE_FCFLT_FIRST (1 << 1) /* Filter First */ ++#define IXGBE_FCFLT_SEQID 0x00ff0000 /* Sequence ID */ ++#define IXGBE_FCFLT_SEQCNT 0xff000000 /* Sequence Count */ ++#define IXGBE_FCFLTRW_RVALDT (1 << 13) /* Fast Re-Validation */ ++#define IXGBE_FCFLTRW_WE (1 << 14) /* Write Enable */ ++#define IXGBE_FCFLTRW_RE (1 << 15) /* Read Enable */ ++/* FCoE Receive Control */ ++#define IXGBE_FCRXCTRL 0x05100 /* FC Receive Control */ ++#define IXGBE_FCRXCTRL_FCOELLI (1 << 0) /* Low latency interrupt */ ++#define IXGBE_FCRXCTRL_SAVBAD (1 << 1) /* Save Bad Frames */ ++#define IXGBE_FCRXCTRL_FRSTRDH (1 << 2) /* EN 1st Read Header */ ++#define IXGBE_FCRXCTRL_LASTSEQH (1 << 3) /* EN Last Header in Seq */ ++#define IXGBE_FCRXCTRL_ALLH (1 << 4) /* EN All Headers */ ++#define IXGBE_FCRXCTRL_FRSTSEQH (1 << 5) /* EN 1st Seq. 
Header */ ++#define IXGBE_FCRXCTRL_ICRC (1 << 6) /* Ignore Bad FC CRC */ ++#define IXGBE_FCRXCTRL_FCCRCBO (1 << 7) /* FC CRC Byte Ordering */ ++#define IXGBE_FCRXCTRL_FCOEVER 0x00000f00 /* FCoE Version: 4 bits */ ++#define IXGBE_FCRXCTRL_FCOEVER_SHIFT 8 ++/* FCoE Redirection */ ++#define IXGBE_FCRECTL 0x0ED00 /* FC Redirection Control */ ++#define IXGBE_FCRETA0 0x0ED10 /* FC Redirection Table 0 */ ++#define IXGBE_FCRETA(_i) (IXGBE_FCRETA0 + ((_i) * 4)) /* FCoE Redir */ ++#define IXGBE_FCRECTL_ENA 0x1 /* FCoE Redir Table Enable */ ++#define IXGBE_FCRETA_SIZE 8 /* Max entries in FCRETA */ ++#define IXGBE_FCRETA_ENTRY_MASK 0x0000007f /* 7 bits for the queue index */ ++ ++/* Stats registers */ ++#define IXGBE_CRCERRS 0x04000 ++#define IXGBE_ILLERRC 0x04004 ++#define IXGBE_ERRBC 0x04008 ++#define IXGBE_MSPDC 0x04010 ++#define IXGBE_MPC(_i) (0x03FA0 + ((_i) * 4)) /* 8 of these 3FA0-3FBC*/ ++#define IXGBE_MLFC 0x04034 ++#define IXGBE_MRFC 0x04038 ++#define IXGBE_RLEC 0x04040 ++#define IXGBE_LXONTXC 0x03F60 ++#define IXGBE_LXONRXC 0x0CF60 ++#define IXGBE_LXOFFTXC 0x03F68 ++#define IXGBE_LXOFFRXC 0x0CF68 ++#define IXGBE_LXONRXCNT 0x041A4 ++#define IXGBE_LXOFFRXCNT 0x041A8 ++#define IXGBE_PXONRXCNT(_i) (0x04140 + ((_i) * 4)) /* 8 of these */ ++#define IXGBE_PXOFFRXCNT(_i) (0x04160 + ((_i) * 4)) /* 8 of these */ ++#define IXGBE_PXON2OFFCNT(_i) (0x03240 + ((_i) * 4)) /* 8 of these */ ++#define IXGBE_PXONTXC(_i) (0x03F00 + ((_i) * 4)) /* 8 of these 3F00-3F1C*/ ++#define IXGBE_PXONRXC(_i) (0x0CF00 + ((_i) * 4)) /* 8 of these CF00-CF1C*/ ++#define IXGBE_PXOFFTXC(_i) (0x03F20 + ((_i) * 4)) /* 8 of these 3F20-3F3C*/ ++#define IXGBE_PXOFFRXC(_i) (0x0CF20 + ((_i) * 4)) /* 8 of these CF20-CF3C*/ ++#define IXGBE_PRC64 0x0405C ++#define IXGBE_PRC127 0x04060 ++#define IXGBE_PRC255 0x04064 ++#define IXGBE_PRC511 0x04068 ++#define IXGBE_PRC1023 0x0406C ++#define IXGBE_PRC1522 0x04070 ++#define IXGBE_GPRC 0x04074 ++#define IXGBE_BPRC 0x04078 ++#define IXGBE_MPRC 0x0407C ++#define IXGBE_GPTC 0x04080 ++#define IXGBE_GORCL 0x04088 ++#define IXGBE_GORCH 0x0408C ++#define IXGBE_GOTCL 0x04090 ++#define IXGBE_GOTCH 0x04094 ++#define IXGBE_RNBC(_i) (0x03FC0 + ((_i) * 4)) /* 8 of these 3FC0-3FDC*/ ++#define IXGBE_RUC 0x040A4 ++#define IXGBE_RFC 0x040A8 ++#define IXGBE_ROC 0x040AC ++#define IXGBE_RJC 0x040B0 ++#define IXGBE_MNGPRC 0x040B4 ++#define IXGBE_MNGPDC 0x040B8 ++#define IXGBE_MNGPTC 0x0CF90 ++#define IXGBE_TORL 0x040C0 ++#define IXGBE_TORH 0x040C4 ++#define IXGBE_TPR 0x040D0 ++#define IXGBE_TPT 0x040D4 ++#define IXGBE_PTC64 0x040D8 ++#define IXGBE_PTC127 0x040DC ++#define IXGBE_PTC255 0x040E0 ++#define IXGBE_PTC511 0x040E4 ++#define IXGBE_PTC1023 0x040E8 ++#define IXGBE_PTC1522 0x040EC ++#define IXGBE_MPTC 0x040F0 ++#define IXGBE_BPTC 0x040F4 ++#define IXGBE_XEC 0x04120 ++#define IXGBE_SSVPC 0x08780 ++ ++#define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4)) ++#define IXGBE_TQSMR(_i) (((_i) <= 7) ? 
(0x07300 + ((_i) * 4)) : \ ++ (0x08600 + ((_i) * 4))) ++#define IXGBE_TQSM(_i) (0x08600 + ((_i) * 4)) ++ ++#define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */ ++#define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */ ++#define IXGBE_QBRC(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */ ++#define IXGBE_QBTC(_i) (0x06034 + ((_i) * 0x40)) /* 16 of these */ ++#define IXGBE_QBRC_L(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */ ++#define IXGBE_QBRC_H(_i) (0x01038 + ((_i) * 0x40)) /* 16 of these */ ++#define IXGBE_QPRDC(_i) (0x01430 + ((_i) * 0x40)) /* 16 of these */ ++#define IXGBE_QBTC_L(_i) (0x08700 + ((_i) * 0x8)) /* 16 of these */ ++#define IXGBE_QBTC_H(_i) (0x08704 + ((_i) * 0x8)) /* 16 of these */ ++#define IXGBE_FCCRC 0x05118 /* Count of Good Eth CRC w/ Bad FC CRC */ ++#define IXGBE_FCOERPDC 0x0241C /* FCoE Rx Packets Dropped Count */ ++#define IXGBE_FCLAST 0x02424 /* FCoE Last Error Count */ ++#define IXGBE_FCOEPRC 0x02428 /* Number of FCoE Packets Received */ ++#define IXGBE_FCOEDWRC 0x0242C /* Number of FCoE DWords Received */ ++#define IXGBE_FCOEPTC 0x08784 /* Number of FCoE Packets Transmitted */ ++#define IXGBE_FCOEDWTC 0x08788 /* Number of FCoE DWords Transmitted */ ++#define IXGBE_FCCRC_CNT_MASK 0x0000FFFF /* CRC_CNT: bit 0 - 15 */ ++#define IXGBE_FCLAST_CNT_MASK 0x0000FFFF /* Last_CNT: bit 0 - 15 */ ++ ++/* Management */ ++#define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */ ++#define IXGBE_MFUTP(_i) (0x05030 + ((_i) * 4)) /* 8 of these (0-7) */ ++#define IXGBE_MANC 0x05820 ++#define IXGBE_MFVAL 0x05824 ++#define IXGBE_MANC2H 0x05860 ++#define IXGBE_MDEF(_i) (0x05890 + ((_i) * 4)) /* 8 of these (0-7) */ ++#define IXGBE_MIPAF 0x058B0 ++#define IXGBE_MMAL(_i) (0x05910 + ((_i) * 8)) /* 4 of these (0-3) */ ++#define IXGBE_MMAH(_i) (0x05914 + ((_i) * 8)) /* 4 of these (0-3) */ ++#define IXGBE_FTFT 0x09400 /* 0x9400-0x97FC */ ++#define IXGBE_METF(_i) (0x05190 + ((_i) * 4)) /* 4 of these (0-3) */ ++#define IXGBE_MDEF_EXT(_i) (0x05160 + ((_i) * 4)) /* 8 of these (0-7) */ ++#define IXGBE_LSWFW 0x15014 ++ ++/* ARC Subsystem registers */ ++#define IXGBE_HICR 0x15F00 ++#define IXGBE_FWSTS 0x15F0C ++#define IXGBE_HSMC0R 0x15F04 ++#define IXGBE_HSMC1R 0x15F08 ++#define IXGBE_SWSR 0x15F10 ++#define IXGBE_HFDR 0x15FE8 ++#define IXGBE_FLEX_MNG 0x15800 /* 0x15800 - 0x15EFC */ ++ ++/* PCI-E registers */ ++#define IXGBE_GCR 0x11000 ++#define IXGBE_GTV 0x11004 ++#define IXGBE_FUNCTAG 0x11008 ++#define IXGBE_GLT 0x1100C ++#define IXGBE_GSCL_1 0x11010 ++#define IXGBE_GSCL_2 0x11014 ++#define IXGBE_GSCL_3 0x11018 ++#define IXGBE_GSCL_4 0x1101C ++#define IXGBE_GSCN_0 0x11020 ++#define IXGBE_GSCN_1 0x11024 ++#define IXGBE_GSCN_2 0x11028 ++#define IXGBE_GSCN_3 0x1102C ++#define IXGBE_FACTPS 0x10150 ++#define IXGBE_PCIEANACTL 0x11040 ++#define IXGBE_SWSM 0x10140 ++#define IXGBE_FWSM 0x10148 ++#define IXGBE_GSSR 0x10160 ++#define IXGBE_MREVID 0x11064 ++#define IXGBE_DCA_ID 0x11070 ++#define IXGBE_DCA_CTRL 0x11074 ++#define IXGBE_SWFW_SYNC IXGBE_GSSR ++ ++/* PCI-E registers 82599-Specific */ ++#define IXGBE_GCR_EXT 0x11050 ++#define IXGBE_GSCL_5_82599 0x11030 ++#define IXGBE_GSCL_6_82599 0x11034 ++#define IXGBE_GSCL_7_82599 0x11038 ++#define IXGBE_GSCL_8_82599 0x1103C ++#define IXGBE_PHYADR_82599 0x11040 ++#define IXGBE_PHYDAT_82599 0x11044 ++#define IXGBE_PHYCTL_82599 0x11048 ++#define IXGBE_PBACLR_82599 0x11068 ++#define IXGBE_CIAA_82599 0x11088 ++#define IXGBE_CIAD_82599 0x1108C ++#define IXGBE_INTRPT_CSR_82599 0x110B0 ++#define IXGBE_INTRPT_MASK_82599 0x110B8 
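Note on the statistics registers above: several counters are wider than 32 bits and are exposed as low/high register pairs (for example IXGBE_GORCL/IXGBE_GORCH and IXGBE_QBRC_L/IXGBE_QBRC_H). The following is a minimal standalone sketch of how such a pair combines into one 64-bit value; the rd32() helper is hypothetical and merely stands in for the driver's MMIO read accessor.

#include <stdint.h>
#include <stdio.h>

#define IXGBE_GORCL 0x04088
#define IXGBE_GORCH 0x0408C

/* Hypothetical MMIO read helper standing in for the driver's accessor;
 * it returns fixed values here purely for illustration. */
static uint32_t rd32(uint32_t reg)
{
	return (reg == IXGBE_GORCH) ? 0x1 : 0x12345678;
}

int main(void)
{
	/* Combine the two 32-bit halves into one 64-bit counter value. */
	uint64_t good_octets = rd32(IXGBE_GORCL);

	good_octets |= (uint64_t)rd32(IXGBE_GORCH) << 32;
	printf("good octets received: 0x%llx\n",
	       (unsigned long long)good_octets);
	return 0;
}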
++#define IXGBE_CDQ_MBR_82599 0x110B4 ++#define IXGBE_MISC_REG_82599 0x110F0 ++#define IXGBE_ECC_CTRL_0_82599 0x11100 ++#define IXGBE_ECC_CTRL_1_82599 0x11104 ++#define IXGBE_ECC_STATUS_82599 0x110E0 ++#define IXGBE_BAR_CTRL_82599 0x110F4 ++ ++/* PCI Express Control */ ++#define IXGBE_GCR_CMPL_TMOUT_MASK 0x0000F000 ++#define IXGBE_GCR_CMPL_TMOUT_10ms 0x00001000 ++#define IXGBE_GCR_CMPL_TMOUT_RESEND 0x00010000 ++#define IXGBE_GCR_CAP_VER2 0x00040000 ++ ++#define IXGBE_GCR_EXT_MSIX_EN 0x80000000 ++#define IXGBE_GCR_EXT_VT_MODE_16 0x00000001 ++#define IXGBE_GCR_EXT_VT_MODE_32 0x00000002 ++#define IXGBE_GCR_EXT_VT_MODE_64 0x00000003 ++#define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \ ++ IXGBE_GCR_EXT_VT_MODE_64) ++/* Time Sync Registers */ ++#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */ ++#define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */ ++#define IXGBE_RXSTMPL 0x051E8 /* Rx timestamp Low - RO */ ++#define IXGBE_RXSTMPH 0x051A4 /* Rx timestamp High - RO */ ++#define IXGBE_RXSATRL 0x051A0 /* Rx timestamp attribute low - RO */ ++#define IXGBE_RXSATRH 0x051A8 /* Rx timestamp attribute high - RO */ ++#define IXGBE_RXMTRL 0x05120 /* RX message type register low - RW */ ++#define IXGBE_TXSTMPL 0x08C04 /* Tx timestamp value Low - RO */ ++#define IXGBE_TXSTMPH 0x08C08 /* Tx timestamp value High - RO */ ++#define IXGBE_SYSTIML 0x08C0C /* System time register Low - RO */ ++#define IXGBE_SYSTIMH 0x08C10 /* System time register High - RO */ ++#define IXGBE_TIMINCA 0x08C14 /* Increment attributes register - RW */ ++#define IXGBE_RXUDP 0x08C1C /* Time Sync Rx UDP Port - RW */ ++ ++/* Diagnostic Registers */ ++#define IXGBE_RDSTATCTL 0x02C20 ++#define IXGBE_RDSTAT(_i) (0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */ ++#define IXGBE_RDHMPN 0x02F08 ++#define IXGBE_RIC_DW(_i) (0x02F10 + ((_i) * 4)) ++#define IXGBE_RDPROBE 0x02F20 ++#define IXGBE_RDMAM 0x02F30 ++#define IXGBE_RDMAD 0x02F34 ++#define IXGBE_TDSTATCTL 0x07C20 ++#define IXGBE_TDSTAT(_i) (0x07C00 + ((_i) * 4)) /* 0x07C00 - 0x07C1C */ ++#define IXGBE_TDHMPN 0x07F08 ++#define IXGBE_TDHMPN2 0x082FC ++#define IXGBE_TXDESCIC 0x082CC ++#define IXGBE_TIC_DW(_i) (0x07F10 + ((_i) * 4)) ++#define IXGBE_TIC_DW2(_i) (0x082B0 + ((_i) * 4)) ++#define IXGBE_TDPROBE 0x07F20 ++#define IXGBE_TXBUFCTRL 0x0C600 ++#define IXGBE_TXBUFDATA0 0x0C610 ++#define IXGBE_TXBUFDATA1 0x0C614 ++#define IXGBE_TXBUFDATA2 0x0C618 ++#define IXGBE_TXBUFDATA3 0x0C61C ++#define IXGBE_RXBUFCTRL 0x03600 ++#define IXGBE_RXBUFDATA0 0x03610 ++#define IXGBE_RXBUFDATA1 0x03614 ++#define IXGBE_RXBUFDATA2 0x03618 ++#define IXGBE_RXBUFDATA3 0x0361C ++#define IXGBE_PCIE_DIAG(_i) (0x11090 + ((_i) * 4)) /* 8 of these */ ++#define IXGBE_RFVAL 0x050A4 ++#define IXGBE_MDFTC1 0x042B8 ++#define IXGBE_MDFTC2 0x042C0 ++#define IXGBE_MDFTFIFO1 0x042C4 ++#define IXGBE_MDFTFIFO2 0x042C8 ++#define IXGBE_MDFTS 0x042CC ++#define IXGBE_RXDATAWRPTR(_i) (0x03700 + ((_i) * 4)) /* 8 of these 3700-370C*/ ++#define IXGBE_RXDESCWRPTR(_i) (0x03710 + ((_i) * 4)) /* 8 of these 3710-371C*/ ++#define IXGBE_RXDATARDPTR(_i) (0x03720 + ((_i) * 4)) /* 8 of these 3720-372C*/ ++#define IXGBE_RXDESCRDPTR(_i) (0x03730 + ((_i) * 4)) /* 8 of these 3730-373C*/ ++#define IXGBE_TXDATAWRPTR(_i) (0x0C700 + ((_i) * 4)) /* 8 of these C700-C70C*/ ++#define IXGBE_TXDESCWRPTR(_i) (0x0C710 + ((_i) * 4)) /* 8 of these C710-C71C*/ ++#define IXGBE_TXDATARDPTR(_i) (0x0C720 + ((_i) * 4)) /* 8 of these C720-C72C*/ ++#define IXGBE_TXDESCRDPTR(_i) (0x0C730 + ((_i) * 4)) /* 8 of these C730-C73C*/ 
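Note on the indexed register macros above: many per-queue registers are generated by macros that fold two or three address ranges into a single expression (the comment above IXGBE_SRRCTL spells out the ranges). A standalone sketch of that arithmetic, mirroring the IXGBE_SRRCTL definition from this header, shows how a queue index maps to an MMIO offset:

#include <stdio.h>

/* Mirrors the IXGBE_SRRCTL definition in this header:
 * queues 0-15 live at 0x02100 + n*4, 16-63 at 0x01014 + n*0x40,
 * and 64-127 at 0x0D014 + (n-64)*0x40. */
#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \
                          (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \
                          (0x0D014 + (((_i) - 64) * 0x40))))

int main(void)
{
	printf("SRRCTL(0)   = 0x%05X\n", IXGBE_SRRCTL(0));   /* 0x02100 */
	printf("SRRCTL(20)  = 0x%05X\n", IXGBE_SRRCTL(20));  /* 0x01514 */
	printf("SRRCTL(100) = 0x%05X\n", IXGBE_SRRCTL(100)); /* 0x0D914 */
	return 0;
}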
++#define IXGBE_PCIEECCCTL 0x1106C ++#define IXGBE_RXWRPTR(_i) (0x03100 + ((_i) * 4)) /* 8 of these 3100-310C*/ ++#define IXGBE_RXUSED(_i) (0x03120 + ((_i) * 4)) /* 8 of these 3120-312C*/ ++#define IXGBE_RXRDPTR(_i) (0x03140 + ((_i) * 4)) /* 8 of these 3140-314C*/ ++#define IXGBE_RXRDWRPTR(_i) (0x03160 + ((_i) * 4)) /* 8 of these 3160-310C*/ ++#define IXGBE_TXWRPTR(_i) (0x0C100 + ((_i) * 4)) /* 8 of these C100-C10C*/ ++#define IXGBE_TXUSED(_i) (0x0C120 + ((_i) * 4)) /* 8 of these C120-C12C*/ ++#define IXGBE_TXRDPTR(_i) (0x0C140 + ((_i) * 4)) /* 8 of these C140-C14C*/ ++#define IXGBE_TXRDWRPTR(_i) (0x0C160 + ((_i) * 4)) /* 8 of these C160-C10C*/ ++#define IXGBE_PCIEECCCTL0 0x11100 ++#define IXGBE_PCIEECCCTL1 0x11104 ++#define IXGBE_RXDBUECC 0x03F70 ++#define IXGBE_TXDBUECC 0x0CF70 ++#define IXGBE_RXDBUEST 0x03F74 ++#define IXGBE_TXDBUEST 0x0CF74 ++#define IXGBE_PBTXECC 0x0C300 ++#define IXGBE_PBRXECC 0x03300 ++#define IXGBE_GHECCR 0x110B0 ++ ++/* MAC Registers */ ++#define IXGBE_PCS1GCFIG 0x04200 ++#define IXGBE_PCS1GLCTL 0x04208 ++#define IXGBE_PCS1GLSTA 0x0420C ++#define IXGBE_PCS1GDBG0 0x04210 ++#define IXGBE_PCS1GDBG1 0x04214 ++#define IXGBE_PCS1GANA 0x04218 ++#define IXGBE_PCS1GANLP 0x0421C ++#define IXGBE_PCS1GANNP 0x04220 ++#define IXGBE_PCS1GANLPNP 0x04224 ++#define IXGBE_HLREG0 0x04240 ++#define IXGBE_HLREG1 0x04244 ++#define IXGBE_PAP 0x04248 ++#define IXGBE_MACA 0x0424C ++#define IXGBE_APAE 0x04250 ++#define IXGBE_ARD 0x04254 ++#define IXGBE_AIS 0x04258 ++#define IXGBE_MSCA 0x0425C ++#define IXGBE_MSRWD 0x04260 ++#define IXGBE_MLADD 0x04264 ++#define IXGBE_MHADD 0x04268 ++#define IXGBE_MAXFRS 0x04268 ++#define IXGBE_TREG 0x0426C ++#define IXGBE_PCSS1 0x04288 ++#define IXGBE_PCSS2 0x0428C ++#define IXGBE_XPCSS 0x04290 ++#define IXGBE_MFLCN 0x04294 ++#define IXGBE_SERDESC 0x04298 ++#define IXGBE_MACS 0x0429C ++#define IXGBE_AUTOC 0x042A0 ++#define IXGBE_LINKS 0x042A4 ++#define IXGBE_LINKS2 0x04324 ++#define IXGBE_AUTOC2 0x042A8 ++#define IXGBE_AUTOC3 0x042AC ++#define IXGBE_ANLP1 0x042B0 ++#define IXGBE_ANLP2 0x042B4 ++#define IXGBE_ATLASCTL 0x04800 ++#define IXGBE_MMNGC 0x042D0 ++#define IXGBE_ANLPNP1 0x042D4 ++#define IXGBE_ANLPNP2 0x042D8 ++#define IXGBE_KRPCSFC 0x042E0 ++#define IXGBE_KRPCSS 0x042E4 ++#define IXGBE_FECS1 0x042E8 ++#define IXGBE_FECS2 0x042EC ++#define IXGBE_SMADARCTL 0x14F10 ++#define IXGBE_MPVC 0x04318 ++#define IXGBE_SGMIIC 0x04314 ++ ++/* Copper Pond 2 link timeout */ ++#define IXGBE_VALIDATE_LINK_READY_TIMEOUT 50 ++ ++/* Omer CORECTL */ ++#define IXGBE_CORECTL 0x014F00 ++/* BARCTRL */ ++#define IXGBE_BARCTRL 0x110F4 ++#define IXGBE_BARCTRL_FLSIZE 0x0700 ++#define IXGBE_BARCTRL_FLSIZE_SHIFT 8 ++#define IXGBE_BARCTRL_CSRSIZE 0x2000 ++ ++/* RSCCTL Bit Masks */ ++#define IXGBE_RSCCTL_RSCEN 0x01 ++#define IXGBE_RSCCTL_MAXDESC_1 0x00 ++#define IXGBE_RSCCTL_MAXDESC_4 0x04 ++#define IXGBE_RSCCTL_MAXDESC_8 0x08 ++#define IXGBE_RSCCTL_MAXDESC_16 0x0C ++ ++/* RSCDBU Bit Masks */ ++#define IXGBE_RSCDBU_RSCSMALDIS_MASK 0x0000007F ++#define IXGBE_RSCDBU_RSCACKDIS 0x00000080 ++ ++/* RDRXCTL Bit Masks */ ++#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000 /* Rx Desc Min Threshold Size */ ++#define IXGBE_RDRXCTL_CRCSTRIP 0x00000002 /* CRC Strip */ ++#define IXGBE_RDRXCTL_MVMEN 0x00000020 ++#define IXGBE_RDRXCTL_DMAIDONE 0x00000008 /* DMA init cycle done */ ++#define IXGBE_RDRXCTL_AGGDIS 0x00010000 /* Aggregation disable */ ++#define IXGBE_RDRXCTL_RSCFRSTSIZE 0x003E0000 /* RSC First packet size */ ++#define IXGBE_RDRXCTL_RSCLLIDIS 0x00800000 /* Disable RSC compl on LLI */ ++#define 
IXGBE_RDRXCTL_RSCACKC 0x02000000 /* must set 1 when RSC enabled */ ++#define IXGBE_RDRXCTL_FCOE_WRFIX 0x04000000 /* must set 1 when RSC enabled */ ++ ++/* RQTC Bit Masks and Shifts */ ++#define IXGBE_RQTC_SHIFT_TC(_i) ((_i) * 4) ++#define IXGBE_RQTC_TC0_MASK (0x7 << 0) ++#define IXGBE_RQTC_TC1_MASK (0x7 << 4) ++#define IXGBE_RQTC_TC2_MASK (0x7 << 8) ++#define IXGBE_RQTC_TC3_MASK (0x7 << 12) ++#define IXGBE_RQTC_TC4_MASK (0x7 << 16) ++#define IXGBE_RQTC_TC5_MASK (0x7 << 20) ++#define IXGBE_RQTC_TC6_MASK (0x7 << 24) ++#define IXGBE_RQTC_TC7_MASK (0x7 << 28) ++ ++/* PSRTYPE.RQPL Bit masks and shift */ ++#define IXGBE_PSRTYPE_RQPL_MASK 0x7 ++#define IXGBE_PSRTYPE_RQPL_SHIFT 29 ++ ++/* CTRL Bit Masks */ ++#define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Master Disable bit */ ++#define IXGBE_CTRL_LNK_RST 0x00000008 /* Link Reset. Resets everything. */ ++#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */ ++ ++/* FACTPS */ ++#define IXGBE_FACTPS_LFS 0x40000000 /* LAN Function Select */ ++ ++/* MHADD Bit Masks */ ++#define IXGBE_MHADD_MFS_MASK 0xFFFF0000 ++#define IXGBE_MHADD_MFS_SHIFT 16 ++ ++/* Extended Device Control */ ++#define IXGBE_CTRL_EXT_PFRSTD 0x00004000 /* Physical Function Reset Done */ ++#define IXGBE_CTRL_EXT_NS_DIS 0x00010000 /* No Snoop disable */ ++#define IXGBE_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ ++#define IXGBE_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ ++ ++/* Direct Cache Access (DCA) definitions */ ++#define IXGBE_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */ ++#define IXGBE_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */ ++ ++#define IXGBE_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */ ++#define IXGBE_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */ ++ ++#define IXGBE_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */ ++#define IXGBE_DCA_RXCTRL_CPUID_MASK_82599 0xFF000000 /* Rx CPUID Mask */ ++#define IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599 24 /* Rx CPUID Shift */ ++#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */ ++#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */ ++#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */ ++#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */ ++#define IXGBE_DCA_RXCTRL_DESC_WRO_EN (1 << 13) /* DCA Rx wr Desc Relax Order */ ++#define IXGBE_DCA_RXCTRL_DESC_HSRO_EN (1 << 15) /* DCA Rx Split Header RO */ ++ ++#define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ ++#define IXGBE_DCA_TXCTRL_CPUID_MASK_82599 0xFF000000 /* Tx CPUID Mask */ ++#define IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599 24 /* Tx CPUID Shift */ ++#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ ++#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ ++#define IXGBE_DCA_MAX_QUEUES_82598 16 /* DCA regs only on 16 queues */ ++ ++/* MSCA Bit Masks */ ++#define IXGBE_MSCA_NP_ADDR_MASK 0x0000FFFF /* MDI Address (new protocol) */ ++#define IXGBE_MSCA_NP_ADDR_SHIFT 0 ++#define IXGBE_MSCA_DEV_TYPE_MASK 0x001F0000 /* Device Type (new protocol) */ ++#define IXGBE_MSCA_DEV_TYPE_SHIFT 16 /* Register Address (old protocol */ ++#define IXGBE_MSCA_PHY_ADDR_MASK 0x03E00000 /* PHY Address mask */ ++#define IXGBE_MSCA_PHY_ADDR_SHIFT 21 /* PHY Address shift*/ ++#define IXGBE_MSCA_OP_CODE_MASK 0x0C000000 /* OP CODE mask */ ++#define IXGBE_MSCA_OP_CODE_SHIFT 26 /* OP CODE shift */ ++#define IXGBE_MSCA_ADDR_CYCLE 0x00000000 /* OP CODE 00 (addr cycle) */ ++#define IXGBE_MSCA_WRITE 0x04000000 /* OP CODE 01 (write) */ 
++#define IXGBE_MSCA_READ 0x08000000 /* OP CODE 10 (read) */ ++#define IXGBE_MSCA_READ_AUTOINC 0x0C000000 /* OP CODE 11 (read, auto inc)*/ ++#define IXGBE_MSCA_ST_CODE_MASK 0x30000000 /* ST Code mask */ ++#define IXGBE_MSCA_ST_CODE_SHIFT 28 /* ST Code shift */ ++#define IXGBE_MSCA_NEW_PROTOCOL 0x00000000 /* ST CODE 00 (new protocol) */ ++#define IXGBE_MSCA_OLD_PROTOCOL 0x10000000 /* ST CODE 01 (old protocol) */ ++#define IXGBE_MSCA_MDI_COMMAND 0x40000000 /* Initiate MDI command */ ++#define IXGBE_MSCA_MDI_IN_PROG_EN 0x80000000 /* MDI in progress enable */ ++ ++/* MSRWD bit masks */ ++#define IXGBE_MSRWD_WRITE_DATA_MASK 0x0000FFFF ++#define IXGBE_MSRWD_WRITE_DATA_SHIFT 0 ++#define IXGBE_MSRWD_READ_DATA_MASK 0xFFFF0000 ++#define IXGBE_MSRWD_READ_DATA_SHIFT 16 ++ ++/* Atlas registers */ ++#define IXGBE_ATLAS_PDN_LPBK 0x24 ++#define IXGBE_ATLAS_PDN_10G 0xB ++#define IXGBE_ATLAS_PDN_1G 0xC ++#define IXGBE_ATLAS_PDN_AN 0xD ++ ++/* Atlas bit masks */ ++#define IXGBE_ATLASCTL_WRITE_CMD 0x00010000 ++#define IXGBE_ATLAS_PDN_TX_REG_EN 0x10 ++#define IXGBE_ATLAS_PDN_TX_10G_QL_ALL 0xF0 ++#define IXGBE_ATLAS_PDN_TX_1G_QL_ALL 0xF0 ++#define IXGBE_ATLAS_PDN_TX_AN_QL_ALL 0xF0 ++ ++/* Omer bit masks */ ++#define IXGBE_CORECTL_WRITE_CMD 0x00010000 ++ ++/* Device Type definitions for new protocol MDIO commands */ ++#define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1 ++#define IXGBE_MDIO_PCS_DEV_TYPE 0x3 ++#define IXGBE_MDIO_PHY_XS_DEV_TYPE 0x4 ++#define IXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7 ++#define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Device 30 */ ++#define IXGBE_TWINAX_DEV 1 ++ ++#define IXGBE_MDIO_COMMAND_TIMEOUT 100 /* PHY Timeout for 1 GB mode */ ++ ++#define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL 0x0 /* VS1 Control Reg */ ++#define IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS 0x1 /* VS1 Status Reg */ ++#define IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS 0x0008 /* 1 = Link Up */ ++#define IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS 0x0010 /* 0 - 10G, 1 - 1G */ ++#define IXGBE_MDIO_VENDOR_SPECIFIC_1_10G_SPEED 0x0018 ++#define IXGBE_MDIO_VENDOR_SPECIFIC_1_1G_SPEED 0x0010 ++ ++#define IXGBE_MDIO_AUTO_NEG_CONTROL 0x0 /* AUTO_NEG Control Reg */ ++#define IXGBE_MDIO_AUTO_NEG_STATUS 0x1 /* AUTO_NEG Status Reg */ ++#define IXGBE_MDIO_AUTO_NEG_ADVT 0x10 /* AUTO_NEG Advt Reg */ ++#define IXGBE_MDIO_AUTO_NEG_LP 0x13 /* AUTO_NEG LP Status Reg */ ++#define IXGBE_MDIO_PHY_XS_CONTROL 0x0 /* PHY_XS Control Reg */ ++#define IXGBE_MDIO_PHY_XS_RESET 0x8000 /* PHY_XS Reset */ ++#define IXGBE_MDIO_PHY_ID_HIGH 0x2 /* PHY ID High Reg*/ ++#define IXGBE_MDIO_PHY_ID_LOW 0x3 /* PHY ID Low Reg*/ ++#define IXGBE_MDIO_PHY_SPEED_ABILITY 0x4 /* Speed Ability Reg */ ++#define IXGBE_MDIO_PHY_SPEED_10G 0x0001 /* 10G capable */ ++#define IXGBE_MDIO_PHY_SPEED_1G 0x0010 /* 1G capable */ ++#define IXGBE_MDIO_PHY_SPEED_100M 0x0020 /* 100M capable */ ++#define IXGBE_MDIO_PHY_EXT_ABILITY 0xB /* Ext Ability Reg */ ++#define IXGBE_MDIO_PHY_10GBASET_ABILITY 0x0004 /* 10GBaseT capable */ ++#define IXGBE_MDIO_PHY_1000BASET_ABILITY 0x0020 /* 1000BaseT capable */ ++#define IXGBE_MDIO_PHY_100BASETX_ABILITY 0x0080 /* 100BaseTX capable */ ++#define IXGBE_MDIO_PHY_SET_LOW_POWER_MODE 0x0800 /* Set low power mode */ ++ ++#define IXGBE_MDIO_PMA_PMD_CONTROL_ADDR 0x0000 /* PMA/PMD Control Reg */ ++#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */ ++#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */ ++#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */ ++ ++/* MII clause 22/28 definitions */ ++#define 
IXGBE_MDIO_PHY_LOW_POWER_MODE 0x0800 ++ ++#define IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG 0x20 /* 10G Control Reg */ ++#define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */ ++#define IXGBE_MII_AUTONEG_XNP_TX_REG 0x17 /* 1G XNP Transmit */ ++#define IXGBE_MII_AUTONEG_ADVERTISE_REG 0x10 /* 100M Advertisement */ ++#define IXGBE_MII_10GBASE_T_ADVERTISE 0x1000 /* full duplex, bit:12*/ ++#define IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX 0x4000 /* full duplex, bit:14*/ ++#define IXGBE_MII_1GBASE_T_ADVERTISE 0x8000 /* full duplex, bit:15*/ ++#define IXGBE_MII_100BASE_T_ADVERTISE 0x0100 /* full duplex, bit:8 */ ++#define IXGBE_MII_RESTART 0x200 ++#define IXGBE_MII_AUTONEG_COMPLETE 0x20 ++#define IXGBE_MII_AUTONEG_LINK_UP 0x04 ++#define IXGBE_MII_AUTONEG_REG 0x0 ++ ++#define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0 ++#define IXGBE_MAX_PHY_ADDR 32 ++ ++/* PHY IDs*/ ++#define TN1010_PHY_ID 0x00A19410 ++#define TNX_FW_REV 0xB ++#define AQ1002_PHY_ID 0x03A1B420 ++#define AQ_FW_REV 0x20 ++#define QT2022_PHY_ID 0x0043A400 ++#define ATH_PHY_ID 0x03429050 ++ ++/* PHY Types */ ++#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0 ++ ++/* Special PHY Init Routine */ ++#define IXGBE_PHY_INIT_OFFSET_NL 0x002B ++#define IXGBE_PHY_INIT_END_NL 0xFFFF ++#define IXGBE_CONTROL_MASK_NL 0xF000 ++#define IXGBE_DATA_MASK_NL 0x0FFF ++#define IXGBE_CONTROL_SHIFT_NL 12 ++#define IXGBE_DELAY_NL 0 ++#define IXGBE_DATA_NL 1 ++#define IXGBE_CONTROL_NL 0x000F ++#define IXGBE_CONTROL_EOL_NL 0x0FFF ++#define IXGBE_CONTROL_SOL_NL 0x0000 ++ ++/* General purpose Interrupt Enable */ ++#define IXGBE_SDP0_GPIEN 0x00000001 /* SDP0 */ ++#define IXGBE_SDP1_GPIEN 0x00000002 /* SDP1 */ ++#define IXGBE_SDP2_GPIEN 0x00000004 /* SDP2 */ ++#define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */ ++#define IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */ ++#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */ ++#define IXGBE_GPIE_EIAME 0x40000000 ++#define IXGBE_GPIE_PBA_SUPPORT 0x80000000 ++#define IXGBE_GPIE_RSC_DELAY_SHIFT 11 ++#define IXGBE_GPIE_VTMODE_MASK 0x0000C000 /* VT Mode Mask */ ++#define IXGBE_GPIE_VTMODE_16 0x00004000 /* 16 VFs 8 queues per VF */ ++#define IXGBE_GPIE_VTMODE_32 0x00008000 /* 32 VFs 4 queues per VF */ ++#define IXGBE_GPIE_VTMODE_64 0x0000C000 /* 64 VFs 2 queues per VF */ ++ ++/* Transmit Flow Control status */ ++#define IXGBE_TFCS_TXOFF 0x00000001 ++#define IXGBE_TFCS_TXOFF0 0x00000100 ++#define IXGBE_TFCS_TXOFF1 0x00000200 ++#define IXGBE_TFCS_TXOFF2 0x00000400 ++#define IXGBE_TFCS_TXOFF3 0x00000800 ++#define IXGBE_TFCS_TXOFF4 0x00001000 ++#define IXGBE_TFCS_TXOFF5 0x00002000 ++#define IXGBE_TFCS_TXOFF6 0x00004000 ++#define IXGBE_TFCS_TXOFF7 0x00008000 ++ ++/* TCP Timer */ ++#define IXGBE_TCPTIMER_KS 0x00000100 ++#define IXGBE_TCPTIMER_COUNT_ENABLE 0x00000200 ++#define IXGBE_TCPTIMER_COUNT_FINISH 0x00000400 ++#define IXGBE_TCPTIMER_LOOP 0x00000800 ++#define IXGBE_TCPTIMER_DURATION_MASK 0x000000FF ++ ++/* HLREG0 Bit Masks */ ++#define IXGBE_HLREG0_TXCRCEN 0x00000001 /* bit 0 */ ++#define IXGBE_HLREG0_RXCRCSTRP 0x00000002 /* bit 1 */ ++#define IXGBE_HLREG0_JUMBOEN 0x00000004 /* bit 2 */ ++#define IXGBE_HLREG0_TXPADEN 0x00000400 /* bit 10 */ ++#define IXGBE_HLREG0_TXPAUSEEN 0x00001000 /* bit 12 */ ++#define IXGBE_HLREG0_RXPAUSEEN 0x00004000 /* bit 14 */ ++#define IXGBE_HLREG0_LPBK 0x00008000 /* bit 15 */ ++#define IXGBE_HLREG0_MDCSPD 0x00010000 /* bit 16 */ ++#define IXGBE_HLREG0_CONTMDC 0x00020000 /* bit 17 */ ++#define IXGBE_HLREG0_CTRLFLTR 0x00040000 /* bit 18 */ ++#define IXGBE_HLREG0_PREPEND 
0x00F00000 /* bits 20-23 */ ++#define IXGBE_HLREG0_PRIPAUSEEN 0x01000000 /* bit 24 */ ++#define IXGBE_HLREG0_RXPAUSERECDA 0x06000000 /* bits 25-26 */ ++#define IXGBE_HLREG0_RXLNGTHERREN 0x08000000 /* bit 27 */ ++#define IXGBE_HLREG0_RXPADSTRIPEN 0x10000000 /* bit 28 */ ++ ++/* VMD_CTL bitmasks */ ++#define IXGBE_VMD_CTL_VMDQ_EN 0x00000001 ++#define IXGBE_VMD_CTL_VMDQ_FILTER 0x00000002 ++ ++/* VT_CTL bitmasks */ ++#define IXGBE_VT_CTL_DIS_DEFPL 0x20000000 /* disable default pool */ ++#define IXGBE_VT_CTL_REPLEN 0x40000000 /* replication enabled */ ++#define IXGBE_VT_CTL_VT_ENABLE 0x00000001 /* Enable VT Mode */ ++#define IXGBE_VT_CTL_POOL_SHIFT 7 ++#define IXGBE_VT_CTL_POOL_MASK (0x3F << IXGBE_VT_CTL_POOL_SHIFT) ++ ++/* VMOLR bitmasks */ ++#define IXGBE_VMOLR_AUPE 0x01000000 /* accept untagged packets */ ++#define IXGBE_VMOLR_ROMPE 0x02000000 /* accept packets in MTA tbl */ ++#define IXGBE_VMOLR_ROPE 0x04000000 /* accept packets in UC tbl */ ++#define IXGBE_VMOLR_BAM 0x08000000 /* accept broadcast packets */ ++#define IXGBE_VMOLR_MPE 0x10000000 /* multicast promiscuous */ ++ ++/* VFRE bitmask */ ++#define IXGBE_VFRE_ENABLE_ALL 0xFFFFFFFF ++ ++#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ ++ ++/* RDHMPN and TDHMPN bitmasks */ ++#define IXGBE_RDHMPN_RDICADDR 0x007FF800 ++#define IXGBE_RDHMPN_RDICRDREQ 0x00800000 ++#define IXGBE_RDHMPN_RDICADDR_SHIFT 11 ++#define IXGBE_TDHMPN_TDICADDR 0x003FF800 ++#define IXGBE_TDHMPN_TDICRDREQ 0x00800000 ++#define IXGBE_TDHMPN_TDICADDR_SHIFT 11 ++ ++#define IXGBE_RDMAM_MEM_SEL_SHIFT 13 ++#define IXGBE_RDMAM_DWORD_SHIFT 9 ++#define IXGBE_RDMAM_DESC_COMP_FIFO 1 ++#define IXGBE_RDMAM_DFC_CMD_FIFO 2 ++#define IXGBE_RDMAM_RSC_HEADER_ADDR 3 ++#define IXGBE_RDMAM_TCN_STATUS_RAM 4 ++#define IXGBE_RDMAM_WB_COLL_FIFO 5 ++#define IXGBE_RDMAM_QSC_CNT_RAM 6 ++#define IXGBE_RDMAM_QSC_FCOE_RAM 7 ++#define IXGBE_RDMAM_QSC_QUEUE_CNT 8 ++#define IXGBE_RDMAM_QSC_QUEUE_RAM 0xA ++#define IXGBE_RDMAM_QSC_RSC_RAM 0xB ++#define IXGBE_RDMAM_DESC_COM_FIFO_RANGE 135 ++#define IXGBE_RDMAM_DESC_COM_FIFO_COUNT 4 ++#define IXGBE_RDMAM_DFC_CMD_FIFO_RANGE 48 ++#define IXGBE_RDMAM_DFC_CMD_FIFO_COUNT 7 ++#define IXGBE_RDMAM_RSC_HEADER_ADDR_RANGE 32 ++#define IXGBE_RDMAM_RSC_HEADER_ADDR_COUNT 4 ++#define IXGBE_RDMAM_TCN_STATUS_RAM_RANGE 256 ++#define IXGBE_RDMAM_TCN_STATUS_RAM_COUNT 9 ++#define IXGBE_RDMAM_WB_COLL_FIFO_RANGE 8 ++#define IXGBE_RDMAM_WB_COLL_FIFO_COUNT 4 ++#define IXGBE_RDMAM_QSC_CNT_RAM_RANGE 64 ++#define IXGBE_RDMAM_QSC_CNT_RAM_COUNT 4 ++#define IXGBE_RDMAM_QSC_FCOE_RAM_RANGE 512 ++#define IXGBE_RDMAM_QSC_FCOE_RAM_COUNT 5 ++#define IXGBE_RDMAM_QSC_QUEUE_CNT_RANGE 32 ++#define IXGBE_RDMAM_QSC_QUEUE_CNT_COUNT 4 ++#define IXGBE_RDMAM_QSC_QUEUE_RAM_RANGE 128 ++#define IXGBE_RDMAM_QSC_QUEUE_RAM_COUNT 8 ++#define IXGBE_RDMAM_QSC_RSC_RAM_RANGE 32 ++#define IXGBE_RDMAM_QSC_RSC_RAM_COUNT 8 ++ ++#define IXGBE_TXDESCIC_READY 0x80000000 ++ ++/* Receive Checksum Control */ ++#define IXGBE_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ ++#define IXGBE_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ ++ ++/* FCRTL Bit Masks */ ++#define IXGBE_FCRTL_XONE 0x80000000 /* XON enable */ ++#define IXGBE_FCRTH_FCEN 0x80000000 /* Packet buffer fc enable */ ++ ++/* PAP bit masks*/ ++#define IXGBE_PAP_TXPAUSECNT_MASK 0x0000FFFF /* Pause counter mask */ ++ ++/* RMCS Bit Masks */ ++#define IXGBE_RMCS_RRM 0x00000002 /* Receive Recycle Mode enable */ ++/* Receive Arbitration Control: 0 Round Robin, 1 DFP */ ++#define IXGBE_RMCS_RAC 0x00000004 ++#define 
IXGBE_RMCS_DFP IXGBE_RMCS_RAC /* Deficit Fixed Priority ena */ ++#define IXGBE_RMCS_TFCE_802_3X 0x00000008 /* Tx Priority FC ena */ ++#define IXGBE_RMCS_TFCE_PRIORITY 0x00000010 /* Tx Priority FC ena */ ++#define IXGBE_RMCS_ARBDIS 0x00000040 /* Arbitration disable bit */ ++ ++/* FCCFG Bit Masks */ ++#define IXGBE_FCCFG_TFCE_802_3X 0x00000008 /* Tx link FC enable */ ++#define IXGBE_FCCFG_TFCE_PRIORITY 0x00000010 /* Tx priority FC enable */ ++ ++/* Interrupt register bitmasks */ ++ ++/* Extended Interrupt Cause Read */ ++#define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */ ++#define IXGBE_EICR_FLOW_DIR 0x00010000 /* FDir Exception */ ++#define IXGBE_EICR_RX_MISS 0x00020000 /* Packet Buffer Overrun */ ++#define IXGBE_EICR_PCI 0x00040000 /* PCI Exception */ ++#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */ ++#define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */ ++#define IXGBE_EICR_LINKSEC 0x00200000 /* PN Threshold */ ++#define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */ ++#define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */ ++#define IXGBE_EICR_GPI_SDP1 0x02000000 /* Gen Purpose Interrupt on SDP1 */ ++#define IXGBE_EICR_GPI_SDP2 0x04000000 /* Gen Purpose Interrupt on SDP2 */ ++#define IXGBE_EICR_ECC 0x10000000 /* ECC Error */ ++#define IXGBE_EICR_PBUR 0x10000000 /* Packet Buffer Handler Error */ ++#define IXGBE_EICR_DHER 0x20000000 /* Descriptor Handler Error */ ++#define IXGBE_EICR_TCP_TIMER 0x40000000 /* TCP Timer */ ++#define IXGBE_EICR_OTHER 0x80000000 /* Interrupt Cause Active */ ++ ++/* Extended Interrupt Cause Set */ ++#define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ ++#define IXGBE_EICS_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */ ++#define IXGBE_EICS_RX_MISS IXGBE_EICR_RX_MISS /* Pkt Buffer Overrun */ ++#define IXGBE_EICS_PCI IXGBE_EICR_PCI /* PCI Exception */ ++#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ ++#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */ ++#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ ++#define IXGBE_EICS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ ++#define IXGBE_EICS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ ++#define IXGBE_EICS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ ++#define IXGBE_EICS_ECC IXGBE_EICR_ECC /* ECC Error */ ++#define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ ++#define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */ ++#define IXGBE_EICS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ ++#define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ ++ ++/* Extended Interrupt Mask Set */ ++#define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ ++#define IXGBE_EIMS_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */ ++#define IXGBE_EIMS_RX_MISS IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */ ++#define IXGBE_EIMS_PCI IXGBE_EICR_PCI /* PCI Exception */ ++#define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ ++#define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */ ++#define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ ++#define IXGBE_EIMS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ ++#define IXGBE_EIMS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ ++#define IXGBE_EIMS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ ++#define IXGBE_EIMS_ECC IXGBE_EICR_ECC /* ECC Error */ ++#define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler 
Err */ ++#define IXGBE_EIMS_DHER IXGBE_EICR_DHER /* Descr Handler Error */ ++#define IXGBE_EIMS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ ++#define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ ++ ++/* Extended Interrupt Mask Clear */ ++#define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ ++#define IXGBE_EIMC_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */ ++#define IXGBE_EIMC_RX_MISS IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */ ++#define IXGBE_EIMC_PCI IXGBE_EICR_PCI /* PCI Exception */ ++#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ ++#define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */ ++#define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ ++#define IXGBE_EIMC_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ ++#define IXGBE_EIMC_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ ++#define IXGBE_EIMC_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ ++#define IXGBE_EIMC_ECC IXGBE_EICR_ECC /* ECC Error */ ++#define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ ++#define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Err */ ++#define IXGBE_EIMC_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ ++#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ ++ ++#define IXGBE_EIMS_ENABLE_MASK ( \ ++ IXGBE_EIMS_RTX_QUEUE | \ ++ IXGBE_EIMS_LSC | \ ++ IXGBE_EIMS_TCP_TIMER | \ ++ IXGBE_EIMS_OTHER) ++ ++/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */ ++#define IXGBE_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */ ++#define IXGBE_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */ ++#define IXGBE_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ ++#define IXGBE_IMIREXT_CTRL_URG 0x00002000 /* Check URG bit in header */ ++#define IXGBE_IMIREXT_CTRL_ACK 0x00004000 /* Check ACK bit in header */ ++#define IXGBE_IMIREXT_CTRL_PSH 0x00008000 /* Check PSH bit in header */ ++#define IXGBE_IMIREXT_CTRL_RST 0x00010000 /* Check RST bit in header */ ++#define IXGBE_IMIREXT_CTRL_SYN 0x00020000 /* Check SYN bit in header */ ++#define IXGBE_IMIREXT_CTRL_FIN 0x00040000 /* Check FIN bit in header */ ++#define IXGBE_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of control bits */ ++#define IXGBE_IMIR_SIZE_BP_82599 0x00001000 /* Packet size bypass */ ++#define IXGBE_IMIR_CTRL_URG_82599 0x00002000 /* Check URG bit in header */ ++#define IXGBE_IMIR_CTRL_ACK_82599 0x00004000 /* Check ACK bit in header */ ++#define IXGBE_IMIR_CTRL_PSH_82599 0x00008000 /* Check PSH bit in header */ ++#define IXGBE_IMIR_CTRL_RST_82599 0x00010000 /* Check RST bit in header */ ++#define IXGBE_IMIR_CTRL_SYN_82599 0x00020000 /* Check SYN bit in header */ ++#define IXGBE_IMIR_CTRL_FIN_82599 0x00040000 /* Check FIN bit in header */ ++#define IXGBE_IMIR_CTRL_BP_82599 0x00080000 /* Bypass check of control bits */ ++#define IXGBE_IMIR_LLI_EN_82599 0x00100000 /* Enables low latency Int */ ++#define IXGBE_IMIR_RX_QUEUE_MASK_82599 0x0000007F /* Rx Queue Mask */ ++#define IXGBE_IMIR_RX_QUEUE_SHIFT_82599 21 /* Rx Queue Shift */ ++#define IXGBE_IMIRVP_PRIORITY_MASK 0x00000007 /* VLAN priority mask */ ++#define IXGBE_IMIRVP_PRIORITY_EN 0x00000008 /* VLAN priority enable */ ++ ++#define IXGBE_MAX_FTQF_FILTERS 128 ++#define IXGBE_FTQF_PROTOCOL_MASK 0x00000003 ++#define IXGBE_FTQF_PROTOCOL_TCP 0x00000000 ++#define IXGBE_FTQF_PROTOCOL_UDP 0x00000001 ++#define IXGBE_FTQF_PROTOCOL_SCTP 2 ++#define IXGBE_FTQF_PRIORITY_MASK 0x00000007 ++#define IXGBE_FTQF_PRIORITY_SHIFT 2 ++#define IXGBE_FTQF_POOL_MASK 0x0000003F ++#define 
IXGBE_FTQF_POOL_SHIFT 8 ++#define IXGBE_FTQF_5TUPLE_MASK_MASK 0x0000001F ++#define IXGBE_FTQF_5TUPLE_MASK_SHIFT 25 ++#define IXGBE_FTQF_SOURCE_ADDR_MASK 0x1E ++#define IXGBE_FTQF_DEST_ADDR_MASK 0x1D ++#define IXGBE_FTQF_SOURCE_PORT_MASK 0x1B ++#define IXGBE_FTQF_DEST_PORT_MASK 0x17 ++#define IXGBE_FTQF_PROTOCOL_COMP_MASK 0x0F ++#define IXGBE_FTQF_POOL_MASK_EN 0x40000000 ++#define IXGBE_FTQF_QUEUE_ENABLE 0x80000000 ++ ++/* Interrupt clear mask */ ++#define IXGBE_IRQ_CLEAR_MASK 0xFFFFFFFF ++ ++/* Interrupt Vector Allocation Registers */ ++#define IXGBE_IVAR_REG_NUM 25 ++#define IXGBE_IVAR_REG_NUM_82599 64 ++#define IXGBE_IVAR_TXRX_ENTRY 96 ++#define IXGBE_IVAR_RX_ENTRY 64 ++#define IXGBE_IVAR_RX_QUEUE(_i) (0 + (_i)) ++#define IXGBE_IVAR_TX_QUEUE(_i) (64 + (_i)) ++#define IXGBE_IVAR_TX_ENTRY 32 ++ ++#define IXGBE_IVAR_TCP_TIMER_INDEX 96 /* 0 based index */ ++#define IXGBE_IVAR_OTHER_CAUSES_INDEX 97 /* 0 based index */ ++ ++#define IXGBE_MSIX_VECTOR(_i) (0 + (_i)) ++ ++#define IXGBE_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */ ++ ++/* ETYPE Queue Filter/Select Bit Masks */ ++#define IXGBE_MAX_ETQF_FILTERS 8 ++#define IXGBE_ETQF_FCOE 0x08000000 /* bit 27 */ ++#define IXGBE_ETQF_BCN 0x10000000 /* bit 28 */ ++#define IXGBE_ETQF_1588 0x40000000 /* bit 30 */ ++#define IXGBE_ETQF_FILTER_EN 0x80000000 /* bit 31 */ ++#define IXGBE_ETQF_POOL_ENABLE (1 << 26) /* bit 26 */ ++ ++#define IXGBE_ETQS_RX_QUEUE 0x007F0000 /* bits 22:16 */ ++#define IXGBE_ETQS_RX_QUEUE_SHIFT 16 ++#define IXGBE_ETQS_LLI 0x20000000 /* bit 29 */ ++#define IXGBE_ETQS_QUEUE_EN 0x80000000 /* bit 31 */ ++ ++/* ++ * ETQF filter list: one static filter per filter consumer. This is ++ * to avoid filter collisions later. Add new filters ++ * here!! ++ * ++ * Current filters: ++ * EAPOL 802.1x (0x888e): Filter 0 ++ * FCoE (0x8906): Filter 2 ++ * 1588 (0x88f7): Filter 3 ++ * FIP (0x8914): Filter 4 ++ */ ++#define IXGBE_ETQF_FILTER_EAPOL 0 ++#define IXGBE_ETQF_FILTER_FCOE 2 ++#define IXGBE_ETQF_FILTER_1588 3 ++#define IXGBE_ETQF_FILTER_FIP 4 ++/* VLAN Control Bit Masks */ ++#define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */ ++#define IXGBE_VLNCTRL_CFI 0x10000000 /* bit 28 */ ++#define IXGBE_VLNCTRL_CFIEN 0x20000000 /* bit 29 */ ++#define IXGBE_VLNCTRL_VFE 0x40000000 /* bit 30 */ ++#define IXGBE_VLNCTRL_VME 0x80000000 /* bit 31 */ ++ ++/* VLAN pool filtering masks */ ++#define IXGBE_VLVF_VIEN 0x80000000 /* filter is valid */ ++#define IXGBE_VLVF_ENTRIES 64 ++#define IXGBE_VLVF_VLANID_MASK 0x00000FFF ++/* Per VF Port VLAN insertion rules */ ++#define IXGBE_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */ ++#define IXGBE_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */ ++ ++#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */ ++ ++/* STATUS Bit Masks */ ++#define IXGBE_STATUS_LAN_ID 0x0000000C /* LAN ID */ ++#define IXGBE_STATUS_LAN_ID_SHIFT 2 /* LAN ID Shift*/ ++#define IXGBE_STATUS_GIO 0x00080000 /* GIO Master Enable Status */ ++ ++#define IXGBE_STATUS_LAN_ID_0 0x00000000 /* LAN ID 0 */ ++#define IXGBE_STATUS_LAN_ID_1 0x00000004 /* LAN ID 1 */ ++ ++/* ESDP Bit Masks */ ++#define IXGBE_ESDP_SDP0 0x00000001 /* SDP0 Data Value */ ++#define IXGBE_ESDP_SDP1 0x00000002 /* SDP1 Data Value */ ++#define IXGBE_ESDP_SDP2 0x00000004 /* SDP2 Data Value */ ++#define IXGBE_ESDP_SDP3 0x00000008 /* SDP3 Data Value */ ++#define IXGBE_ESDP_SDP4 0x00000010 /* SDP4 Data Value */ ++#define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */ ++#define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data Value */ ++#define 
IXGBE_ESDP_SDP4_DIR 0x00000004 /* SDP4 IO direction */ ++#define IXGBE_ESDP_SDP5_DIR 0x00002000 /* SDP5 IO direction */ ++ ++/* LEDCTL Bit Masks */ ++#define IXGBE_LED_IVRT_BASE 0x00000040 ++#define IXGBE_LED_BLINK_BASE 0x00000080 ++#define IXGBE_LED_MODE_MASK_BASE 0x0000000F ++#define IXGBE_LED_OFFSET(_base, _i) (_base << (8 * (_i))) ++#define IXGBE_LED_MODE_SHIFT(_i) (8*(_i)) ++#define IXGBE_LED_IVRT(_i) IXGBE_LED_OFFSET(IXGBE_LED_IVRT_BASE, _i) ++#define IXGBE_LED_BLINK(_i) IXGBE_LED_OFFSET(IXGBE_LED_BLINK_BASE, _i) ++#define IXGBE_LED_MODE_MASK(_i) IXGBE_LED_OFFSET(IXGBE_LED_MODE_MASK_BASE, _i) ++ ++/* LED modes */ ++#define IXGBE_LED_LINK_UP 0x0 ++#define IXGBE_LED_LINK_10G 0x1 ++#define IXGBE_LED_MAC 0x2 ++#define IXGBE_LED_FILTER 0x3 ++#define IXGBE_LED_LINK_ACTIVE 0x4 ++#define IXGBE_LED_LINK_1G 0x5 ++#define IXGBE_LED_ON 0xE ++#define IXGBE_LED_OFF 0xF ++ ++/* AUTOC Bit Masks */ ++#define IXGBE_AUTOC_KX4_KX_SUPP_MASK 0xC0000000 ++#define IXGBE_AUTOC_KX4_SUPP 0x80000000 ++#define IXGBE_AUTOC_KX_SUPP 0x40000000 ++#define IXGBE_AUTOC_PAUSE 0x30000000 ++#define IXGBE_AUTOC_ASM_PAUSE 0x20000000 ++#define IXGBE_AUTOC_SYM_PAUSE 0x10000000 ++#define IXGBE_AUTOC_RF 0x08000000 ++#define IXGBE_AUTOC_PD_TMR 0x06000000 ++#define IXGBE_AUTOC_AN_RX_LOOSE 0x01000000 ++#define IXGBE_AUTOC_AN_RX_DRIFT 0x00800000 ++#define IXGBE_AUTOC_AN_RX_ALIGN 0x007C0000 ++#define IXGBE_AUTOC_FECA 0x00040000 ++#define IXGBE_AUTOC_FECR 0x00020000 ++#define IXGBE_AUTOC_KR_SUPP 0x00010000 ++#define IXGBE_AUTOC_AN_RESTART 0x00001000 ++#define IXGBE_AUTOC_FLU 0x00000001 ++#define IXGBE_AUTOC_LMS_SHIFT 13 ++#define IXGBE_AUTOC_LMS_10G_SERIAL (0x3 << IXGBE_AUTOC_LMS_SHIFT) ++#define IXGBE_AUTOC_LMS_KX4_KX_KR (0x4 << IXGBE_AUTOC_LMS_SHIFT) ++#define IXGBE_AUTOC_LMS_SGMII_1G_100M (0x5 << IXGBE_AUTOC_LMS_SHIFT) ++#define IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT) ++#define IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII (0x7 << IXGBE_AUTOC_LMS_SHIFT) ++#define IXGBE_AUTOC_LMS_MASK (0x7 << IXGBE_AUTOC_LMS_SHIFT) ++#define IXGBE_AUTOC_LMS_1G_LINK_NO_AN (0x0 << IXGBE_AUTOC_LMS_SHIFT) ++#define IXGBE_AUTOC_LMS_10G_LINK_NO_AN (0x1 << IXGBE_AUTOC_LMS_SHIFT) ++#define IXGBE_AUTOC_LMS_1G_AN (0x2 << IXGBE_AUTOC_LMS_SHIFT) ++#define IXGBE_AUTOC_LMS_KX4_AN (0x4 << IXGBE_AUTOC_LMS_SHIFT) ++#define IXGBE_AUTOC_LMS_KX4_AN_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT) ++#define IXGBE_AUTOC_LMS_ATTACH_TYPE (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) ++ ++#define IXGBE_AUTOC_1G_PMA_PMD_MASK 0x00000200 ++#define IXGBE_AUTOC_1G_PMA_PMD_SHIFT 9 ++#define IXGBE_AUTOC_10G_PMA_PMD_MASK 0x00000180 ++#define IXGBE_AUTOC_10G_PMA_PMD_SHIFT 7 ++#define IXGBE_AUTOC_10G_XAUI (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) ++#define IXGBE_AUTOC_10G_KX4 (0x1 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) ++#define IXGBE_AUTOC_10G_CX4 (0x2 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) ++#define IXGBE_AUTOC_1G_BX (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) ++#define IXGBE_AUTOC_1G_KX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) ++#define IXGBE_AUTOC_1G_SFI (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) ++#define IXGBE_AUTOC_1G_KX_BX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) ++ ++#define IXGBE_AUTOC2_UPPER_MASK 0xFFFF0000 ++#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK 0x00030000 ++#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT 16 ++#define IXGBE_AUTOC2_10G_KR (0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) ++#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) ++#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) ++ ++ ++/* LINKS Bit Masks */ ++#define IXGBE_LINKS_KX_AN_COMP 
0x80000000 ++#define IXGBE_LINKS_UP 0x40000000 ++#define IXGBE_LINKS_SPEED 0x20000000 ++#define IXGBE_LINKS_MODE 0x18000000 ++#define IXGBE_LINKS_RX_MODE 0x06000000 ++#define IXGBE_LINKS_TX_MODE 0x01800000 ++#define IXGBE_LINKS_XGXS_EN 0x00400000 ++#define IXGBE_LINKS_SGMII_EN 0x02000000 ++#define IXGBE_LINKS_PCS_1G_EN 0x00200000 ++#define IXGBE_LINKS_1G_AN_EN 0x00100000 ++#define IXGBE_LINKS_KX_AN_IDLE 0x00080000 ++#define IXGBE_LINKS_1G_SYNC 0x00040000 ++#define IXGBE_LINKS_10G_ALIGN 0x00020000 ++#define IXGBE_LINKS_10G_LANE_SYNC 0x00017000 ++#define IXGBE_LINKS_TL_FAULT 0x00001000 ++#define IXGBE_LINKS_SIGNAL 0x00000F00 ++ ++#define IXGBE_LINKS_SPEED_82599 0x30000000 ++#define IXGBE_LINKS_SPEED_10G_82599 0x30000000 ++#define IXGBE_LINKS_SPEED_1G_82599 0x20000000 ++#define IXGBE_LINKS_SPEED_100_82599 0x10000000 ++#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */ ++#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */ ++ ++#define IXGBE_LINKS2_AN_SUPPORTED 0x00000040 ++ ++/* PCS1GLSTA Bit Masks */ ++#define IXGBE_PCS1GLSTA_LINK_OK 1 ++#define IXGBE_PCS1GLSTA_SYNK_OK 0x10 ++#define IXGBE_PCS1GLSTA_AN_COMPLETE 0x10000 ++#define IXGBE_PCS1GLSTA_AN_PAGE_RX 0x20000 ++#define IXGBE_PCS1GLSTA_AN_TIMED_OUT 0x40000 ++#define IXGBE_PCS1GLSTA_AN_REMOTE_FAULT 0x80000 ++#define IXGBE_PCS1GLSTA_AN_ERROR_RWS 0x100000 ++ ++#define IXGBE_PCS1GANA_SYM_PAUSE 0x80 ++#define IXGBE_PCS1GANA_ASM_PAUSE 0x100 ++ ++/* PCS1GLCTL Bit Masks */ ++#define IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN 0x00040000 /* PCS 1G autoneg to en */ ++#define IXGBE_PCS1GLCTL_FLV_LINK_UP 1 ++#define IXGBE_PCS1GLCTL_FORCE_LINK 0x20 ++#define IXGBE_PCS1GLCTL_LOW_LINK_LATCH 0x40 ++#define IXGBE_PCS1GLCTL_AN_ENABLE 0x10000 ++#define IXGBE_PCS1GLCTL_AN_RESTART 0x20000 ++ ++/* ANLP1 Bit Masks */ ++#define IXGBE_ANLP1_PAUSE 0x0C00 ++#define IXGBE_ANLP1_SYM_PAUSE 0x0400 ++#define IXGBE_ANLP1_ASM_PAUSE 0x0800 ++#define IXGBE_ANLP1_AN_STATE_MASK 0x000f0000 ++ ++/* SW Semaphore Register bitmasks */ ++#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ ++#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ ++#define IXGBE_SWSM_WMNG 0x00000004 /* Wake MNG Clock */ ++#define IXGBE_SWFW_REGSMP 0x80000000 /* Register Semaphore bit 31 */ ++ ++/* SW_FW_SYNC/GSSR definitions */ ++#define IXGBE_GSSR_EEP_SM 0x0001 ++#define IXGBE_GSSR_PHY0_SM 0x0002 ++#define IXGBE_GSSR_PHY1_SM 0x0004 ++#define IXGBE_GSSR_MAC_CSR_SM 0x0008 ++#define IXGBE_GSSR_FLASH_SM 0x0010 ++ ++/* EEC Register */ ++#define IXGBE_EEC_SK 0x00000001 /* EEPROM Clock */ ++#define IXGBE_EEC_CS 0x00000002 /* EEPROM Chip Select */ ++#define IXGBE_EEC_DI 0x00000004 /* EEPROM Data In */ ++#define IXGBE_EEC_DO 0x00000008 /* EEPROM Data Out */ ++#define IXGBE_EEC_FWE_MASK 0x00000030 /* FLASH Write Enable */ ++#define IXGBE_EEC_FWE_DIS 0x00000010 /* Disable FLASH writes */ ++#define IXGBE_EEC_FWE_EN 0x00000020 /* Enable FLASH writes */ ++#define IXGBE_EEC_FWE_SHIFT 4 ++#define IXGBE_EEC_REQ 0x00000040 /* EEPROM Access Request */ ++#define IXGBE_EEC_GNT 0x00000080 /* EEPROM Access Grant */ ++#define IXGBE_EEC_PRES 0x00000100 /* EEPROM Present */ ++#define IXGBE_EEC_ARD 0x00000200 /* EEPROM Auto Read Done */ ++#define IXGBE_EEC_FLUP 0x00800000 /* Flash update command */ ++#define IXGBE_EEC_FLUDONE 0x04000000 /* Flash update done */ ++/* EEPROM Addressing bits based on type (0-small, 1-large) */ ++#define IXGBE_EEC_ADDR_SIZE 0x00000400 ++#define IXGBE_EEC_SIZE 0x00007800 /* EEPROM Size */ ++ ++#define IXGBE_EEC_SIZE_SHIFT 11 ++#define IXGBE_EEPROM_WORD_SIZE_BASE_SHIFT 6 ++#define 
IXGBE_EEPROM_OPCODE_BITS 8 ++ ++/* Checksum and EEPROM pointers */ ++#define IXGBE_PBANUM_PTR_GUARD 0xFAFA ++#define IXGBE_EEPROM_CHECKSUM 0x3F ++#define IXGBE_EEPROM_SUM 0xBABA ++#define IXGBE_PCIE_ANALOG_PTR 0x03 ++#define IXGBE_ATLAS0_CONFIG_PTR 0x04 ++#define IXGBE_PHY_PTR 0x04 ++#define IXGBE_ATLAS1_CONFIG_PTR 0x05 ++#define IXGBE_OPTION_ROM_PTR 0x05 ++#define IXGBE_PCIE_GENERAL_PTR 0x06 ++#define IXGBE_PCIE_CONFIG0_PTR 0x07 ++#define IXGBE_PCIE_CONFIG1_PTR 0x08 ++#define IXGBE_CORE0_PTR 0x09 ++#define IXGBE_CORE1_PTR 0x0A ++#define IXGBE_MAC0_PTR 0x0B ++#define IXGBE_MAC1_PTR 0x0C ++#define IXGBE_CSR0_CONFIG_PTR 0x0D ++#define IXGBE_CSR1_CONFIG_PTR 0x0E ++#define IXGBE_FW_PTR 0x0F ++#define IXGBE_PBANUM0_PTR 0x15 ++#define IXGBE_PBANUM1_PTR 0x16 ++#define IXGBE_SAN_MAC_ADDR_PTR 0x28 ++#define IXGBE_DEVICE_CAPS 0x2C ++#define IXGBE_SERIAL_NUMBER_MAC_ADDR 0x11 ++#define IXGBE_PCIE_MSIX_82599_CAPS 0x72 ++#define IXGBE_PCIE_MSIX_82598_CAPS 0x62 ++ ++/* MSI-X capability fields masks */ ++#define IXGBE_PCIE_MSIX_TBL_SZ_MASK 0x7FF ++ ++/* Legacy EEPROM word offsets */ ++#define IXGBE_ISCSI_BOOT_CAPS 0x0033 ++#define IXGBE_ISCSI_SETUP_PORT_0 0x0030 ++#define IXGBE_ISCSI_SETUP_PORT_1 0x0034 ++ ++/* EEPROM Commands - SPI */ ++#define IXGBE_EEPROM_MAX_RETRY_SPI 5000 /* Max wait 5ms for RDY signal */ ++#define IXGBE_EEPROM_STATUS_RDY_SPI 0x01 ++#define IXGBE_EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */ ++#define IXGBE_EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */ ++#define IXGBE_EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = addr bit-8 */ ++#define IXGBE_EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Ena latch */ ++/* EEPROM reset Write Enable latch */ ++#define IXGBE_EEPROM_WRDI_OPCODE_SPI 0x04 ++#define IXGBE_EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status reg */ ++#define IXGBE_EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status reg */ ++#define IXGBE_EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */ ++#define IXGBE_EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */ ++#define IXGBE_EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */ ++ ++/* EEPROM Read Register */ ++#define IXGBE_EEPROM_RW_REG_DATA 16 /* data offset in EEPROM read reg */ ++#define IXGBE_EEPROM_RW_REG_DONE 2 /* Offset to READ done bit */ ++#define IXGBE_EEPROM_RW_REG_START 1 /* First bit to start operation */ ++#define IXGBE_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ ++#define IXGBE_NVM_POLL_WRITE 1 /* Flag for polling for write complete */ ++#define IXGBE_NVM_POLL_READ 0 /* Flag for polling for read complete */ ++ ++#define IXGBE_ETH_LENGTH_OF_ADDRESS 6 ++ ++#ifndef IXGBE_EEPROM_GRANT_ATTEMPTS ++#define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */ ++#endif ++ ++#ifndef IXGBE_EERD_EEWR_ATTEMPTS ++/* Number of 5 microseconds we wait for EERD read and ++ * EERW write to complete */ ++#define IXGBE_EERD_EEWR_ATTEMPTS 100000 ++#endif ++ ++#ifndef IXGBE_FLUDONE_ATTEMPTS ++/* # attempts we wait for flush update to complete */ ++#define IXGBE_FLUDONE_ATTEMPTS 20000 ++#endif ++ ++#define IXGBE_PCIE_CTRL2 0x5 /* PCIe Control 2 Offset */ ++#define IXGBE_PCIE_CTRL2_DUMMY_ENABLE 0x8 /* Dummy Function Enable */ ++#define IXGBE_PCIE_CTRL2_LAN_DISABLE 0x2 /* LAN PCI Disable */ ++#define IXGBE_PCIE_CTRL2_DISABLE_SELECT 0x1 /* LAN Disable Select */ ++ ++#define IXGBE_SAN_MAC_ADDR_PORT0_OFFSET 0x0 ++#define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3 ++#define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1 ++#define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2 ++#define 
IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4 ++#define IXGBE_FW_PATCH_VERSION_4 0x7 ++#define IXGBE_FCOE_IBA_CAPS_BLK_PTR 0x33 /* iSCSI/FCOE block */ ++#define IXGBE_FCOE_IBA_CAPS_FCOE 0x20 /* FCOE flags */ ++#define IXGBE_ISCSI_FCOE_BLK_PTR 0x17 /* iSCSI/FCOE block */ ++#define IXGBE_ISCSI_FCOE_FLAGS_OFFSET 0x0 /* FCOE flags */ ++#define IXGBE_ISCSI_FCOE_FLAGS_ENABLE 0x1 /* FCOE flags enable bit */ ++#define IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR 0x27 /* Alt. SAN MAC block */ ++#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET 0x0 /* Alt. SAN MAC capability */ ++#define IXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt. SAN MAC 0 offset */ ++#define IXGBE_ALT_SAN_MAC_ADDR_PORT1_OFFSET 0x4 /* Alt. SAN MAC 1 offset */ ++#define IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET 0x7 /* Alt. WWNN prefix offset */ ++#define IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET 0x8 /* Alt. WWPN prefix offset */ ++#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC 0x0 /* Alt. SAN MAC exists */ ++#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt. WWN base exists */ ++ ++/* PCI Bus Info */ ++#define IXGBE_PCI_DEVICE_STATUS 0xAA ++#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020 ++#define IXGBE_PCI_LINK_STATUS 0xB2 ++#define IXGBE_PCI_DEVICE_CONTROL2 0xC8 ++#define IXGBE_PCI_LINK_WIDTH 0x3F0 ++#define IXGBE_PCI_LINK_WIDTH_1 0x10 ++#define IXGBE_PCI_LINK_WIDTH_2 0x20 ++#define IXGBE_PCI_LINK_WIDTH_4 0x40 ++#define IXGBE_PCI_LINK_WIDTH_8 0x80 ++#define IXGBE_PCI_LINK_SPEED 0xF ++#define IXGBE_PCI_LINK_SPEED_2500 0x1 ++#define IXGBE_PCI_LINK_SPEED_5000 0x2 ++#define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E ++#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80 ++#define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005 ++ ++/* Number of 100 microseconds we wait for PCI Express master disable */ ++#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800 ++ ++/* Check whether address is multicast. This is little-endian specific check.*/ ++#define IXGBE_IS_MULTICAST(Address) \ ++ (bool)(((u8 *)(Address))[0] & ((u8)0x01)) ++ ++/* Check whether an address is broadcast. */ ++#define IXGBE_IS_BROADCAST(Address) \ ++ ((((u8 *)(Address))[0] == ((u8)0xff)) && \ ++ (((u8 *)(Address))[1] == ((u8)0xff))) ++ ++/* RAH */ ++#define IXGBE_RAH_VIND_MASK 0x003C0000 ++#define IXGBE_RAH_VIND_SHIFT 18 ++#define IXGBE_RAH_AV 0x80000000 ++#define IXGBE_CLEAR_VMDQ_ALL 0xFFFFFFFF ++ ++/* Header split receive */ ++#define IXGBE_RFCTL_ISCSI_DIS 0x00000001 ++#define IXGBE_RFCTL_ISCSI_DWC_MASK 0x0000003E ++#define IXGBE_RFCTL_ISCSI_DWC_SHIFT 1 ++#define IXGBE_RFCTL_NFSW_DIS 0x00000040 ++#define IXGBE_RFCTL_NFSR_DIS 0x00000080 ++#define IXGBE_RFCTL_NFS_VER_MASK 0x00000300 ++#define IXGBE_RFCTL_NFS_VER_SHIFT 8 ++#define IXGBE_RFCTL_NFS_VER_2 0 ++#define IXGBE_RFCTL_NFS_VER_3 1 ++#define IXGBE_RFCTL_NFS_VER_4 2 ++#define IXGBE_RFCTL_IPV6_DIS 0x00000400 ++#define IXGBE_RFCTL_IPV6_XSUM_DIS 0x00000800 ++#define IXGBE_RFCTL_IPFRSP_DIS 0x00004000 ++#define IXGBE_RFCTL_IPV6_EX_DIS 0x00010000 ++#define IXGBE_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 ++ ++/* Transmit Config masks */ ++#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */ ++#define IXGBE_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. 
write-back flushing */ ++#define IXGBE_TXDCTL_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */ ++/* Enable short packet padding to 64 bytes */ ++#define IXGBE_TX_PAD_ENABLE 0x00000400 ++#define IXGBE_JUMBO_FRAME_ENABLE 0x00000004 /* Allow jumbo frames */ ++/* This allows for 16K packets + 4k for vlan */ ++#define IXGBE_MAX_FRAME_SZ 0x40040000 ++ ++#define IXGBE_TDWBAL_HEAD_WB_ENABLE 0x1 /* Tx head write-back enable */ ++#define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2 /* Tx seq# write-back enable */ ++ ++/* Receive Config masks */ ++#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */ ++#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */ ++#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */ ++#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */ ++ ++#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */ ++#define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/ ++#define IXGBE_FCTRL_UPE 0x00000200 /* Unicast Promiscuous Ena */ ++#define IXGBE_FCTRL_BAM 0x00000400 /* Broadcast Accept Mode */ ++#define IXGBE_FCTRL_PMCF 0x00001000 /* Pass MAC Control Frames */ ++#define IXGBE_FCTRL_DPF 0x00002000 /* Discard Pause Frame */ ++/* Receive Priority Flow Control Enable */ ++#define IXGBE_FCTRL_RPFCE 0x00004000 ++#define IXGBE_FCTRL_RFCE 0x00008000 /* Receive Flow Control Ena */ ++#define IXGBE_MFLCN_PMCF 0x00000001 /* Pass MAC Control Frames */ ++#define IXGBE_MFLCN_DPF 0x00000002 /* Discard Pause Frame */ ++#define IXGBE_MFLCN_RPFCE 0x00000004 /* Receive Priority FC Enable */ ++#define IXGBE_MFLCN_RFCE 0x00000008 /* Receive FC Enable */ ++ ++/* Multiple Receive Queue Control */ ++#define IXGBE_MRQC_RSSEN 0x00000001 /* RSS Enable */ ++#define IXGBE_MRQC_MRQE_MASK 0xF /* Bits 3:0 */ ++#define IXGBE_MRQC_RT8TCEN 0x00000002 /* 8 TC no RSS */ ++#define IXGBE_MRQC_RT4TCEN 0x00000003 /* 4 TC no RSS */ ++#define IXGBE_MRQC_RTRSS8TCEN 0x00000004 /* 8 TC w/ RSS */ ++#define IXGBE_MRQC_RTRSS4TCEN 0x00000005 /* 4 TC w/ RSS */ ++#define IXGBE_MRQC_VMDQEN 0x00000008 /* VMDq2 64 pools no RSS */ ++#define IXGBE_MRQC_VMDQRSS32EN 0x0000000A /* VMDq2 32 pools w/ RSS */ ++#define IXGBE_MRQC_VMDQRSS64EN 0x0000000B /* VMDq2 64 pools w/ RSS */ ++#define IXGBE_MRQC_VMDQRT8TCEN 0x0000000C /* VMDq2/RT 16 pool 8 TC */ ++#define IXGBE_MRQC_VMDQRT4TCEN 0x0000000D /* VMDq2/RT 32 pool 4 TC */ ++#define IXGBE_MRQC_RSS_FIELD_MASK 0xFFFF0000 ++#define IXGBE_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 ++#define IXGBE_MRQC_RSS_FIELD_IPV4 0x00020000 ++#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP 0x00040000 ++#define IXGBE_MRQC_RSS_FIELD_IPV6_EX 0x00080000 ++#define IXGBE_MRQC_RSS_FIELD_IPV6 0x00100000 ++#define IXGBE_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 ++#define IXGBE_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 ++#define IXGBE_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 ++#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP 0x01000000 ++#define IXGBE_MRQC_L3L4TXSWEN 0x00008000 ++ ++/* Queue Drop Enable */ ++#define IXGBE_QDE_ENABLE 0x00000001 ++#define IXGBE_QDE_IDX_MASK 0x00007F00 ++#define IXGBE_QDE_IDX_SHIFT 8 ++ ++#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ ++#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ ++#define IXGBE_TXD_CMD_EOP 0x01000000 /* End of Packet */ ++#define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ ++#define IXGBE_TXD_CMD_IC 0x04000000 /* Insert Checksum */ ++#define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */ ++#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ ++#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ 
++#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */ ++ ++#define IXGBE_RXDADV_IPSEC_STATUS_SECP 0x00020000 ++#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000 ++#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000 ++#define IXGBE_RXDADV_IPSEC_ERROR_AUTH_FAILED 0x18000000 ++#define IXGBE_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000 ++/* Multiple Transmit Queue Command Register */ ++#define IXGBE_MTQC_RT_ENA 0x1 /* DCB Enable */ ++#define IXGBE_MTQC_VT_ENA 0x2 /* VMDQ2 Enable */ ++#define IXGBE_MTQC_64Q_1PB 0x0 /* 64 queues 1 pack buffer */ ++#define IXGBE_MTQC_32VF 0x8 /* 4 TX Queues per pool w/32VF's */ ++#define IXGBE_MTQC_64VF 0x4 /* 2 TX Queues per pool w/64VF's */ ++#define IXGBE_MTQC_4TC_4TQ 0x8 /* 4 TC if RT_ENA and VT_ENA */ ++#define IXGBE_MTQC_8TC_8TQ 0xC /* 8 TC if RT_ENA or 8 TQ if VT_ENA */ ++ ++/* Receive Descriptor bit definitions */ ++#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */ ++#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */ ++#define IXGBE_RXD_STAT_FLM 0x04 /* FDir Match */ ++#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ ++#define IXGBE_RXDADV_NEXTP_MASK 0x000FFFF0 /* Next Descriptor Index */ ++#define IXGBE_RXDADV_NEXTP_SHIFT 0x00000004 ++#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ ++#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */ ++#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ ++#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */ ++#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */ ++#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */ ++#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ ++#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ ++#define IXGBE_RXD_STAT_LLINT 0x800 /* Pkt caused Low Latency Interrupt */ ++#define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */ ++#define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */ ++#define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */ ++#define IXGBE_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ ++#define IXGBE_RXD_ERR_CE 0x01 /* CRC Error */ ++#define IXGBE_RXD_ERR_LE 0x02 /* Length Error */ ++#define IXGBE_RXD_ERR_PE 0x08 /* Packet Error */ ++#define IXGBE_RXD_ERR_OSE 0x10 /* Oversize Error */ ++#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */ ++#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */ ++#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */ ++#define IXGBE_RXDADV_ERR_MASK 0xfff00000 /* RDESC.ERRORS mask */ ++#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */ ++#define IXGBE_RXDADV_ERR_FCEOFE 0x80000000 /* FCoEFe/IPE */ ++#define IXGBE_RXDADV_ERR_FCERR 0x00700000 /* FCERR/FDIRERR */ ++#define IXGBE_RXDADV_ERR_FDIR_LEN 0x00100000 /* FDIR Length error */ ++#define IXGBE_RXDADV_ERR_FDIR_DROP 0x00200000 /* FDIR Drop error */ ++#define IXGBE_RXDADV_ERR_FDIR_COLL 0x00400000 /* FDIR Collision error */ ++#define IXGBE_RXDADV_ERR_HBO 0x00800000 /*Header Buffer Overflow */ ++#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */ ++#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */ ++#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */ ++#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */ ++#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */ ++#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */ ++#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */ ++#define IXGBE_RXD_VLAN_ID_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ ++#define IXGBE_RXD_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ ++#define 
IXGBE_RXD_PRI_SHIFT 13 ++#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */ ++#define IXGBE_RXD_CFI_SHIFT 12 ++ ++#define IXGBE_RXDADV_STAT_DD IXGBE_RXD_STAT_DD /* Done */ ++#define IXGBE_RXDADV_STAT_EOP IXGBE_RXD_STAT_EOP /* End of Packet */ ++#define IXGBE_RXDADV_STAT_FLM IXGBE_RXD_STAT_FLM /* FDir Match */ ++#define IXGBE_RXDADV_STAT_VP IXGBE_RXD_STAT_VP /* IEEE VLAN Pkt */ ++#define IXGBE_RXDADV_STAT_MASK 0x000fffff /* Stat/NEXTP: bit 0-19 */ ++#define IXGBE_RXDADV_STAT_FCEOFS 0x00000040 /* FCoE EOF/SOF Stat */ ++#define IXGBE_RXDADV_STAT_FCSTAT 0x00000030 /* FCoE Pkt Stat */ ++#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */ ++#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */ ++#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */ ++#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */ ++ ++/* PSRTYPE bit definitions */ ++#define IXGBE_PSRTYPE_TCPHDR 0x00000010 ++#define IXGBE_PSRTYPE_UDPHDR 0x00000020 ++#define IXGBE_PSRTYPE_IPV4HDR 0x00000100 ++#define IXGBE_PSRTYPE_IPV6HDR 0x00000200 ++#define IXGBE_PSRTYPE_L2HDR 0x00001000 ++ ++/* SRRCTL bit definitions */ ++#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */ ++#define IXGBE_SRRCTL_RDMTS_SHIFT 22 ++#define IXGBE_SRRCTL_RDMTS_MASK 0x01C00000 ++#define IXGBE_SRRCTL_DROP_EN 0x10000000 ++#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F ++#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00 ++#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000 ++#define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 ++#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 ++#define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000 ++#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 ++#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000 ++ ++#define IXGBE_RXDPS_HDRSTAT_HDRSP 0x00008000 ++#define IXGBE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF ++ ++#define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F ++#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0 ++#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0 ++#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0 ++#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000 ++#define IXGBE_RXDADV_RSCCNT_SHIFT 17 ++#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5 ++#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000 ++#define IXGBE_RXDADV_SPH 0x8000 ++ ++/* RSS Hash results */ ++#define IXGBE_RXDADV_RSSTYPE_NONE 0x00000000 ++#define IXGBE_RXDADV_RSSTYPE_IPV4_TCP 0x00000001 ++#define IXGBE_RXDADV_RSSTYPE_IPV4 0x00000002 ++#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP 0x00000003 ++#define IXGBE_RXDADV_RSSTYPE_IPV6_EX 0x00000004 ++#define IXGBE_RXDADV_RSSTYPE_IPV6 0x00000005 ++#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006 ++#define IXGBE_RXDADV_RSSTYPE_IPV4_UDP 0x00000007 ++#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP 0x00000008 ++#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009 ++ ++/* RSS Packet Types as indicated in the receive descriptor. 
*/ ++#define IXGBE_RXDADV_PKTTYPE_NONE 0x00000000 ++#define IXGBE_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPv4 hdr present */ ++#define IXGBE_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPv4 hdr + extensions */ ++#define IXGBE_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPv6 hdr present */ ++#define IXGBE_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPv6 hdr + extensions */ ++#define IXGBE_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */ ++#define IXGBE_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */ ++#define IXGBE_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */ ++#define IXGBE_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */ ++#define IXGBE_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */ ++#define IXGBE_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */ ++#define IXGBE_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */ ++#define IXGBE_RXDADV_PKTTYPE_ETQF 0x00008000 /* PKTTYPE is ETQF index */ ++#define IXGBE_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */ ++#define IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */ ++ ++/* Security Processing bit Indication */ ++#define IXGBE_RXDADV_LNKSEC_STATUS_SECP 0x00020000 ++#define IXGBE_RXDADV_LNKSEC_ERROR_NO_SA_MATCH 0x08000000 ++#define IXGBE_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000 ++#define IXGBE_RXDADV_LNKSEC_ERROR_BIT_MASK 0x18000000 ++#define IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG 0x18000000 ++ ++/* Masks to determine if packets should be dropped due to frame errors */ ++#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \ ++ IXGBE_RXD_ERR_CE | \ ++ IXGBE_RXD_ERR_LE | \ ++ IXGBE_RXD_ERR_PE | \ ++ IXGBE_RXD_ERR_OSE | \ ++ IXGBE_RXD_ERR_USE) ++ ++#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \ ++ IXGBE_RXDADV_ERR_CE | \ ++ IXGBE_RXDADV_ERR_LE | \ ++ IXGBE_RXDADV_ERR_PE | \ ++ IXGBE_RXDADV_ERR_OSE | \ ++ IXGBE_RXDADV_ERR_USE) ++ ++/* Multicast bit mask */ ++#define IXGBE_MCSTCTRL_MFE 0x4 ++ ++/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ ++#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8 ++#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8 ++#define IXGBE_REQ_TX_BUFFER_GRANULARITY 1024 ++ ++/* Vlan-specific macros */ ++#define IXGBE_RX_DESC_SPECIAL_VLAN_MASK 0x0FFF /* VLAN ID in lower 12 bits */ ++#define IXGBE_RX_DESC_SPECIAL_PRI_MASK 0xE000 /* Priority in upper 3 bits */ ++#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */ ++#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT ++ ++/* SR-IOV specific macros */ ++#define IXGBE_MBVFICR_INDEX(vf_number) (vf_number >> 4) ++#define IXGBE_MBVFICR(_i) (0x00710 + (_i * 4)) ++#define IXGBE_VFLRE(_i) (((_i & 1) ? 0x001C0 : 0x00600)) ++#define IXGBE_VFLREC(_i) (0x00700 + (_i * 4)) ++/* Translated register #defines */ ++#define IXGBE_PVFCTRL(P) (0x00300 + (4 * P)) ++#define IXGBE_PVFSTATUS(P) (0x00008 + (0 * P)) ++#define IXGBE_PVFLINKS(P) (0x042A4 + (0 * P)) ++#define IXGBE_PVFRTIMER(P) (0x00048 + (0 * P)) ++#define IXGBE_PVFMAILBOX(P) (0x04C00 + (4 * P)) ++#define IXGBE_PVFRXMEMWRAP(P) (0x03190 + (0 * P)) ++#define IXGBE_PVTEICR(P) (0x00B00 + (4 * P)) ++#define IXGBE_PVTEICS(P) (0x00C00 + (4 * P)) ++#define IXGBE_PVTEIMS(P) (0x00D00 + (4 * P)) ++#define IXGBE_PVTEIMC(P) (0x00E00 + (4 * P)) ++#define IXGBE_PVTEIAC(P) (0x00F00 + (4 * P)) ++#define IXGBE_PVTEIAM(P) (0x04D00 + (4 * P)) ++#define IXGBE_PVTEITR(P) (((P) < 24) ? 
(0x00820 + ((P) * 4)) : \ ++ (0x012300 + (((P) - 24) * 4))) ++#define IXGBE_PVTIVAR(P) (0x12500 + (4 * P)) ++#define IXGBE_PVTIVAR_MISC(P) (0x04E00 + (4 * P)) ++#define IXGBE_PVTRSCINT(P) (0x12000 + (4 * P)) ++#define IXGBE_VFPBACL(P) (0x110C8 + (4 * P)) ++#define IXGBE_PVFRDBAL(P) ((P < 64) ? (0x01000 + (0x40 * P)) \ ++ : (0x0D000 + (0x40 * (P - 64)))) ++#define IXGBE_PVFRDBAH(P) ((P < 64) ? (0x01004 + (0x40 * P)) \ ++ : (0x0D004 + (0x40 * (P - 64)))) ++#define IXGBE_PVFRDLEN(P) ((P < 64) ? (0x01008 + (0x40 * P)) \ ++ : (0x0D008 + (0x40 * (P - 64)))) ++#define IXGBE_PVFRDH(P) ((P < 64) ? (0x01010 + (0x40 * P)) \ ++ : (0x0D010 + (0x40 * (P - 64)))) ++#define IXGBE_PVFRDT(P) ((P < 64) ? (0x01018 + (0x40 * P)) \ ++ : (0x0D018 + (0x40 * (P - 64)))) ++#define IXGBE_PVFRXDCTL(P) ((P < 64) ? (0x01028 + (0x40 * P)) \ ++ : (0x0D028 + (0x40 * (P - 64)))) ++#define IXGBE_PVFSRRCTL(P) ((P < 64) ? (0x01014 + (0x40 * P)) \ ++ : (0x0D014 + (0x40 * (P - 64)))) ++#define IXGBE_PVFPSRTYPE(P) (0x0EA00 + (4 * P)) ++#define IXGBE_PVFTDBAL(P) (0x06000 + (0x40 * P)) ++#define IXGBE_PVFTDBAH(P) (0x06004 + (0x40 * P)) ++#define IXGBE_PVFTTDLEN(P) (0x06008 + (0x40 * P)) ++#define IXGBE_PVFTDH(P) (0x06010 + (0x40 * P)) ++#define IXGBE_PVFTDT(P) (0x06018 + (0x40 * P)) ++#define IXGBE_PVFTXDCTL(P) (0x06028 + (0x40 * P)) ++#define IXGBE_PVFTDWBAL(P) (0x06038 + (0x40 * P)) ++#define IXGBE_PVFTDWBAH(P) (0x0603C + (0x40 * P)) ++#define IXGBE_PVFDCA_RXCTRL(P) ((P < 64) ? (0x0100C + (0x40 * P)) \ ++ : (0x0D00C + (0x40 * (P - 64)))) ++#define IXGBE_PVFDCA_TXCTRL(P) (0x0600C + (0x40 * P)) ++#define IXGBE_PVFGPRC(x) (0x0101C + (0x40 * x)) ++#define IXGBE_PVFGPTC(x) (0x08300 + (0x04 * x)) ++#define IXGBE_PVFGORC_LSB(x) (0x01020 + (0x40 * x)) ++#define IXGBE_PVFGORC_MSB(x) (0x0D020 + (0x40 * x)) ++#define IXGBE_PVFGOTC_LSB(x) (0x08400 + (0x08 * x)) ++#define IXGBE_PVFGOTC_MSB(x) (0x08404 + (0x08 * x)) ++#define IXGBE_PVFMPRC(x) (0x0D01C + (0x40 * x)) ++ ++/* Little Endian defines */ ++#ifndef __le16 ++#define __le16 u16 ++#endif ++#ifndef __le32 ++#define __le32 u32 ++#endif ++#ifndef __le64 ++#define __le64 u64 ++ ++#endif ++#ifndef __be16 ++/* Big Endian defines */ ++#define __be16 u16 ++#define __be32 u32 ++#define __be64 u64 ++ ++#endif ++enum ixgbe_fdir_pballoc_type { ++ IXGBE_FDIR_PBALLOC_64K = 0, ++ IXGBE_FDIR_PBALLOC_128K, ++ IXGBE_FDIR_PBALLOC_256K, ++}; ++#define IXGBE_FDIR_PBALLOC_SIZE_SHIFT 16 ++ ++/* Flow Director register values */ ++#define IXGBE_FDIRCTRL_PBALLOC_64K 0x00000001 ++#define IXGBE_FDIRCTRL_PBALLOC_128K 0x00000002 ++#define IXGBE_FDIRCTRL_PBALLOC_256K 0x00000003 ++#define IXGBE_FDIRCTRL_INIT_DONE 0x00000008 ++#define IXGBE_FDIRCTRL_PERFECT_MATCH 0x00000010 ++#define IXGBE_FDIRCTRL_REPORT_STATUS 0x00000020 ++#define IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS 0x00000080 ++#define IXGBE_FDIRCTRL_DROP_Q_SHIFT 8 ++#define IXGBE_FDIRCTRL_FLEX_SHIFT 16 ++#define IXGBE_FDIRCTRL_SEARCHLIM 0x00800000 ++#define IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT 24 ++#define IXGBE_FDIRCTRL_FULL_THRESH_MASK 0xF0000000 ++#define IXGBE_FDIRCTRL_FULL_THRESH_SHIFT 28 ++ ++#define IXGBE_FDIRTCPM_DPORTM_SHIFT 16 ++#define IXGBE_FDIRUDPM_DPORTM_SHIFT 16 ++#define IXGBE_FDIRIP6M_DIPM_SHIFT 16 ++#define IXGBE_FDIRM_VLANID 0x00000001 ++#define IXGBE_FDIRM_VLANP 0x00000002 ++#define IXGBE_FDIRM_POOL 0x00000004 ++#define IXGBE_FDIRM_L4P 0x00000008 ++#define IXGBE_FDIRM_FLEX 0x00000010 ++#define IXGBE_FDIRM_DIPv6 0x00000020 ++ ++#define IXGBE_FDIRFREE_FREE_MASK 0xFFFF ++#define IXGBE_FDIRFREE_FREE_SHIFT 0 ++#define IXGBE_FDIRFREE_COLL_MASK 0x7FFF0000 
++#define IXGBE_FDIRFREE_COLL_SHIFT 16 ++#define IXGBE_FDIRLEN_MAXLEN_MASK 0x3F ++#define IXGBE_FDIRLEN_MAXLEN_SHIFT 0 ++#define IXGBE_FDIRLEN_MAXHASH_MASK 0x7FFF0000 ++#define IXGBE_FDIRLEN_MAXHASH_SHIFT 16 ++#define IXGBE_FDIRUSTAT_ADD_MASK 0xFFFF ++#define IXGBE_FDIRUSTAT_ADD_SHIFT 0 ++#define IXGBE_FDIRUSTAT_REMOVE_MASK 0xFFFF0000 ++#define IXGBE_FDIRUSTAT_REMOVE_SHIFT 16 ++#define IXGBE_FDIRFSTAT_FADD_MASK 0x00FF ++#define IXGBE_FDIRFSTAT_FADD_SHIFT 0 ++#define IXGBE_FDIRFSTAT_FREMOVE_MASK 0xFF00 ++#define IXGBE_FDIRFSTAT_FREMOVE_SHIFT 8 ++#define IXGBE_FDIRPORT_DESTINATION_SHIFT 16 ++#define IXGBE_FDIRVLAN_FLEX_SHIFT 16 ++#define IXGBE_FDIRHASH_BUCKET_VALID_SHIFT 15 ++#define IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT 16 ++ ++#define IXGBE_FDIRCMD_CMD_MASK 0x00000003 ++#define IXGBE_FDIRCMD_CMD_ADD_FLOW 0x00000001 ++#define IXGBE_FDIRCMD_CMD_REMOVE_FLOW 0x00000002 ++#define IXGBE_FDIRCMD_CMD_QUERY_REM_FILT 0x00000003 ++#define IXGBE_FDIRCMD_CMD_QUERY_REM_HASH 0x00000007 ++#define IXGBE_FDIRCMD_FILTER_UPDATE 0x00000008 ++#define IXGBE_FDIRCMD_IPv6DMATCH 0x00000010 ++#define IXGBE_FDIRCMD_L4TYPE_UDP 0x00000020 ++#define IXGBE_FDIRCMD_L4TYPE_TCP 0x00000040 ++#define IXGBE_FDIRCMD_L4TYPE_SCTP 0x00000060 ++#define IXGBE_FDIRCMD_IPV6 0x00000080 ++#define IXGBE_FDIRCMD_CLEARHT 0x00000100 ++#define IXGBE_FDIRCMD_DROP 0x00000200 ++#define IXGBE_FDIRCMD_INT 0x00000400 ++#define IXGBE_FDIRCMD_LAST 0x00000800 ++#define IXGBE_FDIRCMD_COLLISION 0x00001000 ++#define IXGBE_FDIRCMD_QUEUE_EN 0x00008000 ++#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16 ++#define IXGBE_FDIRCMD_VT_POOL_SHIFT 24 ++#define IXGBE_FDIR_INIT_DONE_POLL 10 ++#define IXGBE_FDIRCMD_CMD_POLL 10 ++ ++/* Transmit Descriptor - Legacy */ ++struct ixgbe_legacy_tx_desc { ++ u64 buffer_addr; /* Address of the descriptor's data buffer */ ++ union { ++ __le32 data; ++ struct { ++ __le16 length; /* Data buffer length */ ++ u8 cso; /* Checksum offset */ ++ u8 cmd; /* Descriptor control */ ++ } flags; ++ } lower; ++ union { ++ __le32 data; ++ struct { ++ u8 status; /* Descriptor status */ ++ u8 css; /* Checksum start */ ++ __le16 vlan; ++ } fields; ++ } upper; ++}; ++ ++/* Transmit Descriptor - Advanced */ ++union ixgbe_adv_tx_desc { ++ struct { ++ __le64 buffer_addr; /* Address of descriptor's data buf */ ++ __le32 cmd_type_len; ++ __le32 olinfo_status; ++ } read; ++ struct { ++ __le64 rsvd; /* Reserved */ ++ __le32 nxtseq_seed; ++ __le32 status; ++ } wb; ++}; ++ ++/* Receive Descriptor - Legacy */ ++struct ixgbe_legacy_rx_desc { ++ __le64 buffer_addr; /* Address of the descriptor's data buffer */ ++ __le16 length; /* Length of data DMAed into data buffer */ ++ __le16 csum; /* Packet checksum */ ++ u8 status; /* Descriptor status */ ++ u8 errors; /* Descriptor Errors */ ++ __le16 vlan; ++}; ++ ++/* Receive Descriptor - Advanced */ ++union ixgbe_adv_rx_desc { ++ struct { ++ __le64 pkt_addr; /* Packet buffer address */ ++ __le64 hdr_addr; /* Header buffer address */ ++ } read; ++ struct { ++ struct { ++ union { ++ __le32 data; ++ struct { ++ __le16 pkt_info; /* RSS, Pkt type */ ++ __le16 hdr_info; /* Splithdr, hdrlen */ ++ } hs_rss; ++ } lo_dword; ++ union { ++ __le32 rss; /* RSS Hash */ ++ struct { ++ __le16 ip_id; /* IP id */ ++ __le16 csum; /* Packet Checksum */ ++ } csum_ip; ++ } hi_dword; ++ } lower; ++ struct { ++ __le32 status_error; /* ext status/error */ ++ __le16 length; /* Packet length */ ++ __le16 vlan; /* VLAN tag */ ++ } upper; ++ } wb; /* writeback */ ++}; ++ ++/* Context descriptors */ ++struct ixgbe_adv_tx_context_desc { ++ __le32 
vlan_macip_lens; ++ __le32 seqnum_seed; ++ __le32 type_tucmd_mlhl; ++ __le32 mss_l4len_idx; ++}; ++ ++/* Adv Transmit Descriptor Config Masks */ ++#define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buf length(bytes) */ ++#define IXGBE_ADVTXD_MAC_LINKSEC 0x00040000 /* Insert LinkSec */ ++#define IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK 0x000003FF /* IPSec SA index */ ++#define IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK 0x000001FF /* IPSec ESP length */ ++#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */ ++#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */ ++#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ ++#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */ ++#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */ ++#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */ ++#define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */ ++#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */ ++#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */ ++#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ ++#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */ ++#define IXGBE_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED pres in WB */ ++#define IXGBE_ADVTXD_STAT_RSV 0x0000000C /* STA Reserved */ ++#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ ++#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */ ++#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */ ++#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \ ++ IXGBE_ADVTXD_POPTS_SHIFT) ++#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \ ++ IXGBE_ADVTXD_POPTS_SHIFT) ++#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */ ++#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */ ++#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */ ++#define IXGBE_ADVTXD_POPTS_ISCO_FULL 0x00001800 /* 1st&Last TSO-full iSCSI PDU */ ++#define IXGBE_ADVTXD_POPTS_RSV 0x00002000 /* POPTS Reserved */ ++#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ ++#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ ++#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ ++#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ ++#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ ++#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ ++#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ ++#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ ++#define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /*Req requires Markers and CRC*/ ++#define IXGBE_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */ ++#define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */ ++#define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000/* ESP Encrypt Enable */ ++#define IXGBE_ADVTXT_TUCMD_FCOE 0x00008000 /* FCoE Frame Type */ ++#define IXGBE_ADVTXD_FCOEF_EOF_MASK (0x3 << 10) /* FC EOF index */ ++#define IXGBE_ADVTXD_FCOEF_SOF ((1 << 2) << 10) /* FC SOF index */ ++#define IXGBE_ADVTXD_FCOEF_PARINC ((1 << 3) << 10) /* Rel_Off in F_CTL */ ++#define IXGBE_ADVTXD_FCOEF_ORIE ((1 << 4) << 10) /* Orientation: End */ ++#define IXGBE_ADVTXD_FCOEF_ORIS ((1 << 5) << 10) /* Orientation: Start */ ++#define IXGBE_ADVTXD_FCOEF_EOF_N (0x0 << 10) /* 00: EOFn */ ++#define IXGBE_ADVTXD_FCOEF_EOF_T (0x1 << 10) /* 01: EOFt */ ++#define 
IXGBE_ADVTXD_FCOEF_EOF_NI (0x2 << 10) /* 10: EOFni */ ++#define IXGBE_ADVTXD_FCOEF_EOF_A (0x3 << 10) /* 11: EOFa */ ++#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ ++#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ ++ ++/* Autonegotiation advertised speeds */ ++typedef u32 ixgbe_autoneg_advertised; ++/* Link speed */ ++typedef u32 ixgbe_link_speed; ++#define IXGBE_LINK_SPEED_UNKNOWN 0 ++#define IXGBE_LINK_SPEED_100_FULL 0x0008 ++#define IXGBE_LINK_SPEED_1GB_FULL 0x0020 ++#define IXGBE_LINK_SPEED_10GB_FULL 0x0080 ++#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \ ++ IXGBE_LINK_SPEED_10GB_FULL) ++#define IXGBE_LINK_SPEED_82599_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \ ++ IXGBE_LINK_SPEED_1GB_FULL | \ ++ IXGBE_LINK_SPEED_10GB_FULL) ++ ++ ++/* Physical layer type */ ++typedef u32 ixgbe_physical_layer; ++#define IXGBE_PHYSICAL_LAYER_UNKNOWN 0 ++#define IXGBE_PHYSICAL_LAYER_10GBASE_T 0x0001 ++#define IXGBE_PHYSICAL_LAYER_1000BASE_T 0x0002 ++#define IXGBE_PHYSICAL_LAYER_100BASE_TX 0x0004 ++#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x0008 ++#define IXGBE_PHYSICAL_LAYER_10GBASE_LR 0x0010 ++#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x0020 ++#define IXGBE_PHYSICAL_LAYER_10GBASE_SR 0x0040 ++#define IXGBE_PHYSICAL_LAYER_10GBASE_KX4 0x0080 ++#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4 0x0100 ++#define IXGBE_PHYSICAL_LAYER_1000BASE_KX 0x0200 ++#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x0400 ++#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x0800 ++#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000 ++#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000 ++ ++ ++/* Software ATR hash keys */ ++#define IXGBE_ATR_BUCKET_HASH_KEY 0xE214AD3D ++#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x14364D17 ++ ++/* Software ATR input stream offsets and masks */ ++#define IXGBE_ATR_VLAN_OFFSET 0 ++#define IXGBE_ATR_SRC_IPV6_OFFSET 2 ++#define IXGBE_ATR_SRC_IPV4_OFFSET 14 ++#define IXGBE_ATR_DST_IPV6_OFFSET 18 ++#define IXGBE_ATR_DST_IPV4_OFFSET 30 ++#define IXGBE_ATR_SRC_PORT_OFFSET 34 ++#define IXGBE_ATR_DST_PORT_OFFSET 36 ++#define IXGBE_ATR_FLEX_BYTE_OFFSET 38 ++#define IXGBE_ATR_VM_POOL_OFFSET 40 ++#define IXGBE_ATR_L4TYPE_OFFSET 41 ++ ++#define IXGBE_ATR_L4TYPE_MASK 0x3 ++#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4 ++#define IXGBE_ATR_L4TYPE_UDP 0x1 ++#define IXGBE_ATR_L4TYPE_TCP 0x2 ++#define IXGBE_ATR_L4TYPE_SCTP 0x3 ++#define IXGBE_ATR_HASH_MASK 0x7fff ++ ++/* Flow Director ATR input struct. */ ++struct ixgbe_atr_input { ++ /* Byte layout in order, all values with MSB first: ++ * ++ * vlan_id - 2 bytes ++ * src_ip - 16 bytes ++ * dst_ip - 16 bytes ++ * src_port - 2 bytes ++ * dst_port - 2 bytes ++ * flex_bytes - 2 bytes ++ * vm_pool - 1 byte ++ * l4type - 1 byte ++ */ ++ u8 byte_stream[42]; ++}; ++ ++struct ixgbe_atr_input_masks { ++ u32 src_ip_mask; ++ u32 dst_ip_mask; ++ u16 src_port_mask; ++ u16 dst_port_mask; ++ u16 vlan_id_mask; ++ u16 data_mask; ++}; ++ ++/* ++ * Unavailable: The FCoE Boot Option ROM is not present in the flash. ++ * Disabled: Present; boot order is not set for any targets on the port. ++ * Enabled: Present; boot order is set for at least one target on the port. 
++ */ ++enum ixgbe_fcoe_boot_status { ++ ixgbe_fcoe_bootstatus_disabled = 0, ++ ixgbe_fcoe_bootstatus_enabled = 1, ++ ixgbe_fcoe_bootstatus_unavailable = 0xFFFF ++}; ++ ++enum ixgbe_eeprom_type { ++ ixgbe_eeprom_uninitialized = 0, ++ ixgbe_eeprom_spi, ++ ixgbe_flash, ++ ixgbe_eeprom_none /* No NVM support */ ++}; ++ ++enum ixgbe_mac_type { ++ ixgbe_mac_unknown = 0, ++ ixgbe_mac_82598EB, ++ ixgbe_mac_82599EB, ++ ixgbe_num_macs ++}; ++ ++enum ixgbe_phy_type { ++ ixgbe_phy_unknown = 0, ++ ixgbe_phy_none, ++ ixgbe_phy_tn, ++ ixgbe_phy_aq, ++ ixgbe_phy_cu_unknown, ++ ixgbe_phy_qt, ++ ixgbe_phy_xaui, ++ ixgbe_phy_nl, ++ ixgbe_phy_sfp_passive_tyco, ++ ixgbe_phy_sfp_passive_unknown, ++ ixgbe_phy_sfp_active_unknown, ++ ixgbe_phy_sfp_avago, ++ ixgbe_phy_sfp_ftl, ++ ixgbe_phy_sfp_ftl_active, ++ ixgbe_phy_sfp_unknown, ++ ixgbe_phy_sfp_intel, ++ ixgbe_phy_sfp_unsupported, /*Enforce bit set with unsupported module*/ ++ ixgbe_phy_generic ++}; ++ ++/* ++ * SFP+ module type IDs: ++ * ++ * ID Module Type ++ * ============= ++ * 0 SFP_DA_CU ++ * 1 SFP_SR ++ * 2 SFP_LR ++ * 3 SFP_DA_CU_CORE0 - 82599-specific ++ * 4 SFP_DA_CU_CORE1 - 82599-specific ++ * 5 SFP_SR/LR_CORE0 - 82599-specific ++ * 6 SFP_SR/LR_CORE1 - 82599-specific ++ */ ++enum ixgbe_sfp_type { ++ ixgbe_sfp_type_da_cu = 0, ++ ixgbe_sfp_type_sr = 1, ++ ixgbe_sfp_type_lr = 2, ++ ixgbe_sfp_type_da_cu_core0 = 3, ++ ixgbe_sfp_type_da_cu_core1 = 4, ++ ixgbe_sfp_type_srlr_core0 = 5, ++ ixgbe_sfp_type_srlr_core1 = 6, ++ ixgbe_sfp_type_da_act_lmt_core0 = 7, ++ ixgbe_sfp_type_da_act_lmt_core1 = 8, ++ ixgbe_sfp_type_1g_cu_core0 = 9, ++ ixgbe_sfp_type_1g_cu_core1 = 10, ++ ixgbe_sfp_type_not_present = 0xFFFE, ++ ixgbe_sfp_type_unknown = 0xFFFF ++}; ++ ++enum ixgbe_media_type { ++ ixgbe_media_type_unknown = 0, ++ ixgbe_media_type_fiber, ++ ixgbe_media_type_copper, ++ ixgbe_media_type_backplane, ++ ixgbe_media_type_cx4, ++ ixgbe_media_type_virtual ++}; ++ ++/* Flow Control Settings */ ++enum ixgbe_fc_mode { ++ ixgbe_fc_none = 0, ++ ixgbe_fc_rx_pause, ++ ixgbe_fc_tx_pause, ++ ixgbe_fc_full, ++#ifdef CONFIG_DCB ++ ixgbe_fc_pfc, ++#endif ++ ixgbe_fc_default ++}; ++ ++/* Smart Speed Settings */ ++#define IXGBE_SMARTSPEED_MAX_RETRIES 3 ++enum ixgbe_smart_speed { ++ ixgbe_smart_speed_auto = 0, ++ ixgbe_smart_speed_on, ++ ixgbe_smart_speed_off ++}; ++ ++/* PCI bus types */ ++enum ixgbe_bus_type { ++ ixgbe_bus_type_unknown = 0, ++ ixgbe_bus_type_pci, ++ ixgbe_bus_type_pcix, ++ ixgbe_bus_type_pci_express, ++ ixgbe_bus_type_reserved ++}; ++ ++/* PCI bus speeds */ ++enum ixgbe_bus_speed { ++ ixgbe_bus_speed_unknown = 0, ++ ixgbe_bus_speed_33 = 33, ++ ixgbe_bus_speed_66 = 66, ++ ixgbe_bus_speed_100 = 100, ++ ixgbe_bus_speed_120 = 120, ++ ixgbe_bus_speed_133 = 133, ++ ixgbe_bus_speed_2500 = 2500, ++ ixgbe_bus_speed_5000 = 5000, ++ ixgbe_bus_speed_reserved ++}; ++ ++/* PCI bus widths */ ++enum ixgbe_bus_width { ++ ixgbe_bus_width_unknown = 0, ++ ixgbe_bus_width_pcie_x1 = 1, ++ ixgbe_bus_width_pcie_x2 = 2, ++ ixgbe_bus_width_pcie_x4 = 4, ++ ixgbe_bus_width_pcie_x8 = 8, ++ ixgbe_bus_width_32 = 32, ++ ixgbe_bus_width_64 = 64, ++ ixgbe_bus_width_reserved ++}; ++ ++struct ixgbe_addr_filter_info { ++ u32 num_mc_addrs; ++ u32 rar_used_count; ++ u32 mta_in_use; ++ u32 overflow_promisc; ++ bool user_set_promisc; ++}; ++ ++/* Bus parameters */ ++struct ixgbe_bus_info { ++ enum ixgbe_bus_speed speed; ++ enum ixgbe_bus_width width; ++ enum ixgbe_bus_type type; ++ ++ u16 func; ++ u16 lan_id; ++}; ++ ++/* Flow control parameters */ ++struct ixgbe_fc_info { ++ u32 high_water; /* Flow Control 
High-water */ ++ u32 low_water; /* Flow Control Low-water */ ++ u16 pause_time; /* Flow Control Pause timer */ ++ bool send_xon; /* Flow control send XON */ ++ bool strict_ieee; /* Strict IEEE mode */ ++ bool disable_fc_autoneg; /* Do not autonegotiate FC */ ++ bool fc_was_autonegged; /* Is current_mode the result of autonegging? */ ++ enum ixgbe_fc_mode current_mode; /* FC mode in effect */ ++ enum ixgbe_fc_mode requested_mode; /* FC mode requested by caller */ ++}; ++ ++/* Statistics counters collected by the MAC */ ++struct ixgbe_hw_stats { ++ u64 crcerrs; ++ u64 illerrc; ++ u64 errbc; ++ u64 mspdc; ++ u64 mpctotal; ++ u64 mpc[8]; ++ u64 mlfc; ++ u64 mrfc; ++ u64 rlec; ++ u64 lxontxc; ++ u64 lxonrxc; ++ u64 lxofftxc; ++ u64 lxoffrxc; ++ u64 pxontxc[8]; ++ u64 pxonrxc[8]; ++ u64 pxofftxc[8]; ++ u64 pxoffrxc[8]; ++ u64 prc64; ++ u64 prc127; ++ u64 prc255; ++ u64 prc511; ++ u64 prc1023; ++ u64 prc1522; ++ u64 gprc; ++ u64 bprc; ++ u64 mprc; ++ u64 gptc; ++ u64 gorc; ++ u64 gotc; ++ u64 rnbc[8]; ++ u64 ruc; ++ u64 rfc; ++ u64 roc; ++ u64 rjc; ++ u64 mngprc; ++ u64 mngpdc; ++ u64 mngptc; ++ u64 tor; ++ u64 tpr; ++ u64 tpt; ++ u64 ptc64; ++ u64 ptc127; ++ u64 ptc255; ++ u64 ptc511; ++ u64 ptc1023; ++ u64 ptc1522; ++ u64 mptc; ++ u64 bptc; ++ u64 xec; ++ u64 qprc[16]; ++ u64 qptc[16]; ++ u64 qbrc[16]; ++ u64 qbtc[16]; ++ u64 qprdc[16]; ++ u64 pxon2offc[8]; ++ u64 fdirustat_add; ++ u64 fdirustat_remove; ++ u64 fdirfstat_fadd; ++ u64 fdirfstat_fremove; ++ u64 fdirmatch; ++ u64 fdirmiss; ++ u64 fccrc; ++ u64 fclast; ++ u64 fcoerpdc; ++ u64 fcoeprc; ++ u64 fcoeptc; ++ u64 fcoedwrc; ++ u64 fcoedwtc; ++}; ++ ++/* forward declaration */ ++struct ixgbe_hw; ++ ++/* iterator type for walking multicast address lists */ ++typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr, ++ u32 *vmdq); ++ ++/* Function pointer table */ ++struct ixgbe_eeprom_operations { ++ s32 (*init_params)(struct ixgbe_hw *); ++ s32 (*read)(struct ixgbe_hw *, u16, u16 *); ++ s32 (*write)(struct ixgbe_hw *, u16, u16); ++ s32 (*validate_checksum)(struct ixgbe_hw *, u16 *); ++ s32 (*update_checksum)(struct ixgbe_hw *); ++ u16 (*calc_checksum)(struct ixgbe_hw *); ++}; ++ ++struct ixgbe_mac_operations { ++ s32 (*init_hw)(struct ixgbe_hw *); ++ s32 (*reset_hw)(struct ixgbe_hw *); ++ s32 (*start_hw)(struct ixgbe_hw *); ++ s32 (*clear_hw_cntrs)(struct ixgbe_hw *); ++ void (*enable_relaxed_ordering)(struct ixgbe_hw *); ++ enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *); ++ u32 (*get_supported_physical_layer)(struct ixgbe_hw *); ++ s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *); ++ s32 (*get_san_mac_addr)(struct ixgbe_hw *, u8 *); ++ s32 (*set_san_mac_addr)(struct ixgbe_hw *, u8 *); ++ s32 (*get_device_caps)(struct ixgbe_hw *, u16 *); ++ s32 (*get_wwn_prefix)(struct ixgbe_hw *, u16 *, u16 *); ++ s32 (*get_fcoe_boot_status)(struct ixgbe_hw *, u16 *); ++ s32 (*stop_adapter)(struct ixgbe_hw *); ++ s32 (*get_bus_info)(struct ixgbe_hw *); ++ void (*set_lan_id)(struct ixgbe_hw *); ++ s32 (*read_analog_reg8)(struct ixgbe_hw*, u32, u8*); ++ s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8); ++ s32 (*setup_sfp)(struct ixgbe_hw *); ++ s32 (*enable_rx_dma)(struct ixgbe_hw *, u32); ++ s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u16); ++ void (*release_swfw_sync)(struct ixgbe_hw *, u16); ++ ++ /* Link */ ++ void (*disable_tx_laser)(struct ixgbe_hw *); ++ void (*enable_tx_laser)(struct ixgbe_hw *); ++ void (*flap_tx_laser)(struct ixgbe_hw *); ++ s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool); ++ s32 
(*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool); ++ s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *, ++ bool *); ++ ++ /* LED */ ++ s32 (*led_on)(struct ixgbe_hw *, u32); ++ s32 (*led_off)(struct ixgbe_hw *, u32); ++ s32 (*blink_led_start)(struct ixgbe_hw *, u32); ++ s32 (*blink_led_stop)(struct ixgbe_hw *, u32); ++ ++ /* RAR, Multicast, VLAN */ ++ s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32); ++ s32 (*clear_rar)(struct ixgbe_hw *, u32); ++ s32 (*insert_mac_addr)(struct ixgbe_hw *, u8 *, u32); ++ s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32); ++ s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32); ++ s32 (*init_rx_addrs)(struct ixgbe_hw *); ++ s32 (*update_uc_addr_list)(struct ixgbe_hw *, u8 *, u32, ++ ixgbe_mc_addr_itr); ++ s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32, ++ ixgbe_mc_addr_itr); ++ s32 (*enable_mc)(struct ixgbe_hw *); ++ s32 (*disable_mc)(struct ixgbe_hw *); ++ s32 (*clear_vfta)(struct ixgbe_hw *); ++ s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool); ++ s32 (*init_uta_tables)(struct ixgbe_hw *); ++ ++ /* Flow Control */ ++ s32 (*fc_enable)(struct ixgbe_hw *, s32); ++}; ++ ++struct ixgbe_phy_operations { ++ s32 (*identify)(struct ixgbe_hw *); ++ s32 (*identify_sfp)(struct ixgbe_hw *); ++ s32 (*init)(struct ixgbe_hw *); ++ s32 (*reset)(struct ixgbe_hw *); ++ s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *); ++ s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16); ++ s32 (*setup_link)(struct ixgbe_hw *); ++ s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool, ++ bool); ++ s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *); ++ s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *); ++ s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *); ++ s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8); ++ s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *); ++ s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8); ++ void (*i2c_bus_clear)(struct ixgbe_hw *); ++ s32 (*check_overtemp)(struct ixgbe_hw *); ++}; ++ ++struct ixgbe_eeprom_info { ++ struct ixgbe_eeprom_operations ops; ++ enum ixgbe_eeprom_type type; ++ u32 semaphore_delay; ++ u16 word_size; ++ u16 address_bits; ++}; ++ ++#define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01 ++struct ixgbe_mac_info { ++ struct ixgbe_mac_operations ops; ++ enum ixgbe_mac_type type; ++ u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; ++ u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; ++ u8 san_addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; ++ /* prefix for World Wide Node Name (WWNN) */ ++ u16 wwnn_prefix; ++ /* prefix for World Wide Port Name (WWPN) */ ++ u16 wwpn_prefix; ++#define IXGBE_MAX_MTA 128 ++ u32 mta_shadow[IXGBE_MAX_MTA]; ++ s32 mc_filter_type; ++ u32 mcft_size; ++ u32 vft_size; ++ u32 num_rar_entries; ++ u32 rar_highwater; ++ u32 rx_pb_size; ++ u32 max_tx_queues; ++ u32 max_rx_queues; ++ u32 max_msix_vectors; ++ bool msix_vectors_from_pcie; ++ u32 orig_autoc; ++ u32 orig_autoc2; ++ bool orig_link_settings_stored; ++ bool autotry_restart; ++ u8 flags; ++}; ++ ++struct ixgbe_phy_info { ++ struct ixgbe_phy_operations ops; ++ enum ixgbe_phy_type type; ++ u32 addr; ++ u32 id; ++ enum ixgbe_sfp_type sfp_type; ++ bool sfp_setup_needed; ++ u32 revision; ++ enum ixgbe_media_type media_type; ++ bool reset_disable; ++ ixgbe_autoneg_advertised autoneg_advertised; ++ enum ixgbe_smart_speed smart_speed; ++ bool smart_speed_active; ++ bool multispeed_fiber; ++ bool reset_if_overtemp; ++}; ++ ++#include "ixgbe_mbx.h" ++ ++struct ixgbe_mbx_operations { ++ void (*init_params)(struct ixgbe_hw *hw); ++ s32 
(*read)(struct ixgbe_hw *, u32 *, u16, u16); ++ s32 (*write)(struct ixgbe_hw *, u32 *, u16, u16); ++ s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16, u16); ++ s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16); ++ s32 (*check_for_msg)(struct ixgbe_hw *, u16); ++ s32 (*check_for_ack)(struct ixgbe_hw *, u16); ++ s32 (*check_for_rst)(struct ixgbe_hw *, u16); ++}; ++ ++struct ixgbe_mbx_stats { ++ u32 msgs_tx; ++ u32 msgs_rx; ++ ++ u32 acks; ++ u32 reqs; ++ u32 rsts; ++}; ++ ++struct ixgbe_mbx_info { ++ struct ixgbe_mbx_operations ops; ++ struct ixgbe_mbx_stats stats; ++ u32 timeout; ++ u32 udelay; ++ u32 v2p_mailbox; ++ u16 size; ++}; ++ ++struct ixgbe_hw { ++ u8 __iomem *hw_addr; ++ void *back; ++ struct ixgbe_mac_info mac; ++ struct ixgbe_addr_filter_info addr_ctrl; ++ struct ixgbe_fc_info fc; ++ struct ixgbe_phy_info phy; ++ struct ixgbe_eeprom_info eeprom; ++ struct ixgbe_bus_info bus; ++ struct ixgbe_mbx_info mbx; ++ u16 device_id; ++ u16 vendor_id; ++ u16 subsystem_device_id; ++ u16 subsystem_vendor_id; ++ u8 revision_id; ++ bool adapter_stopped; ++}; ++ ++#define ixgbe_call_func(hw, func, params, error) \ ++ (func != NULL) ? func params : error ++ ++ ++/* Error Codes */ ++#define IXGBE_ERR_EEPROM -1 ++#define IXGBE_ERR_EEPROM_CHECKSUM -2 ++#define IXGBE_ERR_PHY -3 ++#define IXGBE_ERR_CONFIG -4 ++#define IXGBE_ERR_PARAM -5 ++#define IXGBE_ERR_MAC_TYPE -6 ++#define IXGBE_ERR_UNKNOWN_PHY -7 ++#define IXGBE_ERR_LINK_SETUP -8 ++#define IXGBE_ERR_ADAPTER_STOPPED -9 ++#define IXGBE_ERR_INVALID_MAC_ADDR -10 ++#define IXGBE_ERR_DEVICE_NOT_SUPPORTED -11 ++#define IXGBE_ERR_MASTER_REQUESTS_PENDING -12 ++#define IXGBE_ERR_INVALID_LINK_SETTINGS -13 ++#define IXGBE_ERR_AUTONEG_NOT_COMPLETE -14 ++#define IXGBE_ERR_RESET_FAILED -15 ++#define IXGBE_ERR_SWFW_SYNC -16 ++#define IXGBE_ERR_PHY_ADDR_INVALID -17 ++#define IXGBE_ERR_I2C -18 ++#define IXGBE_ERR_SFP_NOT_SUPPORTED -19 ++#define IXGBE_ERR_SFP_NOT_PRESENT -20 ++#define IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT -21 ++#define IXGBE_ERR_NO_SAN_ADDR_PTR -22 ++#define IXGBE_ERR_FDIR_REINIT_FAILED -23 ++#define IXGBE_ERR_EEPROM_VERSION -24 ++#define IXGBE_ERR_NO_SPACE -25 ++#define IXGBE_ERR_OVERTEMP -26 ++#define IXGBE_ERR_FC_NOT_NEGOTIATED -27 ++#define IXGBE_ERR_FC_NOT_SUPPORTED -28 ++#define IXGBE_ERR_FLOW_CONTROL -29 ++#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30 ++#define IXGBE_ERR_PBA_SECTION -31 ++#define IXGBE_ERR_INVALID_ARGUMENT -32 ++#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF ++ ++#define UNREFERENCED_2PARAMETER(_p, _q) (_p); (_q); ++#define UNREFERENCED_3PARAMETER(_p, _q, _r) (_p); (_q); (_r); ++#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s) (_p); (_q); (_r); (_s); ++ ++#endif /* _IXGBE_TYPE_H_ */ +diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/kcompat.c linux-2.6.22-50/drivers/net/ixgbe/kcompat.c +--- linux-2.6.22-40/drivers/net/ixgbe/kcompat.c 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.22-50/drivers/net/ixgbe/kcompat.c 2010-08-25 17:56:26.000000000 -0400 +@@ -0,0 +1,1090 @@ ++/******************************************************************************* ++ ++ Intel 10 Gigabit PCI Express Linux driver ++ Copyright(c) 1999 - 2010 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. 
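As a reading aid for the ixgbe_type.h definitions above: hardware-specific behaviour is reached through the function-pointer tables (struct ixgbe_mac_operations and friends), failures are reported with the negative IXGBE_ERR_* codes, and the ixgbe_call_func() macro guards against entries a given MAC type leaves NULL. A minimal sketch of the intended dispatch pattern follows; the wrapper name is invented for illustration and is not part of the patch.

	static s32 example_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
	                              bool *link_up, bool wait_to_complete)
	{
		/* Calls hw->mac.ops.check_link if the MAC type provides it,
		 * otherwise evaluates to IXGBE_NOT_IMPLEMENTED. */
		return ixgbe_call_func(hw, hw->mac.ops.check_link,
		                       (hw, speed, link_up, wait_to_complete),
		                       IXGBE_NOT_IMPLEMENTED);
	}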
See the GNU General Public License for ++ more details. ++ ++ You should have received a copy of the GNU General Public License along with ++ this program; if not, write to the Free Software Foundation, Inc., ++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#include "ixgbe.h" ++#include "kcompat.h" ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) ) ++/* From lib/vsprintf.c */ ++#include ++ ++static int skip_atoi(const char **s) ++{ ++ int i=0; ++ ++ while (isdigit(**s)) ++ i = i*10 + *((*s)++) - '0'; ++ return i; ++} ++ ++#define _kc_ZEROPAD 1 /* pad with zero */ ++#define _kc_SIGN 2 /* unsigned/signed long */ ++#define _kc_PLUS 4 /* show plus */ ++#define _kc_SPACE 8 /* space if plus */ ++#define _kc_LEFT 16 /* left justified */ ++#define _kc_SPECIAL 32 /* 0x */ ++#define _kc_LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */ ++ ++static char * number(char * buf, char * end, long long num, int base, int size, int precision, int type) ++{ ++ char c,sign,tmp[66]; ++ const char *digits; ++ const char small_digits[] = "0123456789abcdefghijklmnopqrstuvwxyz"; ++ const char large_digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"; ++ int i; ++ ++ digits = (type & _kc_LARGE) ? large_digits : small_digits; ++ if (type & _kc_LEFT) ++ type &= ~_kc_ZEROPAD; ++ if (base < 2 || base > 36) ++ return 0; ++ c = (type & _kc_ZEROPAD) ? '0' : ' '; ++ sign = 0; ++ if (type & _kc_SIGN) { ++ if (num < 0) { ++ sign = '-'; ++ num = -num; ++ size--; ++ } else if (type & _kc_PLUS) { ++ sign = '+'; ++ size--; ++ } else if (type & _kc_SPACE) { ++ sign = ' '; ++ size--; ++ } ++ } ++ if (type & _kc_SPECIAL) { ++ if (base == 16) ++ size -= 2; ++ else if (base == 8) ++ size--; ++ } ++ i = 0; ++ if (num == 0) ++ tmp[i++]='0'; ++ else while (num != 0) ++ tmp[i++] = digits[do_div(num,base)]; ++ if (i > precision) ++ precision = i; ++ size -= precision; ++ if (!(type&(_kc_ZEROPAD+_kc_LEFT))) { ++ while(size-->0) { ++ if (buf <= end) ++ *buf = ' '; ++ ++buf; ++ } ++ } ++ if (sign) { ++ if (buf <= end) ++ *buf = sign; ++ ++buf; ++ } ++ if (type & _kc_SPECIAL) { ++ if (base==8) { ++ if (buf <= end) ++ *buf = '0'; ++ ++buf; ++ } else if (base==16) { ++ if (buf <= end) ++ *buf = '0'; ++ ++buf; ++ if (buf <= end) ++ *buf = digits[33]; ++ ++buf; ++ } ++ } ++ if (!(type & _kc_LEFT)) { ++ while (size-- > 0) { ++ if (buf <= end) ++ *buf = c; ++ ++buf; ++ } ++ } ++ while (i < precision--) { ++ if (buf <= end) ++ *buf = '0'; ++ ++buf; ++ } ++ while (i-- > 0) { ++ if (buf <= end) ++ *buf = tmp[i]; ++ ++buf; ++ } ++ while (size-- > 0) { ++ if (buf <= end) ++ *buf = ' '; ++ ++buf; ++ } ++ return buf; ++} ++ ++int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args) ++{ ++ int len; ++ unsigned long long num; ++ int i, base; ++ char *str, *end, c; ++ const char *s; ++ ++ int flags; /* flags to number() */ ++ ++ int field_width; /* width of output field */ ++ int precision; /* min. # of digits for integers; max ++ number of chars for from string */ ++ int qualifier; /* 'h', 'l', or 'L' for integer fields */ ++ /* 'z' support added 23/7/1999 S.H. 
*/ ++ /* 'z' changed to 'Z' --davidm 1/25/99 */ ++ ++ str = buf; ++ end = buf + size - 1; ++ ++ if (end < buf - 1) { ++ end = ((void *) -1); ++ size = end - buf + 1; ++ } ++ ++ for (; *fmt ; ++fmt) { ++ if (*fmt != '%') { ++ if (str <= end) ++ *str = *fmt; ++ ++str; ++ continue; ++ } ++ ++ /* process flags */ ++ flags = 0; ++ repeat: ++ ++fmt; /* this also skips first '%' */ ++ switch (*fmt) { ++ case '-': flags |= _kc_LEFT; goto repeat; ++ case '+': flags |= _kc_PLUS; goto repeat; ++ case ' ': flags |= _kc_SPACE; goto repeat; ++ case '#': flags |= _kc_SPECIAL; goto repeat; ++ case '0': flags |= _kc_ZEROPAD; goto repeat; ++ } ++ ++ /* get field width */ ++ field_width = -1; ++ if (isdigit(*fmt)) ++ field_width = skip_atoi(&fmt); ++ else if (*fmt == '*') { ++ ++fmt; ++ /* it's the next argument */ ++ field_width = va_arg(args, int); ++ if (field_width < 0) { ++ field_width = -field_width; ++ flags |= _kc_LEFT; ++ } ++ } ++ ++ /* get the precision */ ++ precision = -1; ++ if (*fmt == '.') { ++ ++fmt; ++ if (isdigit(*fmt)) ++ precision = skip_atoi(&fmt); ++ else if (*fmt == '*') { ++ ++fmt; ++ /* it's the next argument */ ++ precision = va_arg(args, int); ++ } ++ if (precision < 0) ++ precision = 0; ++ } ++ ++ /* get the conversion qualifier */ ++ qualifier = -1; ++ if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || *fmt =='Z') { ++ qualifier = *fmt; ++ ++fmt; ++ } ++ ++ /* default base */ ++ base = 10; ++ ++ switch (*fmt) { ++ case 'c': ++ if (!(flags & _kc_LEFT)) { ++ while (--field_width > 0) { ++ if (str <= end) ++ *str = ' '; ++ ++str; ++ } ++ } ++ c = (unsigned char) va_arg(args, int); ++ if (str <= end) ++ *str = c; ++ ++str; ++ while (--field_width > 0) { ++ if (str <= end) ++ *str = ' '; ++ ++str; ++ } ++ continue; ++ ++ case 's': ++ s = va_arg(args, char *); ++ if (!s) ++ s = ""; ++ ++ len = strnlen(s, precision); ++ ++ if (!(flags & _kc_LEFT)) { ++ while (len < field_width--) { ++ if (str <= end) ++ *str = ' '; ++ ++str; ++ } ++ } ++ for (i = 0; i < len; ++i) { ++ if (str <= end) ++ *str = *s; ++ ++str; ++s; ++ } ++ while (len < field_width--) { ++ if (str <= end) ++ *str = ' '; ++ ++str; ++ } ++ continue; ++ ++ case 'p': ++ if (field_width == -1) { ++ field_width = 2*sizeof(void *); ++ flags |= _kc_ZEROPAD; ++ } ++ str = number(str, end, ++ (unsigned long) va_arg(args, void *), ++ 16, field_width, precision, flags); ++ continue; ++ ++ ++ case 'n': ++ /* FIXME: ++ * What does C99 say about the overflow case here? 
*/ ++ if (qualifier == 'l') { ++ long * ip = va_arg(args, long *); ++ *ip = (str - buf); ++ } else if (qualifier == 'Z') { ++ size_t * ip = va_arg(args, size_t *); ++ *ip = (str - buf); ++ } else { ++ int * ip = va_arg(args, int *); ++ *ip = (str - buf); ++ } ++ continue; ++ ++ case '%': ++ if (str <= end) ++ *str = '%'; ++ ++str; ++ continue; ++ ++ /* integer number formats - set up the flags and "break" */ ++ case 'o': ++ base = 8; ++ break; ++ ++ case 'X': ++ flags |= _kc_LARGE; ++ case 'x': ++ base = 16; ++ break; ++ ++ case 'd': ++ case 'i': ++ flags |= _kc_SIGN; ++ case 'u': ++ break; ++ ++ default: ++ if (str <= end) ++ *str = '%'; ++ ++str; ++ if (*fmt) { ++ if (str <= end) ++ *str = *fmt; ++ ++str; ++ } else { ++ --fmt; ++ } ++ continue; ++ } ++ if (qualifier == 'L') ++ num = va_arg(args, long long); ++ else if (qualifier == 'l') { ++ num = va_arg(args, unsigned long); ++ if (flags & _kc_SIGN) ++ num = (signed long) num; ++ } else if (qualifier == 'Z') { ++ num = va_arg(args, size_t); ++ } else if (qualifier == 'h') { ++ num = (unsigned short) va_arg(args, int); ++ if (flags & _kc_SIGN) ++ num = (signed short) num; ++ } else { ++ num = va_arg(args, unsigned int); ++ if (flags & _kc_SIGN) ++ num = (signed int) num; ++ } ++ str = number(str, end, num, base, ++ field_width, precision, flags); ++ } ++ if (str <= end) ++ *str = '\0'; ++ else if (size > 0) ++ /* don't write out a null byte if the buf size is zero */ ++ *end = '\0'; ++ /* the trailing null byte doesn't count towards the total ++ * ++str; ++ */ ++ return str-buf; ++} ++ ++int _kc_snprintf(char * buf, size_t size, const char *fmt, ...) ++{ ++ va_list args; ++ int i; ++ ++ va_start(args, fmt); ++ i = _kc_vsnprintf(buf,size,fmt,args); ++ va_end(args); ++ return i; ++} ++#endif /* < 2.4.8 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,21) ) ++struct sk_buff * ++_kc_skb_pad(struct sk_buff *skb, int pad) ++{ ++ struct sk_buff *nskb; ++ ++ /* If the skbuff is non linear tailroom is always zero.. 
*/ ++ if(skb_tailroom(skb) >= pad) ++ { ++ memset(skb->data+skb->len, 0, pad); ++ return skb; ++ } ++ ++ nskb = skb_copy_expand(skb, skb_headroom(skb), skb_tailroom(skb) + pad, GFP_ATOMIC); ++ kfree_skb(skb); ++ if(nskb) ++ memset(nskb->data+nskb->len, 0, pad); ++ return nskb; ++} ++#endif /* < 2.4.21 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) ) ++ ++/**************************************/ ++/* PCI DMA MAPPING */ ++ ++#if defined(CONFIG_HIGHMEM) ++ ++#ifndef PCI_DRAM_OFFSET ++#define PCI_DRAM_OFFSET 0 ++#endif ++ ++u64 ++_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, ++ size_t size, int direction) ++{ ++ return (((u64) (page - mem_map) << PAGE_SHIFT) + offset + ++ PCI_DRAM_OFFSET); ++} ++ ++#else /* CONFIG_HIGHMEM */ ++ ++u64 ++_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, ++ size_t size, int direction) ++{ ++ return pci_map_single(dev, (void *)page_address(page) + offset, size, ++ direction); ++} ++ ++#endif /* CONFIG_HIGHMEM */ ++ ++void ++_kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, ++ int direction) ++{ ++ return pci_unmap_single(dev, dma_addr, size, direction); ++} ++ ++#endif /* 2.4.13 => 2.4.3 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) ) ++ ++/**************************************/ ++/* PCI DRIVER API */ ++ ++int ++_kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask) ++{ ++ if (!pci_dma_supported(dev, mask)) ++ return -EIO; ++ dev->dma_mask = mask; ++ return 0; ++} ++ ++int ++_kc_pci_request_regions(struct pci_dev *dev, char *res_name) ++{ ++ int i; ++ ++ for (i = 0; i < 6; i++) { ++ if (pci_resource_len(dev, i) == 0) ++ continue; ++ ++ if (pci_resource_flags(dev, i) & IORESOURCE_IO) { ++ if (!request_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) { ++ pci_release_regions(dev); ++ return -EBUSY; ++ } ++ } else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) { ++ if (!request_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) { ++ pci_release_regions(dev); ++ return -EBUSY; ++ } ++ } ++ } ++ return 0; ++} ++ ++void ++_kc_pci_release_regions(struct pci_dev *dev) ++{ ++ int i; ++ ++ for (i = 0; i < 6; i++) { ++ if (pci_resource_len(dev, i) == 0) ++ continue; ++ ++ if (pci_resource_flags(dev, i) & IORESOURCE_IO) ++ release_region(pci_resource_start(dev, i), pci_resource_len(dev, i)); ++ ++ else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) ++ release_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i)); ++ } ++} ++ ++/**************************************/ ++/* NETWORK DRIVER API */ ++ ++struct net_device * ++_kc_alloc_etherdev(int sizeof_priv) ++{ ++ struct net_device *dev; ++ int alloc_size; ++ ++ alloc_size = sizeof(*dev) + sizeof_priv + IFNAMSIZ + 31; ++ dev = kmalloc(alloc_size, GFP_KERNEL); ++ if (!dev) ++ return NULL; ++ memset(dev, 0, alloc_size); ++ ++ if (sizeof_priv) ++ dev->priv = (void *) (((unsigned long)(dev + 1) + 31) & ~31); ++ dev->name[0] = '\0'; ++ ether_setup(dev); ++ ++ return dev; ++} ++ ++int ++_kc_is_valid_ether_addr(u8 *addr) ++{ ++ const char zaddr[6] = { 0, }; ++ ++ return !(addr[0] & 1) && memcmp(addr, zaddr, 6); ++} ++ ++#endif /* 2.4.3 => 2.4.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) ) ++ ++int 
++_kc_pci_set_power_state(struct pci_dev *dev, int state) ++{ ++ return 0; ++} ++ ++int ++_kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable) ++{ ++ return 0; ++} ++ ++#endif /* 2.4.6 => 2.4.3 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) ++void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, ++ int off, int size) ++{ ++ skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; ++ frag->page = page; ++ frag->page_offset = off; ++ frag->size = size; ++ skb_shinfo(skb)->nr_frags = i + 1; ++} ++ ++/* ++ * Original Copyright: ++ * find_next_bit.c: fallback find next bit implementation ++ * ++ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. ++ * Written by David Howells (dhowells@redhat.com) ++ */ ++ ++/** ++ * find_next_bit - find the next set bit in a memory region ++ * @addr: The address to base the search on ++ * @offset: The bitnumber to start searching at ++ * @size: The maximum size to search ++ */ ++unsigned long find_next_bit(const unsigned long *addr, unsigned long size, ++ unsigned long offset) ++{ ++ const unsigned long *p = addr + BITOP_WORD(offset); ++ unsigned long result = offset & ~(BITS_PER_LONG-1); ++ unsigned long tmp; ++ ++ if (offset >= size) ++ return size; ++ size -= result; ++ offset %= BITS_PER_LONG; ++ if (offset) { ++ tmp = *(p++); ++ tmp &= (~0UL << offset); ++ if (size < BITS_PER_LONG) ++ goto found_first; ++ if (tmp) ++ goto found_middle; ++ size -= BITS_PER_LONG; ++ result += BITS_PER_LONG; ++ } ++ while (size & ~(BITS_PER_LONG-1)) { ++ if ((tmp = *(p++))) ++ goto found_middle; ++ result += BITS_PER_LONG; ++ size -= BITS_PER_LONG; ++ } ++ if (!size) ++ return result; ++ tmp = *p; ++ ++found_first: ++ tmp &= (~0UL >> (BITS_PER_LONG - size)); ++ if (tmp == 0UL) /* Are any bits set? */ ++ return result + size; /* Nope. */ ++found_middle: ++ return result + ffs(tmp); ++} ++ ++#endif /* 2.6.0 => 2.4.6 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) ++int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...) ++{ ++ va_list args; ++ int i; ++ ++ va_start(args, fmt); ++ i = vsnprintf(buf, size, fmt, args); ++ va_end(args); ++ return (i >= size) ? 
(size - 1) : i; ++} ++#endif /* < 2.6.4 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) ) ++DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES) = {1}; ++#endif /* < 2.6.10 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) ) ++void *_kc_kzalloc(size_t size, int flags) ++{ ++ void *ret = kmalloc(size, flags); ++ if (ret) ++ memset(ret, 0, size); ++ return ret; ++} ++#endif /* <= 2.6.13 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) ) ++int _kc_pci_save_state(struct pci_dev *pdev) ++{ ++ struct adapter_struct *adapter = pci_get_drvdata(pdev); ++ int size = PCI_CONFIG_SPACE_LEN, i; ++ u16 pcie_cap_offset, pcie_link_status; ++ ++#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) ++ /* no ->dev for 2.4 kernels */ ++ WARN_ON(pdev->dev.driver_data == NULL); ++#endif ++ pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP); ++ if (pcie_cap_offset) { ++ if (!pci_read_config_word(pdev, ++ pcie_cap_offset + PCIE_LINK_STATUS, ++ &pcie_link_status)) ++ size = PCIE_CONFIG_SPACE_LEN; ++ } ++ pci_config_space_ich8lan(); ++#ifdef HAVE_PCI_ERS ++ if (adapter->config_space == NULL) ++#else ++ WARN_ON(adapter->config_space != NULL); ++#endif ++ adapter->config_space = kmalloc(size, GFP_KERNEL); ++ if (!adapter->config_space) { ++ printk(KERN_ERR "Out of memory in pci_save_state\n"); ++ return -ENOMEM; ++ } ++ for (i = 0; i < (size / 4); i++) ++ pci_read_config_dword(pdev, i * 4, &adapter->config_space[i]); ++ return 0; ++} ++ ++void _kc_pci_restore_state(struct pci_dev *pdev) ++{ ++ struct adapter_struct *adapter = pci_get_drvdata(pdev); ++ int size = PCI_CONFIG_SPACE_LEN, i; ++ u16 pcie_cap_offset; ++ u16 pcie_link_status; ++ ++ if (adapter->config_space != NULL) { ++ pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP); ++ if (pcie_cap_offset && ++ !pci_read_config_word(pdev, ++ pcie_cap_offset + PCIE_LINK_STATUS, ++ &pcie_link_status)) ++ size = PCIE_CONFIG_SPACE_LEN; ++ ++ pci_config_space_ich8lan(); ++ for (i = 0; i < (size / 4); i++) ++ pci_write_config_dword(pdev, i * 4, adapter->config_space[i]); ++#ifndef HAVE_PCI_ERS ++ kfree(adapter->config_space); ++ adapter->config_space = NULL; ++#endif ++ } ++} ++ ++#ifdef HAVE_PCI_ERS ++void _kc_free_netdev(struct net_device *netdev) ++{ ++ struct adapter_struct *adapter = netdev_priv(netdev); ++ ++ if (adapter->config_space != NULL) ++ kfree(adapter->config_space); ++#ifdef CONFIG_SYSFS ++ if (netdev->reg_state == NETREG_UNINITIALIZED) { ++ kfree((char *)netdev - netdev->padded); ++ } else { ++ BUG_ON(netdev->reg_state != NETREG_UNREGISTERED); ++ netdev->reg_state = NETREG_RELEASED; ++ class_device_put(&netdev->class_dev); ++ } ++#else ++ kfree((char *)netdev - netdev->padded); ++#endif ++} ++#endif ++#endif /* <= 2.6.18 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) ) ++/* hexdump code taken from lib/hexdump.c */ ++static void _kc_hex_dump_to_buffer(const void *buf, size_t len, int rowsize, ++ int groupsize, unsigned char *linebuf, ++ size_t linebuflen, bool ascii) ++{ ++ const u8 *ptr = buf; ++ u8 ch; ++ int j, lx = 0; ++ int ascii_column; ++ ++ if (rowsize != 16 && rowsize != 32) ++ rowsize = 16; ++ ++ if (!len) ++ goto nil; ++ if (len > rowsize) /* limit to one line at a time */ 
++ len = rowsize; ++ if ((len % groupsize) != 0) /* no mixed size output */ ++ groupsize = 1; ++ ++ switch (groupsize) { ++ case 8: { ++ const u64 *ptr8 = buf; ++ int ngroups = len / groupsize; ++ ++ for (j = 0; j < ngroups; j++) ++ lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, ++ "%s%16.16llx", j ? " " : "", ++ (unsigned long long)*(ptr8 + j)); ++ ascii_column = 17 * ngroups + 2; ++ break; ++ } ++ ++ case 4: { ++ const u32 *ptr4 = buf; ++ int ngroups = len / groupsize; ++ ++ for (j = 0; j < ngroups; j++) ++ lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, ++ "%s%8.8x", j ? " " : "", *(ptr4 + j)); ++ ascii_column = 9 * ngroups + 2; ++ break; ++ } ++ ++ case 2: { ++ const u16 *ptr2 = buf; ++ int ngroups = len / groupsize; ++ ++ for (j = 0; j < ngroups; j++) ++ lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, ++ "%s%4.4x", j ? " " : "", *(ptr2 + j)); ++ ascii_column = 5 * ngroups + 2; ++ break; ++ } ++ ++ default: ++ for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) { ++ ch = ptr[j]; ++ linebuf[lx++] = hex_asc(ch >> 4); ++ linebuf[lx++] = hex_asc(ch & 0x0f); ++ linebuf[lx++] = ' '; ++ } ++ if (j) ++ lx--; ++ ++ ascii_column = 3 * rowsize + 2; ++ break; ++ } ++ if (!ascii) ++ goto nil; ++ ++ while (lx < (linebuflen - 1) && lx < (ascii_column - 1)) ++ linebuf[lx++] = ' '; ++ for (j = 0; (j < len) && (lx + 2) < linebuflen; j++) ++ linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? ptr[j] ++ : '.'; ++nil: ++ linebuf[lx++] = '\0'; ++} ++ ++void _kc_print_hex_dump(const char *level, ++ const char *prefix_str, int prefix_type, ++ int rowsize, int groupsize, ++ const void *buf, size_t len, bool ascii) ++{ ++ const u8 *ptr = buf; ++ int i, linelen, remaining = len; ++ unsigned char linebuf[200]; ++ ++ if (rowsize != 16 && rowsize != 32) ++ rowsize = 16; ++ ++ for (i = 0; i < len; i += rowsize) { ++ linelen = min(remaining, rowsize); ++ remaining -= rowsize; ++ _kc_hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize, ++ linebuf, sizeof(linebuf), ascii); ++ ++ switch (prefix_type) { ++ case DUMP_PREFIX_ADDRESS: ++ printk("%s%s%*p: %s\n", level, prefix_str, ++ (int)(2 * sizeof(void *)), ptr + i, linebuf); ++ break; ++ case DUMP_PREFIX_OFFSET: ++ printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf); ++ break; ++ default: ++ printk("%s%s%s\n", level, prefix_str, linebuf); ++ break; ++ } ++ } ++} ++#endif /* < 2.6.22 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) ) ++int ixgbe_dcb_netlink_register() ++{ ++ return 0; ++} ++ ++int ixgbe_dcb_netlink_unregister() ++{ ++ return 0; ++} ++ ++int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg, ++ struct ixgbe_dcb_config *dst_dcb_cfg, int tc_max) ++{ ++ return 0; ++} ++#endif /* < 2.6.23 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) ) ++#ifdef NAPI ++struct net_device *napi_to_poll_dev(struct napi_struct *napi) ++{ ++ struct adapter_q_vector *q_vector = container_of(napi, ++ struct adapter_q_vector, ++ napi); ++ return &q_vector->poll_dev; ++} ++ ++int __kc_adapter_clean(struct net_device *netdev, int *budget) ++{ ++ int work_done; ++ int work_to_do = min(*budget, netdev->quota); ++ /* kcompat.h netif_napi_add puts napi struct in "fake netdev->priv" */ ++ struct napi_struct *napi = netdev->priv; ++ work_done = napi->poll(napi, work_to_do); ++ *budget -= work_done; ++ netdev->quota -= work_done; ++ return (work_done >= work_to_do) ? 
1 : 0; ++} ++#endif /* NAPI */ ++#endif /* <= 2.6.24 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) ) ++void _kc_pci_disable_link_state(struct pci_dev *pdev, int state) ++{ ++ struct pci_dev *parent = pdev->bus->self; ++ u16 link_state; ++ int pos; ++ ++ if (!parent) ++ return; ++ ++ pos = pci_find_capability(parent, PCI_CAP_ID_EXP); ++ if (pos) { ++ pci_read_config_word(parent, pos + PCI_EXP_LNKCTL, &link_state); ++ link_state &= ~state; ++ pci_write_config_word(parent, pos + PCI_EXP_LNKCTL, link_state); ++ } ++} ++#endif /* < 2.6.26 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) ) ++#ifdef HAVE_TX_MQ ++void _kc_netif_tx_stop_all_queues(struct net_device *netdev) ++{ ++ struct adapter_struct *adapter = netdev_priv(netdev); ++ int i; ++ ++ netif_stop_queue(netdev); ++ if (netif_is_multiqueue(netdev)) ++ for (i = 0; i < adapter->num_tx_queues; i++) ++ netif_stop_subqueue(netdev, i); ++} ++void _kc_netif_tx_wake_all_queues(struct net_device *netdev) ++{ ++ struct adapter_struct *adapter = netdev_priv(netdev); ++ int i; ++ ++ netif_wake_queue(netdev); ++ if (netif_is_multiqueue(netdev)) ++ for (i = 0; i < adapter->num_tx_queues; i++) ++ netif_wake_subqueue(netdev, i); ++} ++void _kc_netif_tx_start_all_queues(struct net_device *netdev) ++{ ++ struct adapter_struct *adapter = netdev_priv(netdev); ++ int i; ++ ++ netif_start_queue(netdev); ++ if (netif_is_multiqueue(netdev)) ++ for (i = 0; i < adapter->num_tx_queues; i++) ++ netif_start_subqueue(netdev, i); ++} ++#endif /* HAVE_TX_MQ */ ++#endif /* < 2.6.27 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ) ++ ++int ++_kc_pci_prepare_to_sleep(struct pci_dev *dev) ++{ ++ pci_power_t target_state; ++ int error; ++ ++ target_state = pci_choose_state(dev, PMSG_SUSPEND); ++ ++ pci_enable_wake(dev, target_state, true); ++ ++ error = pci_set_power_state(dev, target_state); ++ ++ if (error) ++ pci_enable_wake(dev, target_state, false); ++ ++ return error; ++} ++ ++int ++_kc_pci_wake_from_d3(struct pci_dev *dev, bool enable) ++{ ++ int err; ++ ++ err = pci_enable_wake(dev, PCI_D3cold, enable); ++ if (err) ++ goto out; ++ ++ err = pci_enable_wake(dev, PCI_D3hot, enable); ++ ++out: ++ return err; ++} ++#endif /* < 2.6.28 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) ) ++#ifdef HAVE_NETDEV_SELECT_QUEUE ++#include ++static u32 _kc_simple_tx_hashrnd; ++static u32 _kc_simple_tx_hashrnd_initialized; ++ ++u16 _kc_skb_tx_hash(struct net_device *dev, struct sk_buff *skb) ++{ ++ u32 addr1, addr2, ports; ++ u32 hash, ihl; ++ u8 ip_proto = 0; ++ ++ if (unlikely(!_kc_simple_tx_hashrnd_initialized)) { ++ get_random_bytes(&_kc_simple_tx_hashrnd, 4); ++ _kc_simple_tx_hashrnd_initialized = 1; ++ } ++ ++ switch (skb->protocol) { ++ case htons(ETH_P_IP): ++ if (!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET))) ++ ip_proto = ip_hdr(skb)->protocol; ++ addr1 = ip_hdr(skb)->saddr; ++ addr2 = ip_hdr(skb)->daddr; ++ ihl = ip_hdr(skb)->ihl; ++ break; ++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) ++ case htons(ETH_P_IPV6): ++ ip_proto = ipv6_hdr(skb)->nexthdr; ++ addr1 = ipv6_hdr(skb)->saddr.s6_addr32[3]; ++ addr2 = ipv6_hdr(skb)->daddr.s6_addr32[3]; ++ ihl = (40 >> 2); ++ break; ++#endif ++ default: ++ 
return 0; ++ } ++ ++ ++ switch (ip_proto) { ++ case IPPROTO_TCP: ++ case IPPROTO_UDP: ++ case IPPROTO_DCCP: ++ case IPPROTO_ESP: ++ case IPPROTO_AH: ++ case IPPROTO_SCTP: ++ case IPPROTO_UDPLITE: ++ ports = *((u32 *) (skb_network_header(skb) + (ihl * 4))); ++ break; ++ ++ default: ++ ports = 0; ++ break; ++ } ++ ++ hash = jhash_3words(addr1, addr2, ports, _kc_simple_tx_hashrnd); ++ ++ return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32); ++} ++#endif /* HAVE_NETDEV_SELECT_QUEUE */ ++#endif /* < 2.6.30 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) ) || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ++struct sk_buff *_kc_netdev_alloc_skb_ip_align(struct net_device *dev, ++ unsigned int length) ++{ ++ struct sk_buff *skb; ++ ++ skb = alloc_skb(length + NET_SKB_PAD + NET_IP_ALIGN, GFP_ATOMIC); ++ if (skb) { ++ if (NET_IP_ALIGN + NET_SKB_PAD) ++ skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD); ++ skb->dev = dev; ++ } ++ return skb; ++} ++#endif /* < 2.6.33 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) ) ++int _kc_ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported) ++{ ++ unsigned long features = dev->features; ++ ++ if (data & ~supported) ++ return -EINVAL; ++ ++#ifdef NETIF_F_LRO ++ features &= ~NETIF_F_LRO; ++ if (data & ETH_FLAG_LRO) ++ features |= NETIF_F_LRO; ++#endif ++#ifdef NETIF_F_NTUPLE ++ features &= ~NETIF_F_NTUPLE; ++ if (data & ETH_FLAG_NTUPLE) ++ features |= NETIF_F_NTUPLE; ++#endif ++#ifdef NETIF_F_RXHASH ++ features &= ~NETIF_F_RXHASH; ++ if (data & ETH_FLAG_RXHASH) ++ features |= NETIF_F_RXHASH; ++#endif ++ ++ dev->features = features; ++ ++ return 0; ++} ++#endif /* < 2.6.36 */ +diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/kcompat_ethtool.c linux-2.6.22-50/drivers/net/ixgbe/kcompat_ethtool.c +--- linux-2.6.22-40/drivers/net/ixgbe/kcompat_ethtool.c 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.22-50/drivers/net/ixgbe/kcompat_ethtool.c 2010-08-25 17:56:26.000000000 -0400 +@@ -0,0 +1,1172 @@ ++/******************************************************************************* ++ ++ Intel 10 Gigabit PCI Express Linux driver ++ Copyright(c) 1999 - 2010 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ You should have received a copy of the GNU General Public License along with ++ this program; if not, write to the Free Software Foundation, Inc., ++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++/* ++ * net/core/ethtool.c - Ethtool ioctl handler ++ * Copyright (c) 2003 Matthew Wilcox ++ * ++ * This file is where we call all the ethtool_ops commands to get ++ * the information ethtool needs. We fall back to calling do_ioctl() ++ * for drivers which haven't been converted to ethtool_ops yet. ++ * ++ * It's GPL, stupid. ++ * ++ * Modification by sfeldma@pobox.com to work as backward compat ++ * solution for pre-ethtool_ops kernels. ++ * - copied struct ethtool_ops from ethtool.h ++ * - defined SET_ETHTOOL_OPS ++ * - put in some #ifndef NETIF_F_xxx wrappers ++ * - changes refs to dev->ethtool_ops to ethtool_ops ++ * - changed dev_ethtool to ethtool_ioctl ++ * - remove EXPORT_SYMBOL()s ++ * - added _kc_ prefix in built-in ethtool_op_xxx ops. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "kcompat.h" ++ ++#undef SUPPORTED_10000baseT_Full ++#define SUPPORTED_10000baseT_Full (1 << 12) ++#undef ADVERTISED_10000baseT_Full ++#define ADVERTISED_10000baseT_Full (1 << 12) ++#undef SPEED_10000 ++#define SPEED_10000 10000 ++ ++#undef ethtool_ops ++#define ethtool_ops _kc_ethtool_ops ++ ++struct _kc_ethtool_ops { ++ int (*get_settings)(struct net_device *, struct ethtool_cmd *); ++ int (*set_settings)(struct net_device *, struct ethtool_cmd *); ++ void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *); ++ int (*get_regs_len)(struct net_device *); ++ void (*get_regs)(struct net_device *, struct ethtool_regs *, void *); ++ void (*get_wol)(struct net_device *, struct ethtool_wolinfo *); ++ int (*set_wol)(struct net_device *, struct ethtool_wolinfo *); ++ u32 (*get_msglevel)(struct net_device *); ++ void (*set_msglevel)(struct net_device *, u32); ++ int (*nway_reset)(struct net_device *); ++ u32 (*get_link)(struct net_device *); ++ int (*get_eeprom_len)(struct net_device *); ++ int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); ++ int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); ++ int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *); ++ int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *); ++ void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *); ++ int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *); ++ void (*get_pauseparam)(struct net_device *, ++ struct ethtool_pauseparam*); ++ int (*set_pauseparam)(struct net_device *, ++ struct ethtool_pauseparam*); ++ u32 (*get_rx_csum)(struct net_device *); ++ int (*set_rx_csum)(struct net_device *, u32); ++ u32 (*get_tx_csum)(struct net_device *); ++ int (*set_tx_csum)(struct net_device *, u32); ++ u32 (*get_sg)(struct net_device *); ++ int (*set_sg)(struct net_device *, u32); ++ u32 (*get_tso)(struct net_device *); ++ int (*set_tso)(struct net_device *, u32); ++ int (*self_test_count)(struct net_device *); ++ void (*self_test)(struct net_device *, struct ethtool_test *, u64 *); ++ void (*get_strings)(struct net_device *, u32 stringset, u8 *); ++ int (*phys_id)(struct net_device *, u32); ++ int (*get_stats_count)(struct net_device *); ++ void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *, ++ u64 *); ++} *ethtool_ops = NULL; ++ ++#undef SET_ETHTOOL_OPS ++#define SET_ETHTOOL_OPS(netdev, ops) (ethtool_ops = (ops)) ++ ++/* ++ * Some useful ethtool_ops methods that are device independent. 
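The comment above summarises the compatibility strategy: on kernels that predate dev->ethtool_ops, the copied struct ethtool_ops plus the redefined SET_ETHTOOL_OPS simply record the driver's ops pointer in the file-static ethtool_ops variable, and the ethtool_ioctl() entry point defined later in this file replays ETHTOOL_* commands against it. A minimal sketch of how a driver is expected to reach it, assuming the usual do_ioctl hook; the handler and ops names are hypothetical and not part of the patch.

	static int example_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
	{
		/* SET_ETHTOOL_OPS(netdev, &driver_ethtool_ops) was called at probe
		 * time; on pre-ethtool_ops kernels SIOCETHTOOL is forwarded here. */
		if (cmd == SIOCETHTOOL)
			return ethtool_ioctl(ifr);
		return -EOPNOTSUPP;
	}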
If we find that ++ * all drivers want to do the same thing here, we can turn these into dev_() ++ * function calls. ++ */ ++ ++#undef ethtool_op_get_link ++#define ethtool_op_get_link _kc_ethtool_op_get_link ++u32 _kc_ethtool_op_get_link(struct net_device *dev) ++{ ++ return netif_carrier_ok(dev) ? 1 : 0; ++} ++ ++#undef ethtool_op_get_tx_csum ++#define ethtool_op_get_tx_csum _kc_ethtool_op_get_tx_csum ++u32 _kc_ethtool_op_get_tx_csum(struct net_device *dev) ++{ ++#ifdef NETIF_F_IP_CSUM ++ return (dev->features & NETIF_F_IP_CSUM) != 0; ++#else ++ return 0; ++#endif ++} ++ ++#undef ethtool_op_set_tx_csum ++#define ethtool_op_set_tx_csum _kc_ethtool_op_set_tx_csum ++int _kc_ethtool_op_set_tx_csum(struct net_device *dev, u32 data) ++{ ++#ifdef NETIF_F_IP_CSUM ++ if (data) ++#ifdef NETIF_F_IPV6_CSUM ++ dev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); ++ else ++ dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); ++#else ++ dev->features |= NETIF_F_IP_CSUM; ++ else ++ dev->features &= ~NETIF_F_IP_CSUM; ++#endif ++#endif ++ ++ return 0; ++} ++ ++#undef ethtool_op_get_sg ++#define ethtool_op_get_sg _kc_ethtool_op_get_sg ++u32 _kc_ethtool_op_get_sg(struct net_device *dev) ++{ ++#ifdef NETIF_F_SG ++ return (dev->features & NETIF_F_SG) != 0; ++#else ++ return 0; ++#endif ++} ++ ++#undef ethtool_op_set_sg ++#define ethtool_op_set_sg _kc_ethtool_op_set_sg ++int _kc_ethtool_op_set_sg(struct net_device *dev, u32 data) ++{ ++#ifdef NETIF_F_SG ++ if (data) ++ dev->features |= NETIF_F_SG; ++ else ++ dev->features &= ~NETIF_F_SG; ++#endif ++ ++ return 0; ++} ++ ++#undef ethtool_op_get_tso ++#define ethtool_op_get_tso _kc_ethtool_op_get_tso ++u32 _kc_ethtool_op_get_tso(struct net_device *dev) ++{ ++#ifdef NETIF_F_TSO ++ return (dev->features & NETIF_F_TSO) != 0; ++#else ++ return 0; ++#endif ++} ++ ++#undef ethtool_op_set_tso ++#define ethtool_op_set_tso _kc_ethtool_op_set_tso ++int _kc_ethtool_op_set_tso(struct net_device *dev, u32 data) ++{ ++#ifdef NETIF_F_TSO ++ if (data) ++ dev->features |= NETIF_F_TSO; ++ else ++ dev->features &= ~NETIF_F_TSO; ++#endif ++ ++ return 0; ++} ++ ++/* Handlers for each ethtool command */ ++ ++static int ethtool_get_settings(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_cmd cmd = { ETHTOOL_GSET }; ++ int err; ++ ++ if (!ethtool_ops->get_settings) ++ return -EOPNOTSUPP; ++ ++ err = ethtool_ops->get_settings(dev, &cmd); ++ if (err < 0) ++ return err; ++ ++ if (copy_to_user(useraddr, &cmd, sizeof(cmd))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ethtool_set_settings(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_cmd cmd; ++ ++ if (!ethtool_ops->set_settings) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&cmd, useraddr, sizeof(cmd))) ++ return -EFAULT; ++ ++ return ethtool_ops->set_settings(dev, &cmd); ++} ++ ++static int ethtool_get_drvinfo(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_drvinfo info; ++ struct ethtool_ops *ops = ethtool_ops; ++ ++ if (!ops->get_drvinfo) ++ return -EOPNOTSUPP; ++ ++ memset(&info, 0, sizeof(info)); ++ info.cmd = ETHTOOL_GDRVINFO; ++ ops->get_drvinfo(dev, &info); ++ ++ if (ops->self_test_count) ++ info.testinfo_len = ops->self_test_count(dev); ++ if (ops->get_stats_count) ++ info.n_stats = ops->get_stats_count(dev); ++ if (ops->get_regs_len) ++ info.regdump_len = ops->get_regs_len(dev); ++ if (ops->get_eeprom_len) ++ info.eedump_len = ops->get_eeprom_len(dev); ++ ++ if (copy_to_user(useraddr, &info, sizeof(info))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int 
ethtool_get_regs(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_regs regs; ++ struct ethtool_ops *ops = ethtool_ops; ++ void *regbuf; ++ int reglen, ret; ++ ++ if (!ops->get_regs || !ops->get_regs_len) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(®s, useraddr, sizeof(regs))) ++ return -EFAULT; ++ ++ reglen = ops->get_regs_len(dev); ++ if (regs.len > reglen) ++ regs.len = reglen; ++ ++ regbuf = kmalloc(reglen, GFP_USER); ++ if (!regbuf) ++ return -ENOMEM; ++ ++ ops->get_regs(dev, ®s, regbuf); ++ ++ ret = -EFAULT; ++ if (copy_to_user(useraddr, ®s, sizeof(regs))) ++ goto out; ++ useraddr += offsetof(struct ethtool_regs, data); ++ if (copy_to_user(useraddr, regbuf, reglen)) ++ goto out; ++ ret = 0; ++ ++out: ++ kfree(regbuf); ++ return ret; ++} ++ ++static int ethtool_get_wol(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_wolinfo wol = { ETHTOOL_GWOL }; ++ ++ if (!ethtool_ops->get_wol) ++ return -EOPNOTSUPP; ++ ++ ethtool_ops->get_wol(dev, &wol); ++ ++ if (copy_to_user(useraddr, &wol, sizeof(wol))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ethtool_set_wol(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_wolinfo wol; ++ ++ if (!ethtool_ops->set_wol) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&wol, useraddr, sizeof(wol))) ++ return -EFAULT; ++ ++ return ethtool_ops->set_wol(dev, &wol); ++} ++ ++static int ethtool_get_msglevel(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_value edata = { ETHTOOL_GMSGLVL }; ++ ++ if (!ethtool_ops->get_msglevel) ++ return -EOPNOTSUPP; ++ ++ edata.data = ethtool_ops->get_msglevel(dev); ++ ++ if (copy_to_user(useraddr, &edata, sizeof(edata))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ethtool_set_msglevel(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_value edata; ++ ++ if (!ethtool_ops->set_msglevel) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&edata, useraddr, sizeof(edata))) ++ return -EFAULT; ++ ++ ethtool_ops->set_msglevel(dev, edata.data); ++ return 0; ++} ++ ++static int ethtool_nway_reset(struct net_device *dev) ++{ ++ if (!ethtool_ops->nway_reset) ++ return -EOPNOTSUPP; ++ ++ return ethtool_ops->nway_reset(dev); ++} ++ ++static int ethtool_get_link(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_value edata = { ETHTOOL_GLINK }; ++ ++ if (!ethtool_ops->get_link) ++ return -EOPNOTSUPP; ++ ++ edata.data = ethtool_ops->get_link(dev); ++ ++ if (copy_to_user(useraddr, &edata, sizeof(edata))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ethtool_get_eeprom(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_eeprom eeprom; ++ struct ethtool_ops *ops = ethtool_ops; ++ u8 *data; ++ int ret; ++ ++ if (!ops->get_eeprom || !ops->get_eeprom_len) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&eeprom, useraddr, sizeof(eeprom))) ++ return -EFAULT; ++ ++ /* Check for wrap and zero */ ++ if (eeprom.offset + eeprom.len <= eeprom.offset) ++ return -EINVAL; ++ ++ /* Check for exceeding total eeprom len */ ++ if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev)) ++ return -EINVAL; ++ ++ data = kmalloc(eeprom.len, GFP_USER); ++ if (!data) ++ return -ENOMEM; ++ ++ ret = -EFAULT; ++ if (copy_from_user(data, useraddr + sizeof(eeprom), eeprom.len)) ++ goto out; ++ ++ ret = ops->get_eeprom(dev, &eeprom, data); ++ if (ret) ++ goto out; ++ ++ ret = -EFAULT; ++ if (copy_to_user(useraddr, &eeprom, sizeof(eeprom))) ++ goto out; ++ if (copy_to_user(useraddr + sizeof(eeprom), data, eeprom.len)) ++ goto out; ++ ret = 0; ++ ++out: ++ kfree(data); ++ return 
ret; ++} ++ ++static int ethtool_set_eeprom(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_eeprom eeprom; ++ struct ethtool_ops *ops = ethtool_ops; ++ u8 *data; ++ int ret; ++ ++ if (!ops->set_eeprom || !ops->get_eeprom_len) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&eeprom, useraddr, sizeof(eeprom))) ++ return -EFAULT; ++ ++ /* Check for wrap and zero */ ++ if (eeprom.offset + eeprom.len <= eeprom.offset) ++ return -EINVAL; ++ ++ /* Check for exceeding total eeprom len */ ++ if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev)) ++ return -EINVAL; ++ ++ data = kmalloc(eeprom.len, GFP_USER); ++ if (!data) ++ return -ENOMEM; ++ ++ ret = -EFAULT; ++ if (copy_from_user(data, useraddr + sizeof(eeprom), eeprom.len)) ++ goto out; ++ ++ ret = ops->set_eeprom(dev, &eeprom, data); ++ if (ret) ++ goto out; ++ ++ if (copy_to_user(useraddr + sizeof(eeprom), data, eeprom.len)) ++ ret = -EFAULT; ++ ++out: ++ kfree(data); ++ return ret; ++} ++ ++static int ethtool_get_coalesce(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_coalesce coalesce = { ETHTOOL_GCOALESCE }; ++ ++ if (!ethtool_ops->get_coalesce) ++ return -EOPNOTSUPP; ++ ++ ethtool_ops->get_coalesce(dev, &coalesce); ++ ++ if (copy_to_user(useraddr, &coalesce, sizeof(coalesce))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ethtool_set_coalesce(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_coalesce coalesce; ++ ++ if (!ethtool_ops->get_coalesce) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&coalesce, useraddr, sizeof(coalesce))) ++ return -EFAULT; ++ ++ return ethtool_ops->set_coalesce(dev, &coalesce); ++} ++ ++static int ethtool_get_ringparam(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_ringparam ringparam = { ETHTOOL_GRINGPARAM }; ++ ++ if (!ethtool_ops->get_ringparam) ++ return -EOPNOTSUPP; ++ ++ ethtool_ops->get_ringparam(dev, &ringparam); ++ ++ if (copy_to_user(useraddr, &ringparam, sizeof(ringparam))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ethtool_set_ringparam(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_ringparam ringparam; ++ ++ if (!ethtool_ops->get_ringparam) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&ringparam, useraddr, sizeof(ringparam))) ++ return -EFAULT; ++ ++ return ethtool_ops->set_ringparam(dev, &ringparam); ++} ++ ++static int ethtool_get_pauseparam(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_pauseparam pauseparam = { ETHTOOL_GPAUSEPARAM }; ++ ++ if (!ethtool_ops->get_pauseparam) ++ return -EOPNOTSUPP; ++ ++ ethtool_ops->get_pauseparam(dev, &pauseparam); ++ ++ if (copy_to_user(useraddr, &pauseparam, sizeof(pauseparam))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ethtool_set_pauseparam(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_pauseparam pauseparam; ++ ++ if (!ethtool_ops->get_pauseparam) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&pauseparam, useraddr, sizeof(pauseparam))) ++ return -EFAULT; ++ ++ return ethtool_ops->set_pauseparam(dev, &pauseparam); ++} ++ ++static int ethtool_get_rx_csum(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_value edata = { ETHTOOL_GRXCSUM }; ++ ++ if (!ethtool_ops->get_rx_csum) ++ return -EOPNOTSUPP; ++ ++ edata.data = ethtool_ops->get_rx_csum(dev); ++ ++ if (copy_to_user(useraddr, &edata, sizeof(edata))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ethtool_set_rx_csum(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_value edata; ++ ++ if (!ethtool_ops->set_rx_csum) ++ return -EOPNOTSUPP; ++ ++ if 
(copy_from_user(&edata, useraddr, sizeof(edata))) ++ return -EFAULT; ++ ++ ethtool_ops->set_rx_csum(dev, edata.data); ++ return 0; ++} ++ ++static int ethtool_get_tx_csum(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_value edata = { ETHTOOL_GTXCSUM }; ++ ++ if (!ethtool_ops->get_tx_csum) ++ return -EOPNOTSUPP; ++ ++ edata.data = ethtool_ops->get_tx_csum(dev); ++ ++ if (copy_to_user(useraddr, &edata, sizeof(edata))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ethtool_set_tx_csum(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_value edata; ++ ++ if (!ethtool_ops->set_tx_csum) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&edata, useraddr, sizeof(edata))) ++ return -EFAULT; ++ ++ return ethtool_ops->set_tx_csum(dev, edata.data); ++} ++ ++static int ethtool_get_sg(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_value edata = { ETHTOOL_GSG }; ++ ++ if (!ethtool_ops->get_sg) ++ return -EOPNOTSUPP; ++ ++ edata.data = ethtool_ops->get_sg(dev); ++ ++ if (copy_to_user(useraddr, &edata, sizeof(edata))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ethtool_set_sg(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_value edata; ++ ++ if (!ethtool_ops->set_sg) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&edata, useraddr, sizeof(edata))) ++ return -EFAULT; ++ ++ return ethtool_ops->set_sg(dev, edata.data); ++} ++ ++static int ethtool_get_tso(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_value edata = { ETHTOOL_GTSO }; ++ ++ if (!ethtool_ops->get_tso) ++ return -EOPNOTSUPP; ++ ++ edata.data = ethtool_ops->get_tso(dev); ++ ++ if (copy_to_user(useraddr, &edata, sizeof(edata))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ethtool_set_tso(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_value edata; ++ ++ if (!ethtool_ops->set_tso) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&edata, useraddr, sizeof(edata))) ++ return -EFAULT; ++ ++ return ethtool_ops->set_tso(dev, edata.data); ++} ++ ++static int ethtool_self_test(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_test test; ++ struct ethtool_ops *ops = ethtool_ops; ++ u64 *data; ++ int ret; ++ ++ if (!ops->self_test || !ops->self_test_count) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&test, useraddr, sizeof(test))) ++ return -EFAULT; ++ ++ test.len = ops->self_test_count(dev); ++ data = kmalloc(test.len * sizeof(u64), GFP_USER); ++ if (!data) ++ return -ENOMEM; ++ ++ ops->self_test(dev, &test, data); ++ ++ ret = -EFAULT; ++ if (copy_to_user(useraddr, &test, sizeof(test))) ++ goto out; ++ useraddr += sizeof(test); ++ if (copy_to_user(useraddr, data, test.len * sizeof(u64))) ++ goto out; ++ ret = 0; ++ ++out: ++ kfree(data); ++ return ret; ++} ++ ++static int ethtool_get_strings(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_gstrings gstrings; ++ struct ethtool_ops *ops = ethtool_ops; ++ u8 *data; ++ int ret; ++ ++ if (!ops->get_strings) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&gstrings, useraddr, sizeof(gstrings))) ++ return -EFAULT; ++ ++ switch (gstrings.string_set) { ++ case ETH_SS_TEST: ++ if (!ops->self_test_count) ++ return -EOPNOTSUPP; ++ gstrings.len = ops->self_test_count(dev); ++ break; ++ case ETH_SS_STATS: ++ if (!ops->get_stats_count) ++ return -EOPNOTSUPP; ++ gstrings.len = ops->get_stats_count(dev); ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER); ++ if (!data) ++ return -ENOMEM; ++ ++ ops->get_strings(dev, gstrings.string_set, 
data); ++ ++ ret = -EFAULT; ++ if (copy_to_user(useraddr, &gstrings, sizeof(gstrings))) ++ goto out; ++ useraddr += sizeof(gstrings); ++ if (copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN)) ++ goto out; ++ ret = 0; ++ ++out: ++ kfree(data); ++ return ret; ++} ++ ++static int ethtool_phys_id(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_value id; ++ ++ if (!ethtool_ops->phys_id) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&id, useraddr, sizeof(id))) ++ return -EFAULT; ++ ++ return ethtool_ops->phys_id(dev, id.data); ++} ++ ++static int ethtool_get_stats(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_stats stats; ++ struct ethtool_ops *ops = ethtool_ops; ++ u64 *data; ++ int ret; ++ ++ if (!ops->get_ethtool_stats || !ops->get_stats_count) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&stats, useraddr, sizeof(stats))) ++ return -EFAULT; ++ ++ stats.n_stats = ops->get_stats_count(dev); ++ data = kmalloc(stats.n_stats * sizeof(u64), GFP_USER); ++ if (!data) ++ return -ENOMEM; ++ ++ ops->get_ethtool_stats(dev, &stats, data); ++ ++ ret = -EFAULT; ++ if (copy_to_user(useraddr, &stats, sizeof(stats))) ++ goto out; ++ useraddr += sizeof(stats); ++ if (copy_to_user(useraddr, data, stats.n_stats * sizeof(u64))) ++ goto out; ++ ret = 0; ++ ++out: ++ kfree(data); ++ return ret; ++} ++ ++/* The main entry point in this file. Called from net/core/dev.c */ ++ ++#define ETHTOOL_OPS_COMPAT ++int ethtool_ioctl(struct ifreq *ifr) ++{ ++ struct net_device *dev = __dev_get_by_name(ifr->ifr_name); ++ void *useraddr = (void *) ifr->ifr_data; ++ u32 ethcmd; ++ ++ /* ++ * XXX: This can be pushed down into the ethtool_* handlers that ++ * need it. Keep existing behavior for the moment. ++ */ ++ if (!capable(CAP_NET_ADMIN)) ++ return -EPERM; ++ ++ if (!dev || !netif_device_present(dev)) ++ return -ENODEV; ++ ++ if (copy_from_user(ðcmd, useraddr, sizeof (ethcmd))) ++ return -EFAULT; ++ ++ switch (ethcmd) { ++ case ETHTOOL_GSET: ++ return ethtool_get_settings(dev, useraddr); ++ case ETHTOOL_SSET: ++ return ethtool_set_settings(dev, useraddr); ++ case ETHTOOL_GDRVINFO: ++ return ethtool_get_drvinfo(dev, useraddr); ++ case ETHTOOL_GREGS: ++ return ethtool_get_regs(dev, useraddr); ++ case ETHTOOL_GWOL: ++ return ethtool_get_wol(dev, useraddr); ++ case ETHTOOL_SWOL: ++ return ethtool_set_wol(dev, useraddr); ++ case ETHTOOL_GMSGLVL: ++ return ethtool_get_msglevel(dev, useraddr); ++ case ETHTOOL_SMSGLVL: ++ return ethtool_set_msglevel(dev, useraddr); ++ case ETHTOOL_NWAY_RST: ++ return ethtool_nway_reset(dev); ++ case ETHTOOL_GLINK: ++ return ethtool_get_link(dev, useraddr); ++ case ETHTOOL_GEEPROM: ++ return ethtool_get_eeprom(dev, useraddr); ++ case ETHTOOL_SEEPROM: ++ return ethtool_set_eeprom(dev, useraddr); ++ case ETHTOOL_GCOALESCE: ++ return ethtool_get_coalesce(dev, useraddr); ++ case ETHTOOL_SCOALESCE: ++ return ethtool_set_coalesce(dev, useraddr); ++ case ETHTOOL_GRINGPARAM: ++ return ethtool_get_ringparam(dev, useraddr); ++ case ETHTOOL_SRINGPARAM: ++ return ethtool_set_ringparam(dev, useraddr); ++ case ETHTOOL_GPAUSEPARAM: ++ return ethtool_get_pauseparam(dev, useraddr); ++ case ETHTOOL_SPAUSEPARAM: ++ return ethtool_set_pauseparam(dev, useraddr); ++ case ETHTOOL_GRXCSUM: ++ return ethtool_get_rx_csum(dev, useraddr); ++ case ETHTOOL_SRXCSUM: ++ return ethtool_set_rx_csum(dev, useraddr); ++ case ETHTOOL_GTXCSUM: ++ return ethtool_get_tx_csum(dev, useraddr); ++ case ETHTOOL_STXCSUM: ++ return ethtool_set_tx_csum(dev, useraddr); ++ case ETHTOOL_GSG: ++ return 
ethtool_get_sg(dev, useraddr); ++ case ETHTOOL_SSG: ++ return ethtool_set_sg(dev, useraddr); ++ case ETHTOOL_GTSO: ++ return ethtool_get_tso(dev, useraddr); ++ case ETHTOOL_STSO: ++ return ethtool_set_tso(dev, useraddr); ++ case ETHTOOL_TEST: ++ return ethtool_self_test(dev, useraddr); ++ case ETHTOOL_GSTRINGS: ++ return ethtool_get_strings(dev, useraddr); ++ case ETHTOOL_PHYS_ID: ++ return ethtool_phys_id(dev, useraddr); ++ case ETHTOOL_GSTATS: ++ return ethtool_get_stats(dev, useraddr); ++ default: ++ return -EOPNOTSUPP; ++ } ++ ++ return -EOPNOTSUPP; ++} ++ ++#define mii_if_info _kc_mii_if_info ++struct _kc_mii_if_info { ++ int phy_id; ++ int advertising; ++ int phy_id_mask; ++ int reg_num_mask; ++ ++ unsigned int full_duplex : 1; /* is full duplex? */ ++ unsigned int force_media : 1; /* is autoneg. disabled? */ ++ ++ struct net_device *dev; ++ int (*mdio_read) (struct net_device *dev, int phy_id, int location); ++ void (*mdio_write) (struct net_device *dev, int phy_id, int location, int val); ++}; ++ ++struct ethtool_cmd; ++struct mii_ioctl_data; ++ ++#undef mii_link_ok ++#define mii_link_ok _kc_mii_link_ok ++#undef mii_nway_restart ++#define mii_nway_restart _kc_mii_nway_restart ++#undef mii_ethtool_gset ++#define mii_ethtool_gset _kc_mii_ethtool_gset ++#undef mii_ethtool_sset ++#define mii_ethtool_sset _kc_mii_ethtool_sset ++#undef mii_check_link ++#define mii_check_link _kc_mii_check_link ++extern int _kc_mii_link_ok (struct mii_if_info *mii); ++extern int _kc_mii_nway_restart (struct mii_if_info *mii); ++extern int _kc_mii_ethtool_gset(struct mii_if_info *mii, ++ struct ethtool_cmd *ecmd); ++extern int _kc_mii_ethtool_sset(struct mii_if_info *mii, ++ struct ethtool_cmd *ecmd); ++extern void _kc_mii_check_link (struct mii_if_info *mii); ++#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,6) ) ++#undef generic_mii_ioctl ++#define generic_mii_ioctl _kc_generic_mii_ioctl ++extern int _kc_generic_mii_ioctl(struct mii_if_info *mii_if, ++ struct mii_ioctl_data *mii_data, int cmd, ++ unsigned int *duplex_changed); ++#endif /* > 2.4.6 */ ++ ++ ++struct _kc_pci_dev_ext { ++ struct pci_dev *dev; ++ void *pci_drvdata; ++ struct pci_driver *driver; ++}; ++ ++struct _kc_net_dev_ext { ++ struct net_device *dev; ++ unsigned int carrier; ++}; ++ ++ ++/**************************************/ ++/* mii support */ ++ ++int _kc_mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd) ++{ ++ struct net_device *dev = mii->dev; ++ u32 advert, bmcr, lpa, nego; ++ ++ ecmd->supported = ++ (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | ++ SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | ++ SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII); ++ ++ /* only supports twisted-pair */ ++ ecmd->port = PORT_MII; ++ ++ /* only supports internal transceiver */ ++ ecmd->transceiver = XCVR_INTERNAL; ++ ++ /* this isn't fully supported at higher layers */ ++ ecmd->phy_address = mii->phy_id; ++ ++ ecmd->advertising = ADVERTISED_TP | ADVERTISED_MII; ++ advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE); ++ if (advert & ADVERTISE_10HALF) ++ ecmd->advertising |= ADVERTISED_10baseT_Half; ++ if (advert & ADVERTISE_10FULL) ++ ecmd->advertising |= ADVERTISED_10baseT_Full; ++ if (advert & ADVERTISE_100HALF) ++ ecmd->advertising |= ADVERTISED_100baseT_Half; ++ if (advert & ADVERTISE_100FULL) ++ ecmd->advertising |= ADVERTISED_100baseT_Full; ++ ++ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); ++ lpa = mii->mdio_read(dev, mii->phy_id, MII_LPA); ++ if (bmcr & BMCR_ANENABLE) { ++ ecmd->advertising |= 
ADVERTISED_Autoneg; ++ ecmd->autoneg = AUTONEG_ENABLE; ++ ++ nego = mii_nway_result(advert & lpa); ++ if (nego == LPA_100FULL || nego == LPA_100HALF) ++ ecmd->speed = SPEED_100; ++ else ++ ecmd->speed = SPEED_10; ++ if (nego == LPA_100FULL || nego == LPA_10FULL) { ++ ecmd->duplex = DUPLEX_FULL; ++ mii->full_duplex = 1; ++ } else { ++ ecmd->duplex = DUPLEX_HALF; ++ mii->full_duplex = 0; ++ } ++ } else { ++ ecmd->autoneg = AUTONEG_DISABLE; ++ ++ ecmd->speed = (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10; ++ ecmd->duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF; ++ } ++ ++ /* ignore maxtxpkt, maxrxpkt for now */ ++ ++ return 0; ++} ++ ++int _kc_mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd) ++{ ++ struct net_device *dev = mii->dev; ++ ++ if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100) ++ return -EINVAL; ++ if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) ++ return -EINVAL; ++ if (ecmd->port != PORT_MII) ++ return -EINVAL; ++ if (ecmd->transceiver != XCVR_INTERNAL) ++ return -EINVAL; ++ if (ecmd->phy_address != mii->phy_id) ++ return -EINVAL; ++ if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE) ++ return -EINVAL; ++ ++ /* ignore supported, maxtxpkt, maxrxpkt */ ++ ++ if (ecmd->autoneg == AUTONEG_ENABLE) { ++ u32 bmcr, advert, tmp; ++ ++ if ((ecmd->advertising & (ADVERTISED_10baseT_Half | ++ ADVERTISED_10baseT_Full | ++ ADVERTISED_100baseT_Half | ++ ADVERTISED_100baseT_Full)) == 0) ++ return -EINVAL; ++ ++ /* advertise only what has been requested */ ++ advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE); ++ tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4); ++ if (ADVERTISED_10baseT_Half) ++ tmp |= ADVERTISE_10HALF; ++ if (ADVERTISED_10baseT_Full) ++ tmp |= ADVERTISE_10FULL; ++ if (ADVERTISED_100baseT_Half) ++ tmp |= ADVERTISE_100HALF; ++ if (ADVERTISED_100baseT_Full) ++ tmp |= ADVERTISE_100FULL; ++ if (advert != tmp) { ++ mii->mdio_write(dev, mii->phy_id, MII_ADVERTISE, tmp); ++ mii->advertising = tmp; ++ } ++ ++ /* turn on autonegotiation, and force a renegotiate */ ++ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); ++ bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); ++ mii->mdio_write(dev, mii->phy_id, MII_BMCR, bmcr); ++ ++ mii->force_media = 0; ++ } else { ++ u32 bmcr, tmp; ++ ++ /* turn off auto negotiation, set speed and duplexity */ ++ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); ++ tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX); ++ if (ecmd->speed == SPEED_100) ++ tmp |= BMCR_SPEED100; ++ if (ecmd->duplex == DUPLEX_FULL) { ++ tmp |= BMCR_FULLDPLX; ++ mii->full_duplex = 1; ++ } else ++ mii->full_duplex = 0; ++ if (bmcr != tmp) ++ mii->mdio_write(dev, mii->phy_id, MII_BMCR, tmp); ++ ++ mii->force_media = 1; ++ } ++ return 0; ++} ++ ++int _kc_mii_link_ok (struct mii_if_info *mii) ++{ ++ /* first, a dummy read, needed to latch some MII phys */ ++ mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR); ++ if (mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR) & BMSR_LSTATUS) ++ return 1; ++ return 0; ++} ++ ++int _kc_mii_nway_restart (struct mii_if_info *mii) ++{ ++ int bmcr; ++ int r = -EINVAL; ++ ++ /* if autoneg is off, it's an error */ ++ bmcr = mii->mdio_read(mii->dev, mii->phy_id, MII_BMCR); ++ ++ if (bmcr & BMCR_ANENABLE) { ++ bmcr |= BMCR_ANRESTART; ++ mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR, bmcr); ++ r = 0; ++ } ++ ++ return r; ++} ++ ++void _kc_mii_check_link (struct mii_if_info *mii) ++{ ++ int cur_link = mii_link_ok(mii); ++ int prev_link = netif_carrier_ok(mii->dev); ++ ++ 
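	/* Sketch, not part of the patch: the _kc_mii_* helpers above (exposed as
	 * mii_ethtool_gset/sset, mii_link_ok, mii_nway_restart and mii_check_link
	 * via the #defines) all operate on a struct mii_if_info the driver fills
	 * in once, e.g. (field values and callback names are hypothetical):
	 *
	 *	struct mii_if_info mii;
	 *	mii.dev          = netdev;
	 *	mii.phy_id       = 1;
	 *	mii.phy_id_mask  = 0x1f;
	 *	mii.reg_num_mask = 0x1f;
	 *	mii.mdio_read    = my_mdio_read;
	 *	mii.mdio_write   = my_mdio_write;
	 *
	 * After that, mii_check_link(&mii) keeps netif_carrier_on/off in step with
	 * the PHY link bit, and mii_ethtool_gset(&mii, &ecmd) derives speed and
	 * duplex for ethtool from the advertisement and BMCR registers. */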
if (cur_link && !prev_link) ++ netif_carrier_on(mii->dev); ++ else if (prev_link && !cur_link) ++ netif_carrier_off(mii->dev); ++} ++ ++#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,6) ) ++int _kc_generic_mii_ioctl(struct mii_if_info *mii_if, ++ struct mii_ioctl_data *mii_data, int cmd, ++ unsigned int *duplex_chg_out) ++{ ++ int rc = 0; ++ unsigned int duplex_changed = 0; ++ ++ if (duplex_chg_out) ++ *duplex_chg_out = 0; ++ ++ mii_data->phy_id &= mii_if->phy_id_mask; ++ mii_data->reg_num &= mii_if->reg_num_mask; ++ ++ switch(cmd) { ++ case SIOCDEVPRIVATE: /* binary compat, remove in 2.5 */ ++ case SIOCGMIIPHY: ++ mii_data->phy_id = mii_if->phy_id; ++ /* fall through */ ++ ++ case SIOCDEVPRIVATE + 1:/* binary compat, remove in 2.5 */ ++ case SIOCGMIIREG: ++ mii_data->val_out = ++ mii_if->mdio_read(mii_if->dev, mii_data->phy_id, ++ mii_data->reg_num); ++ break; ++ ++ case SIOCDEVPRIVATE + 2:/* binary compat, remove in 2.5 */ ++ case SIOCSMIIREG: { ++ u16 val = mii_data->val_in; ++ ++ if (!capable(CAP_NET_ADMIN)) ++ return -EPERM; ++ ++ if (mii_data->phy_id == mii_if->phy_id) { ++ switch(mii_data->reg_num) { ++ case MII_BMCR: { ++ unsigned int new_duplex = 0; ++ if (val & (BMCR_RESET|BMCR_ANENABLE)) ++ mii_if->force_media = 0; ++ else ++ mii_if->force_media = 1; ++ if (mii_if->force_media && ++ (val & BMCR_FULLDPLX)) ++ new_duplex = 1; ++ if (mii_if->full_duplex != new_duplex) { ++ duplex_changed = 1; ++ mii_if->full_duplex = new_duplex; ++ } ++ break; ++ } ++ case MII_ADVERTISE: ++ mii_if->advertising = val; ++ break; ++ default: ++ /* do nothing */ ++ break; ++ } ++ } ++ ++ mii_if->mdio_write(mii_if->dev, mii_data->phy_id, ++ mii_data->reg_num, val); ++ break; ++ } ++ ++ default: ++ rc = -EOPNOTSUPP; ++ break; ++ } ++ ++ if ((rc == 0) && (duplex_chg_out) && (duplex_changed)) ++ *duplex_chg_out = 1; ++ ++ return rc; ++} ++#endif /* > 2.4.6 */ ++ +diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/kcompat.h linux-2.6.22-50/drivers/net/ixgbe/kcompat.h +--- linux-2.6.22-40/drivers/net/ixgbe/kcompat.h 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.22-50/drivers/net/ixgbe/kcompat.h 2010-08-25 17:56:26.000000000 -0400 +@@ -0,0 +1,2353 @@ ++/******************************************************************************* ++ ++ Intel 10 Gigabit PCI Express Linux driver ++ Copyright(c) 1999 - 2010 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ You should have received a copy of the GNU General Public License along with ++ this program; if not, write to the Free Software Foundation, Inc., ++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#ifndef _KCOMPAT_H_ ++#define _KCOMPAT_H_ ++ ++#ifndef LINUX_VERSION_CODE ++#include ++#else ++#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c)) ++#endif ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* NAPI enable/disable flags here */ ++/* enable NAPI for ixgbe by default */ ++#undef CONFIG_IXGBE_NAPI ++#define CONFIG_IXGBE_NAPI ++#define NAPI ++#ifdef CONFIG_IXGBE_NAPI ++#undef NAPI ++#define NAPI ++#endif /* CONFIG_IXGBE_NAPI */ ++#ifdef IXGBE_NAPI ++#undef NAPI ++#define NAPI ++#endif /* IXGBE_NAPI */ ++#ifdef IXGBE_NO_NAPI ++#undef NAPI ++#endif /* IXGBE_NO_NAPI */ ++ ++#define adapter_struct ixgbe_adapter ++#define adapter_q_vector ixgbe_q_vector ++ ++/* and finally set defines so that the code sees the changes */ ++#ifdef NAPI ++#ifndef CONFIG_IXGBE_NAPI ++#define CONFIG_IXGBE_NAPI ++#endif ++#else ++#undef CONFIG_IXGBE_NAPI ++#endif /* NAPI */ ++ ++/* MSI compatibility code for all kernels and drivers */ ++#ifdef DISABLE_PCI_MSI ++#undef CONFIG_PCI_MSI ++#endif ++#ifndef CONFIG_PCI_MSI ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) ) ++struct msix_entry { ++ u16 vector; /* kernel uses to write allocated vector */ ++ u16 entry; /* driver uses to specify entry, OS writes */ ++}; ++#endif ++#undef pci_enable_msi ++#define pci_enable_msi(a) -ENOTSUPP ++#undef pci_disable_msi ++#define pci_disable_msi(a) do {} while (0) ++#undef pci_enable_msix ++#define pci_enable_msix(a, b, c) -ENOTSUPP ++#undef pci_disable_msix ++#define pci_disable_msix(a) do {} while (0) ++#define msi_remove_pci_irq_vectors(a) do {} while (0) ++#endif /* CONFIG_PCI_MSI */ ++#ifdef DISABLE_PM ++#undef CONFIG_PM ++#endif ++ ++#ifdef DISABLE_NET_POLL_CONTROLLER ++#undef CONFIG_NET_POLL_CONTROLLER ++#endif ++ ++#ifndef PMSG_SUSPEND ++#define PMSG_SUSPEND 3 ++#endif ++ ++/* generic boolean compatibility */ ++#undef TRUE ++#undef FALSE ++#define TRUE true ++#define FALSE false ++#ifdef GCC_VERSION ++#if ( GCC_VERSION < 3000 ) ++#define _Bool char ++#endif ++#else ++#define _Bool char ++#endif ++#ifndef bool ++#define bool _Bool ++#define true 1 ++#define false 0 ++#endif ++ ++ ++/* kernels less than 2.4.14 don't have this */ ++#ifndef ETH_P_8021Q ++#define ETH_P_8021Q 0x8100 ++#endif ++ ++#ifndef module_param ++#define module_param(v,t,p) MODULE_PARM(v, "i"); ++#endif ++ ++#ifndef DMA_64BIT_MASK ++#define DMA_64BIT_MASK 0xffffffffffffffffULL ++#endif ++ ++#ifndef DMA_32BIT_MASK ++#define DMA_32BIT_MASK 0x00000000ffffffffULL ++#endif ++ ++#ifndef PCI_CAP_ID_EXP ++#define PCI_CAP_ID_EXP 0x10 ++#endif ++ ++#ifndef PCIE_LINK_STATE_L0S ++#define PCIE_LINK_STATE_L0S 1 ++#endif ++#ifndef PCIE_LINK_STATE_L1 ++#define PCIE_LINK_STATE_L1 2 ++#endif ++ ++#ifndef mmiowb ++#ifdef CONFIG_IA64 ++#define mmiowb() asm volatile ("mf.a" ::: "memory") ++#else ++#define mmiowb() ++#endif ++#endif ++ ++#ifndef SET_NETDEV_DEV ++#define SET_NETDEV_DEV(net, pdev) ++#endif ++ ++#ifndef HAVE_FREE_NETDEV ++#define free_netdev(x) kfree(x) ++#endif ++ ++#ifdef HAVE_POLL_CONTROLLER ++#define CONFIG_NET_POLL_CONTROLLER ++#endif ++ ++#ifndef NETDEV_TX_OK ++#define NETDEV_TX_OK 0 ++#endif ++ ++#ifndef NETDEV_TX_BUSY ++#define NETDEV_TX_BUSY 1 ++#endif ++ ++#ifndef NETDEV_TX_LOCKED ++#define NETDEV_TX_LOCKED -1 ++#endif ++ ++#ifdef CONFIG_PCI_IOV 
++#define VMDQ_P(p) ((p) + adapter->num_vfs) ++#else ++#define VMDQ_P(p) (p) ++#endif ++ ++#ifndef SKB_DATAREF_SHIFT ++/* if we do not have the infrastructure to detect if skb_header is cloned ++ just return false in all cases */ ++#define skb_header_cloned(x) 0 ++#endif ++ ++#ifndef NETIF_F_GSO ++#define gso_size tso_size ++#define gso_segs tso_segs ++#endif ++ ++#ifndef NETIF_F_GRO ++#define vlan_gro_receive(_napi, _vlgrp, _vlan, _skb) \ ++ vlan_hwaccel_receive_skb(_skb, _vlgrp, _vlan) ++#define napi_gro_receive(_napi, _skb) netif_receive_skb(_skb) ++#endif ++ ++#ifndef NETIF_F_SCTP_CSUM ++#define NETIF_F_SCTP_CSUM 0 ++#endif ++ ++#ifndef NETIF_F_LRO ++#define NETIF_F_LRO (1 << 15) ++#endif ++ ++#ifndef ETH_FLAG_LRO ++#define ETH_FLAG_LRO (1 << 15) ++#endif ++ ++#ifndef ETH_FLAG_NTUPLE ++#define ETH_FLAG_NTUPLE 0 ++#endif ++ ++#ifndef IPPROTO_SCTP ++#define IPPROTO_SCTP 132 ++#endif ++ ++#ifndef CHECKSUM_PARTIAL ++#define CHECKSUM_PARTIAL CHECKSUM_HW ++#define CHECKSUM_COMPLETE CHECKSUM_HW ++#endif ++ ++#ifndef __read_mostly ++#define __read_mostly ++#endif ++ ++#ifndef HAVE_NETIF_MSG ++#define HAVE_NETIF_MSG 1 ++enum { ++ NETIF_MSG_DRV = 0x0001, ++ NETIF_MSG_PROBE = 0x0002, ++ NETIF_MSG_LINK = 0x0004, ++ NETIF_MSG_TIMER = 0x0008, ++ NETIF_MSG_IFDOWN = 0x0010, ++ NETIF_MSG_IFUP = 0x0020, ++ NETIF_MSG_RX_ERR = 0x0040, ++ NETIF_MSG_TX_ERR = 0x0080, ++ NETIF_MSG_TX_QUEUED = 0x0100, ++ NETIF_MSG_INTR = 0x0200, ++ NETIF_MSG_TX_DONE = 0x0400, ++ NETIF_MSG_RX_STATUS = 0x0800, ++ NETIF_MSG_PKTDATA = 0x1000, ++ NETIF_MSG_HW = 0x2000, ++ NETIF_MSG_WOL = 0x4000, ++}; ++ ++#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV) ++#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE) ++#define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK) ++#define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER) ++#define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN) ++#define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP) ++#define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR) ++#define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR) ++#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED) ++#define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR) ++#define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE) ++#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS) ++#define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA) ++#else /* HAVE_NETIF_MSG */ ++#define NETIF_MSG_HW 0x2000 ++#define NETIF_MSG_WOL 0x4000 ++#endif /* HAVE_NETIF_MSG */ ++#ifndef netif_msg_hw ++#define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW) ++#endif ++#ifndef netif_msg_wol ++#define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL) ++#endif ++ ++#ifndef MII_RESV1 ++#define MII_RESV1 0x17 /* Reserved... 
*/ ++#endif ++ ++#ifndef unlikely ++#define unlikely(_x) _x ++#define likely(_x) _x ++#endif ++ ++#ifndef WARN_ON ++#define WARN_ON(x) ++#endif ++ ++#ifndef PCI_DEVICE ++#define PCI_DEVICE(vend,dev) \ ++ .vendor = (vend), .device = (dev), \ ++ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID ++#endif ++ ++#ifndef node_online ++#define node_online(node) ((node) == 0) ++#endif ++ ++#ifndef num_online_cpus ++#define num_online_cpus() smp_num_cpus ++#endif ++ ++#ifndef numa_node_id ++#define numa_node_id() 0 ++#endif ++ ++ ++#ifndef _LINUX_RANDOM_H ++#include ++#endif ++ ++#ifndef DECLARE_BITMAP ++#ifndef BITS_TO_LONGS ++#define BITS_TO_LONGS(bits) (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG) ++#endif ++#define DECLARE_BITMAP(name,bits) long name[BITS_TO_LONGS(bits)] ++#endif ++ ++#ifndef VLAN_HLEN ++#define VLAN_HLEN 4 ++#endif ++ ++#ifndef VLAN_ETH_HLEN ++#define VLAN_ETH_HLEN 18 ++#endif ++ ++#ifndef VLAN_ETH_FRAME_LEN ++#define VLAN_ETH_FRAME_LEN 1518 ++#endif ++ ++#if !defined(IXGBE_DCA) && !defined(IGB_DCA) ++#define dca_get_tag(b) 0 ++#define dca_add_requester(a) -1 ++#define dca_remove_requester(b) do { } while(0) ++#define DCA_PROVIDER_ADD 0x0001 ++#define DCA_PROVIDER_REMOVE 0x0002 ++#endif ++ ++#ifndef DCA_GET_TAG_TWO_ARGS ++#define dca3_get_tag(a,b) dca_get_tag(b) ++#endif ++ ++#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS ++#if defined(__i386__) || defined(__x86_64__) ++#define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS ++#endif ++#endif ++ ++#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS ++#ifdef NET_IP_ALIGN ++#undef NET_IP_ALIGN ++#endif ++#ifdef NET_SKB_PAD ++#undef NET_SKB_PAD ++#endif ++#ifdef netdev_alloc_skb_ip_align ++#undef netdev_alloc_skb_ip_align ++#endif ++extern struct sk_buff *_kc_netdev_alloc_skb_ip_align(struct net_device *dev, ++ unsigned int length); ++#define NET_IP_ALIGN 0 ++#define NET_SKB_PAD L1_CACHE_BYTES ++#define netdev_alloc_skb_ip_align(n, l) _kc_netdev_alloc_skb_ip_align(n, l) ++#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ ++ ++/* taken from 2.6.24 definition in linux/kernel.h */ ++#ifndef IS_ALIGNED ++#define IS_ALIGNED(x,a) (((x) % ((typeof(x))(a))) == 0) ++#endif ++ ++/*****************************************************************************/ ++/* Installations with ethtool version without eeprom, adapter id, or statistics ++ * support */ ++ ++#ifndef ETH_GSTRING_LEN ++#define ETH_GSTRING_LEN 32 ++#endif ++ ++#ifndef ETHTOOL_GSTATS ++#define ETHTOOL_GSTATS 0x1d ++#undef ethtool_drvinfo ++#define ethtool_drvinfo k_ethtool_drvinfo ++struct k_ethtool_drvinfo { ++ u32 cmd; ++ char driver[32]; ++ char version[32]; ++ char fw_version[32]; ++ char bus_info[32]; ++ char reserved1[32]; ++ char reserved2[16]; ++ u32 n_stats; ++ u32 testinfo_len; ++ u32 eedump_len; ++ u32 regdump_len; ++}; ++ ++struct ethtool_stats { ++ u32 cmd; ++ u32 n_stats; ++ u64 data[0]; ++}; ++#endif /* ETHTOOL_GSTATS */ ++ ++#ifndef ETHTOOL_PHYS_ID ++#define ETHTOOL_PHYS_ID 0x1c ++#endif /* ETHTOOL_PHYS_ID */ ++ ++#ifndef ETHTOOL_GSTRINGS ++#define ETHTOOL_GSTRINGS 0x1b ++enum ethtool_stringset { ++ ETH_SS_TEST = 0, ++ ETH_SS_STATS, ++}; ++struct ethtool_gstrings { ++ u32 cmd; /* ETHTOOL_GSTRINGS */ ++ u32 string_set; /* string set id e.c. 
ETH_SS_TEST, etc*/ ++ u32 len; /* number of strings in the string set */ ++ u8 data[0]; ++}; ++#endif /* ETHTOOL_GSTRINGS */ ++ ++#ifndef ETHTOOL_TEST ++#define ETHTOOL_TEST 0x1a ++enum ethtool_test_flags { ++ ETH_TEST_FL_OFFLINE = (1 << 0), ++ ETH_TEST_FL_FAILED = (1 << 1), ++}; ++struct ethtool_test { ++ u32 cmd; ++ u32 flags; ++ u32 reserved; ++ u32 len; ++ u64 data[0]; ++}; ++#endif /* ETHTOOL_TEST */ ++ ++#ifndef ETHTOOL_GEEPROM ++#define ETHTOOL_GEEPROM 0xb ++#undef ETHTOOL_GREGS ++struct ethtool_eeprom { ++ u32 cmd; ++ u32 magic; ++ u32 offset; ++ u32 len; ++ u8 data[0]; ++}; ++ ++struct ethtool_value { ++ u32 cmd; ++ u32 data; ++}; ++#endif /* ETHTOOL_GEEPROM */ ++ ++#ifndef ETHTOOL_GLINK ++#define ETHTOOL_GLINK 0xa ++#endif /* ETHTOOL_GLINK */ ++ ++#ifndef ETHTOOL_GWOL ++#define ETHTOOL_GWOL 0x5 ++#define ETHTOOL_SWOL 0x6 ++#define SOPASS_MAX 6 ++struct ethtool_wolinfo { ++ u32 cmd; ++ u32 supported; ++ u32 wolopts; ++ u8 sopass[SOPASS_MAX]; /* SecureOn(tm) password */ ++}; ++#endif /* ETHTOOL_GWOL */ ++ ++#ifndef ETHTOOL_GREGS ++#define ETHTOOL_GREGS 0x00000004 /* Get NIC registers */ ++#define ethtool_regs _kc_ethtool_regs ++/* for passing big chunks of data */ ++struct _kc_ethtool_regs { ++ u32 cmd; ++ u32 version; /* driver-specific, indicates different chips/revs */ ++ u32 len; /* bytes */ ++ u8 data[0]; ++}; ++#endif /* ETHTOOL_GREGS */ ++ ++#ifndef ETHTOOL_GMSGLVL ++#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */ ++#endif ++#ifndef ETHTOOL_SMSGLVL ++#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level, priv. */ ++#endif ++#ifndef ETHTOOL_NWAY_RST ++#define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation, priv */ ++#endif ++#ifndef ETHTOOL_GLINK ++#define ETHTOOL_GLINK 0x0000000a /* Get link status */ ++#endif ++#ifndef ETHTOOL_GEEPROM ++#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */ ++#endif ++#ifndef ETHTOOL_SEEPROM ++#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data */ ++#endif ++#ifndef ETHTOOL_GCOALESCE ++#define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */ ++/* for configuring coalescing parameters of chip */ ++#define ethtool_coalesce _kc_ethtool_coalesce ++struct _kc_ethtool_coalesce { ++ u32 cmd; /* ETHTOOL_{G,S}COALESCE */ ++ ++ /* How many usecs to delay an RX interrupt after ++ * a packet arrives. If 0, only rx_max_coalesced_frames ++ * is used. ++ */ ++ u32 rx_coalesce_usecs; ++ ++ /* How many packets to delay an RX interrupt after ++ * a packet arrives. If 0, only rx_coalesce_usecs is ++ * used. It is illegal to set both usecs and max frames ++ * to zero as this would cause RX interrupts to never be ++ * generated. ++ */ ++ u32 rx_max_coalesced_frames; ++ ++ /* Same as above two parameters, except that these values ++ * apply while an IRQ is being serviced by the host. Not ++ * all cards support this feature and the values are ignored ++ * in that case. ++ */ ++ u32 rx_coalesce_usecs_irq; ++ u32 rx_max_coalesced_frames_irq; ++ ++ /* How many usecs to delay a TX interrupt after ++ * a packet is sent. If 0, only tx_max_coalesced_frames ++ * is used. ++ */ ++ u32 tx_coalesce_usecs; ++ ++ /* How many packets to delay a TX interrupt after ++ * a packet is sent. If 0, only tx_coalesce_usecs is ++ * used. It is illegal to set both usecs and max frames ++ * to zero as this would cause TX interrupts to never be ++ * generated. ++ */ ++ u32 tx_max_coalesced_frames; ++ ++ /* Same as above two parameters, except that these values ++ * apply while an IRQ is being serviced by the host. 
Not ++ * all cards support this feature and the values are ignored ++ * in that case. ++ */ ++ u32 tx_coalesce_usecs_irq; ++ u32 tx_max_coalesced_frames_irq; ++ ++ /* How many usecs to delay in-memory statistics ++ * block updates. Some drivers do not have an in-memory ++ * statistic block, and in such cases this value is ignored. ++ * This value must not be zero. ++ */ ++ u32 stats_block_coalesce_usecs; ++ ++ /* Adaptive RX/TX coalescing is an algorithm implemented by ++ * some drivers to improve latency under low packet rates and ++ * improve throughput under high packet rates. Some drivers ++ * only implement one of RX or TX adaptive coalescing. Anything ++ * not implemented by the driver causes these values to be ++ * silently ignored. ++ */ ++ u32 use_adaptive_rx_coalesce; ++ u32 use_adaptive_tx_coalesce; ++ ++ /* When the packet rate (measured in packets per second) ++ * is below pkt_rate_low, the {rx,tx}_*_low parameters are ++ * used. ++ */ ++ u32 pkt_rate_low; ++ u32 rx_coalesce_usecs_low; ++ u32 rx_max_coalesced_frames_low; ++ u32 tx_coalesce_usecs_low; ++ u32 tx_max_coalesced_frames_low; ++ ++ /* When the packet rate is below pkt_rate_high but above ++ * pkt_rate_low (both measured in packets per second) the ++ * normal {rx,tx}_* coalescing parameters are used. ++ */ ++ ++ /* When the packet rate is (measured in packets per second) ++ * is above pkt_rate_high, the {rx,tx}_*_high parameters are ++ * used. ++ */ ++ u32 pkt_rate_high; ++ u32 rx_coalesce_usecs_high; ++ u32 rx_max_coalesced_frames_high; ++ u32 tx_coalesce_usecs_high; ++ u32 tx_max_coalesced_frames_high; ++ ++ /* How often to do adaptive coalescing packet rate sampling, ++ * measured in seconds. Must not be zero. ++ */ ++ u32 rate_sample_interval; ++}; ++#endif /* ETHTOOL_GCOALESCE */ ++ ++#ifndef ETHTOOL_SCOALESCE ++#define ETHTOOL_SCOALESCE 0x0000000f /* Set coalesce config. */ ++#endif ++#ifndef ETHTOOL_GRINGPARAM ++#define ETHTOOL_GRINGPARAM 0x00000010 /* Get ring parameters */ ++/* for configuring RX/TX ring parameters */ ++#define ethtool_ringparam _kc_ethtool_ringparam ++struct _kc_ethtool_ringparam { ++ u32 cmd; /* ETHTOOL_{G,S}RINGPARAM */ ++ ++ /* Read only attributes. These indicate the maximum number ++ * of pending RX/TX ring entries the driver will allow the ++ * user to set. ++ */ ++ u32 rx_max_pending; ++ u32 rx_mini_max_pending; ++ u32 rx_jumbo_max_pending; ++ u32 tx_max_pending; ++ ++ /* Values changeable by the user. The valid values are ++ * in the range 1 to the "*_max_pending" counterpart above. ++ */ ++ u32 rx_pending; ++ u32 rx_mini_pending; ++ u32 rx_jumbo_pending; ++ u32 tx_pending; ++}; ++#endif /* ETHTOOL_GRINGPARAM */ ++ ++#ifndef ETHTOOL_SRINGPARAM ++#define ETHTOOL_SRINGPARAM 0x00000011 /* Set ring parameters, priv. */ ++#endif ++#ifndef ETHTOOL_GPAUSEPARAM ++#define ETHTOOL_GPAUSEPARAM 0x00000012 /* Get pause parameters */ ++/* for configuring link flow control parameters */ ++#define ethtool_pauseparam _kc_ethtool_pauseparam ++struct _kc_ethtool_pauseparam { ++ u32 cmd; /* ETHTOOL_{G,S}PAUSEPARAM */ ++ ++ /* If the link is being auto-negotiated (via ethtool_cmd.autoneg ++ * being true) the user may set 'autoneg' here non-zero to have the ++ * pause parameters be auto-negotiated too. In such a case, the ++ * {rx,tx}_pause values below determine what capabilities are ++ * advertised. ++ * ++ * If 'autoneg' is zero or the link is not being auto-negotiated, ++ * then {rx,tx}_pause force the driver to use/not-use pause ++ * flow control. 
++ */ ++ u32 autoneg; ++ u32 rx_pause; ++ u32 tx_pause; ++}; ++#endif /* ETHTOOL_GPAUSEPARAM */ ++ ++#ifndef ETHTOOL_SPAUSEPARAM ++#define ETHTOOL_SPAUSEPARAM 0x00000013 /* Set pause parameters. */ ++#endif ++#ifndef ETHTOOL_GRXCSUM ++#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */ ++#endif ++#ifndef ETHTOOL_SRXCSUM ++#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */ ++#endif ++#ifndef ETHTOOL_GTXCSUM ++#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */ ++#endif ++#ifndef ETHTOOL_STXCSUM ++#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */ ++#endif ++#ifndef ETHTOOL_GSG ++#define ETHTOOL_GSG 0x00000018 /* Get scatter-gather enable ++ * (ethtool_value) */ ++#endif ++#ifndef ETHTOOL_SSG ++#define ETHTOOL_SSG 0x00000019 /* Set scatter-gather enable ++ * (ethtool_value). */ ++#endif ++#ifndef ETHTOOL_TEST ++#define ETHTOOL_TEST 0x0000001a /* execute NIC self-test, priv. */ ++#endif ++#ifndef ETHTOOL_GSTRINGS ++#define ETHTOOL_GSTRINGS 0x0000001b /* get specified string set */ ++#endif ++#ifndef ETHTOOL_PHYS_ID ++#define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */ ++#endif ++#ifndef ETHTOOL_GSTATS ++#define ETHTOOL_GSTATS 0x0000001d /* get NIC-specific statistics */ ++#endif ++#ifndef ETHTOOL_GTSO ++#define ETHTOOL_GTSO 0x0000001e /* Get TSO enable (ethtool_value) */ ++#endif ++#ifndef ETHTOOL_STSO ++#define ETHTOOL_STSO 0x0000001f /* Set TSO enable (ethtool_value) */ ++#endif ++ ++#ifndef ETHTOOL_BUSINFO_LEN ++#define ETHTOOL_BUSINFO_LEN 32 ++#endif ++ ++#ifndef RHEL_RELEASE_CODE ++#define RHEL_RELEASE_CODE 0 ++#endif ++#ifndef RHEL_RELEASE_VERSION ++#define RHEL_RELEASE_VERSION(a,b) 0 ++#endif ++#ifndef AX_RELEASE_CODE ++#define AX_RELEASE_CODE 0 ++#endif ++#ifndef AX_RELEASE_VERSION ++#define AX_RELEASE_VERSION(a,b) 0 ++#endif ++ ++/*****************************************************************************/ ++/* 2.4.3 => 2.4.0 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) ) ++ ++/**************************************/ ++/* PCI DRIVER API */ ++ ++#ifndef pci_set_dma_mask ++#define pci_set_dma_mask _kc_pci_set_dma_mask ++extern int _kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask); ++#endif ++ ++#ifndef pci_request_regions ++#define pci_request_regions _kc_pci_request_regions ++extern int _kc_pci_request_regions(struct pci_dev *pdev, char *res_name); ++#endif ++ ++#ifndef pci_release_regions ++#define pci_release_regions _kc_pci_release_regions ++extern void _kc_pci_release_regions(struct pci_dev *pdev); ++#endif ++ ++/**************************************/ ++/* NETWORK DRIVER API */ ++ ++#ifndef alloc_etherdev ++#define alloc_etherdev _kc_alloc_etherdev ++extern struct net_device * _kc_alloc_etherdev(int sizeof_priv); ++#endif ++ ++#ifndef is_valid_ether_addr ++#define is_valid_ether_addr _kc_is_valid_ether_addr ++extern int _kc_is_valid_ether_addr(u8 *addr); ++#endif ++ ++/**************************************/ ++/* MISCELLANEOUS */ ++ ++#ifndef INIT_TQUEUE ++#define INIT_TQUEUE(_tq, _routine, _data) \ ++ do { \ ++ INIT_LIST_HEAD(&(_tq)->list); \ ++ (_tq)->sync = 0; \ ++ (_tq)->routine = _routine; \ ++ (_tq)->data = _data; \ ++ } while (0) ++#endif ++ ++#endif /* 2.4.3 => 2.4.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,5) ) ++/* Generic MII registers. 
*/ ++#define MII_BMCR 0x00 /* Basic mode control register */ ++#define MII_BMSR 0x01 /* Basic mode status register */ ++#define MII_PHYSID1 0x02 /* PHYS ID 1 */ ++#define MII_PHYSID2 0x03 /* PHYS ID 2 */ ++#define MII_ADVERTISE 0x04 /* Advertisement control reg */ ++#define MII_LPA 0x05 /* Link partner ability reg */ ++#define MII_EXPANSION 0x06 /* Expansion register */ ++/* Basic mode control register. */ ++#define BMCR_FULLDPLX 0x0100 /* Full duplex */ ++#define BMCR_ANENABLE 0x1000 /* Enable auto negotiation */ ++/* Basic mode status register. */ ++#define BMSR_ERCAP 0x0001 /* Ext-reg capability */ ++#define BMSR_ANEGCAPABLE 0x0008 /* Able to do auto-negotiation */ ++#define BMSR_10HALF 0x0800 /* Can do 10mbps, half-duplex */ ++#define BMSR_10FULL 0x1000 /* Can do 10mbps, full-duplex */ ++#define BMSR_100HALF 0x2000 /* Can do 100mbps, half-duplex */ ++#define BMSR_100FULL 0x4000 /* Can do 100mbps, full-duplex */ ++/* Advertisement control register. */ ++#define ADVERTISE_CSMA 0x0001 /* Only selector supported */ ++#define ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */ ++#define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */ ++#define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */ ++#define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */ ++#define ADVERTISE_ALL (ADVERTISE_10HALF | ADVERTISE_10FULL | \ ++ ADVERTISE_100HALF | ADVERTISE_100FULL) ++/* Expansion register for auto-negotiation. */ ++#define EXPANSION_ENABLENPAGE 0x0004 /* This enables npage words */ ++#endif ++ ++/*****************************************************************************/ ++/* 2.4.6 => 2.4.3 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) ) ++ ++#ifndef pci_set_power_state ++#define pci_set_power_state _kc_pci_set_power_state ++extern int _kc_pci_set_power_state(struct pci_dev *dev, int state); ++#endif ++ ++#ifndef pci_enable_wake ++#define pci_enable_wake _kc_pci_enable_wake ++extern int _kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable); ++#endif ++ ++#ifndef pci_disable_device ++#define pci_disable_device _kc_pci_disable_device ++extern void _kc_pci_disable_device(struct pci_dev *pdev); ++#endif ++ ++/* PCI PM entry point syntax changed, so don't support suspend/resume */ ++#undef CONFIG_PM ++ ++#endif /* 2.4.6 => 2.4.3 */ ++ ++#ifndef HAVE_PCI_SET_MWI ++#define pci_set_mwi(X) pci_write_config_word(X, \ ++ PCI_COMMAND, adapter->hw.bus.pci_cmd_word | \ ++ PCI_COMMAND_INVALIDATE); ++#define pci_clear_mwi(X) pci_write_config_word(X, \ ++ PCI_COMMAND, adapter->hw.bus.pci_cmd_word & \ ++ ~PCI_COMMAND_INVALIDATE); ++#endif ++ ++/*****************************************************************************/ ++/* 2.4.10 => 2.4.9 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,10) ) ++ ++/**************************************/ ++/* MODULE API */ ++ ++#ifndef MODULE_LICENSE ++ #define MODULE_LICENSE(X) ++#endif ++ ++/**************************************/ ++/* OTHER */ ++ ++#undef min ++#define min(x,y) ({ \ ++ const typeof(x) _x = (x); \ ++ const typeof(y) _y = (y); \ ++ (void) (&_x == &_y); \ ++ _x < _y ? _x : _y; }) ++ ++#undef max ++#define max(x,y) ({ \ ++ const typeof(x) _x = (x); \ ++ const typeof(y) _y = (y); \ ++ (void) (&_x == &_y); \ ++ _x > _y ? _x : _y; }) ++ ++#define min_t(type,x,y) ({ \ ++ type _x = (x); \ ++ type _y = (y); \ ++ _x < _y ? _x : _y; }) ++ ++#define max_t(type,x,y) ({ \ ++ type _x = (x); \ ++ type _y = (y); \ ++ _x > _y ? 
_x : _y; }) ++ ++#ifndef list_for_each_safe ++#define list_for_each_safe(pos, n, head) \ ++ for (pos = (head)->next, n = pos->next; pos != (head); \ ++ pos = n, n = pos->next) ++#endif ++ ++#ifndef ____cacheline_aligned_in_smp ++#ifdef CONFIG_SMP ++#define ____cacheline_aligned_in_smp ____cacheline_aligned ++#else ++#define ____cacheline_aligned_in_smp ++#endif /* CONFIG_SMP */ ++#endif ++ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) ) ++extern int _kc_snprintf(char * buf, size_t size, const char *fmt, ...); ++#define snprintf(buf, size, fmt, args...) _kc_snprintf(buf, size, fmt, ##args) ++extern int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args); ++#define vsnprintf(buf, size, fmt, args) _kc_vsnprintf(buf, size, fmt, args) ++#else /* 2.4.8 => 2.4.9 */ ++extern int snprintf(char * buf, size_t size, const char *fmt, ...); ++extern int vsnprintf(char *buf, size_t size, const char *fmt, va_list args); ++#endif ++#endif /* 2.4.10 -> 2.4.6 */ ++ ++ ++/*****************************************************************************/ ++/* 2.4.13 => 2.4.10 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) ) ++ ++/**************************************/ ++/* PCI DMA MAPPING */ ++ ++#ifndef virt_to_page ++ #define virt_to_page(v) (mem_map + (virt_to_phys(v) >> PAGE_SHIFT)) ++#endif ++ ++#ifndef pci_map_page ++#define pci_map_page _kc_pci_map_page ++extern u64 _kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, size_t size, int direction); ++#endif ++ ++#ifndef pci_unmap_page ++#define pci_unmap_page _kc_pci_unmap_page ++extern void _kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, int direction); ++#endif ++ ++/* pci_set_dma_mask takes dma_addr_t, which is only 32-bits prior to 2.4.13 */ ++ ++#undef DMA_32BIT_MASK ++#define DMA_32BIT_MASK 0xffffffff ++#undef DMA_64BIT_MASK ++#define DMA_64BIT_MASK 0xffffffff ++ ++/**************************************/ ++/* OTHER */ ++ ++#ifndef cpu_relax ++#define cpu_relax() rep_nop() ++#endif ++ ++struct vlan_ethhdr { ++ unsigned char h_dest[ETH_ALEN]; ++ unsigned char h_source[ETH_ALEN]; ++ unsigned short h_vlan_proto; ++ unsigned short h_vlan_TCI; ++ unsigned short h_vlan_encapsulated_proto; ++}; ++#endif /* 2.4.13 => 2.4.10 */ ++ ++/*****************************************************************************/ ++/* 2.4.17 => 2.4.12 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,17) ) ++ ++#ifndef __devexit_p ++ #define __devexit_p(x) &(x) ++#endif ++ ++#endif /* 2.4.17 => 2.4.13 */ ++ ++/*****************************************************************************/ ++/* 2.4.20 => 2.4.19 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,20) ) ++ ++/* we won't support NAPI on less than 2.4.20 */ ++#ifdef NAPI ++#undef NAPI ++#undef CONFIG_IXGBE_NAPI ++#endif ++ ++#endif /* 2.4.20 => 2.4.19 */ ++ ++/*****************************************************************************/ ++/* < 2.4.21 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,21) ) ++#define skb_pad(x,y) _kc_skb_pad(x, y) ++struct sk_buff * _kc_skb_pad(struct sk_buff *skb, int pad); ++#endif /* < 2.4.21 */ ++ ++/*****************************************************************************/ ++/* 2.4.22 => 2.4.17 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) ) ++#define pci_name(x) ((x)->slot_name) ++#endif ++ ++/*****************************************************************************/ ++/* 2.4.22 => 2.4.17 */ ++ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) ) ++#ifndef IXGBE_NO_LRO ++/* Don't enable 
LRO for these legacy kernels */ ++#define IXGBE_NO_LRO ++#endif ++#endif ++ ++/*****************************************************************************/ ++/*****************************************************************************/ ++/* 2.4.23 => 2.4.22 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23) ) ++/*****************************************************************************/ ++#ifdef NAPI ++#ifndef netif_poll_disable ++#define netif_poll_disable(x) _kc_netif_poll_disable(x) ++static inline void _kc_netif_poll_disable(struct net_device *netdev) ++{ ++ while (test_and_set_bit(__LINK_STATE_RX_SCHED, &netdev->state)) { ++ /* No hurry */ ++ current->state = TASK_INTERRUPTIBLE; ++ schedule_timeout(1); ++ } ++} ++#endif ++#ifndef netif_poll_enable ++#define netif_poll_enable(x) _kc_netif_poll_enable(x) ++static inline void _kc_netif_poll_enable(struct net_device *netdev) ++{ ++ clear_bit(__LINK_STATE_RX_SCHED, &netdev->state); ++} ++#endif ++#endif /* NAPI */ ++#ifndef netif_tx_disable ++#define netif_tx_disable(x) _kc_netif_tx_disable(x) ++static inline void _kc_netif_tx_disable(struct net_device *dev) ++{ ++ spin_lock_bh(&dev->xmit_lock); ++ netif_stop_queue(dev); ++ spin_unlock_bh(&dev->xmit_lock); ++} ++#endif ++#endif /* 2.4.23 => 2.4.22 */ ++ ++/*****************************************************************************/ ++/* 2.6.4 => 2.6.0 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,25) || \ ++ ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \ ++ LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) ) ++#define ETHTOOL_OPS_COMPAT ++#endif /* 2.6.4 => 2.6.0 */ ++ ++/*****************************************************************************/ ++/* 2.5.71 => 2.4.x */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,71) ) ++#define sk_protocol protocol ++#define pci_get_device pci_find_device ++#endif /* 2.5.70 => 2.4.x */ ++ ++/*****************************************************************************/ ++/* < 2.4.27 or 2.6.0 <= 2.6.5 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) || \ ++ ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \ ++ LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) ) ++ ++#ifndef netif_msg_init ++#define netif_msg_init _kc_netif_msg_init ++static inline u32 _kc_netif_msg_init(int debug_value, int default_msg_enable_bits) ++{ ++ /* use default */ ++ if (debug_value < 0 || debug_value >= (sizeof(u32) * 8)) ++ return default_msg_enable_bits; ++ if (debug_value == 0) /* no output */ ++ return 0; ++ /* set low N bits */ ++ return (1 << debug_value) -1; ++} ++#endif ++ ++#endif /* < 2.4.27 or 2.6.0 <= 2.6.5 */ ++/*****************************************************************************/ ++#if (( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) || \ ++ (( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) && \ ++ ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) ))) ++#define netdev_priv(x) x->priv ++#endif ++ ++/*****************************************************************************/ ++/* <= 2.5.0 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) ) ++#undef pci_register_driver ++#define pci_register_driver pci_module_init ++ ++/* ++ * Most of the dma compat code is copied/modifed from the 2.4.37 ++ * /include/linux/libata-compat.h header file ++ */ ++/* These definitions mirror those in pci.h, so they can be used ++ * interchangeably with their PCI_ counterparts */ ++enum dma_data_direction { ++ DMA_BIDIRECTIONAL = 0, ++ DMA_TO_DEVICE = 1, ++ DMA_FROM_DEVICE = 2, ++ DMA_NONE = 3, ++}; ++ ++struct device { ++ struct pci_dev pdev; ++}; ++ 
++static inline struct pci_dev *to_pci_dev (struct device *dev) ++{ ++ return (struct pci_dev *) dev; ++} ++static inline struct device *pci_dev_to_dev(struct pci_dev *pdev) ++{ ++ return (struct device *) pdev; ++} ++ ++#define pdev_printk(lvl, pdev, fmt, args...) \ ++ printk("%s %s: " fmt, lvl, pci_name(pdev), ## args) ++#define dev_err(dev, fmt, args...) \ ++ pdev_printk(KERN_ERR, to_pci_dev(dev), fmt, ## args) ++#define dev_info(dev, fmt, args...) \ ++ pdev_printk(KERN_INFO, to_pci_dev(dev), fmt, ## args) ++#define dev_warn(dev, fmt, args...) \ ++ pdev_printk(KERN_WARNING, to_pci_dev(dev), fmt, ## args) ++ ++/* NOTE: dangerous! we ignore the 'gfp' argument */ ++#define dma_alloc_coherent(dev,sz,dma,gfp) \ ++ pci_alloc_consistent(to_pci_dev(dev),(sz),(dma)) ++#define dma_free_coherent(dev,sz,addr,dma_addr) \ ++ pci_free_consistent(to_pci_dev(dev),(sz),(addr),(dma_addr)) ++ ++#define dma_map_page(dev,a,b,c,d) \ ++ pci_map_page(to_pci_dev(dev),(a),(b),(c),(d)) ++#define dma_unmap_page(dev,a,b,c) \ ++ pci_unmap_page(to_pci_dev(dev),(a),(b),(c)) ++ ++#define dma_map_single(dev,a,b,c) \ ++ pci_map_single(to_pci_dev(dev),(a),(b),(c)) ++#define dma_unmap_single(dev,a,b,c) \ ++ pci_unmap_single(to_pci_dev(dev),(a),(b),(c)) ++ ++#define dma_sync_single(dev,a,b,c) \ ++ pci_dma_sync_single(to_pci_dev(dev),(a),(b),(c)) ++ ++#define dma_set_mask(dev,mask) \ ++ pci_set_dma_mask(to_pci_dev(dev),(mask)) ++ ++/* hlist_* code - double linked lists */ ++struct hlist_head { ++ struct hlist_node *first; ++}; ++ ++struct hlist_node { ++ struct hlist_node *next, **pprev; ++}; ++ ++static inline void __hlist_del(struct hlist_node *n) ++{ ++ struct hlist_node *next = n->next; ++ struct hlist_node **pprev = n->pprev; ++ *pprev = next; ++ if (next) ++ next->pprev = pprev; ++} ++ ++static inline void hlist_del(struct hlist_node *n) ++{ ++ __hlist_del(n); ++ n->next = NULL; ++ n->pprev = NULL; ++} ++ ++static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) ++{ ++ struct hlist_node *first = h->first; ++ n->next = first; ++ if (first) ++ first->pprev = &n->next; ++ h->first = n; ++ n->pprev = &h->first; ++} ++ ++static inline int hlist_empty(const struct hlist_head *h) ++{ ++ return !h->first; ++} ++#define HLIST_HEAD_INIT { .first = NULL } ++#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL } ++#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL) ++static inline void INIT_HLIST_NODE(struct hlist_node *h) ++{ ++ h->next = NULL; ++ h->pprev = NULL; ++} ++#define hlist_entry(ptr, type, member) container_of(ptr,type,member) ++ ++#define hlist_for_each_entry(tpos, pos, head, member) \ ++ for (pos = (head)->first; \ ++ pos && ({ prefetch(pos->next); 1;}) && \ ++ ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ ++ pos = pos->next) ++ ++#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \ ++ for (pos = (head)->first; \ ++ pos && ({ n = pos->next; 1; }) && \ ++ ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ ++ pos = n) ++ ++#ifndef might_sleep ++#define might_sleep() ++#endif ++#else ++static inline struct device *pci_dev_to_dev(struct pci_dev *pdev) ++{ ++ return &pdev->dev; ++} ++#endif /* <= 2.5.0 */ ++ ++/*****************************************************************************/ ++/* 2.5.28 => 2.4.23 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) ) ++ ++static inline void _kc_synchronize_irq(void) ++{ ++ synchronize_irq(); ++} ++#undef synchronize_irq ++#define synchronize_irq(X) _kc_synchronize_irq() ++ ++#include ++#define work_struct 
tq_struct ++#undef INIT_WORK ++#define INIT_WORK(a,b) INIT_TQUEUE(a,(void (*)(void *))b,a) ++#undef container_of ++#define container_of list_entry ++#define schedule_work schedule_task ++#define flush_scheduled_work flush_scheduled_tasks ++#define cancel_work_sync(x) flush_scheduled_work() ++ ++#endif /* 2.5.28 => 2.4.17 */ ++ ++/*****************************************************************************/ ++/* 2.6.0 => 2.5.28 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) ++#undef get_cpu ++#define get_cpu() smp_processor_id() ++#undef put_cpu ++#define put_cpu() do { } while(0) ++#define MODULE_INFO(version, _version) ++#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT ++#define CONFIG_E1000_DISABLE_PACKET_SPLIT 1 ++#endif ++#define CONFIG_IGB_DISABLE_PACKET_SPLIT 1 ++ ++#define dma_set_coherent_mask(dev,mask) 1 ++ ++#undef dev_put ++#define dev_put(dev) __dev_put(dev) ++ ++#ifndef skb_fill_page_desc ++#define skb_fill_page_desc _kc_skb_fill_page_desc ++extern void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size); ++#endif ++ ++#undef ALIGN ++#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1)) ++ ++#ifndef page_count ++#define page_count(p) atomic_read(&(p)->count) ++#endif ++ ++#ifdef MAX_NUMNODES ++#undef MAX_NUMNODES ++#endif ++#define MAX_NUMNODES 1 ++ ++/* find_first_bit and find_next bit are not defined for most ++ * 2.4 kernels (except for the redhat 2.4.21 kernels ++ */ ++#include ++#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) ++#undef find_next_bit ++#define find_next_bit _kc_find_next_bit ++extern unsigned long _kc_find_next_bit(const unsigned long *addr, ++ unsigned long size, ++ unsigned long offset); ++#define find_first_bit(addr, size) find_next_bit((addr), (size), 0) ++ ++ ++#ifndef netdev_name ++static inline const char *_kc_netdev_name(const struct net_device *dev) ++{ ++ if (strchr(dev->name, '%')) ++ return "(unregistered net_device)"; ++ return dev->name; ++} ++#define netdev_name(netdev) _kc_netdev_name(netdev) ++#endif /* netdev_name */ ++#endif /* 2.6.0 => 2.5.28 */ ++ ++/*****************************************************************************/ ++/* 2.6.4 => 2.6.0 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) ++#define MODULE_VERSION(_version) MODULE_INFO(version, _version) ++#endif /* 2.6.4 => 2.6.0 */ ++ ++/*****************************************************************************/ ++/* 2.6.5 => 2.6.0 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) ++#define dma_sync_single_for_cpu dma_sync_single ++#define dma_sync_single_for_device dma_sync_single ++#ifndef pci_dma_mapping_error ++#define pci_dma_mapping_error _kc_pci_dma_mapping_error ++static inline int _kc_pci_dma_mapping_error(dma_addr_t dma_addr) ++{ ++ return dma_addr == 0; ++} ++#endif ++#endif /* 2.6.5 => 2.6.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) ++extern int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...); ++#define scnprintf(buf, size, fmt, args...) 
_kc_scnprintf(buf, size, fmt, ##args) ++#endif /* < 2.6.4 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,6) ) ++/* taken from 2.6 include/linux/bitmap.h */ ++#undef bitmap_zero ++#define bitmap_zero _kc_bitmap_zero ++static inline void _kc_bitmap_zero(unsigned long *dst, int nbits) ++{ ++ if (nbits <= BITS_PER_LONG) ++ *dst = 0UL; ++ else { ++ int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); ++ memset(dst, 0, len); ++ } ++} ++#define random_ether_addr _kc_random_ether_addr ++static inline void _kc_random_ether_addr(u8 *addr) ++{ ++ get_random_bytes(addr, ETH_ALEN); ++ addr[0] &= 0xfe; /* clear multicast */ ++ addr[0] |= 0x02; /* set local assignment */ ++} ++#define page_to_nid(x) 0 ++ ++#endif /* < 2.6.6 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) ) ++#undef if_mii ++#define if_mii _kc_if_mii ++static inline struct mii_ioctl_data *_kc_if_mii(struct ifreq *rq) ++{ ++ return (struct mii_ioctl_data *) &rq->ifr_ifru; ++} ++#endif /* < 2.6.7 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) ) ++#ifndef PCI_EXP_DEVCTL ++#define PCI_EXP_DEVCTL 8 ++#endif ++#ifndef PCI_EXP_DEVCTL_CERE ++#define PCI_EXP_DEVCTL_CERE 0x0001 ++#endif ++#define msleep(x) do { set_current_state(TASK_UNINTERRUPTIBLE); \ ++ schedule_timeout((x * HZ)/1000 + 2); \ ++ } while (0) ++ ++#endif /* < 2.6.8 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)) ++#include ++#define __iomem ++ ++#ifndef kcalloc ++#define kcalloc(n, size, flags) _kc_kzalloc(((n) * (size)), flags) ++extern void *_kc_kzalloc(size_t size, int flags); ++#endif ++#define MSEC_PER_SEC 1000L ++static inline unsigned int _kc_jiffies_to_msecs(const unsigned long j) ++{ ++#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) ++ return (MSEC_PER_SEC / HZ) * j; ++#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) ++ return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC); ++#else ++ return (j * MSEC_PER_SEC) / HZ; ++#endif ++} ++static inline unsigned long _kc_msecs_to_jiffies(const unsigned int m) ++{ ++ if (m > _kc_jiffies_to_msecs(MAX_JIFFY_OFFSET)) ++ return MAX_JIFFY_OFFSET; ++#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) ++ return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ); ++#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) ++ return m * (HZ / MSEC_PER_SEC); ++#else ++ return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC; ++#endif ++} ++ ++#define msleep_interruptible _kc_msleep_interruptible ++static inline unsigned long _kc_msleep_interruptible(unsigned int msecs) ++{ ++ unsigned long timeout = _kc_msecs_to_jiffies(msecs) + 1; ++ ++ while (timeout && !signal_pending(current)) { ++ __set_current_state(TASK_INTERRUPTIBLE); ++ timeout = schedule_timeout(timeout); ++ } ++ return _kc_jiffies_to_msecs(timeout); ++} ++ ++/* Basic mode control register. */ ++#define BMCR_SPEED1000 0x0040 /* MSB of Speed (1000) */ ++ ++#ifndef __le16 ++#define __le16 u16 ++#endif ++#ifndef __le32 ++#define __le32 u32 ++#endif ++#ifndef __le64 ++#define __le64 u64 ++#endif ++#ifndef __be16 ++#define __be16 u16 ++#endif ++ ++static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb) ++{ ++ return (struct vlan_ethhdr *)skb->mac.raw; ++} ++ ++/* Wake-On-Lan options. 
*/ ++#define WAKE_PHY (1 << 0) ++#define WAKE_UCAST (1 << 1) ++#define WAKE_MCAST (1 << 2) ++#define WAKE_BCAST (1 << 3) ++#define WAKE_ARP (1 << 4) ++#define WAKE_MAGIC (1 << 5) ++#define WAKE_MAGICSECURE (1 << 6) /* only meaningful if WAKE_MAGIC */ ++ ++#endif /* < 2.6.9 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) ) ++#ifdef module_param_array_named ++#undef module_param_array_named ++#define module_param_array_named(name, array, type, nump, perm) \ ++ static struct kparam_array __param_arr_##name \ ++ = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type, \ ++ sizeof(array[0]), array }; \ ++ module_param_call(name, param_array_set, param_array_get, \ ++ &__param_arr_##name, perm) ++#endif /* module_param_array_named */ ++/* ++ * num_online is broken for all < 2.6.10 kernels. This is needed to support ++ * Node module parameter of ixgbe. ++ */ ++#undef num_online_nodes ++#define num_online_nodes(n) 1 ++extern DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES); ++#undef node_online_map ++#define node_online_map _kcompat_node_online_map ++#endif /* < 2.6.10 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) ) ++#define PCI_D0 0 ++#define PCI_D1 1 ++#define PCI_D2 2 ++#define PCI_D3hot 3 ++#define PCI_D3cold 4 ++typedef int pci_power_t; ++#define pci_choose_state(pdev,state) state ++#define PMSG_SUSPEND 3 ++#define PCI_EXP_LNKCTL 16 ++ ++#undef NETIF_F_LLTX ++ ++#ifndef ARCH_HAS_PREFETCH ++#define prefetch(X) ++#endif ++ ++#ifndef NET_IP_ALIGN ++#define NET_IP_ALIGN 2 ++#endif ++ ++#define KC_USEC_PER_SEC 1000000L ++#define usecs_to_jiffies _kc_usecs_to_jiffies ++static inline unsigned int _kc_jiffies_to_usecs(const unsigned long j) ++{ ++#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ) ++ return (KC_USEC_PER_SEC / HZ) * j; ++#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC) ++ return (j + (HZ / KC_USEC_PER_SEC) - 1)/(HZ / KC_USEC_PER_SEC); ++#else ++ return (j * KC_USEC_PER_SEC) / HZ; ++#endif ++} ++static inline unsigned long _kc_usecs_to_jiffies(const unsigned int m) ++{ ++ if (m > _kc_jiffies_to_usecs(MAX_JIFFY_OFFSET)) ++ return MAX_JIFFY_OFFSET; ++#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ) ++ return (m + (KC_USEC_PER_SEC / HZ) - 1) / (KC_USEC_PER_SEC / HZ); ++#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC) ++ return m * (HZ / KC_USEC_PER_SEC); ++#else ++ return (m * HZ + KC_USEC_PER_SEC - 1) / KC_USEC_PER_SEC; ++#endif ++} ++#endif /* < 2.6.11 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) ) ++#include ++#define USE_REBOOT_NOTIFIER ++ ++/* Generic MII registers. */ ++#define MII_CTRL1000 0x09 /* 1000BASE-T control */ ++#define MII_STAT1000 0x0a /* 1000BASE-T status */ ++/* Advertisement control register. */ ++#define ADVERTISE_PAUSE_CAP 0x0400 /* Try for pause */ ++#define ADVERTISE_PAUSE_ASYM 0x0800 /* Try for asymmetric pause */ ++/* 1000BASE-T Control register */ ++#define ADVERTISE_1000FULL 0x0200 /* Advertise 1000BASE-T full duplex */ ++#endif /* < 2.6.12 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) ) ++#define pm_message_t u32 ++#ifndef kzalloc ++#define kzalloc _kc_kzalloc ++extern void *_kc_kzalloc(size_t size, int flags); ++#endif ++ ++/* Generic MII registers. 
*/ ++#define MII_ESTATUS 0x0f /* Extended Status */ ++/* Basic mode status register. */ ++#define BMSR_ESTATEN 0x0100 /* Extended Status in R15 */ ++/* Extended status register. */ ++#define ESTATUS_1000_TFULL 0x2000 /* Can do 1000BT Full */ ++#define ESTATUS_1000_THALF 0x1000 /* Can do 1000BT Half */ ++#endif /* < 2.6.14 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15) ) ++#ifndef vmalloc_node ++#define vmalloc_node(a,b) vmalloc(a) ++#endif /* vmalloc_node*/ ++ ++#define setup_timer(_timer, _function, _data) \ ++do { \ ++ (_timer)->function = _function; \ ++ (_timer)->data = _data; \ ++ init_timer(_timer); \ ++} while (0) ++#ifndef device_can_wakeup ++#define device_can_wakeup(dev) (1) ++#endif ++#ifndef device_set_wakeup_enable ++#define device_set_wakeup_enable(dev, val) do{}while(0) ++#endif ++#ifndef device_init_wakeup ++#define device_init_wakeup(dev,val) do {} while (0) ++#endif ++#endif /* < 2.6.15 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) ) ++#undef DEFINE_MUTEX ++#define DEFINE_MUTEX(x) DECLARE_MUTEX(x) ++#define mutex_lock(x) down_interruptible(x) ++#define mutex_unlock(x) up(x) ++ ++#ifndef ____cacheline_internodealigned_in_smp ++#ifdef CONFIG_SMP ++#define ____cacheline_internodealigned_in_smp ____cacheline_aligned_in_smp ++#else ++#define ____cacheline_internodealigned_in_smp ++#endif /* CONFIG_SMP */ ++#endif /* ____cacheline_internodealigned_in_smp */ ++#undef HAVE_PCI_ERS ++#else /* 2.6.16 and above */ ++#undef HAVE_PCI_ERS ++#define HAVE_PCI_ERS ++#endif /* < 2.6.16 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) ) ++#ifndef first_online_node ++#define first_online_node 0 ++#endif ++#ifndef NET_SKB_PAD ++#define NET_SKB_PAD 16 ++#endif ++#endif /* < 2.6.17 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) ) ++ ++#ifndef IRQ_HANDLED ++#define irqreturn_t void ++#define IRQ_HANDLED ++#define IRQ_NONE ++#endif ++ ++#ifndef IRQF_PROBE_SHARED ++#ifdef SA_PROBEIRQ ++#define IRQF_PROBE_SHARED SA_PROBEIRQ ++#else ++#define IRQF_PROBE_SHARED 0 ++#endif ++#endif ++ ++#ifndef IRQF_SHARED ++#define IRQF_SHARED SA_SHIRQ ++#endif ++ ++#ifndef ARRAY_SIZE ++#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) ++#endif ++ ++#ifndef FIELD_SIZEOF ++#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) ++#endif ++ ++#ifndef skb_is_gso ++#ifdef NETIF_F_TSO ++#define skb_is_gso _kc_skb_is_gso ++static inline int _kc_skb_is_gso(const struct sk_buff *skb) ++{ ++ return skb_shinfo(skb)->gso_size; ++} ++#else ++#define skb_is_gso(a) 0 ++#endif ++#endif ++ ++#ifndef resource_size_t ++#define resource_size_t unsigned long ++#endif ++ ++#endif /* < 2.6.18 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) ) ++ ++#ifndef DIV_ROUND_UP ++#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) ++#endif ++#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) ) ++#if (!(( RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,4) ) && ( RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0) ) || ( RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,0) ) || (AX_RELEASE_CODE > AX_RELEASE_VERSION(3,0)))) ++typedef irqreturn_t (*irq_handler_t)(int, void*, struct pt_regs *); ++#endif ++#if (RHEL_RELEASE_CODE 
< RHEL_RELEASE_VERSION(6,0)) ++#undef CONFIG_INET_LRO ++#undef CONFIG_INET_LRO_MODULE ++#undef CONFIG_FCOE ++#undef CONFIG_FCOE_MODULE ++#endif ++typedef irqreturn_t (*new_handler_t)(int, void*); ++static inline irqreturn_t _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id) ++#else /* 2.4.x */ ++typedef void (*irq_handler_t)(int, void*, struct pt_regs *); ++typedef void (*new_handler_t)(int, void*); ++static inline int _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id) ++#endif /* >= 2.5.x */ ++{ ++ irq_handler_t new_handler = (irq_handler_t) handler; ++ return request_irq(irq, new_handler, flags, devname, dev_id); ++} ++ ++#undef request_irq ++#define request_irq(irq, handler, flags, devname, dev_id) _kc_request_irq((irq), (handler), (flags), (devname), (dev_id)) ++ ++#define irq_handler_t new_handler_t ++/* pci_restore_state and pci_save_state handles MSI/PCIE from 2.6.19 */ ++#define PCIE_CONFIG_SPACE_LEN 256 ++#define PCI_CONFIG_SPACE_LEN 64 ++#define PCIE_LINK_STATUS 0x12 ++#define pci_config_space_ich8lan() do {} while(0) ++#undef pci_save_state ++extern int _kc_pci_save_state(struct pci_dev *); ++#define pci_save_state(pdev) _kc_pci_save_state(pdev) ++#undef pci_restore_state ++extern void _kc_pci_restore_state(struct pci_dev *); ++#define pci_restore_state(pdev) _kc_pci_restore_state(pdev) ++#ifdef HAVE_PCI_ERS ++#undef free_netdev ++extern void _kc_free_netdev(struct net_device *); ++#define free_netdev(netdev) _kc_free_netdev(netdev) ++#endif ++static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev) ++{ ++ return 0; ++} ++#define pci_disable_pcie_error_reporting(dev) do {} while (0) ++#define pci_cleanup_aer_uncorrect_error_status(dev) do {} while (0) ++#else /* 2.6.19 */ ++#include ++#endif /* < 2.6.19 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ) ++#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,28) ) ++#undef INIT_WORK ++#define INIT_WORK(_work, _func) \ ++do { \ ++ INIT_LIST_HEAD(&(_work)->entry); \ ++ (_work)->pending = 0; \ ++ (_work)->func = (void (*)(void *))_func; \ ++ (_work)->data = _work; \ ++ init_timer(&(_work)->timer); \ ++} while (0) ++#endif ++ ++#ifndef PCI_VDEVICE ++#define PCI_VDEVICE(ven, dev) \ ++ PCI_VENDOR_ID_##ven, (dev), \ ++ PCI_ANY_ID, PCI_ANY_ID, 0, 0 ++#endif ++ ++#ifndef round_jiffies ++#define round_jiffies(x) x ++#endif ++ ++#define csum_offset csum ++ ++#define HAVE_EARLY_VMALLOC_NODE ++#define dev_to_node(dev) -1 ++#undef set_dev_node ++/* remove compiler warning with b=b, for unused variable */ ++#define set_dev_node(a, b) do { (b) = (b); } while(0) ++#else /* < 2.6.20 */ ++#define HAVE_DEVICE_NUMA_NODE ++#endif /* < 2.6.20 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) ) ++#define to_net_dev(class) container_of(class, struct net_device, class_dev) ++#define NETDEV_CLASS_DEV ++#define vlan_group_get_device(vg, id) (vg->vlan_devices[id]) ++#define vlan_group_set_device(vg, id, dev) if (vg) vg->vlan_devices[id] = dev; ++#define pci_channel_offline(pdev) (pdev->error_state && \ ++ pdev->error_state != pci_channel_io_normal) ++#define pci_request_selected_regions(pdev, bars, name) \ ++ pci_request_regions(pdev, name) ++#define pci_release_selected_regions(pdev, bars) pci_release_regions(pdev); ++#endif /* < 2.6.21 */ ++ 
++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) ) ++#define tcp_hdr(skb) (skb->h.th) ++#define tcp_hdrlen(skb) (skb->h.th->doff << 2) ++#define skb_transport_offset(skb) (skb->h.raw - skb->data) ++#define skb_transport_header(skb) (skb->h.raw) ++#define ipv6_hdr(skb) (skb->nh.ipv6h) ++#define ip_hdr(skb) (skb->nh.iph) ++#define skb_network_offset(skb) (skb->nh.raw - skb->data) ++#define skb_network_header(skb) (skb->nh.raw) ++#define skb_tail_pointer(skb) skb->tail ++#define skb_reset_tail_pointer(skb) \ ++ do { \ ++ skb->tail = skb->data; \ ++ } while (0) ++#define skb_copy_to_linear_data_offset(skb, offset, from, len) \ ++ memcpy(skb->data + offset, from, len) ++#define skb_network_header_len(skb) (skb->h.raw - skb->nh.raw) ++#define pci_register_driver pci_module_init ++#define skb_mac_header(skb) skb->mac.raw ++ ++#ifdef NETIF_F_MULTI_QUEUE ++#ifndef alloc_etherdev_mq ++#define alloc_etherdev_mq(_a, _b) alloc_etherdev(_a) ++#endif ++#endif /* NETIF_F_MULTI_QUEUE */ ++ ++#ifndef ETH_FCS_LEN ++#define ETH_FCS_LEN 4 ++#endif ++#define cancel_work_sync(x) flush_scheduled_work() ++#ifndef udp_hdr ++#define udp_hdr _udp_hdr ++static inline struct udphdr *_udp_hdr(const struct sk_buff *skb) ++{ ++ return (struct udphdr *)skb_transport_header(skb); ++} ++#endif ++ ++#ifdef cpu_to_be16 ++#undef cpu_to_be16 ++#endif ++#define cpu_to_be16(x) __constant_htons(x) ++ ++#if (!(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1))) ++enum { ++ DUMP_PREFIX_NONE, ++ DUMP_PREFIX_ADDRESS, ++ DUMP_PREFIX_OFFSET ++}; ++#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1)) */ ++#ifndef hex_asc ++#define hex_asc(x) "0123456789abcdef"[x] ++#endif ++#include ++extern void _kc_print_hex_dump(const char *level, const char *prefix_str, ++ int prefix_type, int rowsize, int groupsize, ++ const void *buf, size_t len, bool ascii); ++#define print_hex_dump(lvl, s, t, r, g, b, l, a) \ ++ _kc_print_hex_dump(lvl, s, t, r, g, b, l, a) ++#else /* 2.6.22 */ ++#define ETH_TYPE_TRANS_SETS_DEV ++#define HAVE_NETDEV_STATS_IN_NETDEV ++#endif /* < 2.6.22 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22) ) ++#undef ETHTOOL_GPERMADDR ++#endif /* > 2.6.22 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) ) ++#define netif_subqueue_stopped(_a, _b) 0 ++#ifndef PTR_ALIGN ++#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a))) ++#endif ++ ++#ifndef CONFIG_PM_SLEEP ++#define CONFIG_PM_SLEEP CONFIG_PM ++#endif ++#endif /* < 2.6.23 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) ) ++/* if GRO is supported then the napi struct must already exist */ ++#ifndef NETIF_F_GRO ++/* NAPI API changes in 2.6.24 break everything */ ++struct napi_struct { ++ /* used to look up the real NAPI polling routine */ ++ int (*poll)(struct napi_struct *, int); ++ struct net_device *dev; ++ int weight; ++}; ++#endif ++ ++#ifdef NAPI ++extern int __kc_adapter_clean(struct net_device *, int *); ++extern struct net_device *napi_to_poll_dev(struct napi_struct *napi); ++#define netif_napi_add(_netdev, _napi, _poll, _weight) \ ++ do { \ ++ struct napi_struct *__napi = (_napi); \ ++ struct net_device *poll_dev = napi_to_poll_dev(__napi); \ ++ poll_dev->poll = &(__kc_adapter_clean); \ ++ poll_dev->priv = 
(_napi); \ ++ poll_dev->weight = (_weight); \ ++ set_bit(__LINK_STATE_RX_SCHED, &poll_dev->state); \ ++ set_bit(__LINK_STATE_START, &poll_dev->state);\ ++ dev_hold(poll_dev); \ ++ __napi->poll = &(_poll); \ ++ __napi->weight = (_weight); \ ++ __napi->dev = (_netdev); \ ++ } while (0) ++#define netif_napi_del(_napi) \ ++ do { \ ++ struct net_device *poll_dev = napi_to_poll_dev(_napi); \ ++ WARN_ON(!test_bit(__LINK_STATE_RX_SCHED, &poll_dev->state)); \ ++ dev_put(poll_dev); \ ++ memset(poll_dev, 0, sizeof(struct net_device));\ ++ } while (0) ++#define napi_schedule_prep(_napi) \ ++ (netif_running((_napi)->dev) && netif_rx_schedule_prep(napi_to_poll_dev(_napi))) ++#define napi_schedule(_napi) \ ++ do { \ ++ if (napi_schedule_prep(_napi)) \ ++ __netif_rx_schedule(napi_to_poll_dev(_napi)); \ ++ } while (0) ++#define napi_enable(_napi) netif_poll_enable(napi_to_poll_dev(_napi)) ++#define napi_disable(_napi) netif_poll_disable(napi_to_poll_dev(_napi)) ++#define __napi_schedule(_napi) __netif_rx_schedule(napi_to_poll_dev(_napi)) ++#ifndef NETIF_F_GRO ++#define napi_complete(_napi) netif_rx_complete(napi_to_poll_dev(_napi)) ++#else ++#define napi_complete(_napi) \ ++ do { \ ++ napi_gro_flush(_napi); \ ++ netif_rx_complete(napi_to_poll_dev(_napi)); \ ++ } while (0) ++#endif /* NETIF_F_GRO */ ++#else /* NAPI */ ++#define netif_napi_add(_netdev, _napi, _poll, _weight) \ ++ do { \ ++ struct napi_struct *__napi = _napi; \ ++ _netdev->poll = &(_poll); \ ++ _netdev->weight = (_weight); \ ++ __napi->poll = &(_poll); \ ++ __napi->weight = (_weight); \ ++ __napi->dev = (_netdev); \ ++ } while (0) ++#define netif_napi_del(_a) do {} while (0) ++#endif /* NAPI */ ++ ++#undef dev_get_by_name ++#define dev_get_by_name(_a, _b) dev_get_by_name(_b) ++#define __netif_subqueue_stopped(_a, _b) netif_subqueue_stopped(_a, _b) ++#ifndef DMA_BIT_MASK ++#define DMA_BIT_MASK(n) (((n) == 64) ? 
DMA_64BIT_MASK : ((1ULL<<(n))-1)) ++#endif ++ ++#ifdef NETIF_F_TSO6 ++#define skb_is_gso_v6 _kc_skb_is_gso_v6 ++static inline int _kc_skb_is_gso_v6(const struct sk_buff *skb) ++{ ++ return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6; ++} ++#endif /* NETIF_F_TSO6 */ ++ ++#ifndef KERN_CONT ++#define KERN_CONT "" ++#endif ++#else /* < 2.6.24 */ ++#define HAVE_ETHTOOL_GET_SSET_COUNT ++#define HAVE_NETDEV_NAPI_LIST ++#endif /* < 2.6.24 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,24) ) ++#include ++#endif /* > 2.6.24 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) ) ++#define PM_QOS_CPU_DMA_LATENCY 1 ++ ++#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) ) ++#include ++#define PM_QOS_DEFAULT_VALUE INFINITE_LATENCY ++#define pm_qos_add_requirement(pm_qos_class, name, value) \ ++ set_acceptable_latency(name, value) ++#define pm_qos_remove_requirement(pm_qos_class, name) \ ++ remove_acceptable_latency(name) ++#define pm_qos_update_requirement(pm_qos_class, name, value) \ ++ modify_acceptable_latency(name, value) ++#else ++#define PM_QOS_DEFAULT_VALUE -1 ++#define pm_qos_add_requirement(pm_qos_class, name, value) ++#define pm_qos_remove_requirement(pm_qos_class, name) ++#define pm_qos_update_requirement(pm_qos_class, name, value) { \ ++ if (value != PM_QOS_DEFAULT_VALUE) { \ ++ printk(KERN_WARNING "%s: unable to set PM QoS requirement\n", \ ++ pci_name(adapter->pdev)); \ ++ } \ ++} ++#endif /* > 2.6.18 */ ++ ++#define pci_enable_device_mem(pdev) pci_enable_device(pdev) ++ ++#ifndef DEFINE_PCI_DEVICE_TABLE ++#define DEFINE_PCI_DEVICE_TABLE(_table) struct pci_device_id _table[] ++#endif /* DEFINE_PCI_DEVICE_TABLE */ ++ ++#endif /* < 2.6.25 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) ) ++#ifdef NETIF_F_TSO ++#ifdef NETIF_F_TSO6 ++#define netif_set_gso_max_size(_netdev, size) \ ++ do { \ ++ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { \ ++ _netdev->features &= ~NETIF_F_TSO; \ ++ _netdev->features &= ~NETIF_F_TSO6; \ ++ } else { \ ++ _netdev->features |= NETIF_F_TSO; \ ++ _netdev->features |= NETIF_F_TSO6; \ ++ } \ ++ } while (0) ++#else /* NETIF_F_TSO6 */ ++#define netif_set_gso_max_size(_netdev, size) \ ++ do { \ ++ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) \ ++ _netdev->features &= ~NETIF_F_TSO; \ ++ else \ ++ _netdev->features |= NETIF_F_TSO; \ ++ } while (0) ++#endif /* NETIF_F_TSO6 */ ++#else ++#define netif_set_gso_max_size(_netdev, size) do {} while (0) ++#endif /* NETIF_F_TSO */ ++#undef kzalloc_node ++#define kzalloc_node(_size, _flags, _node) kzalloc(_size, _flags) ++ ++extern void _kc_pci_disable_link_state(struct pci_dev *dev, int state); ++#define pci_disable_link_state(p, s) _kc_pci_disable_link_state(p, s) ++#else /* < 2.6.26 */ ++#include ++#define HAVE_NETDEV_VLAN_FEATURES ++#endif /* < 2.6.26 */ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) ) ++#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15) ) ++#if (((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) && defined(CONFIG_PM)) || ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)) && defined(CONFIG_PM_SLEEP))) ++#undef device_set_wakeup_enable ++#define device_set_wakeup_enable(dev, val) \ ++ do { \ ++ u16 pmc = 0; \ ++ int pm = pci_find_capability(adapter->pdev, PCI_CAP_ID_PM); \ 
++ if (pm) { \ ++ pci_read_config_word(adapter->pdev, pm + PCI_PM_PMC, \ ++ &pmc); \ ++ } \ ++ (dev)->power.can_wakeup = !!(pmc >> 11); \ ++ (dev)->power.should_wakeup = (val && (pmc >> 11)); \ ++ } while (0) ++#endif /* 2.6.15-2.6.22 and CONFIG_PM or 2.6.23-2.6.25 and CONFIG_PM_SLEEP */ ++#endif /* 2.6.15 through 2.6.27 */ ++#ifndef netif_napi_del ++#define netif_napi_del(_a) do {} while (0) ++#ifdef NAPI ++#ifdef CONFIG_NETPOLL ++#undef netif_napi_del ++#define netif_napi_del(_a) list_del(&(_a)->dev_list); ++#endif ++#endif ++#endif /* netif_napi_del */ ++#ifdef dma_mapping_error ++#undef dma_mapping_error ++#endif ++#define dma_mapping_error(dev, dma_addr) pci_dma_mapping_error(dma_addr) ++ ++#ifdef CONFIG_NETDEVICES_MULTIQUEUE ++#define HAVE_TX_MQ ++#endif ++ ++#ifdef HAVE_TX_MQ ++extern void _kc_netif_tx_stop_all_queues(struct net_device *); ++extern void _kc_netif_tx_wake_all_queues(struct net_device *); ++extern void _kc_netif_tx_start_all_queues(struct net_device *); ++#define netif_tx_stop_all_queues(a) _kc_netif_tx_stop_all_queues(a) ++#define netif_tx_wake_all_queues(a) _kc_netif_tx_wake_all_queues(a) ++#define netif_tx_start_all_queues(a) _kc_netif_tx_start_all_queues(a) ++#undef netif_stop_subqueue ++#define netif_stop_subqueue(_ndev,_qi) do { \ ++ if (netif_is_multiqueue((_ndev))) \ ++ netif_stop_subqueue((_ndev), (_qi)); \ ++ else \ ++ netif_stop_queue((_ndev)); \ ++ } while (0) ++#undef netif_start_subqueue ++#define netif_start_subqueue(_ndev,_qi) do { \ ++ if (netif_is_multiqueue((_ndev))) \ ++ netif_start_subqueue((_ndev), (_qi)); \ ++ else \ ++ netif_start_queue((_ndev)); \ ++ } while (0) ++#else /* HAVE_TX_MQ */ ++#define netif_tx_stop_all_queues(a) netif_stop_queue(a) ++#define netif_tx_wake_all_queues(a) netif_wake_queue(a) ++#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) ) ++#define netif_tx_start_all_queues(a) netif_start_queue(a) ++#else ++#define netif_tx_start_all_queues(a) do {} while (0) ++#endif ++#define netif_stop_subqueue(_ndev,_qi) netif_stop_queue((_ndev)) ++#define netif_start_subqueue(_ndev,_qi) netif_start_queue((_ndev)) ++#endif /* HAVE_TX_MQ */ ++#ifndef NETIF_F_MULTI_QUEUE ++#define NETIF_F_MULTI_QUEUE 0 ++#define netif_is_multiqueue(a) 0 ++#define netif_wake_subqueue(a, b) ++#endif /* NETIF_F_MULTI_QUEUE */ ++#else /* < 2.6.27 */ ++#define HAVE_TX_MQ ++#define HAVE_NETDEV_SELECT_QUEUE ++#endif /* < 2.6.27 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ) ++#define pci_ioremap_bar(pdev, bar) ioremap(pci_resource_start(pdev, bar), \ ++ pci_resource_len(pdev, bar)) ++#define pci_wake_from_d3 _kc_pci_wake_from_d3 ++#define pci_prepare_to_sleep _kc_pci_prepare_to_sleep ++extern int _kc_pci_wake_from_d3(struct pci_dev *dev, bool enable); ++extern int _kc_pci_prepare_to_sleep(struct pci_dev *dev); ++#define netdev_alloc_page(a) alloc_page(GFP_ATOMIC) ++#endif /* < 2.6.28 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) ) ++#define pci_request_selected_regions_exclusive(pdev, bars, name) \ ++ pci_request_selected_regions(pdev, bars, name) ++#ifndef CONFIG_NR_CPUS ++#define CONFIG_NR_CPUS 1 ++#endif /* CONFIG_NR_CPUS */ ++#ifndef pcie_aspm_enabled ++#define pcie_aspm_enabled() (1) ++#endif /* pcie_aspm_enabled */ ++#else /* < 2.6.29 */ ++#ifdef CONFIG_DCB ++#define HAVE_PFC_MODE_ENABLE ++#endif /* CONFIG_DCB */ ++#endif /* < 2.6.29 */ ++ 
++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) ) ++#undef CONFIG_FCOE ++#undef CONFIG_FCOE_MODULE ++extern u16 _kc_skb_tx_hash(struct net_device *dev, struct sk_buff *skb); ++#define skb_tx_hash(n, s) _kc_skb_tx_hash(n, s) ++#define skb_record_rx_queue(a, b) do {} while (0) ++#ifndef CONFIG_PCI_IOV ++#undef pci_enable_sriov ++#define pci_enable_sriov(a, b) -ENOTSUPP ++#undef pci_disable_sriov ++#define pci_disable_sriov(a) do {} while (0) ++#endif /* CONFIG_PCI_IOV */ ++#else ++#define HAVE_ASPM_QUIRKS ++#endif /* < 2.6.30 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31) ) ++#define ETH_P_1588 0x88F7 ++#define ETH_P_FIP 0x8914 ++#ifndef netdev_uc_count ++#define netdev_uc_count(dev) ((dev)->uc_count) ++#endif ++#ifndef netdev_for_each_uc_addr ++#define netdev_for_each_uc_addr(uclist, dev) \ ++ for (uclist = dev->uc_list; uclist; uclist = uclist->next) ++#endif ++#else ++#ifndef HAVE_NETDEV_STORAGE_ADDRESS ++#define HAVE_NETDEV_STORAGE_ADDRESS ++#endif ++#ifndef HAVE_NETDEV_HW_ADDR ++#define HAVE_NETDEV_HW_ADDR ++#endif ++#ifndef HAVE_TRANS_START_IN_QUEUE ++#define HAVE_TRANS_START_IN_QUEUE ++#endif ++#endif /* < 2.6.31 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32) ) ++#undef netdev_tx_t ++#define netdev_tx_t int ++#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) ++#ifndef NETIF_F_FCOE_MTU ++#define NETIF_F_FCOE_MTU (1 << 26) ++#endif ++#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ ++ ++#ifndef pm_runtime_get_sync ++#define pm_runtime_get_sync(dev) do {} while (0) ++#endif ++#ifndef pm_runtime_put ++#define pm_runtime_put(dev) do {} while (0) ++#endif ++#ifndef pm_runtime_put_sync ++#define pm_runtime_put_sync(dev) do {} while (0) ++#endif ++#ifndef pm_runtime_resume ++#define pm_runtime_resume(dev) do {} while (0) ++#endif ++#ifndef pm_schedule_suspend ++#define pm_schedule_suspend(dev, t) do {} while (0) ++#endif ++#ifndef pm_runtime_set_suspended ++#define pm_runtime_set_suspended(dev) do {} while (0) ++#endif ++#ifndef pm_runtime_disable ++#define pm_runtime_disable(dev) do {} while (0) ++#endif ++#ifndef pm_runtime_put_noidle ++#define pm_runtime_put_noidle(dev) do {} while (0) ++#endif ++#ifndef pm_runtime_set_active ++#define pm_runtime_set_active(dev) do {} while (0) ++#endif ++#ifndef pm_runtime_enable ++#define pm_runtime_enable(dev) do {} while (0) ++#endif ++#ifndef pm_runtime_get_noresume ++#define pm_runtime_get_noresume(dev) do {} while (0) ++#endif ++#else /* < 2.6.32 */ ++#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) ++#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE ++#define HAVE_NETDEV_OPS_FCOE_ENABLE ++#endif ++#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ ++#ifdef CONFIG_DCB ++#ifndef HAVE_DCBNL_OPS_GETAPP ++#define HAVE_DCBNL_OPS_GETAPP ++#endif ++#endif /* CONFIG_DCB */ ++#include ++#endif /* < 2.6.32 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) ) ++#ifndef netdev_alloc_skb_ip_align ++extern struct sk_buff *_kc_netdev_alloc_skb_ip_align(struct net_device *dev, ++ unsigned int length); ++#define netdev_alloc_skb_ip_align(n, l) _kc_netdev_alloc_skb_ip_align(n, l) ++#endif ++#ifndef pci_pcie_cap ++#define pci_pcie_cap(pdev) pci_find_capability(pdev, PCI_CAP_ID_EXP) ++#endif ++#else /* < 2.6.33 */ ++#if 
defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) ++#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN ++#define HAVE_NETDEV_OPS_FCOE_GETWWN ++#endif ++#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ ++#define HAVE_ETHTOOL_SFP_DISPLAY_PORT ++#endif /* < 2.6.33 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) ) ++#ifndef netdev_mc_count ++#define netdev_mc_count(dev) ((dev)->mc_count) ++#endif ++#ifndef netdev_mc_empty ++#define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0) ++#endif ++#ifndef netdev_for_each_mc_addr ++#define netdev_for_each_mc_addr(mclist, dev) \ ++ for (mclist = dev->mc_list; mclist; mclist = mclist->next) ++#endif ++#ifndef netdev_uc_count ++#define netdev_uc_count(dev) ((dev)->uc.count) ++#endif ++#ifndef netdev_uc_empty ++#define netdev_uc_empty(dev) (netdev_uc_count(dev) == 0) ++#endif ++#ifndef netdev_for_each_uc_addr ++#define netdev_for_each_uc_addr(ha, dev) \ ++ list_for_each_entry(ha, &dev->uc.list, list) ++#endif ++#ifndef dma_set_coherent_mask ++#define dma_set_coherent_mask(dev,mask) \ ++ pci_set_consistent_dma_mask(to_pci_dev(dev),(mask)) ++#endif ++#ifndef pci_dev_run_wake ++#define pci_dev_run_wake(pdev) (0) ++#endif ++ ++/* netdev logging taken from include/linux/netdevice.h */ ++#ifndef netdev_name ++static inline const char *_kc_netdev_name(const struct net_device *dev) ++{ ++ if (dev->reg_state != NETREG_REGISTERED) ++ return "(unregistered net_device)"; ++ return dev->name; ++} ++#define netdev_name(netdev) _kc_netdev_name(netdev) ++#endif /* netdev_name */ ++ ++#undef netdev_printk ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) ++#define netdev_printk(level, netdev, format, args...) \ ++do { \ ++ struct adapter_struct *kc_adapter = netdev_priv(netdev);\ ++ struct pci_dev *pdev = kc_adapter->pdev; \ ++ printk("%s %s: " format, level, pci_name(pdev), \ ++ ##args); \ ++} while(0) ++#elif ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) ) ++#define netdev_printk(level, netdev, format, args...) \ ++do { \ ++ struct adapter_struct *kc_adapter = netdev_priv(netdev);\ ++ struct pci_dev *pdev = kc_adapter->pdev; \ ++ struct device *dev = pci_dev_to_dev(pdev); \ ++ dev_printk(level, dev->parent, "%s: " format, \ ++ netdev_name(netdev), ##args); \ ++} while(0) ++#else /* 2.6.21 => 2.6.34 */ ++#define netdev_printk(level, netdev, format, args...) \ ++ dev_printk(level, (netdev)->dev.parent, \ ++ "%s: " format, \ ++ netdev_name(netdev), ##args) ++#endif /* <2.6.0 <2.6.21 <2.6.34 */ ++#undef netdev_emerg ++#define netdev_emerg(dev, format, args...) \ ++ netdev_printk(KERN_EMERG, dev, format, ##args) ++#undef netdev_alert ++#define netdev_alert(dev, format, args...) \ ++ netdev_printk(KERN_ALERT, dev, format, ##args) ++#undef netdev_crit ++#define netdev_crit(dev, format, args...) \ ++ netdev_printk(KERN_CRIT, dev, format, ##args) ++#undef netdev_err ++#define netdev_err(dev, format, args...) \ ++ netdev_printk(KERN_ERR, dev, format, ##args) ++#undef netdev_warn ++#define netdev_warn(dev, format, args...) \ ++ netdev_printk(KERN_WARNING, dev, format, ##args) ++#undef netdev_notice ++#define netdev_notice(dev, format, args...) \ ++ netdev_printk(KERN_NOTICE, dev, format, ##args) ++#undef netdev_info ++#define netdev_info(dev, format, args...) \ ++ netdev_printk(KERN_INFO, dev, format, ##args) ++#undef netdev_dbg ++#if defined(DEBUG) ++#define netdev_dbg(__dev, format, args...) 
\ ++ netdev_printk(KERN_DEBUG, __dev, format, ##args) ++#elif defined(CONFIG_DYNAMIC_DEBUG) ++#define netdev_dbg(__dev, format, args...) \ ++do { \ ++ dynamic_dev_dbg((__dev)->dev.parent, "%s: " format, \ ++ netdev_name(__dev), ##args); \ ++} while (0) ++#else /* DEBUG */ ++#define netdev_dbg(__dev, format, args...) \ ++({ \ ++ if (0) \ ++ netdev_printk(KERN_DEBUG, __dev, format, ##args); \ ++ 0; \ ++}) ++#endif /* DEBUG */ ++ ++#if !defined(CONFIG_PM_OPS) && defined(CONFIG_PM_SLEEP) ++#define CONFIG_PM_OPS ++#endif ++#ifdef SET_SYSTEM_SLEEP_PM_OPS ++#define HAVE_SYSTEM_SLEEP_PM_OPS ++#endif ++#else /* < 2.6.34 */ ++#define HAVE_SYSTEM_SLEEP_PM_OPS ++#ifndef HAVE_SET_RX_MODE ++#define HAVE_SET_RX_MODE ++#endif ++#define HAVE_IPLINK_VF_CONFIG ++#endif /* < 2.6.34 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) ) ++#else /* < 2.6.35 */ ++#define HAVE_PM_QOS_REQUEST_LIST ++#endif /* < 2.6.35 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) ) ++extern int _kc_ethtool_op_set_flags(struct net_device *, u32, u32); ++#define ethtool_op_set_flags _kc_ethtool_op_set_flags ++#else /* < 2.6.36 */ ++#define HAVE_PM_QOS_REQUEST_ACTIVE ++#endif /* < 2.6.36 */ ++#endif /* _KCOMPAT_H_ */ +diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/Makefile linux-2.6.22-50/drivers/net/ixgbe/Makefile +--- linux-2.6.22-40/drivers/net/ixgbe/Makefile 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.22-50/drivers/net/ixgbe/Makefile 2010-09-18 07:50:14.000000000 -0400 +@@ -0,0 +1,12 @@ ++obj-$(CONFIG_IXGBE) += ixgbe.o ++ ++ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_api.o ixgbe_param.o \ ++ ixgbe_ethtool.o kcompat.o ixgbe_82598.o ixgbe_82599.o \ ++ ixgbe_sriov.o ixgbe_mbx.o ixgbe_dcb.o ixgbe_dcb_82598.o \ ++ ixgbe_dcb_82599.o ixgbe_phy.o ++ifeq ($(CONFIG_FCOE),y) ++ixgbe-objs += ixgbe_fcoe.o ++endif ++ifeq ($(CONFIG_FCOE),m) ++ixgbe-objs += ixgbe_fcoe.o ++endif +diff -Nurp linux-2.6.22-40/drivers/net/ixgbe/Module.supported linux-2.6.22-50/drivers/net/ixgbe/Module.supported +--- linux-2.6.22-40/drivers/net/ixgbe/Module.supported 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.22-50/drivers/net/ixgbe/Module.supported 2010-08-25 17:56:31.000000000 -0400 +@@ -0,0 +1 @@ ++ixgbe.ko external +diff -Nurp linux-2.6.22-40/drivers/net/Kconfig linux-2.6.22-50/drivers/net/Kconfig +--- linux-2.6.22-40/drivers/net/Kconfig 2010-09-18 07:37:31.000000000 -0400 ++++ linux-2.6.22-50/drivers/net/Kconfig 2010-09-18 07:52:50.000000000 -0400 +@@ -2490,6 +2490,25 @@ config IXGB_NAPI + + If in doubt, say N. + ++config IXGBE ++ tristate "Intel(R) PRO/10GbE support" ++ depends on PCI ++ ---help--- ++ This driver supports Intel(R) PRO/10GbE family of ++ adapters. For more information on how to identify your adapter, go ++ to the Adapter & Driver ID Guide at: ++ ++ ++ ++ For general information and support, go to the Intel support ++ website at: ++ ++ ++ ++ To compile this driver as a module, choose M here and read ++ . The module ++ will be called ixgbe. 
++ + config S2IO + tristate "S2IO 10Gbe XFrame NIC" + depends on PCI +diff -Nurp linux-2.6.22-40/drivers/net/Makefile linux-2.6.22-50/drivers/net/Makefile +--- linux-2.6.22-40/drivers/net/Makefile 2010-09-18 07:37:31.000000000 -0400 ++++ linux-2.6.22-50/drivers/net/Makefile 2010-09-18 07:39:51.000000000 -0400 +@@ -6,6 +6,7 @@ obj-$(CONFIG_E1000) += e1000/ + obj-$(CONFIG_E1000E) += e1000e/ + obj-$(CONFIG_IBM_EMAC) += ibm_emac/ + obj-$(CONFIG_IXGB) += ixgb/ ++obj-$(CONFIG_IXGBE) += ixgbe/ + obj-$(CONFIG_CHELSIO_T1) += chelsio/ + obj-$(CONFIG_CHELSIO_T3) += cxgb3/ + obj-$(CONFIG_EHEA) += ehea/ -- 2.43.0
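For reference, the request_irq() shim in kcompat.h above is what lets the driver keep a single interrupt handler written for the two-argument signature introduced in 2.6.19: on older kernels the macro redirects request_irq() to _kc_request_irq(), which casts the handler back to the kernel's native handler type before calling the real request_irq(). A minimal sketch of driver-side usage under that shim follows; the handler and helper names are illustrative only and do not appear in the patch, and IRQF_SHARED is assumed to be available (natively or via an earlier kcompat fallback).

#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include "kcompat.h"	/* provides the request_irq() redirect on pre-2.6.19 kernels */

/* Two-argument handler, as used on 2.6.19 and later; with kcompat.h the
 * same prototype also builds on earlier kernels. */
static irqreturn_t mydev_intr(int irq, void *data)
{
	/* 'data' is whatever was passed to request_irq(), typically the
	 * net_device or adapter structure. */
	/* ... read the interrupt cause, then schedule the NAPI poll ... */
	return IRQ_HANDLED;
}

static int mydev_request_irq(struct net_device *netdev, unsigned int irq)
{
	/* On pre-2.6.19 kernels this expands to _kc_request_irq(), which
	 * casts mydev_intr to the old three-argument handler type. */
	return request_irq(irq, mydev_intr, IRQF_SHARED, netdev->name, netdev);
}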