--- /dev/null
+Index: linux-2.6.22/drivers/net/Kconfig
+===================================================================
+--- linux-2.6.22.orig/drivers/net/Kconfig 2009-12-18 12:37:55.000000000 -0500
++++ linux-2.6.22/drivers/net/Kconfig 2009-12-18 12:39:22.000000000 -0500
+@@ -2016,6 +2016,28 @@
+ <file:Documentation/networking/net-modules.txt>. The module
+ will be called e1000e.
+
++config IGB
++ tristate "Intel(R) 82575 Gigabit Ethernet support"
++ depends on PCI
++ ---help---
++ This driver supports Intel(R) 82575 gigabit ethernet adapters.
++ For more information on how to identify your adapter, go to the
++ Adapter & Driver ID Guide at:
++
++ <http://support.intel.com/support/network/adapter/pro100/21397.htm>
++
++ For general information and support, go to the Intel support
++ website at:
++
++ <http://support.intel.com>
++
++ More specific information on configuring the driver is in
++ <file:Documentation/networking/igb.txt>.
++
++ To compile this driver as a module, choose M here and read
++ <file:Documentation/networking/net-modules.txt>. The module
++ will be called igb.
++
+ source "drivers/net/ixp2000/Kconfig"
+
+ config MYRI_SBUS
+Index: linux-2.6.22/drivers/net/Makefile
+===================================================================
+--- linux-2.6.22.orig/drivers/net/Makefile 2009-12-18 12:38:07.000000000 -0500
++++ linux-2.6.22/drivers/net/Makefile 2009-12-18 12:39:22.000000000 -0500
+@@ -12,6 +12,7 @@
+ obj-$(CONFIG_BONDING) += bonding/
+ obj-$(CONFIG_ATL1) += atl1/
+ obj-$(CONFIG_GIANFAR) += gianfar_driver.o
++obj-$(CONFIG_IGB) += igb/
+
+ gianfar_driver-objs := gianfar.o \
+ gianfar_ethtool.o \
+Index: linux-2.6.22/drivers/net/igb/Makefile
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.22/drivers/net/igb/Makefile 2009-12-18 12:39:22.000000000 -0500
+@@ -0,0 +1,37 @@
++################################################################################
++#
++# Intel 82575 PCI-Express Ethernet Linux driver
++# Copyright(c) 1999 - 2009 Intel Corporation.
++#
++# This program is free software; you can redistribute it and/or modify it
++# under the terms and conditions of the GNU General Public License,
++# version 2, as published by the Free Software Foundation.
++#
++# This program is distributed in the hope it will be useful, but WITHOUT
++# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++# more details.
++#
++# You should have received a copy of the GNU General Public License along with
++# this program; if not, write to the Free Software Foundation, Inc.,
++# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++#
++# The full GNU General Public License is included in this distribution in
++# the file called "COPYING".
++#
++# Contact Information:
++# Linux NICS <linux.nics@intel.com>
++# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
++# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++#
++################################################################################
++
++#
++# Makefile for the Intel(R) 82575 PCI-Express ethernet driver
++#
++
++obj-$(CONFIG_IGB) += igb.o
++
++igb-objs := igb_main.o igb_ethtool.o igb_param.o kcompat.o e1000_api.o e1000_manage.o e1000_82575.o \
++ e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o
++
+Index: linux-2.6.22/drivers/net/igb/e1000_82575.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.22/drivers/net/igb/e1000_82575.c 2009-12-18 12:39:22.000000000 -0500
+@@ -0,0 +1,1580 @@
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2009 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ You should have received a copy of the GNU General Public License along with
++ this program; if not, write to the Free Software Foundation, Inc.,
++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
++
++/*
++ * 82575EB Gigabit Network Connection
++ * 82575EB Gigabit Backplane Connection
++ * 82575GB Gigabit Network Connection
++ * 82576 Gigabit Network Connection
++ * 82576 Quad Port Gigabit Mezzanine Adapter
++ */
++
++#include "e1000_api.h"
++
++static s32 e1000_init_phy_params_82575(struct e1000_hw *hw);
++static s32 e1000_init_nvm_params_82575(struct e1000_hw *hw);
++static s32 e1000_init_mac_params_82575(struct e1000_hw *hw);
++static s32 e1000_acquire_phy_82575(struct e1000_hw *hw);
++static void e1000_release_phy_82575(struct e1000_hw *hw);
++static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw);
++static void e1000_release_nvm_82575(struct e1000_hw *hw);
++static s32 e1000_check_for_link_82575(struct e1000_hw *hw);
++static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw);
++static s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
++ u16 *duplex);
++static s32 e1000_init_hw_82575(struct e1000_hw *hw);
++static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw);
++static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
++ u16 *data);
++static s32 e1000_reset_hw_82575(struct e1000_hw *hw);
++static s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw,
++ bool active);
++static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw);
++static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw);
++static s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data);
++static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw,
++ u32 offset, u16 data);
++static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw);
++static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
++static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
++ u16 *speed, u16 *duplex);
++static s32 e1000_get_phy_id_82575(struct e1000_hw *hw);
++static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
++static bool e1000_sgmii_active_82575(struct e1000_hw *hw);
++static s32 e1000_reset_init_script_82575(struct e1000_hw *hw);
++static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw);
++static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw);
++static void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw);
++static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw);
++
++/**
++ * e1000_init_phy_params_82575 - Init PHY func ptrs.
++ * @hw: pointer to the HW structure
++ **/
++static s32 e1000_init_phy_params_82575(struct e1000_hw *hw)
++{
++ struct e1000_phy_info *phy = &hw->phy;
++ s32 ret_val = E1000_SUCCESS;
++
++ DEBUGFUNC("e1000_init_phy_params_82575");
++
++ if (hw->phy.media_type != e1000_media_type_copper) {
++ phy->type = e1000_phy_none;
++ goto out;
++ }
++
++ phy->ops.power_up = e1000_power_up_phy_copper;
++ phy->ops.power_down = e1000_power_down_phy_copper_82575;
++
++ phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
++ phy->reset_delay_us = 100;
++
++ phy->ops.acquire = e1000_acquire_phy_82575;
++ phy->ops.check_reset_block = e1000_check_reset_block_generic;
++ phy->ops.commit = e1000_phy_sw_reset_generic;
++ phy->ops.get_cfg_done = e1000_get_cfg_done_82575;
++ phy->ops.release = e1000_release_phy_82575;
++
++ if (e1000_sgmii_active_82575(hw)) {
++ phy->ops.reset = e1000_phy_hw_reset_sgmii_82575;
++ phy->ops.read_reg = e1000_read_phy_reg_sgmii_82575;
++ phy->ops.write_reg = e1000_write_phy_reg_sgmii_82575;
++ } else {
++ phy->ops.reset = e1000_phy_hw_reset_generic;
++ phy->ops.read_reg = e1000_read_phy_reg_igp;
++ phy->ops.write_reg = e1000_write_phy_reg_igp;
++ }
++
++ /* Set phy->phy_addr and phy->id. */
++ ret_val = e1000_get_phy_id_82575(hw);
++
++ /* Verify phy id and set remaining function pointers */
++ switch (phy->id) {
++ case M88E1111_I_PHY_ID:
++ phy->type = e1000_phy_m88;
++ phy->ops.check_polarity = e1000_check_polarity_m88;
++ phy->ops.get_info = e1000_get_phy_info_m88;
++ phy->ops.get_cable_length = e1000_get_cable_length_m88;
++ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
++ break;
++ case IGP03E1000_E_PHY_ID:
++ case IGP04E1000_E_PHY_ID:
++ phy->type = e1000_phy_igp_3;
++ phy->ops.check_polarity = e1000_check_polarity_igp;
++ phy->ops.get_info = e1000_get_phy_info_igp;
++ phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
++ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
++ phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82575;
++ phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_generic;
++ break;
++ default:
++ ret_val = -E1000_ERR_PHY;
++ goto out;
++ }
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_init_nvm_params_82575 - Init NVM func ptrs.
++ * @hw: pointer to the HW structure
++ **/
++static s32 e1000_init_nvm_params_82575(struct e1000_hw *hw)
++{
++ struct e1000_nvm_info *nvm = &hw->nvm;
++ u32 eecd = E1000_READ_REG(hw, E1000_EECD);
++ u16 size;
++
++ DEBUGFUNC("e1000_init_nvm_params_82575");
++
++ nvm->opcode_bits = 8;
++ nvm->delay_usec = 1;
++ switch (nvm->override) {
++ case e1000_nvm_override_spi_large:
++ nvm->page_size = 32;
++ nvm->address_bits = 16;
++ break;
++ case e1000_nvm_override_spi_small:
++ nvm->page_size = 8;
++ nvm->address_bits = 8;
++ break;
++ default:
++ nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
++ nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
++ break;
++ }
++
++ nvm->type = e1000_nvm_eeprom_spi;
++
++ size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
++ E1000_EECD_SIZE_EX_SHIFT);
++
++ /*
++ * Added to a constant, "size" becomes the left-shift value
++ * for setting word_size.
++ */
++ size += NVM_WORD_SIZE_BASE_SHIFT;
++
++ /* EEPROM access above 16k is unsupported */
++ if (size > 14)
++ size = 14;
++ nvm->word_size = 1 << size;
++
++ /* Function Pointers */
++ nvm->ops.acquire = e1000_acquire_nvm_82575;
++ nvm->ops.read = e1000_read_nvm_eerd;
++ nvm->ops.release = e1000_release_nvm_82575;
++ nvm->ops.update = e1000_update_nvm_checksum_generic;
++ nvm->ops.valid_led_default = e1000_valid_led_default_82575;
++ nvm->ops.validate = e1000_validate_nvm_checksum_generic;
++ nvm->ops.write = e1000_write_nvm_spi;
++
++ return E1000_SUCCESS;
++}
++
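For clarity (an illustrative aside, not part of the patch): the EECD size field read above acts as an extra left shift on top of NVM_WORD_SIZE_BASE_SHIFT when computing nvm->word_size. A minimal standalone sketch of that arithmetic, assuming the base shift is 6 as the e1000 headers conventionally define it:

/*
 * Illustrative sketch only -- not part of the driver. Shows how the EECD
 * size field becomes nvm->word_size in e1000_init_nvm_params_82575().
 * The base shift of 6 is an assumption based on typical e1000 headers.
 */
#include <stdio.h>

#define EXAMPLE_NVM_WORD_SIZE_BASE_SHIFT 6 /* assumed value */

static unsigned int example_nvm_word_size(unsigned int eecd_size_field)
{
        unsigned int size = eecd_size_field + EXAMPLE_NVM_WORD_SIZE_BASE_SHIFT;

        /* EEPROM access above 16K words is unsupported, so cap the shift */
        if (size > 14)
                size = 14;

        return 1u << size; /* word_size, in 16-bit words */
}

int main(void)
{
        /* A size field of 2 gives 1 << (2 + 6) = 256 words (512 bytes). */
        printf("size field 2  -> %u words\n", example_nvm_word_size(2));
        /* A size field of 10 exceeds the cap and yields 1 << 14 = 16384. */
        printf("size field 10 -> %u words\n", example_nvm_word_size(10));
        return 0;
}
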
++/**
++ * e1000_init_mac_params_82575 - Init MAC func ptrs.
++ * @hw: pointer to the HW structure
++ **/
++static s32 e1000_init_mac_params_82575(struct e1000_hw *hw)
++{
++ struct e1000_mac_info *mac = &hw->mac;
++ struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
++ u32 ctrl_ext = 0;
++
++ DEBUGFUNC("e1000_init_mac_params_82575");
++
++ /* Set media type */
++ /*
++ * The 82575 uses bits 22:23 for link mode. The mode can be changed
++ * based on the EEPROM. We cannot rely upon device ID. There
++ * is no distinguishable difference between fiber and internal
++ * SerDes mode on the 82575. There can be an external PHY attached
++ * on the SGMII interface. For this, we'll set sgmii_active to true.
++ */
++ hw->phy.media_type = e1000_media_type_copper;
++ dev_spec->sgmii_active = false;
++
++ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
++ switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
++ case E1000_CTRL_EXT_LINK_MODE_SGMII:
++ dev_spec->sgmii_active = true;
++ ctrl_ext |= E1000_CTRL_I2C_ENA;
++ break;
++ case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
++ hw->phy.media_type = e1000_media_type_internal_serdes;
++ ctrl_ext |= E1000_CTRL_I2C_ENA;
++ break;
++ default:
++ ctrl_ext &= ~E1000_CTRL_I2C_ENA;
++ break;
++ }
++
++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
++
++ /* Set mta register count */
++ mac->mta_reg_count = 128;
++ /* Set uta register count */
++ mac->uta_reg_count = (hw->mac.type == e1000_82575) ? 0 : 128;
++ /* Set rar entry count */
++ mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
++ if (mac->type == e1000_82576)
++ mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
++ /* Set if part includes ASF firmware */
++ mac->asf_firmware_present = true;
++ /* Set if manageability features are enabled. */
++ mac->arc_subsystem_valid =
++ (E1000_READ_REG(hw, E1000_FWSM) & E1000_FWSM_MODE_MASK)
++ ? true : false;
++
++ /* Function pointers */
++
++ /* bus type/speed/width */
++ mac->ops.get_bus_info = e1000_get_bus_info_pcie_generic;
++ /* reset */
++ mac->ops.reset_hw = e1000_reset_hw_82575;
++ /* hw initialization */
++ mac->ops.init_hw = e1000_init_hw_82575;
++ /* link setup */
++ mac->ops.setup_link = e1000_setup_link_generic;
++ /* physical interface link setup */
++ mac->ops.setup_physical_interface =
++ (hw->phy.media_type == e1000_media_type_copper)
++ ? e1000_setup_copper_link_82575
++ : e1000_setup_serdes_link_82575;
++ /* physical interface shutdown */
++ mac->ops.shutdown_serdes = e1000_shutdown_serdes_link_82575;
++ /* check for link */
++ mac->ops.check_for_link = e1000_check_for_link_82575;
++ /* receive address register setting */
++ mac->ops.rar_set = e1000_rar_set_generic;
++ /* read mac address */
++ mac->ops.read_mac_addr = e1000_read_mac_addr_82575;
++ /* multicast address update */
++ mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
++ /* writing VFTA */
++ mac->ops.write_vfta = e1000_write_vfta_generic;
++ /* clearing VFTA */
++ mac->ops.clear_vfta = e1000_clear_vfta_generic;
++ /* setting MTA */
++ mac->ops.mta_set = e1000_mta_set_generic;
++ /* ID LED init */
++ mac->ops.id_led_init = e1000_id_led_init_generic;
++ /* blink LED */
++ mac->ops.blink_led = e1000_blink_led_generic;
++ /* setup LED */
++ mac->ops.setup_led = e1000_setup_led_generic;
++ /* cleanup LED */
++ mac->ops.cleanup_led = e1000_cleanup_led_generic;
++ /* turn on/off LED */
++ mac->ops.led_on = e1000_led_on_generic;
++ mac->ops.led_off = e1000_led_off_generic;
++ /* clear hardware counters */
++ mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82575;
++ /* link info */
++ mac->ops.get_link_up_info = e1000_get_link_up_info_82575;
++
++ /* set lan id for port to determine which phy lock to use */
++ hw->mac.ops.set_lan_id(hw);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_init_function_pointers_82575 - Init func ptrs.
++ * @hw: pointer to the HW structure
++ *
++ * Called to initialize all function pointers and parameters.
++ **/
++void e1000_init_function_pointers_82575(struct e1000_hw *hw)
++{
++ DEBUGFUNC("e1000_init_function_pointers_82575");
++
++ hw->mac.ops.init_params = e1000_init_mac_params_82575;
++ hw->nvm.ops.init_params = e1000_init_nvm_params_82575;
++ hw->phy.ops.init_params = e1000_init_phy_params_82575;
++ hw->mbx.ops.init_params = e1000_init_mbx_params_pf;
++}
++
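As a usage note (illustrative, not part of the patch): the init_params hooks installed here are normally invoked in sequence by the shared e1000_api setup code. A hedged sketch of that call order, using only the structures and functions declared in this driver (mailbox init omitted for brevity):

/*
 * Illustrative call-order sketch only -- not part of the driver.
 * Mirrors what the shared e1000_api setup code is expected to do after
 * e1000_init_function_pointers_82575() has installed the hooks; the PCI
 * probe code is assumed to have mapped registers and set hw->mac.type.
 */
static s32 example_init_82575(struct e1000_hw *hw)
{
        s32 ret_val;

        /* Install the 82575-specific init_params entry points. */
        e1000_init_function_pointers_82575(hw);

        /* MAC parameters first; this also detects the media type. */
        ret_val = hw->mac.ops.init_params(hw);
        if (ret_val)
                return ret_val;

        /* NVM next, so later stages can read the EEPROM if needed. */
        ret_val = hw->nvm.ops.init_params(hw);
        if (ret_val)
                return ret_val;

        /* PHY parameters last; this probes the PHY address and ID. */
        return hw->phy.ops.init_params(hw);
}
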
++/**
++ * e1000_acquire_phy_82575 - Acquire rights to access PHY
++ * @hw: pointer to the HW structure
++ *
++ * Acquire access rights to the correct PHY.
++ **/
++static s32 e1000_acquire_phy_82575(struct e1000_hw *hw)
++{
++ u16 mask = E1000_SWFW_PHY0_SM;
++
++ DEBUGFUNC("e1000_acquire_phy_82575");
++
++ if (hw->bus.func == E1000_FUNC_1)
++ mask = E1000_SWFW_PHY1_SM;
++
++ return e1000_acquire_swfw_sync_82575(hw, mask);
++}
++
++/**
++ * e1000_release_phy_82575 - Release rights to access PHY
++ * @hw: pointer to the HW structure
++ *
++ * A wrapper to release access rights to the correct PHY.
++ **/
++static void e1000_release_phy_82575(struct e1000_hw *hw)
++{
++ u16 mask = E1000_SWFW_PHY0_SM;
++
++ DEBUGFUNC("e1000_release_phy_82575");
++
++ if (hw->bus.func == E1000_FUNC_1)
++ mask = E1000_SWFW_PHY1_SM;
++
++ e1000_release_swfw_sync_82575(hw, mask);
++}
++
++/**
++ * e1000_read_phy_reg_sgmii_82575 - Read PHY register using sgmii
++ * @hw: pointer to the HW structure
++ * @offset: register offset to be read
++ * @data: pointer to the read data
++ *
++ * Reads the PHY register at offset using the serial gigabit media independent
++ * interface and stores the retrieved information in data.
++ **/
++static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
++ u16 *data)
++{
++ s32 ret_val = -E1000_ERR_PARAM;
++
++ DEBUGFUNC("e1000_read_phy_reg_sgmii_82575");
++
++ if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
++ DEBUGOUT1("PHY Address %u is out of range\n", offset);
++ goto out;
++ }
++
++ ret_val = hw->phy.ops.acquire(hw);
++ if (ret_val)
++ goto out;
++
++ ret_val = e1000_read_phy_reg_i2c(hw, offset, data);
++
++ hw->phy.ops.release(hw);
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_write_phy_reg_sgmii_82575 - Write PHY register using sgmii
++ * @hw: pointer to the HW structure
++ * @offset: register offset to write to
++ * @data: data to write at register offset
++ *
++ * Writes the data to PHY register at the offset using the serial gigabit
++ * media independent interface.
++ **/
++static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
++ u16 data)
++{
++ s32 ret_val = -E1000_ERR_PARAM;
++
++ DEBUGFUNC("e1000_write_phy_reg_sgmii_82575");
++
++ if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
++ DEBUGOUT1("PHY Address %d is out of range\n", offset);
++ goto out;
++ }
++
++ ret_val = hw->phy.ops.acquire(hw);
++ if (ret_val)
++ goto out;
++
++ ret_val = e1000_write_phy_reg_i2c(hw, offset, data);
++
++ hw->phy.ops.release(hw);
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_get_phy_id_82575 - Retrieve PHY addr and id
++ * @hw: pointer to the HW structure
++ *
++ * Retrieves the PHY address and ID for both PHYs which do and do not use
++ * the sgmii interface.
++ **/
++static s32 e1000_get_phy_id_82575(struct e1000_hw *hw)
++{
++ struct e1000_phy_info *phy = &hw->phy;
++ s32 ret_val = E1000_SUCCESS;
++ u16 phy_id;
++ u32 ctrl_ext;
++
++ DEBUGFUNC("e1000_get_phy_id_82575");
++
++ /*
++ * For SGMII PHYs, we try the list of possible addresses until
++ * we find one that works. For non-SGMII PHYs
++ * (e.g. integrated copper PHYs), an address of 1 should
++ * work. The result of this function should mean phy->phy_addr
++ * and phy->id are set correctly.
++ */
++ if (!e1000_sgmii_active_82575(hw)) {
++ phy->addr = 1;
++ ret_val = e1000_get_phy_id(hw);
++ goto out;
++ }
++
++ /* Power on sgmii phy if it is disabled */
++ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
++ E1000_WRITE_REG(hw, E1000_CTRL_EXT,
++ ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA);
++ E1000_WRITE_FLUSH(hw);
++ msec_delay(300);
++
++ /*
++ * The address field in the I2CCMD register is 3 bits and 0 is invalid.
++ * Therefore, we need to test 1-7
++ */
++ for (phy->addr = 1; phy->addr < 8; phy->addr++) {
++ ret_val = e1000_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id);
++ if (ret_val == E1000_SUCCESS) {
++ DEBUGOUT2("Vendor ID 0x%08X read at address %u\n",
++ phy_id,
++ phy->addr);
++ /*
++ * At the time of this writing, the M88 part is
++ * the only supported SGMII PHY product.
++ */
++ if (phy_id == M88_VENDOR)
++ break;
++ } else {
++ DEBUGOUT1("PHY address %u was unreadable\n",
++ phy->addr);
++ }
++ }
++
++ /* A valid PHY type couldn't be found. */
++ if (phy->addr == 8) {
++ phy->addr = 0;
++ ret_val = -E1000_ERR_PHY;
++ } else {
++ ret_val = e1000_get_phy_id(hw);
++ }
++
++ /* restore previous sfp cage power state */
++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_phy_hw_reset_sgmii_82575 - Performs a PHY reset
++ * @hw: pointer to the HW structure
++ *
++ * Resets the PHY using the serial gigabit media independent interface.
++ **/
++static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
++{
++ s32 ret_val = E1000_SUCCESS;
++
++ DEBUGFUNC("e1000_phy_hw_reset_sgmii_82575");
++
++ /*
++ * This isn't a true "hard" reset, but is the only reset
++ * available to us at this time.
++ */
++
++ DEBUGOUT("Soft resetting SGMII attached PHY...\n");
++
++ if (!(hw->phy.ops.write_reg))
++ goto out;
++
++ /*
++ * SFP documentation requires the following to configure the SFP module
++ * to work on SGMII. No further documentation is given.
++ */
++ ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084);
++ if (ret_val)
++ goto out;
++
++ ret_val = hw->phy.ops.commit(hw);
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state
++ * @hw: pointer to the HW structure
++ * @active: true to enable LPLU, false to disable
++ *
++ * Sets the LPLU D0 state according to the active flag. When
++ * activating LPLU this function also disables smart speed
++ * and vice versa. LPLU will not be activated unless the
++ * device autonegotiation advertisement meets standards of
++ * either 10 or 10/100 or 10/100/1000 at all duplexes.
++ * This is a function pointer entry point only called by
++ * PHY setup routines.
++ **/
++static s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
++{
++ struct e1000_phy_info *phy = &hw->phy;
++ s32 ret_val = E1000_SUCCESS;
++ u16 data;
++
++ DEBUGFUNC("e1000_set_d0_lplu_state_82575");
++
++ if (!(hw->phy.ops.read_reg))
++ goto out;
++
++ ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
++ if (ret_val)
++ goto out;
++
++ if (active) {
++ data |= IGP02E1000_PM_D0_LPLU;
++ ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
++ data);
++ if (ret_val)
++ goto out;
++
++ /* When LPLU is enabled, we should disable SmartSpeed */
++ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
++ &data);
++ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
++ ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
++ data);
++ if (ret_val)
++ goto out;
++ } else {
++ data &= ~IGP02E1000_PM_D0_LPLU;
++ ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
++ data);
++ /*
++ * LPLU and SmartSpeed are mutually exclusive. LPLU is used
++ * during Dx states where the power conservation is most
++ * important. During driver activity we should enable
++ * SmartSpeed, so performance is maintained.
++ */
++ if (phy->smart_speed == e1000_smart_speed_on) {
++ ret_val = phy->ops.read_reg(hw,
++ IGP01E1000_PHY_PORT_CONFIG,
++ &data);
++ if (ret_val)
++ goto out;
++
++ data |= IGP01E1000_PSCFR_SMART_SPEED;
++ ret_val = phy->ops.write_reg(hw,
++ IGP01E1000_PHY_PORT_CONFIG,
++ data);
++ if (ret_val)
++ goto out;
++ } else if (phy->smart_speed == e1000_smart_speed_off) {
++ ret_val = phy->ops.read_reg(hw,
++ IGP01E1000_PHY_PORT_CONFIG,
++ &data);
++ if (ret_val)
++ goto out;
++
++ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
++ ret_val = phy->ops.write_reg(hw,
++ IGP01E1000_PHY_PORT_CONFIG,
++ data);
++ if (ret_val)
++ goto out;
++ }
++ }
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_acquire_nvm_82575 - Request for access to EEPROM
++ * @hw: pointer to the HW structure
++ *
++ * Acquire the necessary semaphores for exclusive access to the EEPROM.
++ * Set the EEPROM access request bit and wait for EEPROM access grant bit.
++ * Return successful if access grant bit set, else clear the request for
++ * EEPROM access and return -E1000_ERR_NVM (-1).
++ **/
++static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw)
++{
++ s32 ret_val;
++
++ DEBUGFUNC("e1000_acquire_nvm_82575");
++
++ ret_val = e1000_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
++ if (ret_val)
++ goto out;
++
++ ret_val = e1000_acquire_nvm_generic(hw);
++
++ if (ret_val)
++ e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_release_nvm_82575 - Release exclusive access to EEPROM
++ * @hw: pointer to the HW structure
++ *
++ * Stop any current commands to the EEPROM and clear the EEPROM request bit,
++ * then release the semaphores acquired.
++ **/
++static void e1000_release_nvm_82575(struct e1000_hw *hw)
++{
++ DEBUGFUNC("e1000_release_nvm_82575");
++
++ e1000_release_nvm_generic(hw);
++ e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
++}
++
++/**
++ * e1000_acquire_swfw_sync_82575 - Acquire SW/FW semaphore
++ * @hw: pointer to the HW structure
++ * @mask: specifies which semaphore to acquire
++ *
++ * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
++ * will also specify which port we're acquiring the lock for.
++ **/
++static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
++{
++ u32 swfw_sync;
++ u32 swmask = mask;
++ u32 fwmask = mask << 16;
++ s32 ret_val = E1000_SUCCESS;
++ s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
++
++ DEBUGFUNC("e1000_acquire_swfw_sync_82575");
++
++ while (i < timeout) {
++ if (e1000_get_hw_semaphore_generic(hw)) {
++ ret_val = -E1000_ERR_SWFW_SYNC;
++ goto out;
++ }
++
++ swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
++ if (!(swfw_sync & (fwmask | swmask)))
++ break;
++
++ /*
++ * Firmware currently using resource (fwmask)
++ * or other software thread using resource (swmask)
++ */
++ e1000_put_hw_semaphore_generic(hw);
++ msec_delay_irq(5);
++ i++;
++ }
++
++ if (i == timeout) {
++ DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
++ ret_val = -E1000_ERR_SWFW_SYNC;
++ goto out;
++ }
++
++ swfw_sync |= swmask;
++ E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
++
++ e1000_put_hw_semaphore_generic(hw);
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_release_swfw_sync_82575 - Release SW/FW semaphore
++ * @hw: pointer to the HW structure
++ * @mask: specifies which semaphore to acquire
++ *
++ * Release the SW/FW semaphore used to access the PHY or NVM. The mask
++ * will also specify which port we're releasing the lock for.
++ **/
++static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
++{
++ u32 swfw_sync;
++
++ DEBUGFUNC("e1000_release_swfw_sync_82575");
++
++ while (e1000_get_hw_semaphore_generic(hw) != E1000_SUCCESS);
++ /* Empty */
++
++ swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
++ swfw_sync &= ~mask;
++ E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
++
++ e1000_put_hw_semaphore_generic(hw);
++}
++
++/**
++ * e1000_get_cfg_done_82575 - Read config done bit
++ * @hw: pointer to the HW structure
++ *
++ * Read the management control register for the config done bit for
++ * completion status. NOTE: silicon which is EEPROM-less will fail trying
++ * to read the config done bit, so the error is *ONLY* logged and
++ * E1000_SUCCESS is returned. If we were to return with an error,
++ * EEPROM-less silicon would not be able to be reset or change link.
++ **/
++static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw)
++{
++ s32 timeout = PHY_CFG_TIMEOUT;
++ s32 ret_val = E1000_SUCCESS;
++ u32 mask = E1000_NVM_CFG_DONE_PORT_0;
++
++ DEBUGFUNC("e1000_get_cfg_done_82575");
++
++ if (hw->bus.func == E1000_FUNC_1)
++ mask = E1000_NVM_CFG_DONE_PORT_1;
++ while (timeout) {
++ if (E1000_READ_REG(hw, E1000_EEMNGCTL) & mask)
++ break;
++ msec_delay(1);
++ timeout--;
++ }
++ if (!timeout)
++ DEBUGOUT("MNG configuration cycle has not completed.\n");
++
++ /* If EEPROM is not marked present, init the PHY manually */
++ if (((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0) &&
++ (hw->phy.type == e1000_phy_igp_3))
++ e1000_phy_init_script_igp3(hw);
++
++ return ret_val;
++}
++
++/**
++ * e1000_get_link_up_info_82575 - Get link speed/duplex info
++ * @hw: pointer to the HW structure
++ * @speed: stores the current speed
++ * @duplex: stores the current duplex
++ *
++ * This is a wrapper function. If using the serial gigabit media independent
++ * interface, PCS is used to retrieve the link speed and duplex information.
++ * Otherwise, the generic function is used to get the speed and duplex info.
++ **/
++static s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
++ u16 *duplex)
++{
++ s32 ret_val;
++
++ DEBUGFUNC("e1000_get_link_up_info_82575");
++
++ if (hw->phy.media_type != e1000_media_type_copper)
++ ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, speed,
++ duplex);
++ else
++ ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed,
++ duplex);
++
++ return ret_val;
++}
++
++/**
++ * e1000_check_for_link_82575 - Check for link
++ * @hw: pointer to the HW structure
++ *
++ * If sgmii is enabled, then use the pcs register to determine link, otherwise
++ * use the generic interface for determining link.
++ **/
++static s32 e1000_check_for_link_82575(struct e1000_hw *hw)
++{
++ s32 ret_val;
++ u16 speed, duplex;
++
++ DEBUGFUNC("e1000_check_for_link_82575");
++
++ if (hw->phy.media_type != e1000_media_type_copper) {
++ ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, &speed,
++ &duplex);
++ /*
++ * Use this flag to determine if link needs to be checked or
++ * not. If we have link clear the flag so that we do not
++ * continue to check for link.
++ */
++ hw->mac.get_link_status = !hw->mac.serdes_has_link;
++ } else {
++ ret_val = e1000_check_for_copper_link_generic(hw);
++ }
++
++ return ret_val;
++}
++
++/**
++ * e1000_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
++ * @hw: pointer to the HW structure
++ * @speed: stores the current speed
++ * @duplex: stores the current duplex
++ *
++ * Using the physical coding sub-layer (PCS), retrieve the current speed and
++ * duplex, then store the values in the pointers provided.
++ **/
++static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
++ u16 *speed, u16 *duplex)
++{
++ struct e1000_mac_info *mac = &hw->mac;
++ u32 pcs;
++
++ DEBUGFUNC("e1000_get_pcs_speed_and_duplex_82575");
++
++ /* Set up defaults for the return values of this function */
++ mac->serdes_has_link = false;
++ *speed = 0;
++ *duplex = 0;
++
++ /*
++ * Read the PCS Status register for link state. For non-copper mode,
++ * the status register is not accurate. The PCS status register is
++ * used instead.
++ */
++ pcs = E1000_READ_REG(hw, E1000_PCS_LSTAT);
++
++ /*
++ * The link up bit determines when link is up on autoneg. The sync ok
++ * gets set once both sides sync up and agree upon link. Stable link
++ * can be determined by checking for both link up and link sync ok
++ */
++ if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) {
++ mac->serdes_has_link = true;
++
++ /* Detect and store PCS speed */
++ if (pcs & E1000_PCS_LSTS_SPEED_1000) {
++ *speed = SPEED_1000;
++ } else if (pcs & E1000_PCS_LSTS_SPEED_100) {
++ *speed = SPEED_100;
++ } else {
++ *speed = SPEED_10;
++ }
++
++ /* Detect and store PCS duplex */
++ if (pcs & E1000_PCS_LSTS_DUPLEX_FULL) {
++ *duplex = FULL_DUPLEX;
++ } else {
++ *duplex = HALF_DUPLEX;
++ }
++ }
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_shutdown_serdes_link_82575 - Remove link during power down
++ * @hw: pointer to the HW structure
++ *
++ * In the case of serdes, shut down the sfp module and PCS on driver unload
++ * when management pass through is not enabled.
++ **/
++void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw)
++{
++ u32 reg;
++ u16 eeprom_data = 0;
++
++ if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
++ !e1000_sgmii_active_82575(hw))
++ return;
++
++ if (hw->bus.func == E1000_FUNC_0)
++ hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
++ else if (hw->bus.func == E1000_FUNC_1)
++ hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
++
++ /*
++ * If APM is not enabled in the EEPROM and management interface is
++ * not enabled, then power down.
++ */
++ if (!(eeprom_data & E1000_NVM_APME_82575) &&
++ !e1000_enable_mng_pass_thru(hw)) {
++ /* Disable PCS to turn off link */
++ reg = E1000_READ_REG(hw, E1000_PCS_CFG0);
++ reg &= ~E1000_PCS_CFG_PCS_EN;
++ E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg);
++
++ /* shutdown the laser */
++ reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
++ reg |= E1000_CTRL_EXT_SDP3_DATA;
++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
++
++ /* flush the write to verify completion */
++ E1000_WRITE_FLUSH(hw);
++ msec_delay(1);
++ }
++
++ return;
++}
++
++/**
++ * e1000_reset_hw_82575 - Reset hardware
++ * @hw: pointer to the HW structure
++ *
++ * This resets the hardware into a known state.
++ **/
++static s32 e1000_reset_hw_82575(struct e1000_hw *hw)
++{
++ u32 ctrl, icr;
++ s32 ret_val;
++
++ DEBUGFUNC("e1000_reset_hw_82575");
++
++ /*
++ * Prevent the PCI-E bus from sticking if there is no TLP connection
++ * on the last TLP read/write transaction when MAC is reset.
++ */
++ ret_val = e1000_disable_pcie_master_generic(hw);
++ if (ret_val) {
++ DEBUGOUT("PCI-E Master disable polling has failed.\n");
++ }
++
++ /* set the completion timeout for interface */
++ ret_val = e1000_set_pcie_completion_timeout(hw);
++ if (ret_val) {
++ DEBUGOUT("PCI-E Set completion timeout has failed.\n");
++ }
++
++ DEBUGOUT("Masking off all interrupts\n");
++ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
++
++ E1000_WRITE_REG(hw, E1000_RCTL, 0);
++ E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
++ E1000_WRITE_FLUSH(hw);
++
++ msec_delay(10);
++
++ ctrl = E1000_READ_REG(hw, E1000_CTRL);
++
++ DEBUGOUT("Issuing a global reset to MAC\n");
++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
++
++ ret_val = e1000_get_auto_rd_done_generic(hw);
++ if (ret_val) {
++ /*
++ * When auto config read does not complete, do not
++ * return with an error. This can happen in situations
++ * where there is no eeprom and prevents getting link.
++ */
++ DEBUGOUT("Auto Read Done did not complete\n");
++ }
++
++ /* If EEPROM is not present, run manual init scripts */
++ if ((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0)
++ e1000_reset_init_script_82575(hw);
++
++ /* Clear any pending interrupt events. */
++ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
++ icr = E1000_READ_REG(hw, E1000_ICR);
++
++ /* Install any alternate MAC address into RAR0 */
++ ret_val = e1000_check_alt_mac_addr_generic(hw);
++
++ return ret_val;
++}
++
++/**
++ * e1000_init_hw_82575 - Initialize hardware
++ * @hw: pointer to the HW structure
++ *
++ * This inits the hardware readying it for operation.
++ **/
++static s32 e1000_init_hw_82575(struct e1000_hw *hw)
++{
++ struct e1000_mac_info *mac = &hw->mac;
++ s32 ret_val;
++ u16 i, rar_count = mac->rar_entry_count;
++
++ DEBUGFUNC("e1000_init_hw_82575");
++
++ /* Initialize identification LED */
++ ret_val = mac->ops.id_led_init(hw);
++ if (ret_val) {
++ DEBUGOUT("Error initializing identification LED\n");
++ /* This is not fatal and we should not stop init due to this */
++ }
++
++ /* Disabling VLAN filtering */
++ DEBUGOUT("Initializing the IEEE VLAN\n");
++ mac->ops.clear_vfta(hw);
++
++ /* Setup the receive address */
++ e1000_init_rx_addrs_generic(hw, rar_count);
++
++ /* Zero out the Multicast HASH table */
++ DEBUGOUT("Zeroing the MTA\n");
++ for (i = 0; i < mac->mta_reg_count; i++)
++ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
++
++ /* Zero out the Unicast HASH table */
++ DEBUGOUT("Zeroing the UTA\n");
++ for (i = 0; i < mac->uta_reg_count; i++)
++ E1000_WRITE_REG_ARRAY(hw, E1000_UTA, i, 0);
++
++ /* Setup link and flow control */
++ ret_val = mac->ops.setup_link(hw);
++
++ /*
++ * Clear all of the statistics registers (clear on read). It is
++ * important that we do this after we have tried to establish link
++ * because the symbol error count will increment wildly if there
++ * is no link.
++ */
++ e1000_clear_hw_cntrs_82575(hw);
++
++ return ret_val;
++}
++
++/**
++ * e1000_setup_copper_link_82575 - Configure copper link settings
++ * @hw: pointer to the HW structure
++ *
++ * Configures the link for auto-neg or forced speed and duplex. Then we check
++ * for link; once link is established, collision distance and flow control
++ * are configured.
++ **/
++static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw)
++{
++ u32 ctrl;
++ s32 ret_val;
++
++ DEBUGFUNC("e1000_setup_copper_link_82575");
++
++ ctrl = E1000_READ_REG(hw, E1000_CTRL);
++ ctrl |= E1000_CTRL_SLU;
++ ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
++
++ ret_val = e1000_setup_serdes_link_82575(hw);
++ if (ret_val)
++ goto out;
++
++ if (e1000_sgmii_active_82575(hw) && !hw->phy.reset_disable) {
++ ret_val = hw->phy.ops.reset(hw);
++ if (ret_val) {
++ DEBUGOUT("Error resetting the PHY.\n");
++ goto out;
++ }
++ }
++ switch (hw->phy.type) {
++ case e1000_phy_m88:
++ ret_val = e1000_copper_link_setup_m88(hw);
++ break;
++ case e1000_phy_igp_3:
++ ret_val = e1000_copper_link_setup_igp(hw);
++ break;
++ default:
++ ret_val = -E1000_ERR_PHY;
++ break;
++ }
++
++ if (ret_val)
++ goto out;
++
++ ret_val = e1000_setup_copper_link_generic(hw);
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_setup_serdes_link_82575 - Setup link for serdes
++ * @hw: pointer to the HW structure
++ *
++ * Configure the physical coding sub-layer (PCS) link. The PCS link is
++ * used on copper connections where the serialized gigabit media independent
++ * interface (sgmii) or serdes fiber is being used. Configures the link
++ * for auto-negotiation or forces speed/duplex.
++ **/
++static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw)
++{
++ u32 ctrl_reg, reg;
++
++ DEBUGFUNC("e1000_setup_serdes_link_82575");
++
++ if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
++ !e1000_sgmii_active_82575(hw))
++ return E1000_SUCCESS;
++
++ /*
++ * On the 82575, SerDes loopback mode persists until it is
++ * explicitly turned off or a power cycle is performed. A read to
++ * the register does not indicate its status. Therefore, we ensure
++ * loopback mode is disabled during initialization.
++ */
++ E1000_WRITE_REG(hw, E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
++
++ /* power on the sfp cage if present */
++ reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
++ reg &= ~E1000_CTRL_EXT_SDP3_DATA;
++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
++
++ ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
++ ctrl_reg |= E1000_CTRL_SLU;
++
++ if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) {
++ /* set both sw defined pins */
++ ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1;
++
++ /* Set switch control to serdes energy detect */
++ reg = E1000_READ_REG(hw, E1000_CONNSW);
++ reg |= E1000_CONNSW_ENRGSRC;
++ E1000_WRITE_REG(hw, E1000_CONNSW, reg);
++ }
++
++ reg = E1000_READ_REG(hw, E1000_PCS_LCTL);
++
++ if (e1000_sgmii_active_82575(hw)) {
++ /* allow time for SFP cage to power up phy */
++ msec_delay(300);
++
++ /* AN time out should be disabled for SGMII mode */
++ reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT);
++ } else {
++ ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
++ E1000_CTRL_FD | E1000_CTRL_FRCDPX;
++ }
++
++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
++
++ /*
++ * New SerDes mode allows for forcing speed or autonegotiating speed
++ * at 1gb. Autoneg should be default set by most drivers. This is the
++ * mode that will be compatible with older link partners and switches.
++ * However, both are supported by the hardware and some drivers/tools.
++ */
++
++ reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
++ E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);
++
++ /*
++ * We force flow control to prevent the CTRL register values from being
++ * overwritten by the autonegotiated flow control values
++ */
++ reg |= E1000_PCS_LCTL_FORCE_FCTRL;
++
++ /*
++ * we always set sgmii to autoneg since it is the phy that will be
++ * forcing the link and the serdes is just a go-between
++ */
++ if (hw->mac.autoneg || e1000_sgmii_active_82575(hw)) {
++ /* Set PCS register for autoneg */
++ reg |= E1000_PCS_LCTL_FSV_1000 | /* Force 1000 */
++ E1000_PCS_LCTL_FDV_FULL | /* SerDes Full dplx */
++ E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */
++ E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */
++ DEBUGOUT1("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
++ } else {
++ /* Check for duplex first */
++ if (hw->mac.forced_speed_duplex & E1000_ALL_FULL_DUPLEX)
++ reg |= E1000_PCS_LCTL_FDV_FULL;
++
++ /* No need to check for 1000/full since the spec states that
++ * it requires autoneg to be enabled */
++ /* Now set speed */
++ if (hw->mac.forced_speed_duplex & E1000_ALL_100_SPEED)
++ reg |= E1000_PCS_LCTL_FSV_100;
++
++ /* Force speed and force link */
++ reg |= E1000_PCS_LCTL_FSD |
++ E1000_PCS_LCTL_FORCE_LINK |
++ E1000_PCS_LCTL_FLV_LINK_UP;
++
++ DEBUGOUT1("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
++ }
++
++ E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg);
++
++ if (!e1000_sgmii_active_82575(hw))
++ e1000_force_mac_fc_generic(hw);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_valid_led_default_82575 - Verify a valid default LED config
++ * @hw: pointer to the HW structure
++ * @data: pointer to the NVM (EEPROM)
++ *
++ * Read the EEPROM for the current default LED configuration. If the
++ * LED configuration is not valid, set to a valid LED configuration.
++ **/
++static s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data)
++{
++ s32 ret_val;
++
++ DEBUGFUNC("e1000_valid_led_default_82575");
++
++ ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
++ if (ret_val) {
++ DEBUGOUT("NVM Read Error\n");
++ goto out;
++ }
++
++ if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
++ switch (hw->phy.media_type) {
++ case e1000_media_type_internal_serdes:
++ *data = ID_LED_DEFAULT_82575_SERDES;
++ break;
++ case e1000_media_type_copper:
++ default:
++ *data = ID_LED_DEFAULT;
++ break;
++ }
++ }
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_sgmii_active_82575 - Return sgmii state
++ * @hw: pointer to the HW structure
++ *
++ * 82575 silicon has a serialized gigabit media independent interface (sgmii)
++ * which can be enabled for use in the embedded applications. Simply
++ * return the current state of the sgmii interface.
++ **/
++static bool e1000_sgmii_active_82575(struct e1000_hw *hw)
++{
++ struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
++ return dev_spec->sgmii_active;
++}
++
++/**
++ * e1000_reset_init_script_82575 - Inits HW defaults after reset
++ * @hw: pointer to the HW structure
++ *
++ * Inits recommended HW defaults after a reset when there is no EEPROM
++ * detected. This is only for the 82575.
++ **/
++static s32 e1000_reset_init_script_82575(struct e1000_hw *hw)
++{
++ DEBUGFUNC("e1000_reset_init_script_82575");
++
++ if (hw->mac.type == e1000_82575) {
++ DEBUGOUT("Running reset init script for 82575\n");
++ /* SerDes configuration via SERDESCTRL */
++ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x00, 0x0C);
++ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x01, 0x78);
++ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x1B, 0x23);
++ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x23, 0x15);
++
++ /* CCM configuration via CCMCTL register */
++ e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x14, 0x00);
++ e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x10, 0x00);
++
++ /* PCIe lanes configuration */
++ e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x00, 0xEC);
++ e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x61, 0xDF);
++ e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x34, 0x05);
++ e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x2F, 0x81);
++
++ /* PCIe PLL Configuration */
++ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x02, 0x47);
++ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x14, 0x00);
++ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x10, 0x00);
++ }
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_read_mac_addr_82575 - Read device MAC address
++ * @hw: pointer to the HW structure
++ **/
++static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw)
++{
++ s32 ret_val = E1000_SUCCESS;
++
++ DEBUGFUNC("e1000_read_mac_addr_82575");
++
++ /*
++ * If there's an alternate MAC address place it in RAR0
++ * so that it will override the Si installed default perm
++ * address.
++ */
++ ret_val = e1000_check_alt_mac_addr_generic(hw);
++ if (ret_val)
++ goto out;
++
++ ret_val = e1000_read_mac_addr_generic(hw);
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_power_down_phy_copper_82575 - Remove link during PHY power down
++ * @hw: pointer to the HW structure
++ *
++ * In the case of a PHY power down to save power, or to turn off link during
++ * a driver unload when wake on lan is not enabled, remove the link.
++ **/
++static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw)
++{
++ struct e1000_phy_info *phy = &hw->phy;
++ struct e1000_mac_info *mac = &hw->mac;
++
++ if (!(phy->ops.check_reset_block))
++ return;
++
++ /* If the management interface is not enabled, then power down */
++ if (!(mac->ops.check_mng_mode(hw) || phy->ops.check_reset_block(hw)))
++ e1000_power_down_phy_copper(hw);
++
++ return;
++}
++
++/**
++ * e1000_clear_hw_cntrs_82575 - Clear device specific hardware counters
++ * @hw: pointer to the HW structure
++ *
++ * Clears the hardware counters by reading the counter registers.
++ **/
++static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw)
++{
++ DEBUGFUNC("e1000_clear_hw_cntrs_82575");
++
++ e1000_clear_hw_cntrs_base_generic(hw);
++
++ E1000_READ_REG(hw, E1000_PRC64);
++ E1000_READ_REG(hw, E1000_PRC127);
++ E1000_READ_REG(hw, E1000_PRC255);
++ E1000_READ_REG(hw, E1000_PRC511);
++ E1000_READ_REG(hw, E1000_PRC1023);
++ E1000_READ_REG(hw, E1000_PRC1522);
++ E1000_READ_REG(hw, E1000_PTC64);
++ E1000_READ_REG(hw, E1000_PTC127);
++ E1000_READ_REG(hw, E1000_PTC255);
++ E1000_READ_REG(hw, E1000_PTC511);
++ E1000_READ_REG(hw, E1000_PTC1023);
++ E1000_READ_REG(hw, E1000_PTC1522);
++
++ E1000_READ_REG(hw, E1000_ALGNERRC);
++ E1000_READ_REG(hw, E1000_RXERRC);
++ E1000_READ_REG(hw, E1000_TNCRS);
++ E1000_READ_REG(hw, E1000_CEXTERR);
++ E1000_READ_REG(hw, E1000_TSCTC);
++ E1000_READ_REG(hw, E1000_TSCTFC);
++
++ E1000_READ_REG(hw, E1000_MGTPRC);
++ E1000_READ_REG(hw, E1000_MGTPDC);
++ E1000_READ_REG(hw, E1000_MGTPTC);
++
++ E1000_READ_REG(hw, E1000_IAC);
++ E1000_READ_REG(hw, E1000_ICRXOC);
++
++ E1000_READ_REG(hw, E1000_ICRXPTC);
++ E1000_READ_REG(hw, E1000_ICRXATC);
++ E1000_READ_REG(hw, E1000_ICTXPTC);
++ E1000_READ_REG(hw, E1000_ICTXATC);
++ E1000_READ_REG(hw, E1000_ICTXQEC);
++ E1000_READ_REG(hw, E1000_ICTXQMTC);
++ E1000_READ_REG(hw, E1000_ICRXDMTC);
++
++ E1000_READ_REG(hw, E1000_CBTMPC);
++ E1000_READ_REG(hw, E1000_HTDPMC);
++ E1000_READ_REG(hw, E1000_CBRMPC);
++ E1000_READ_REG(hw, E1000_RPTHC);
++ E1000_READ_REG(hw, E1000_HGPTC);
++ E1000_READ_REG(hw, E1000_HTCBDPC);
++ E1000_READ_REG(hw, E1000_HGORCL);
++ E1000_READ_REG(hw, E1000_HGORCH);
++ E1000_READ_REG(hw, E1000_HGOTCL);
++ E1000_READ_REG(hw, E1000_HGOTCH);
++ E1000_READ_REG(hw, E1000_LENERRS);
++
++ /* This register should not be read in copper configurations */
++ if ((hw->phy.media_type == e1000_media_type_internal_serdes) ||
++ e1000_sgmii_active_82575(hw))
++ E1000_READ_REG(hw, E1000_SCVPC);
++}
++
++/**
++ * e1000_rx_fifo_flush_82575 - Clean rx fifo after RX enable
++ * @hw: pointer to the HW structure
++ *
++ * After rx enable, if manageability is enabled, then there is likely some
++ * bad data at the start of the fifo and possibly in the DMA fifo. This
++ * function clears the fifos and flushes any packets that came in as rx was
++ * being enabled.
++ **/
++void e1000_rx_fifo_flush_82575(struct e1000_hw *hw)
++{
++ u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
++ int i, ms_wait;
++
++ DEBUGFUNC("e1000_rx_fifo_workaround_82575");
++ if (hw->mac.type != e1000_82575 ||
++ !(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_RCV_TCO_EN))
++ return;
++
++ /* Disable all RX queues */
++ for (i = 0; i < 4; i++) {
++ rxdctl[i] = E1000_READ_REG(hw, E1000_RXDCTL(i));
++ E1000_WRITE_REG(hw, E1000_RXDCTL(i),
++ rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
++ }
++ /* Poll all queues to verify they have shut down */
++ for (ms_wait = 0; ms_wait < 10; ms_wait++) {
++ msec_delay(1);
++ rx_enabled = 0;
++ for (i = 0; i < 4; i++)
++ rx_enabled |= E1000_READ_REG(hw, E1000_RXDCTL(i));
++ if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE))
++ break;
++ }
++
++ if (ms_wait == 10)
++ DEBUGOUT("Queue disable timed out after 10ms\n");
++
++ /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
++ * incoming packets are rejected. Set enable and wait 2ms so that
++ * any packet that was coming in as RCTL.EN was set is flushed
++ */
++ rfctl = E1000_READ_REG(hw, E1000_RFCTL);
++ E1000_WRITE_REG(hw, E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);
++
++ rlpml = E1000_READ_REG(hw, E1000_RLPML);
++ E1000_WRITE_REG(hw, E1000_RLPML, 0);
++
++ rctl = E1000_READ_REG(hw, E1000_RCTL);
++ temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP);
++ temp_rctl |= E1000_RCTL_LPE;
++
++ E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl);
++ E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl | E1000_RCTL_EN);
++ E1000_WRITE_FLUSH(hw);
++ msec_delay(2);
++
++ /* Enable RX queues that were previously enabled and restore our
++ * previous state
++ */
++ for (i = 0; i < 4; i++)
++ E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl[i]);
++ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
++ E1000_WRITE_FLUSH(hw);
++
++ E1000_WRITE_REG(hw, E1000_RLPML, rlpml);
++ E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
++
++ /* Flush receive errors generated by workaround */
++ E1000_READ_REG(hw, E1000_ROC);
++ E1000_READ_REG(hw, E1000_RNBC);
++ E1000_READ_REG(hw, E1000_MPC);
++}
++
++/**
++ * e1000_set_pcie_completion_timeout - set pci-e completion timeout
++ * @hw: pointer to the HW structure
++ *
++ * The defaults for 82575 and 82576 should be in the range of 50us to 50ms,
++ * however the hardware default for these parts is 500us to 1ms which is less
++ * than the 10ms recommended by the pci-e spec. To address this we need to
++ * increase the value to either 10ms to 200ms for capability version 1 config,
++ * or 16ms to 55ms for version 2.
++ **/
++static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw)
++{
++ u32 gcr = E1000_READ_REG(hw, E1000_GCR);
++ s32 ret_val = E1000_SUCCESS;
++ u16 pcie_devctl2;
++
++ /* only take action if timeout value is defaulted to 0 */
++ if (gcr & E1000_GCR_CMPL_TMOUT_MASK)
++ goto out;
++
++ /*
++ * if capabilities version is type 1 we can write the
++ * timeout of 10ms to 200ms through the GCR register
++ */
++ if (!(gcr & E1000_GCR_CAP_VER2)) {
++ gcr |= E1000_GCR_CMPL_TMOUT_10ms;
++ goto out;
++ }
++
++ /*
++ * for version 2 capabilities we need to write the config space
++ * directly in order to set the completion timeout value for
++ * 16ms to 55ms
++ */
++ ret_val = e1000_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
++ &pcie_devctl2);
++ if (ret_val)
++ goto out;
++
++ pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;
++
++ ret_val = e1000_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
++ &pcie_devctl2);
++out:
++ /* disable completion timeout resend */
++ gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;
++
++ E1000_WRITE_REG(hw, E1000_GCR, gcr);
++ return ret_val;
++}
++
++/**
++ * e1000_vmdq_set_loopback_pf - enable or disable vmdq loopback
++ * @hw: pointer to the hardware struct
++ * @enable: state to enter, either enabled or disabled
++ *
++ * enables/disables L2 switch loopback functionality.
++ **/
++void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
++{
++ u32 dtxswc = E1000_READ_REG(hw, E1000_DTXSWC);
++
++ if (enable)
++ dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
++ else
++ dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
++
++ E1000_WRITE_REG(hw, E1000_DTXSWC, dtxswc);
++}
++
++/**
++ * e1000_vmdq_set_replication_pf - enable or disable vmdq replication
++ * @hw: pointer to the hardware struct
++ * @enable: state to enter, either enabled or disabled
++ *
++ * enables/disables replication of packets across multiple pools.
++ **/
++void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
++{
++ u32 vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
++
++ if (enable)
++ vt_ctl |= E1000_VT_CTL_VM_REPL_EN;
++ else
++ vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN;
++
++ E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
++}
++
+Index: linux-2.6.22/drivers/net/igb/e1000_82575.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.22/drivers/net/igb/e1000_82575.h 2009-12-18 12:39:22.000000000 -0500
+@@ -0,0 +1,439 @@
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2009 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ You should have received a copy of the GNU General Public License along with
++ this program; if not, write to the Free Software Foundation, Inc.,
++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
++
++#ifndef _E1000_82575_H_
++#define _E1000_82575_H_
++
++#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \
++ (ID_LED_DEF1_DEF2 << 8) | \
++ (ID_LED_DEF1_DEF2 << 4) | \
++ (ID_LED_OFF1_ON2))
++/*
++ * Receive Address Register Count
++ * Number of high/low register pairs in the RAR. The RAR (Receive Address
++ * Registers) holds the directed and multicast addresses that we monitor.
++ * These entries are also used for MAC-based filtering.
++ */
++/*
++ * For 82576, there is an additional set of RARs that begins at an offset
++ * separate from the first set of RARs.
++ */
++#define E1000_RAR_ENTRIES_82575 16
++#define E1000_RAR_ENTRIES_82576 24
++
++struct e1000_adv_data_desc {
++ __le64 buffer_addr; /* Address of the descriptor's data buffer */
++ union {
++ u32 data;
++ struct {
++ u32 datalen :16; /* Data buffer length */
++ u32 rsvd :4;
++ u32 dtyp :4; /* Descriptor type */
++ u32 dcmd :8; /* Descriptor command */
++ } config;
++ } lower;
++ union {
++ u32 data;
++ struct {
++ u32 status :4; /* Descriptor status */
++ u32 idx :4;
++ u32 popts :6; /* Packet Options */
++ u32 paylen :18; /* Payload length */
++ } options;
++ } upper;
++};
++
++#define E1000_TXD_DTYP_ADV_C 0x2 /* Advanced Context Descriptor */
++#define E1000_TXD_DTYP_ADV_D 0x3 /* Advanced Data Descriptor */
++#define E1000_ADV_TXD_CMD_DEXT 0x20 /* Descriptor extension (0 = legacy) */
++#define E1000_ADV_TUCMD_IPV4 0x2 /* IP Packet Type: 1=IPv4 */
++#define E1000_ADV_TUCMD_IPV6 0x0 /* IP Packet Type: 0=IPv6 */
++#define E1000_ADV_TUCMD_L4T_UDP 0x0 /* L4 Packet TYPE of UDP */
++#define E1000_ADV_TUCMD_L4T_TCP 0x4 /* L4 Packet TYPE of TCP */
++#define E1000_ADV_TUCMD_MKRREQ 0x10 /* Indicates markers are required */
++#define E1000_ADV_DCMD_EOP 0x1 /* End of Packet */
++#define E1000_ADV_DCMD_IFCS 0x2 /* Insert FCS (Ethernet CRC) */
++#define E1000_ADV_DCMD_RS 0x8 /* Report Status */
++#define E1000_ADV_DCMD_VLE 0x40 /* Add VLAN tag */
++#define E1000_ADV_DCMD_TSE 0x80 /* TCP Seg enable */
++/* Extended Device Control */
++#define E1000_CTRL_EXT_NSICR 0x00000001 /* Disable Intr Clear all on read */
++
++struct e1000_adv_context_desc {
++ union {
++ u32 ip_config;
++ struct {
++ u32 iplen :9;
++ u32 maclen :7;
++ u32 vlan_tag :16;
++ } fields;
++ } ip_setup;
++ u32 seq_num;
++ union {
++ u64 l4_config;
++ struct {
++ u32 mkrloc :9;
++ u32 tucmd :11;
++ u32 dtyp :4;
++ u32 adv :8;
++ u32 rsvd :4;
++ u32 idx :4;
++ u32 l4len :8;
++ u32 mss :16;
++ } fields;
++ } l4_setup;
++};
++
++/* SRRCTL bit definitions */
++#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */
++#define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00
++#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */
++#define E1000_SRRCTL_DESCTYPE_LEGACY 0x00000000
++#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
++#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000
++#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
++#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION 0x06000000
++#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
++#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000
++#define E1000_SRRCTL_DROP_EN 0x80000000
++
++#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F
++#define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00
++
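Illustrative aside (not part of the patch): SRRCTL is programmed per receive queue, with the packet buffer size expressed in 1 KB units via the BSIZEPKT shift. A hedged sketch follows, in which the 2048-byte buffer and the E1000_SRRCTL(queue) register macro are assumptions rather than definitions from this hunk:

/*
 * Illustrative sketch only -- not part of the driver. The 2048-byte
 * buffer size and the E1000_SRRCTL(queue) register macro are assumptions
 * for illustration; only the bit definitions above come from this hunk.
 */
static void example_setup_srrctl(struct e1000_hw *hw, int queue)
{
        u32 srrctl = 0;

        /* Packet buffer size in 1 KB units: 2048 >> 10 = 2 */
        srrctl |= (2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
                  E1000_SRRCTL_BSIZEPKT_MASK;

        /* One buffer per packet, advanced receive descriptor format */
        srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;

        E1000_WRITE_REG(hw, E1000_SRRCTL(queue), srrctl);
}
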
++#define E1000_TX_HEAD_WB_ENABLE 0x1
++#define E1000_TX_SEQNUM_WB_ENABLE 0x2
++
++#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002
++#define E1000_MRQC_ENABLE_VMDQ 0x00000003
++#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q 0x00000005
++#define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
++#define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
++#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000
++
++#define E1000_VMRCTL_MIRROR_PORT_SHIFT 8
++#define E1000_VMRCTL_MIRROR_DSTPORT_MASK (7 << E1000_VMRCTL_MIRROR_PORT_SHIFT)
++#define E1000_VMRCTL_POOL_MIRROR_ENABLE (1 << 0)
++#define E1000_VMRCTL_UPLINK_MIRROR_ENABLE (1 << 1)
++#define E1000_VMRCTL_DOWNLINK_MIRROR_ENABLE (1 << 2)
++
++#define E1000_EICR_TX_QUEUE ( \
++ E1000_EICR_TX_QUEUE0 | \
++ E1000_EICR_TX_QUEUE1 | \
++ E1000_EICR_TX_QUEUE2 | \
++ E1000_EICR_TX_QUEUE3)
++
++#define E1000_EICR_RX_QUEUE ( \
++ E1000_EICR_RX_QUEUE0 | \
++ E1000_EICR_RX_QUEUE1 | \
++ E1000_EICR_RX_QUEUE2 | \
++ E1000_EICR_RX_QUEUE3)
++
++#define E1000_EIMS_RX_QUEUE E1000_EICR_RX_QUEUE
++#define E1000_EIMS_TX_QUEUE E1000_EICR_TX_QUEUE
++
++#define EIMS_ENABLE_MASK ( \
++ E1000_EIMS_RX_QUEUE | \
++ E1000_EIMS_TX_QUEUE | \
++ E1000_EIMS_TCP_TIMER | \
++ E1000_EIMS_OTHER)
++
++/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
++#define E1000_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */
++#define E1000_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */
++#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */
++#define E1000_IMIREXT_CTRL_URG 0x00002000 /* Check URG bit in header */
++#define E1000_IMIREXT_CTRL_ACK 0x00004000 /* Check ACK bit in header */
++#define E1000_IMIREXT_CTRL_PSH 0x00008000 /* Check PSH bit in header */
++#define E1000_IMIREXT_CTRL_RST 0x00010000 /* Check RST bit in header */
++#define E1000_IMIREXT_CTRL_SYN 0x00020000 /* Check SYN bit in header */
++#define E1000_IMIREXT_CTRL_FIN 0x00040000 /* Check FIN bit in header */
++#define E1000_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */
++
++/* Receive Descriptor - Advanced */
++union e1000_adv_rx_desc {
++ struct {
++ __le64 pkt_addr; /* Packet buffer address */
++ __le64 hdr_addr; /* Header buffer address */
++ } read;
++ struct {
++ struct {
++ union {
++ __le32 data;
++ struct {
++ __le16 pkt_info; /*RSS type, Pkt type*/
++ __le16 hdr_info; /* Split Header,
++ * header buffer len*/
++ } hs_rss;
++ } lo_dword;
++ union {
++ __le32 rss; /* RSS Hash */
++ struct {
++ __le16 ip_id; /* IP id */
++ __le16 csum; /* Packet Checksum */
++ } csum_ip;
++ } hi_dword;
++ } lower;
++ struct {
++ __le32 status_error; /* ext status/error */
++ __le16 length; /* Packet length */
++ __le16 vlan; /* VLAN tag */
++ } upper;
++ } wb; /* writeback */
++};
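++
++/*
++ * Usage sketch (illustrative, not part of this driver): reading back a
++ * completed advanced Rx descriptor. E1000_RXD_STAT_DD is the descriptor
++ * done bit from e1000_defines.h; le32_to_cpu()/le16_to_cpu() are the
++ * standard kernel byte-order helpers.
++ *
++ *	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
++ *	if (staterr & E1000_RXD_STAT_DD) {
++ *		pkt_len = le16_to_cpu(rx_desc->wb.upper.length);
++ *		rss = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
++ *	}
++ */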
++
++#define E1000_RXDADV_RSSTYPE_MASK 0x0000000F
++#define E1000_RXDADV_RSSTYPE_SHIFT 12
++#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0
++#define E1000_RXDADV_HDRBUFLEN_SHIFT 5
++#define E1000_RXDADV_SPLITHEADER_EN 0x00001000
++#define E1000_RXDADV_SPH 0x8000
++#define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */
++#define E1000_RXDADV_ERR_HBO 0x00800000
++
++/* RSS Hash results */
++#define E1000_RXDADV_RSSTYPE_NONE 0x00000000
++#define E1000_RXDADV_RSSTYPE_IPV4_TCP 0x00000001
++#define E1000_RXDADV_RSSTYPE_IPV4 0x00000002
++#define E1000_RXDADV_RSSTYPE_IPV6_TCP 0x00000003
++#define E1000_RXDADV_RSSTYPE_IPV6_EX 0x00000004
++#define E1000_RXDADV_RSSTYPE_IPV6 0x00000005
++#define E1000_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006
++#define E1000_RXDADV_RSSTYPE_IPV4_UDP 0x00000007
++#define E1000_RXDADV_RSSTYPE_IPV6_UDP 0x00000008
++#define E1000_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009
++
++/* RSS Packet Types as indicated in the receive descriptor */
++#define E1000_RXDADV_PKTTYPE_NONE 0x00000000
++#define E1000_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPV4 hdr present */
++#define E1000_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPV4 hdr + extensions */
++#define E1000_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPV6 hdr present */
++#define E1000_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPV6 hdr + extensions */
++#define E1000_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */
++#define E1000_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */
++#define E1000_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */
++#define E1000_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */
++
++#define E1000_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */
++#define E1000_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */
++#define E1000_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */
++#define E1000_RXDADV_PKTTYPE_ETQF 0x00008000 /* PKTTYPE is ETQF index */
++#define E1000_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */
++#define E1000_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */
++
++/* LinkSec results */
++/* Security Processing bit Indication */
++#define E1000_RXDADV_LNKSEC_STATUS_SECP 0x00020000
++#define E1000_RXDADV_LNKSEC_ERROR_BIT_MASK 0x18000000
++#define E1000_RXDADV_LNKSEC_ERROR_NO_SA_MATCH 0x08000000
++#define E1000_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000
++#define E1000_RXDADV_LNKSEC_ERROR_BAD_SIG 0x18000000
++
++#define E1000_RXDADV_IPSEC_STATUS_SECP 0x00020000
++#define E1000_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000
++#define E1000_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000
++#define E1000_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000
++#define E1000_RXDADV_IPSEC_ERROR_AUTHENTICATION_FAILED 0x18000000
++
++/* Transmit Descriptor - Advanced */
++union e1000_adv_tx_desc {
++ struct {
++ __le64 buffer_addr; /* Address of descriptor's data buf */
++ __le32 cmd_type_len;
++ __le32 olinfo_status;
++ } read;
++ struct {
++ __le64 rsvd; /* Reserved */
++ __le32 nxtseq_seed;
++ __le32 status;
++ } wb;
++};
++
++/* Adv Transmit Descriptor Config Masks */
++#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */
++#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
++#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */
++#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
++#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */
++#define E1000_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */
++#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */
++#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */
++#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
++#define E1000_ADVTXD_MAC_LINKSEC 0x00040000 /* Apply LinkSec on packet */
++#define E1000_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp packet */
++#define E1000_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED present in WB */
++#define E1000_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */
++#define E1000_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */
++#define E1000_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */
++#define E1000_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */
++#define E1000_ADVTXD_POPTS_ISCO_FULL 0x00001800 /* 1st&Last TSO-full iSCSI PDU*/
++#define E1000_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */
++#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
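++
++/*
++ * Usage sketch (illustrative, not part of this driver): filling a single
++ * advanced data descriptor for one mapped buffer of dma_len bytes. The DTYP,
++ * DEXT, IFCS and EOP bits come from the masks above; olinfo carries the
++ * payload length shifted into place.
++ *
++ *	cmd = E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_DEXT |
++ *	      E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_EOP | dma_len;
++ *	tx_desc->read.buffer_addr = cpu_to_le64(dma_addr);
++ *	tx_desc->read.cmd_type_len = cpu_to_le32(cmd);
++ *	tx_desc->read.olinfo_status =
++ *			cpu_to_le32(dma_len << E1000_ADVTXD_PAYLEN_SHIFT);
++ */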
++
++/* Context descriptors */
++struct e1000_adv_tx_context_desc {
++ __le32 vlan_macip_lens;
++ __le32 seqnum_seed;
++ __le32 type_tucmd_mlhl;
++ __le32 mss_l4len_idx;
++};
++
++#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
++#define E1000_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */
++#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
++#define E1000_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */
++#define E1000_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */
++#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
++#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */
++#define E1000_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */
++/* IPSec Encrypt Enable for ESP */
++#define E1000_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000
++#define E1000_ADVTXD_TUCMD_MKRREQ 0x00002000 /* Req requires Markers and CRC */
++#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
++#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
++/* Adv ctxt IPSec SA IDX mask */
++#define E1000_ADVTXD_IPSEC_SA_INDEX_MASK 0x000000FF
++/* Adv ctxt IPSec ESP len mask */
++#define E1000_ADVTXD_IPSEC_ESP_LEN_MASK 0x000000FF
++
++/* Additional Transmit Descriptor Control definitions */
++#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue */
++#define E1000_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. write-back flushing */
++/* Tx Queue Arbitration Priority 0=low, 1=high */
++#define E1000_TXDCTL_PRIORITY 0x08000000
++
++/* Additional Receive Descriptor Control definitions */
++#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */
++#define E1000_RXDCTL_SWFLSH 0x04000000 /* Rx Desc. write-back flushing */
++
++/* Direct Cache Access (DCA) definitions */
++#define E1000_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */
++#define E1000_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */
++
++#define E1000_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */
++#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */
++
++#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
++#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
++#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
++#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
++
++#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
++#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
++#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
++
++#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */
++#define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */
++#define E1000_DCA_TXCTRL_CPUID_SHIFT_82576 24 /* Tx CPUID */
++#define E1000_DCA_RXCTRL_CPUID_SHIFT_82576 24 /* Rx CPUID */
++
++/* Additional interrupt register bit definitions */
++#define E1000_ICR_LSECPNS 0x00000020 /* PN threshold - server */
++#define E1000_IMS_LSECPNS E1000_ICR_LSECPNS /* PN threshold - server */
++#define E1000_ICS_LSECPNS E1000_ICR_LSECPNS /* PN threshold - server */
++
++/* ETQF register bit definitions */
++#define E1000_ETQF_FILTER_ENABLE (1 << 26)
++#define E1000_ETQF_IMM_INT (1 << 29)
++#define E1000_ETQF_1588 (1 << 30)
++#define E1000_ETQF_QUEUE_ENABLE (1 << 31)
++/*
++ * ETQF filter list: one static filter per filter consumer. This is
++ * to avoid filter collisions later. Add new filters here.
++ *
++ * Current filters:
++ * EAPOL 802.1x (0x888e): Filter 0
++ */
++#define E1000_ETQF_FILTER_EAPOL 0
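++
++/*
++ * Usage sketch (illustrative, not part of this driver): enabling the static
++ * EAPOL filter above so 802.1x frames (EtherType 0x888e) hit the filter.
++ * Assumes the E1000_ETQF(n) register macro from the register definitions.
++ *
++ *	etqf = 0x888e | E1000_ETQF_FILTER_ENABLE;
++ *	E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_EAPOL), etqf);
++ */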
++
++#define E1000_FTQF_VF_BP 0x00008000
++#define E1000_FTQF_1588_TIME_STAMP 0x08000000
++#define E1000_FTQF_MASK 0xF0000000
++#define E1000_FTQF_MASK_PROTO_BP 0x10000000
++#define E1000_FTQF_MASK_SOURCE_ADDR_BP 0x20000000
++#define E1000_FTQF_MASK_DEST_ADDR_BP 0x40000000
++#define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000
++
++#define E1000_NVM_APME_82575 0x0400
++#define MAX_NUM_VFS 8
++
++#define E1000_DTXSWC_MAC_SPOOF_MASK 0x000000FF /* Per VF MAC spoof control */
++#define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof control */
++#define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */
++#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8
++#define E1000_DTXSWC_LLE_SHIFT 16
++#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */
++
++/* Easy defines for setting default pool, would normally be left a zero */
++#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7
++#define E1000_VT_CTL_DEFAULT_POOL_MASK (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT)
++
++/* Other useful VMD_CTL register defines */
++#define E1000_VT_CTL_IGNORE_MAC (1 << 28)
++#define E1000_VT_CTL_DISABLE_DEF_POOL (1 << 29)
++#define E1000_VT_CTL_VM_REPL_EN (1 << 30)
++
++/* Per VM Offload register setup */
++#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */
++#define E1000_VMOLR_LPE 0x00010000 /* Accept Long packet */
++#define E1000_VMOLR_RSSE 0x00020000 /* Enable RSS */
++#define E1000_VMOLR_AUPE 0x01000000 /* Accept untagged packets */
++#define E1000_VMOLR_ROMPE 0x02000000 /* Accept overflow multicast */
++#define E1000_VMOLR_ROPE 0x04000000 /* Accept overflow unicast */
++#define E1000_VMOLR_BAM 0x08000000 /* Accept Broadcast packets */
++#define E1000_VMOLR_MPME 0x10000000 /* Multicast promiscuous mode */
++#define E1000_VMOLR_STRVLAN 0x40000000 /* VLAN stripping enable */
++#define E1000_VMOLR_STRCRC 0x80000000 /* CRC stripping enable */
++
++#define E1000_VLVF_ARRAY_SIZE 32
++#define E1000_VLVF_VLANID_MASK 0x00000FFF
++#define E1000_VLVF_POOLSEL_SHIFT 12
++#define E1000_VLVF_POOLSEL_MASK (0xFF << E1000_VLVF_POOLSEL_SHIFT)
++#define E1000_VLVF_LVLAN 0x00100000
++#define E1000_VLVF_VLANID_ENABLE 0x80000000
++
++#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
++
++#define E1000_IOVCTL 0x05BBC
++#define E1000_IOVCTL_REUSE_VFQ 0x00000001
++
++#define E1000_RPLOLR_STRVLAN 0x40000000
++#define E1000_RPLOLR_STRCRC 0x80000000
++
++#define E1000_DTXCTL_8023LL 0x0004
++#define E1000_DTXCTL_VLAN_ADDED 0x0008
++#define E1000_DTXCTL_OOS_ENABLE 0x0010
++#define E1000_DTXCTL_MDP_EN 0x0020
++#define E1000_DTXCTL_SPOOF_INT 0x0040
++
++#define ALL_QUEUES 0xFFFF
++
++/* RX packet buffer size defines */
++#define E1000_RXPBS_SIZE_MASK_82576 0x0000007F
++void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable);
++void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable);
++#endif /* _E1000_82575_H_ */
+Index: linux-2.6.22/drivers/net/igb/e1000_api.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.22/drivers/net/igb/e1000_api.c 2009-12-18 12:39:22.000000000 -0500
+@@ -0,0 +1,1096 @@
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2009 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ You should have received a copy of the GNU General Public License along with
++ this program; if not, write to the Free Software Foundation, Inc.,
++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
++
++#include "e1000_api.h"
++
++/**
++ * e1000_init_mac_params - Initialize MAC function pointers
++ * @hw: pointer to the HW structure
++ *
++ * This function initializes the function pointers for the MAC
++ * set of functions. Called by drivers or by e1000_setup_init_funcs.
++ **/
++s32 e1000_init_mac_params(struct e1000_hw *hw)
++{
++ s32 ret_val = E1000_SUCCESS;
++
++ if (hw->mac.ops.init_params) {
++ ret_val = hw->mac.ops.init_params(hw);
++ if (ret_val) {
++ DEBUGOUT("MAC Initialization Error\n");
++ goto out;
++ }
++ } else {
++ DEBUGOUT("mac.init_mac_params was NULL\n");
++ ret_val = -E1000_ERR_CONFIG;
++ }
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_init_nvm_params - Initialize NVM function pointers
++ * @hw: pointer to the HW structure
++ *
++ * This function initializes the function pointers for the NVM
++ * set of functions. Called by drivers or by e1000_setup_init_funcs.
++ **/
++s32 e1000_init_nvm_params(struct e1000_hw *hw)
++{
++ s32 ret_val = E1000_SUCCESS;
++
++ if (hw->nvm.ops.init_params) {
++ ret_val = hw->nvm.ops.init_params(hw);
++ if (ret_val) {
++ DEBUGOUT("NVM Initialization Error\n");
++ goto out;
++ }
++ } else {
++ DEBUGOUT("nvm.init_nvm_params was NULL\n");
++ ret_val = -E1000_ERR_CONFIG;
++ }
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_init_phy_params - Initialize PHY function pointers
++ * @hw: pointer to the HW structure
++ *
++ * This function initializes the function pointers for the PHY
++ * set of functions. Called by drivers or by e1000_setup_init_funcs.
++ **/
++s32 e1000_init_phy_params(struct e1000_hw *hw)
++{
++ s32 ret_val = E1000_SUCCESS;
++
++ if (hw->phy.ops.init_params) {
++ ret_val = hw->phy.ops.init_params(hw);
++ if (ret_val) {
++ DEBUGOUT("PHY Initialization Error\n");
++ goto out;
++ }
++ } else {
++ DEBUGOUT("phy.init_phy_params was NULL\n");
++ ret_val = -E1000_ERR_CONFIG;
++ }
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_init_mbx_params - Initialize mailbox function pointers
++ * @hw: pointer to the HW structure
++ *
++ * This function initializes the function pointers for the mailbox
++ * set of functions. Called by drivers or by e1000_setup_init_funcs.
++ **/
++s32 e1000_init_mbx_params(struct e1000_hw *hw)
++{
++ s32 ret_val = E1000_SUCCESS;
++
++ if (hw->mbx.ops.init_params) {
++ ret_val = hw->mbx.ops.init_params(hw);
++ if (ret_val) {
++ DEBUGOUT("Mailbox Initialization Error\n");
++ goto out;
++ }
++ } else {
++ DEBUGOUT("mbx.init_mbx_params was NULL\n");
++ ret_val = -E1000_ERR_CONFIG;
++ }
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_set_mac_type - Sets MAC type
++ * @hw: pointer to the HW structure
++ *
++ * This function sets the mac type of the adapter based on the
++ * device ID stored in the hw structure.
++ * MUST BE FIRST FUNCTION CALLED (explicitly or through
++ * e1000_setup_init_funcs()).
++ **/
++s32 e1000_set_mac_type(struct e1000_hw *hw)
++{
++ struct e1000_mac_info *mac = &hw->mac;
++ s32 ret_val = E1000_SUCCESS;
++
++ DEBUGFUNC("e1000_set_mac_type");
++
++ switch (hw->device_id) {
++ case E1000_DEV_ID_82575EB_COPPER:
++ case E1000_DEV_ID_82575EB_FIBER_SERDES:
++ case E1000_DEV_ID_82575GB_QUAD_COPPER:
++ mac->type = e1000_82575;
++ break;
++ case E1000_DEV_ID_82576:
++ case E1000_DEV_ID_82576_FIBER:
++ case E1000_DEV_ID_82576_SERDES:
++ case E1000_DEV_ID_82576_QUAD_COPPER:
++ case E1000_DEV_ID_82576_NS:
++ case E1000_DEV_ID_82576_NS_SERDES:
++ case E1000_DEV_ID_82576_SERDES_QUAD:
++ mac->type = e1000_82576;
++ break;
++ default:
++ /* Should never have loaded on this device */
++ ret_val = -E1000_ERR_MAC_INIT;
++ break;
++ }
++
++ return ret_val;
++}
++
++/**
++ * e1000_setup_init_funcs - Initializes function pointers
++ * @hw: pointer to the HW structure
++ * @init_device: true will initialize the rest of the function pointers
++ * getting the device ready for use. false will only set
++ * MAC type and the function pointers for the other init
++ * functions. Passing false will not generate any hardware
++ * reads or writes.
++ *
++ * This function must be called by a driver in order to use the rest
++ * of the 'shared' code files. Called by drivers only.
++ **/
++s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device)
++{
++ s32 ret_val;
++
++ /* Can't do much good without knowing the MAC type. */
++ ret_val = e1000_set_mac_type(hw);
++ if (ret_val) {
++ DEBUGOUT("ERROR: MAC type could not be set properly.\n");
++ goto out;
++ }
++
++ if (!hw->hw_addr) {
++ DEBUGOUT("ERROR: Registers not mapped\n");
++ ret_val = -E1000_ERR_CONFIG;
++ goto out;
++ }
++
++ /*
++ * Init function pointers to generic implementations. We do this first
++ * so a driver module can override them afterward.
++ */
++ e1000_init_mac_ops_generic(hw);
++ e1000_init_nvm_ops_generic(hw);
++ e1000_init_mbx_ops_generic(hw);
++
++ /*
++ * Set up the init function pointers. These are functions within the
++ * adapter family file that set up function pointers for the rest of
++ * the functions in that family.
++ */
++ switch (hw->mac.type) {
++ case e1000_82575:
++ case e1000_82576:
++ e1000_init_function_pointers_82575(hw);
++ break;
++ default:
++ DEBUGOUT("Hardware not supported\n");
++ ret_val = -E1000_ERR_CONFIG;
++ break;
++ }
++
++ /*
++ * Initialize the rest of the function pointers. These require some
++ * register reads/writes in some cases.
++ */
++ if (!(ret_val) && init_device) {
++ ret_val = e1000_init_mac_params(hw);
++ if (ret_val)
++ goto out;
++
++ ret_val = e1000_init_nvm_params(hw);
++ if (ret_val)
++ goto out;
++
++ ret_val = e1000_init_phy_params(hw);
++ if (ret_val)
++ goto out;
++
++ ret_val = e1000_init_mbx_params(hw);
++ if (ret_val)
++ goto out;
++ }
++
++out:
++ return ret_val;
++}
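++
++/*
++ * Typical call sequence from a driver probe routine (illustrative only;
++ * error handling and the surrounding PCI setup are omitted):
++ *
++ *	hw->hw_addr = ioremap(mmio_start, mmio_len);
++ *	hw->device_id = pdev->device;
++ *	ret_val = e1000_setup_init_funcs(hw, true);
++ *	if (ret_val)
++ *		goto err_sw_init;
++ */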
++
++/**
++ * e1000_get_bus_info - Obtain bus information for adapter
++ * @hw: pointer to the HW structure
++ *
++ * This will obtain information about the HW bus for which the
++ * adapter is attached and stores it in the hw structure. This is a
++ * function pointer entry point called by drivers.
++ **/
++s32 e1000_get_bus_info(struct e1000_hw *hw)
++{
++ if (hw->mac.ops.get_bus_info)
++ return hw->mac.ops.get_bus_info(hw);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_clear_vfta - Clear VLAN filter table
++ * @hw: pointer to the HW structure
++ *
++ * This clears the VLAN filter table on the adapter. This is a function
++ * pointer entry point called by drivers.
++ **/
++void e1000_clear_vfta(struct e1000_hw *hw)
++{
++ if (hw->mac.ops.clear_vfta)
++ hw->mac.ops.clear_vfta(hw);
++}
++
++/**
++ * e1000_write_vfta - Write value to VLAN filter table
++ * @hw: pointer to the HW structure
++ * @offset: the 32-bit offset at which to write the value.
++ * @value: the 32-bit value to write at location offset.
++ *
++ * This writes a 32-bit value to a 32-bit offset in the VLAN filter
++ * table. This is a function pointer entry point called by drivers.
++ **/
++void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
++{
++ if (hw->mac.ops.write_vfta)
++ hw->mac.ops.write_vfta(hw, offset, value);
++}
++
++/**
++ * e1000_update_mc_addr_list - Update Multicast addresses
++ * @hw: pointer to the HW structure
++ * @mc_addr_list: array of multicast addresses to program
++ * @mc_addr_count: number of multicast addresses to program
++ *
++ * Updates the Multicast Table Array.
++ * The caller must have a packed mc_addr_list of multicast addresses.
++ **/
++void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
++ u32 mc_addr_count)
++{
++ if (hw->mac.ops.update_mc_addr_list)
++ hw->mac.ops.update_mc_addr_list(hw, mc_addr_list,
++ mc_addr_count);
++}
++
++/**
++ * e1000_force_mac_fc - Force MAC flow control
++ * @hw: pointer to the HW structure
++ *
++ * Force the MAC's flow control settings. Currently no func pointer exists
++ * and all implementations are handled in the generic version of this
++ * function.
++ **/
++s32 e1000_force_mac_fc(struct e1000_hw *hw)
++{
++ return e1000_force_mac_fc_generic(hw);
++}
++
++/**
++ * e1000_check_for_link - Check/Store link connection
++ * @hw: pointer to the HW structure
++ *
++ * This checks the link condition of the adapter and stores the
++ * results in the hw->mac structure. This is a function pointer entry
++ * point called by drivers.
++ **/
++s32 e1000_check_for_link(struct e1000_hw *hw)
++{
++ if (hw->mac.ops.check_for_link)
++ return hw->mac.ops.check_for_link(hw);
++
++ return -E1000_ERR_CONFIG;
++}
++
++/**
++ * e1000_check_mng_mode - Check management mode
++ * @hw: pointer to the HW structure
++ *
++ * This checks if the adapter has manageability enabled.
++ * This is a function pointer entry point called by drivers.
++ **/
++bool e1000_check_mng_mode(struct e1000_hw *hw)
++{
++ if (hw->mac.ops.check_mng_mode)
++ return hw->mac.ops.check_mng_mode(hw);
++
++ return false;
++}
++
++/**
++ * e1000_mng_write_dhcp_info - Writes DHCP info to host interface
++ * @hw: pointer to the HW structure
++ * @buffer: pointer to the host interface
++ * @length: size of the buffer
++ *
++ * Writes the DHCP information to the host interface.
++ **/
++s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length)
++{
++ return e1000_mng_write_dhcp_info_generic(hw, buffer, length);
++}
++
++/**
++ * e1000_reset_hw - Reset hardware
++ * @hw: pointer to the HW structure
++ *
++ * This resets the hardware into a known state. This is a function pointer
++ * entry point called by drivers.
++ **/
++s32 e1000_reset_hw(struct e1000_hw *hw)
++{
++ if (hw->mac.ops.reset_hw)
++ return hw->mac.ops.reset_hw(hw);
++
++ return -E1000_ERR_CONFIG;
++}
++
++/**
++ * e1000_init_hw - Initialize hardware
++ * @hw: pointer to the HW structure
++ *
++ * This inits the hardware readying it for operation. This is a function
++ * pointer entry point called by drivers.
++ **/
++s32 e1000_init_hw(struct e1000_hw *hw)
++{
++ if (hw->mac.ops.init_hw)
++ return hw->mac.ops.init_hw(hw);
++
++ return -E1000_ERR_CONFIG;
++}
++
++/**
++ * e1000_setup_link - Configures link and flow control
++ * @hw: pointer to the HW structure
++ *
++ * This configures link and flow control settings for the adapter. This
++ * is a function pointer entry point called by drivers. While modules can
++ * also call this, they probably call their own version of this function.
++ **/
++s32 e1000_setup_link(struct e1000_hw *hw)
++{
++ if (hw->mac.ops.setup_link)
++ return hw->mac.ops.setup_link(hw);
++
++ return -E1000_ERR_CONFIG;
++}
++
++/**
++ * e1000_get_speed_and_duplex - Returns current speed and duplex
++ * @hw: pointer to the HW structure
++ * @speed: pointer to a 16-bit value to store the speed
++ * @duplex: pointer to a 16-bit value to store the duplex.
++ *
++ * This returns the speed and duplex of the adapter in the two 'out'
++ * variables passed in. This is a function pointer entry point called
++ * by drivers.
++ **/
++s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
++{
++ if (hw->mac.ops.get_link_up_info)
++ return hw->mac.ops.get_link_up_info(hw, speed, duplex);
++
++ return -E1000_ERR_CONFIG;
++}
++
++/**
++ * e1000_setup_led - Configures SW controllable LED
++ * @hw: pointer to the HW structure
++ *
++ * This prepares the SW controllable LED for use and saves the current state
++ * of the LED so it can be later restored. This is a function pointer entry
++ * point called by drivers.
++ **/
++s32 e1000_setup_led(struct e1000_hw *hw)
++{
++ if (hw->mac.ops.setup_led)
++ return hw->mac.ops.setup_led(hw);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_cleanup_led - Restores SW controllable LED
++ * @hw: pointer to the HW structure
++ *
++ * This restores the SW controllable LED to the value saved off by
++ * e1000_setup_led. This is a function pointer entry point called by drivers.
++ **/
++s32 e1000_cleanup_led(struct e1000_hw *hw)
++{
++ if (hw->mac.ops.cleanup_led)
++ return hw->mac.ops.cleanup_led(hw);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_blink_led - Blink SW controllable LED
++ * @hw: pointer to the HW structure
++ *
++ * This starts the adapter LED blinking. The LED should be set up first and
++ * cleaned up after use. This is a function pointer entry point called by
++ * drivers.
++ **/
++s32 e1000_blink_led(struct e1000_hw *hw)
++{
++ if (hw->mac.ops.blink_led)
++ return hw->mac.ops.blink_led(hw);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_id_led_init - store LED configurations in SW
++ * @hw: pointer to the HW structure
++ *
++ * Initializes the LED config in SW. This is a function pointer entry point
++ * called by drivers.
++ **/
++s32 e1000_id_led_init(struct e1000_hw *hw)
++{
++ if (hw->mac.ops.id_led_init)
++ return hw->mac.ops.id_led_init(hw);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_led_on - Turn on SW controllable LED
++ * @hw: pointer to the HW structure
++ *
++ * Turns the SW defined LED on. This is a function pointer entry point
++ * called by drivers.
++ **/
++s32 e1000_led_on(struct e1000_hw *hw)
++{
++ if (hw->mac.ops.led_on)
++ return hw->mac.ops.led_on(hw);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_led_off - Turn off SW controllable LED
++ * @hw: pointer to the HW structure
++ *
++ * Turns the SW defined LED off. This is a function pointer entry point
++ * called by drivers.
++ **/
++s32 e1000_led_off(struct e1000_hw *hw)
++{
++ if (hw->mac.ops.led_off)
++ return hw->mac.ops.led_off(hw);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_reset_adaptive - Reset adaptive IFS
++ * @hw: pointer to the HW structure
++ *
++ * Resets the adaptive IFS. Currently no func pointer exists and all
++ * implementations are handled in the generic version of this function.
++ **/
++void e1000_reset_adaptive(struct e1000_hw *hw)
++{
++ e1000_reset_adaptive_generic(hw);
++}
++
++/**
++ * e1000_update_adaptive - Update adaptive IFS
++ * @hw: pointer to the HW structure
++ *
++ * Updates adapter IFS. Currently no func pointer exists and all
++ * implementations are handled in the generic version of this function.
++ **/
++void e1000_update_adaptive(struct e1000_hw *hw)
++{
++ e1000_update_adaptive_generic(hw);
++}
++
++/**
++ * e1000_disable_pcie_master - Disable PCI-Express master access
++ * @hw: pointer to the HW structure
++ *
++ * Disables PCI-Express master access and verifies there are no pending
++ * requests. Currently no func pointer exists and all implementations are
++ * handled in the generic version of this function.
++ **/
++s32 e1000_disable_pcie_master(struct e1000_hw *hw)
++{
++ return e1000_disable_pcie_master_generic(hw);
++}
++
++/**
++ * e1000_config_collision_dist - Configure collision distance
++ * @hw: pointer to the HW structure
++ *
++ * Configures the collision distance to the default value and is used
++ * during link setup.
++ **/
++void e1000_config_collision_dist(struct e1000_hw *hw)
++{
++ if (hw->mac.ops.config_collision_dist)
++ hw->mac.ops.config_collision_dist(hw);
++}
++
++/**
++ * e1000_rar_set - Sets a receive address register
++ * @hw: pointer to the HW structure
++ * @addr: address to set the RAR to
++ * @index: the RAR to set
++ *
++ * Sets a Receive Address Register (RAR) to the specified address.
++ **/
++void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
++{
++ if (hw->mac.ops.rar_set)
++ hw->mac.ops.rar_set(hw, addr, index);
++}
++
++/**
++ * e1000_validate_mdi_setting - Ensures valid MDI/MDIX SW state
++ * @hw: pointer to the HW structure
++ *
++ * Ensures that the MDI/MDIX SW state is valid.
++ **/
++s32 e1000_validate_mdi_setting(struct e1000_hw *hw)
++{
++ if (hw->mac.ops.validate_mdi_setting)
++ return hw->mac.ops.validate_mdi_setting(hw);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_mta_set - Sets multicast table bit
++ * @hw: pointer to the HW structure
++ * @hash_value: Multicast hash value.
++ *
++ * This sets the bit in the multicast table corresponding to the
++ * hash value. This is a function pointer entry point called by drivers.
++ **/
++void e1000_mta_set(struct e1000_hw *hw, u32 hash_value)
++{
++ if (hw->mac.ops.mta_set)
++ hw->mac.ops.mta_set(hw, hash_value);
++}
++
++/**
++ * e1000_hash_mc_addr - Determines address location in multicast table
++ * @hw: pointer to the HW structure
++ * @mc_addr: Multicast address to hash.
++ *
++ * This hashes an address to determine its location in the multicast
++ * table. Currently no func pointer exists and all implementations
++ * are handled in the generic version of this function.
++ **/
++u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
++{
++ return e1000_hash_mc_addr_generic(hw, mc_addr);
++}
++
++/**
++ * e1000_enable_tx_pkt_filtering - Enable packet filtering on TX
++ * @hw: pointer to the HW structure
++ *
++ * Enables packet filtering on transmit packets if manageability is enabled
++ * and host interface is enabled.
++ * Currently no func pointer exists and all implementations are handled in the
++ * generic version of this function.
++ **/
++bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw)
++{
++ return e1000_enable_tx_pkt_filtering_generic(hw);
++}
++
++/**
++ * e1000_mng_host_if_write - Writes to the manageability host interface
++ * @hw: pointer to the HW structure
++ * @buffer: pointer to the host interface buffer
++ * @length: size of the buffer
++ * @offset: location in the buffer to write to
++ * @sum: sum of the data (not checksum)
++ *
++ * This function writes the buffer content at the given offset on the host
++ * interface. It handles alignment so the writes are done as efficiently as
++ * possible, and accumulates the running sum of the data in the *sum parameter.
++ **/
++s32 e1000_mng_host_if_write(struct e1000_hw * hw, u8 *buffer, u16 length,
++ u16 offset, u8 *sum)
++{
++ if (hw->mac.ops.mng_host_if_write)
++ return hw->mac.ops.mng_host_if_write(hw, buffer, length,
++ offset, sum);
++
++ return E1000_NOT_IMPLEMENTED;
++}
++
++/**
++ * e1000_mng_write_cmd_header - Writes manageability command header
++ * @hw: pointer to the HW structure
++ * @hdr: pointer to the host interface command header
++ *
++ * Writes the command header after performing the checksum calculation.
++ **/
++s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
++ struct e1000_host_mng_command_header *hdr)
++{
++ if (hw->mac.ops.mng_write_cmd_header)
++ return hw->mac.ops.mng_write_cmd_header(hw, hdr);
++
++ return E1000_NOT_IMPLEMENTED;
++}
++
++/**
++ * e1000_mng_enable_host_if - Checks host interface is enabled
++ * @hw: pointer to the HW structure
++ *
++ * Returns E1000_SUCCESS upon success, else E1000_ERR_HOST_INTERFACE_COMMAND
++ *
++ * This function checks whether the HOST IF is enabled for command operation
++ * and also checks whether the previous command is completed. It busy-waits
++ * if the previous command is not yet completed.
++ **/
++s32 e1000_mng_enable_host_if(struct e1000_hw * hw)
++{
++ if (hw->mac.ops.mng_enable_host_if)
++ return hw->mac.ops.mng_enable_host_if(hw);
++
++ return E1000_NOT_IMPLEMENTED;
++}
++
++/**
++ * e1000_wait_autoneg - Waits for autonegotiation completion
++ * @hw: pointer to the HW structure
++ *
++ * Waits for autonegotiation to complete. This is a function pointer entry
++ * point called by drivers.
++ **/
++s32 e1000_wait_autoneg(struct e1000_hw *hw)
++{
++ if (hw->mac.ops.wait_autoneg)
++ return hw->mac.ops.wait_autoneg(hw);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_check_reset_block - Verifies PHY can be reset
++ * @hw: pointer to the HW structure
++ *
++ * Checks if the PHY is in a state that can be reset or if manageability
++ * has it tied up. This is a function pointer entry point called by drivers.
++ **/
++s32 e1000_check_reset_block(struct e1000_hw *hw)
++{
++ if (hw->phy.ops.check_reset_block)
++ return hw->phy.ops.check_reset_block(hw);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_read_phy_reg - Reads PHY register
++ * @hw: pointer to the HW structure
++ * @offset: the register to read
++ * @data: the buffer to store the 16-bit read.
++ *
++ * Reads the PHY register and returns the value in data.
++ * This is a function pointer entry point called by drivers.
++ **/
++s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data)
++{
++ if (hw->phy.ops.read_reg)
++ return hw->phy.ops.read_reg(hw, offset, data);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_write_phy_reg - Writes PHY register
++ * @hw: pointer to the HW structure
++ * @offset: the register to write
++ * @data: the value to write.
++ *
++ * Writes the PHY register at offset with the value in data.
++ * This is a function pointer entry point called by drivers.
++ **/
++s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data)
++{
++ if (hw->phy.ops.write_reg)
++ return hw->phy.ops.write_reg(hw, offset, data);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_release_phy - Generic release PHY
++ * @hw: pointer to the HW structure
++ *
++ * Returns immediately if the silicon family does not require a semaphore
++ * when accessing the PHY.
++ **/
++void e1000_release_phy(struct e1000_hw *hw)
++{
++ if (hw->phy.ops.release)
++ hw->phy.ops.release(hw);
++}
++
++/**
++ * e1000_acquire_phy - Generic acquire PHY
++ * @hw: pointer to the HW structure
++ *
++ * Returns success if the silicon family does not require a semaphore when
++ * accessing the PHY.
++ **/
++s32 e1000_acquire_phy(struct e1000_hw *hw)
++{
++ if (hw->phy.ops.acquire)
++ return hw->phy.ops.acquire(hw);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_read_kmrn_reg - Reads register using Kumeran interface
++ * @hw: pointer to the HW structure
++ * @offset: the register to read
++ * @data: the location to store the 16-bit value read.
++ *
++ * Reads a register out of the Kumeran interface. Currently no func pointer
++ * exists and all implementations are handled in the generic version of
++ * this function.
++ **/
++s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data)
++{
++ return e1000_read_kmrn_reg_generic(hw, offset, data);
++}
++
++/**
++ * e1000_write_kmrn_reg - Writes register using Kumeran interface
++ * @hw: pointer to the HW structure
++ * @offset: the register to write
++ * @data: the value to write.
++ *
++ * Writes a register to the Kumeran interface. Currently no func pointer
++ * exists and all implementations are handled in the generic version of
++ * this function.
++ **/
++s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data)
++{
++ return e1000_write_kmrn_reg_generic(hw, offset, data);
++}
++
++/**
++ * e1000_get_cable_length - Retrieves cable length estimation
++ * @hw: pointer to the HW structure
++ *
++ * This function estimates the cable length and stores the result in
++ * hw->phy.min_length and hw->phy.max_length. This is a function pointer
++ * entry point called by drivers.
++ **/
++s32 e1000_get_cable_length(struct e1000_hw *hw)
++{
++ if (hw->phy.ops.get_cable_length)
++ return hw->phy.ops.get_cable_length(hw);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_get_phy_info - Retrieves PHY information from registers
++ * @hw: pointer to the HW structure
++ *
++ * This function gets some information from various PHY registers and
++ * populates hw->phy values with it. This is a function pointer entry
++ * point called by drivers.
++ **/
++s32 e1000_get_phy_info(struct e1000_hw *hw)
++{
++ if (hw->phy.ops.get_info)
++ return hw->phy.ops.get_info(hw);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_phy_hw_reset - Hard PHY reset
++ * @hw: pointer to the HW structure
++ *
++ * Performs a hard PHY reset. This is a function pointer entry point called
++ * by drivers.
++ **/
++s32 e1000_phy_hw_reset(struct e1000_hw *hw)
++{
++ if (hw->phy.ops.reset)
++ return hw->phy.ops.reset(hw);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_phy_commit - Soft PHY reset
++ * @hw: pointer to the HW structure
++ *
++ * Performs a soft PHY reset on those that apply. This is a function pointer
++ * entry point called by drivers.
++ **/
++s32 e1000_phy_commit(struct e1000_hw *hw)
++{
++ if (hw->phy.ops.commit)
++ return hw->phy.ops.commit(hw);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_set_d0_lplu_state - Sets low power link up state for D0
++ * @hw: pointer to the HW structure
++ * @active: boolean used to enable/disable lplu
++ *
++ * Success returns 0, Failure returns 1
++ *
++ * The low power link up (LPLU) state is set to the power management level D0
++ * and SmartSpeed is disabled when active is true; otherwise LPLU is cleared
++ * for D0 and SmartSpeed is enabled. LPLU and SmartSpeed are mutually
++ * exclusive. LPLU is used during Dx states where power conservation is most
++ * important. During driver activity, SmartSpeed should be enabled so
++ * performance is maintained. This is a function pointer entry point called
++ * by drivers.
++ **/
++s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
++{
++ if (hw->phy.ops.set_d0_lplu_state)
++ return hw->phy.ops.set_d0_lplu_state(hw, active);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_set_d3_lplu_state - Sets low power link up state for D3
++ * @hw: pointer to the HW structure
++ * @active: boolean used to enable/disable lplu
++ *
++ * Success returns 0, Failure returns 1
++ *
++ * The low power link up (LPLU) state is set to the power management level D3
++ * and SmartSpeed is disabled when active is true; otherwise LPLU is cleared
++ * for D3 and SmartSpeed is enabled. LPLU and SmartSpeed are mutually
++ * exclusive. LPLU is used during Dx states where power conservation is most
++ * important. During driver activity, SmartSpeed should be enabled so
++ * performance is maintained. This is a function pointer entry point called
++ * by drivers.
++ **/
++s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
++{
++ if (hw->phy.ops.set_d3_lplu_state)
++ return hw->phy.ops.set_d3_lplu_state(hw, active);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_read_mac_addr - Reads MAC address
++ * @hw: pointer to the HW structure
++ *
++ * Reads the MAC address out of the adapter and stores it in the HW structure.
++ * Falls back to the generic implementation when no function pointer has
++ * been set up for the adapter.
++ **/
++s32 e1000_read_mac_addr(struct e1000_hw *hw)
++{
++ if (hw->mac.ops.read_mac_addr)
++ return hw->mac.ops.read_mac_addr(hw);
++
++ return e1000_read_mac_addr_generic(hw);
++}
++
++/**
++ * e1000_read_pba_num - Read device part number
++ * @hw: pointer to the HW structure
++ * @pba_num: pointer to device part number
++ *
++ * Reads the product board assembly (PBA) number from the EEPROM and stores
++ * the value in pba_num.
++ * Currently no func pointer exists and all implementations are handled in the
++ * generic version of this function.
++ **/
++s32 e1000_read_pba_num(struct e1000_hw *hw, u32 *pba_num)
++{
++ return e1000_read_pba_num_generic(hw, pba_num);
++}
++
++/**
++ * e1000_validate_nvm_checksum - Verifies NVM (EEPROM) checksum
++ * @hw: pointer to the HW structure
++ *
++ * Validates the NVM checksum is correct. This is a function pointer entry
++ * point called by drivers.
++ **/
++s32 e1000_validate_nvm_checksum(struct e1000_hw *hw)
++{
++ if (hw->nvm.ops.validate)
++ return hw->nvm.ops.validate(hw);
++
++ return -E1000_ERR_CONFIG;
++}
++
++/**
++ * e1000_update_nvm_checksum - Updates NVM (EEPROM) checksum
++ * @hw: pointer to the HW structure
++ *
++ * Updates the NVM checksum. Currently no func pointer exists and all
++ * implementations are handled in the generic version of this function.
++ **/
++s32 e1000_update_nvm_checksum(struct e1000_hw *hw)
++{
++ if (hw->nvm.ops.update)
++ return hw->nvm.ops.update(hw);
++
++ return -E1000_ERR_CONFIG;
++}
++
++/**
++ * e1000_reload_nvm - Reloads EEPROM
++ * @hw: pointer to the HW structure
++ *
++ * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
++ * extended control register.
++ **/
++void e1000_reload_nvm(struct e1000_hw *hw)
++{
++ if (hw->nvm.ops.reload)
++ hw->nvm.ops.reload(hw);
++}
++
++/**
++ * e1000_read_nvm - Reads NVM (EEPROM)
++ * @hw: pointer to the HW structure
++ * @offset: the word offset to read
++ * @words: number of 16-bit words to read
++ * @data: pointer to the properly sized buffer for the data.
++ *
++ * Reads 16-bit chunks of data from the NVM (EEPROM). This is a function
++ * pointer entry point called by drivers.
++ **/
++s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
++{
++ if (hw->nvm.ops.read)
++ return hw->nvm.ops.read(hw, offset, words, data);
++
++ return -E1000_ERR_CONFIG;
++}
++
++/**
++ * e1000_write_nvm - Writes to NVM (EEPROM)
++ * @hw: pointer to the HW structure
++ * @offset: the word offset to write
++ * @words: number of 16-bit words to write
++ * @data: pointer to the properly sized buffer for the data.
++ *
++ * Writes 16-bit chunks of data to the NVM (EEPROM). This is a function
++ * pointer entry point called by drivers.
++ **/
++s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
++{
++ if (hw->nvm.ops.write)
++ return hw->nvm.ops.write(hw, offset, words, data);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_write_8bit_ctrl_reg - Writes 8bit Control register
++ * @hw: pointer to the HW structure
++ * @reg: 32-bit register offset
++ * @offset: the offset within the register to write to
++ * @data: the 8-bit value to write.
++ *
++ * Writes the 8-bit data value to the given offset of control register reg.
++ * This is a function pointer entry point called by drivers.
++ **/
++s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, u32 offset,
++ u8 data)
++{
++ return e1000_write_8bit_ctrl_reg_generic(hw, reg, offset, data);
++}
++
++/**
++ * e1000_power_up_phy - Restores link in case of PHY power down
++ * @hw: pointer to the HW structure
++ *
++ * The PHY may be powered down to save power, to turn off link when the
++ * driver is unloaded, or when Wake-on-LAN is not enabled (among other reasons).
++ **/
++void e1000_power_up_phy(struct e1000_hw *hw)
++{
++ if (hw->phy.ops.power_up)
++ hw->phy.ops.power_up(hw);
++
++ e1000_setup_link(hw);
++}
++
++/**
++ * e1000_power_down_phy - Power down PHY
++ * @hw: pointer to the HW structure
++ *
++ * The PHY may be powered down to save power, to turn off link when the
++ * driver is unloaded, or when Wake-on-LAN is not enabled (among other reasons).
++ **/
++void e1000_power_down_phy(struct e1000_hw *hw)
++{
++ if (hw->phy.ops.power_down)
++ hw->phy.ops.power_down(hw);
++}
++
++/**
++ * e1000_shutdown_fiber_serdes_link - Remove link during power down
++ * @hw: pointer to the HW structure
++ *
++ * Shutdown the optics and PCS on driver unload.
++ **/
++void e1000_shutdown_fiber_serdes_link(struct e1000_hw *hw)
++{
++ if (hw->mac.ops.shutdown_serdes)
++ hw->mac.ops.shutdown_serdes(hw);
++}
++
+Index: linux-2.6.22/drivers/net/igb/e1000_api.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.22/drivers/net/igb/e1000_api.h 2009-12-18 12:39:22.000000000 -0500
+@@ -0,0 +1,147 @@
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2009 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ You should have received a copy of the GNU General Public License along with
++ this program; if not, write to the Free Software Foundation, Inc.,
++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
++
++#ifndef _E1000_API_H_
++#define _E1000_API_H_
++
++#include "e1000_hw.h"
++
++extern void e1000_init_function_pointers_82575(struct e1000_hw *hw);
++extern void e1000_rx_fifo_flush_82575(struct e1000_hw *hw);
++extern void e1000_init_function_pointers_vf(struct e1000_hw *hw);
++extern void e1000_shutdown_fiber_serdes_link(struct e1000_hw *hw);
++
++s32 e1000_set_mac_type(struct e1000_hw *hw);
++s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device);
++s32 e1000_init_mac_params(struct e1000_hw *hw);
++s32 e1000_init_nvm_params(struct e1000_hw *hw);
++s32 e1000_init_phy_params(struct e1000_hw *hw);
++s32 e1000_init_mbx_params(struct e1000_hw *hw);
++s32 e1000_get_bus_info(struct e1000_hw *hw);
++void e1000_clear_vfta(struct e1000_hw *hw);
++void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value);
++s32 e1000_force_mac_fc(struct e1000_hw *hw);
++s32 e1000_check_for_link(struct e1000_hw *hw);
++s32 e1000_reset_hw(struct e1000_hw *hw);
++s32 e1000_init_hw(struct e1000_hw *hw);
++s32 e1000_setup_link(struct e1000_hw *hw);
++s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed,
++ u16 *duplex);
++s32 e1000_disable_pcie_master(struct e1000_hw *hw);
++void e1000_config_collision_dist(struct e1000_hw *hw);
++void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
++void e1000_mta_set(struct e1000_hw *hw, u32 hash_value);
++u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr);
++void e1000_update_mc_addr_list(struct e1000_hw *hw,
++ u8 *mc_addr_list, u32 mc_addr_count);
++s32 e1000_setup_led(struct e1000_hw *hw);
++s32 e1000_cleanup_led(struct e1000_hw *hw);
++s32 e1000_check_reset_block(struct e1000_hw *hw);
++s32 e1000_blink_led(struct e1000_hw *hw);
++s32 e1000_led_on(struct e1000_hw *hw);
++s32 e1000_led_off(struct e1000_hw *hw);
++s32 e1000_id_led_init(struct e1000_hw *hw);
++void e1000_reset_adaptive(struct e1000_hw *hw);
++void e1000_update_adaptive(struct e1000_hw *hw);
++s32 e1000_get_cable_length(struct e1000_hw *hw);
++s32 e1000_validate_mdi_setting(struct e1000_hw *hw);
++s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data);
++s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data);
++s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
++ u32 offset, u8 data);
++s32 e1000_get_phy_info(struct e1000_hw *hw);
++void e1000_release_phy(struct e1000_hw *hw);
++s32 e1000_acquire_phy(struct e1000_hw *hw);
++s32 e1000_phy_hw_reset(struct e1000_hw *hw);
++s32 e1000_phy_commit(struct e1000_hw *hw);
++void e1000_power_up_phy(struct e1000_hw *hw);
++void e1000_power_down_phy(struct e1000_hw *hw);
++s32 e1000_read_mac_addr(struct e1000_hw *hw);
++s32 e1000_read_pba_num(struct e1000_hw *hw, u32 *part_num);
++void e1000_reload_nvm(struct e1000_hw *hw);
++s32 e1000_update_nvm_checksum(struct e1000_hw *hw);
++s32 e1000_validate_nvm_checksum(struct e1000_hw *hw);
++s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
++s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
++s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
++s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words,
++ u16 *data);
++s32 e1000_wait_autoneg(struct e1000_hw *hw);
++s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active);
++s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active);
++bool e1000_check_mng_mode(struct e1000_hw *hw);
++bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw);
++s32 e1000_mng_enable_host_if(struct e1000_hw *hw);
++s32 e1000_mng_host_if_write(struct e1000_hw *hw,
++ u8 *buffer, u16 length, u16 offset, u8 *sum);
++s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
++ struct e1000_host_mng_command_header *hdr);
++s32 e1000_mng_write_dhcp_info(struct e1000_hw * hw,
++ u8 *buffer, u16 length);
++
++/*
++ * TBI_ACCEPT macro definition:
++ *
++ * This macro requires:
++ * adapter = a pointer to struct e1000_hw
++ * status = the 8 bit status field of the Rx descriptor with EOP set
++ * error = the 8 bit error field of the Rx descriptor with EOP set
++ * length = the sum of all the length fields of the Rx descriptors that
++ * make up the current frame
++ * last_byte = the last byte of the frame DMAed by the hardware
++ * max_frame_length = the maximum frame length we want to accept.
++ * min_frame_length = the minimum frame length we want to accept.
++ *
++ * This macro is a conditional that should be used in the interrupt
++ * handler's Rx processing routine when RxErrors have been detected.
++ *
++ * Typical use:
++ * ...
++ * if (TBI_ACCEPT) {
++ * accept_frame = true;
++ * e1000_tbi_adjust_stats(adapter, MacAddress);
++ * frame_length--;
++ * } else {
++ * accept_frame = false;
++ * }
++ * ...
++ */
++
++/* The carrier extension symbol, as received by the NIC. */
++#define CARRIER_EXTENSION 0x0F
++
++#define TBI_ACCEPT(a, status, errors, length, last_byte, min_frame_size, max_frame_size) \
++ (e1000_tbi_sbp_enabled_82543(a) && \
++ (((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \
++ ((last_byte) == CARRIER_EXTENSION) && \
++ (((status) & E1000_RXD_STAT_VP) ? \
++ (((length) > (min_frame_size - VLAN_TAG_SIZE)) && \
++ ((length) <= (max_frame_size + 1))) : \
++ (((length) > min_frame_size) && \
++ ((length) <= (max_frame_size + VLAN_TAG_SIZE + 1)))))
++
++#endif
+Index: linux-2.6.22/drivers/net/igb/e1000_defines.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.22/drivers/net/igb/e1000_defines.h 2009-12-18 12:39:22.000000000 -0500
+@@ -0,0 +1,1513 @@
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2009 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ You should have received a copy of the GNU General Public License along with
++ this program; if not, write to the Free Software Foundation, Inc.,
++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
++
++#ifndef _E1000_DEFINES_H_
++#define _E1000_DEFINES_H_
++
++/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
++#define REQ_TX_DESCRIPTOR_MULTIPLE 8
++#define REQ_RX_DESCRIPTOR_MULTIPLE 8
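++
++/*
++ * Usage sketch (illustrative): rounding a requested ring size up to the
++ * required multiple with the kernel ALIGN() helper.
++ *
++ *	tx_ring->count = ALIGN(requested_count, REQ_TX_DESCRIPTOR_MULTIPLE);
++ */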
++
++/* Definitions for power management and wakeup registers */
++/* Wake Up Control */
++#define E1000_WUC_APME 0x00000001 /* APM Enable */
++#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */
++#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */
++#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */
++#define E1000_WUC_LSCWE 0x00000010 /* Link Status wake up enable */
++#define E1000_WUC_LSCWO 0x00000020 /* Link Status wake up override */
++#define E1000_WUC_SPM 0x80000000 /* Enable SPM */
++#define E1000_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */
++
++/* Wake Up Filter Control */
++#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
++#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
++#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
++#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
++#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
++#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
++#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
++#define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */
++#define E1000_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */
++#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
++#define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */
++#define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */
++#define E1000_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */
++#define E1000_WUFC_FLX4 0x00100000 /* Flexible Filter 4 Enable */
++#define E1000_WUFC_FLX5 0x00200000 /* Flexible Filter 5 Enable */
++#define E1000_WUFC_ALL_FILTERS 0x000F00FF /* Mask for all wakeup filters */
++#define E1000_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */
++#define E1000_WUFC_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */
++/*
++ * For 82576 to utilize Extended filter masks in addition to
++ * existing (filter) masks
++ */
++#define E1000_WUFC_EXT_FLX_FILTERS 0x00300000 /* Ext. FLX filter mask */
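++
++/*
++ * Usage sketch (illustrative, not part of this driver): arming wake on
++ * Magic Packet and on link status change before entering a low power state.
++ * Assumes the E1000_WUC/E1000_WUFC register offsets from the register
++ * definitions.
++ *
++ *	E1000_WRITE_REG(hw, E1000_WUFC, E1000_WUFC_MAG | E1000_WUFC_LNKC);
++ *	E1000_WRITE_REG(hw, E1000_WUC, E1000_WUC_PME_EN);
++ */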
++
++/* Wake Up Status */
++#define E1000_WUS_LNKC E1000_WUFC_LNKC
++#define E1000_WUS_MAG E1000_WUFC_MAG
++#define E1000_WUS_EX E1000_WUFC_EX
++#define E1000_WUS_MC E1000_WUFC_MC
++#define E1000_WUS_BC E1000_WUFC_BC
++#define E1000_WUS_ARP E1000_WUFC_ARP
++#define E1000_WUS_IPV4 E1000_WUFC_IPV4
++#define E1000_WUS_IPV6 E1000_WUFC_IPV6
++#define E1000_WUS_FLX0 E1000_WUFC_FLX0
++#define E1000_WUS_FLX1 E1000_WUFC_FLX1
++#define E1000_WUS_FLX2 E1000_WUFC_FLX2
++#define E1000_WUS_FLX3 E1000_WUFC_FLX3
++#define E1000_WUS_FLX_FILTERS E1000_WUFC_FLX_FILTERS
++
++/* Wake Up Packet Length */
++#define E1000_WUPL_LENGTH_MASK 0x0FFF /* Only the lower 12 bits are valid */
++
++/* Four Flexible Filters are supported */
++#define E1000_FLEXIBLE_FILTER_COUNT_MAX 4
++/* Two Extended Flexible Filters are supported (82576) */
++#define E1000_EXT_FLEXIBLE_FILTER_COUNT_MAX 2
++#define E1000_FHFT_LENGTH_OFFSET 0xFC /* Length byte in FHFT */
++#define E1000_FHFT_LENGTH_MASK 0x0FF /* Length in lower byte */
++
++/* Each Flexible Filter is at most 128 (0x80) bytes in length */
++#define E1000_FLEXIBLE_FILTER_SIZE_MAX 128
++
++#define E1000_FFLT_SIZE E1000_FLEXIBLE_FILTER_COUNT_MAX
++#define E1000_FFMT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX
++#define E1000_FFVT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX
++
++/* Extended Device Control */
++#define E1000_CTRL_EXT_GPI0_EN 0x00000001 /* Maps SDP4 to GPI0 */
++#define E1000_CTRL_EXT_GPI1_EN 0x00000002 /* Maps SDP5 to GPI1 */
++#define E1000_CTRL_EXT_PHYINT_EN E1000_CTRL_EXT_GPI1_EN
++#define E1000_CTRL_EXT_GPI2_EN 0x00000004 /* Maps SDP6 to GPI2 */
++#define E1000_CTRL_EXT_GPI3_EN 0x00000008 /* Maps SDP7 to GPI3 */
++/* Reserved (bits 4,5) in >= 82575 */
++#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* Value of SW Definable Pin 4 */
++#define E1000_CTRL_EXT_SDP5_DATA 0x00000020 /* Value of SW Definable Pin 5 */
++#define E1000_CTRL_EXT_PHY_INT E1000_CTRL_EXT_SDP5_DATA
++#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* Value of SW Definable Pin 6 */
++#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Definable Pin 3 */
++/* SDP 4/5 (bits 8,9) are reserved in >= 82575 */
++#define E1000_CTRL_EXT_SDP4_DIR 0x00000100 /* Direction of SDP4 0=in 1=out */
++#define E1000_CTRL_EXT_SDP5_DIR 0x00000200 /* Direction of SDP5 0=in 1=out */
++#define E1000_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */
++#define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* Direction of SDP3 0=in 1=out */
++#define E1000_CTRL_EXT_ASDCHK 0x00001000 /* Initiate an ASD sequence */
++#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */
++#define E1000_CTRL_EXT_IPS 0x00004000 /* Invert Power State */
++/* Physical Func Reset Done Indication */
++#define E1000_CTRL_EXT_PFRSTD 0x00004000
++#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */
++#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
++#define E1000_CTRL_EXT_DMA_DYN_CLK_EN 0x00080000 /* DMA Dynamic Clock Gating */
++#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
++#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
++#define E1000_CTRL_EXT_LINK_MODE_TBI 0x00C00000
++#define E1000_CTRL_EXT_LINK_MODE_KMRN 0x00000000
++#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
++#define E1000_CTRL_EXT_LINK_MODE_PCIX_SERDES 0x00800000
++#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000
++#define E1000_CTRL_EXT_EIAME 0x01000000
++#define E1000_CTRL_EXT_IRCA 0x00000001
++#define E1000_CTRL_EXT_WR_WMARK_MASK 0x03000000
++#define E1000_CTRL_EXT_WR_WMARK_256 0x00000000
++#define E1000_CTRL_EXT_WR_WMARK_320 0x01000000
++#define E1000_CTRL_EXT_WR_WMARK_384 0x02000000
++#define E1000_CTRL_EXT_WR_WMARK_448 0x03000000
++#define E1000_CTRL_EXT_CANC 0x04000000 /* Int delay cancellation */
++#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
++/* IAME enable bit (27) was removed in >= 82575 */
++#define E1000_CTRL_EXT_IAME 0x08000000 /* Int acknowledge Auto-mask */
++#define E1000_CRTL_EXT_PB_PAREN 0x01000000 /* packet buffer parity error
++ * detection enabled */
++#define E1000_CTRL_EXT_DF_PAREN 0x02000000 /* descriptor FIFO parity
++ * error detection enable */
++#define E1000_CTRL_EXT_GHOST_PAREN 0x40000000
++#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
++#define E1000_I2CCMD_REG_ADDR_SHIFT 16
++#define E1000_I2CCMD_REG_ADDR 0x00FF0000
++#define E1000_I2CCMD_PHY_ADDR_SHIFT 24
++#define E1000_I2CCMD_PHY_ADDR 0x07000000
++#define E1000_I2CCMD_OPCODE_READ 0x08000000
++#define E1000_I2CCMD_OPCODE_WRITE 0x00000000
++#define E1000_I2CCMD_RESET 0x10000000
++#define E1000_I2CCMD_READY 0x20000000
++#define E1000_I2CCMD_INTERRUPT_ENA 0x40000000
++#define E1000_I2CCMD_ERROR 0x80000000
++#define E1000_MAX_SGMII_PHY_REG_ADDR 255
++#define E1000_I2CCMD_PHY_TIMEOUT 200
++#define E1000_IVAR_VALID 0x80
++#define E1000_GPIE_NSICR 0x00000001
++#define E1000_GPIE_MSIX_MODE 0x00000010
++#define E1000_GPIE_EIAME 0x40000000
++#define E1000_GPIE_PBA 0x80000000
++
++/* Receive Descriptor bit definitions */
++#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */
++#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */
++#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */
++#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
++#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
++#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
++#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
++#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */
++#define E1000_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */
++#define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */
++#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */
++#define E1000_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */
++#define E1000_RXD_STAT_ACK 0x8000 /* ACK Packet indication */
++#define E1000_RXD_ERR_CE 0x01 /* CRC Error */
++#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */
++#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */
++#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */
++#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */
++#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */
++#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */
++#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
++#define E1000_RXD_SPC_PRI_MASK 0xE000 /* Priority is in upper 3 bits */
++#define E1000_RXD_SPC_PRI_SHIFT 13
++#define E1000_RXD_SPC_CFI_MASK 0x1000 /* CFI is bit 12 */
++#define E1000_RXD_SPC_CFI_SHIFT 12
++
++#define E1000_RXDEXT_STATERR_CE 0x01000000
++#define E1000_RXDEXT_STATERR_SE 0x02000000
++#define E1000_RXDEXT_STATERR_SEQ 0x04000000
++#define E1000_RXDEXT_STATERR_CXE 0x10000000
++#define E1000_RXDEXT_STATERR_TCPE 0x20000000
++#define E1000_RXDEXT_STATERR_IPE 0x40000000
++#define E1000_RXDEXT_STATERR_RXE 0x80000000
++
++/* mask to determine if packets should be dropped due to frame errors */
++#define E1000_RXD_ERR_FRAME_ERR_MASK ( \
++ E1000_RXD_ERR_CE | \
++ E1000_RXD_ERR_SE | \
++ E1000_RXD_ERR_SEQ | \
++ E1000_RXD_ERR_CXE | \
++ E1000_RXD_ERR_RXE)
++
++/* Same mask, but for extended and packet split descriptors */
++#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
++ E1000_RXDEXT_STATERR_CE | \
++ E1000_RXDEXT_STATERR_SE | \
++ E1000_RXDEXT_STATERR_SEQ | \
++ E1000_RXDEXT_STATERR_CXE | \
++ E1000_RXDEXT_STATERR_RXE)
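++
++/*
++ * Illustrative use only (rx_desc is a hypothetical descriptor pointer, not
++ * defined in this file): an Rx cleanup path would typically drop any frame
++ * whose descriptor reports one of these error bits, e.g.
++ *
++ * if (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)
++ *         goto drop_frame;
++ *
++ * with E1000_RXDEXT_ERR_FRAME_ERR_MASK used the same way against the
++ * status/error word of extended and packet split descriptors.
++ */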
++
++#define E1000_MRQC_ENABLE_MASK 0x00000007
++#define E1000_MRQC_ENABLE_RSS_2Q 0x00000001
++#define E1000_MRQC_ENABLE_RSS_INT 0x00000004
++#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000
++#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
++#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000
++#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000
++#define E1000_MRQC_RSS_FIELD_IPV6_EX 0x00080000
++#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000
++#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000
++
++#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000
++#define E1000_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF
++
++/* Management Control */
++#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */
++#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */
++#define E1000_MANC_R_ON_FORCE 0x00000004 /* Reset on Force TCO - RO */
++#define E1000_MANC_RMCP_EN 0x00000100 /* Enable RCMP 026Fh Filtering */
++#define E1000_MANC_0298_EN 0x00000200 /* Enable RCMP 0298h Filtering */
++#define E1000_MANC_IPV4_EN 0x00000400 /* Enable IPv4 */
++#define E1000_MANC_IPV6_EN 0x00000800 /* Enable IPv6 */
++#define E1000_MANC_SNAP_EN 0x00001000 /* Accept LLC/SNAP */
++#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */
++/* Enable Neighbor Discovery Filtering */
++#define E1000_MANC_NEIGHBOR_EN 0x00004000
++#define E1000_MANC_ARP_RES_EN 0x00008000 /* Enable ARP response Filtering */
++#define E1000_MANC_TCO_RESET 0x00010000 /* TCO Reset Occurred */
++#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */
++#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */
++#define E1000_MANC_RCV_ALL 0x00080000 /* Receive All Enabled */
++#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */
++/* Enable MAC address filtering */
++#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000
++/* Enable MNG packets to host memory */
++#define E1000_MANC_EN_MNG2HOST 0x00200000
++/* Enable IP address filtering */
++#define E1000_MANC_EN_IP_ADDR_FILTER 0x00400000
++#define E1000_MANC_EN_XSUM_FILTER 0x00800000 /* Enable checksum filtering */
++#define E1000_MANC_BR_EN 0x01000000 /* Enable broadcast filtering */
++#define E1000_MANC_SMB_REQ 0x01000000 /* SMBus Request */
++#define E1000_MANC_SMB_GNT 0x02000000 /* SMBus Grant */
++#define E1000_MANC_SMB_CLK_IN 0x04000000 /* SMBus Clock In */
++#define E1000_MANC_SMB_DATA_IN 0x08000000 /* SMBus Data In */
++#define E1000_MANC_SMB_DATA_OUT 0x10000000 /* SMBus Data Out */
++#define E1000_MANC_SMB_CLK_OUT 0x20000000 /* SMBus Clock Out */
++
++#define E1000_MANC_SMB_DATA_OUT_SHIFT 28 /* SMBus Data Out Shift */
++#define E1000_MANC_SMB_CLK_OUT_SHIFT 29 /* SMBus Clock Out Shift */
++
++/* Receive Control */
++#define E1000_RCTL_RST 0x00000001 /* Software reset */
++#define E1000_RCTL_EN 0x00000002 /* enable */
++#define E1000_RCTL_SBP 0x00000004 /* store bad packet */
++#define E1000_RCTL_UPE 0x00000008 /* unicast promisc enable */
++#define E1000_RCTL_MPE 0x00000010 /* multicast promisc enable */
++#define E1000_RCTL_LPE 0x00000020 /* long packet enable */
++#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */
++#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */
++#define E1000_RCTL_LBM_SLP 0x00000080 /* serial link loopback mode */
++#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */
++#define E1000_RCTL_DTYP_MASK 0x00000C00 /* Descriptor type mask */
++#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */
++#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min thresh size */
++#define E1000_RCTL_RDMTS_QUAT 0x00000100 /* rx desc min thresh size */
++#define E1000_RCTL_RDMTS_EIGTH 0x00000200 /* rx desc min thresh size */
++#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */
++#define E1000_RCTL_MO_0 0x00000000 /* multicast offset 11:0 */
++#define E1000_RCTL_MO_1 0x00001000 /* multicast offset 12:1 */
++#define E1000_RCTL_MO_2 0x00002000 /* multicast offset 13:2 */
++#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */
++#define E1000_RCTL_MDR 0x00004000 /* multicast desc ring 0 */
++#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */
++/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */
++#define E1000_RCTL_SZ_2048 0x00000000 /* rx buffer size 2048 */
++#define E1000_RCTL_SZ_1024 0x00010000 /* rx buffer size 1024 */
++#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */
++#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */
++/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */
++#define E1000_RCTL_SZ_16384 0x00010000 /* rx buffer size 16384 */
++#define E1000_RCTL_SZ_8192 0x00020000 /* rx buffer size 8192 */
++#define E1000_RCTL_SZ_4096 0x00030000 /* rx buffer size 4096 */
++#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */
++#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */
++#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */
++#define E1000_RCTL_DPF 0x00400000 /* discard pause frames */
++#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */
++#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */
++#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
++#define E1000_RCTL_FLXBUF_MASK 0x78000000 /* Flexible buffer size */
++#define E1000_RCTL_FLXBUF_SHIFT 27 /* Flexible buffer shift */
++
++/*
++ * Use byte values for the following shift parameters
++ * Usage:
++ * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
++ * E1000_PSRCTL_BSIZE0_MASK) |
++ * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
++ * E1000_PSRCTL_BSIZE1_MASK) |
++ * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
++ * E1000_PSRCTL_BSIZE2_MASK) |
++ * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) &
++ * E1000_PSRCTL_BSIZE3_MASK))
++ * where value0 = [128..16256], default=256
++ * value1 = [1024..64512], default=4096
++ * value2 = [0..64512], default=4096
++ * value3 = [0..64512], default=0
++ */
++
++#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F
++#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00
++#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000
++#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000
++
++#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */
++#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */
++#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */
++#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */
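++
++/*
++ * Minimal sketch (not driver code) of how the packet split buffer sizes
++ * could be packed with the masks and shifts above, using the default
++ * values from the usage note and the same ROUNDUP() helper it assumes:
++ *
++ * u32 psrctl = 0;
++ * psrctl |= (ROUNDUP(256, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
++ *           E1000_PSRCTL_BSIZE0_MASK;
++ * psrctl |= (ROUNDUP(4096, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
++ *           E1000_PSRCTL_BSIZE1_MASK;
++ * psrctl |= (ROUNDUP(4096, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
++ *           E1000_PSRCTL_BSIZE2_MASK;
++ * psrctl |= (ROUNDUP(0, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) &
++ *           E1000_PSRCTL_BSIZE3_MASK;
++ *
++ * The assembled value would then be written to the PSRCTL register.
++ */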
++
++/* SWFW_SYNC Definitions */
++#define E1000_SWFW_EEP_SM 0x01
++#define E1000_SWFW_PHY0_SM 0x02
++#define E1000_SWFW_PHY1_SM 0x04
++#define E1000_SWFW_CSR_SM 0x08
++
++/* FACTPS Definitions */
++#define E1000_FACTPS_LFS 0x40000000 /* LAN Function Select */
++/* Device Control */
++#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */
++#define E1000_CTRL_BEM 0x00000002 /* Endian Mode.0=little,1=big */
++#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */
++#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master reqs */
++#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */
++#define E1000_CTRL_TME 0x00000010 /* Test mode. 0=normal,1=test */
++#define E1000_CTRL_SLE 0x00000020 /* Serial Link on 0=dis,1=en */
++#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */
++#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */
++#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */
++#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */
++#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */
++#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */
++#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */
++#define E1000_CTRL_BEM32 0x00000400 /* Big Endian 32 mode */
++#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */
++#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
++#define E1000_CTRL_D_UD_EN 0x00002000 /* Dock/Undock enable */
++#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock
++ * indication in SDP[0] */
++#define E1000_CTRL_FORCE_PHY_RESET 0x00008000 /* Reset both PHY ports, through
++ * PHYRST_N pin */
++#define E1000_CTRL_EXT_LINK_EN 0x00010000 /* enable link status from external
++ * LINK_0 and LINK_1 pins */
++#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
++#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
++#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */
++#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */
++#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */
++#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
++#define E1000_CTRL_SWDPIO1 0x00800000 /* SWDPIN 1 input or output */
++#define E1000_CTRL_SWDPIO2 0x01000000 /* SWDPIN 2 input or output */
++#define E1000_CTRL_SWDPIO3 0x02000000 /* SWDPIN 3 input or output */
++#define E1000_CTRL_RST 0x04000000 /* Global reset */
++#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
++#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
++#define E1000_CTRL_RTE 0x20000000 /* Routing tag enable */
++#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */
++#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */
++#define E1000_CTRL_SW2FW_INT 0x02000000 /* Initiate an interrupt to ME */
++#define E1000_CTRL_I2C_ENA 0x02000000 /* I2C enable */
++
++/*
++ * Bit definitions for the Management Data IO (MDIO) and Management Data
++ * Clock (MDC) pins in the Device Control Register.
++ */
++#define E1000_CTRL_PHY_RESET_DIR E1000_CTRL_SWDPIO0
++#define E1000_CTRL_PHY_RESET E1000_CTRL_SWDPIN0
++#define E1000_CTRL_MDIO_DIR E1000_CTRL_SWDPIO2
++#define E1000_CTRL_MDIO E1000_CTRL_SWDPIN2
++#define E1000_CTRL_MDC_DIR E1000_CTRL_SWDPIO3
++#define E1000_CTRL_MDC E1000_CTRL_SWDPIN3
++#define E1000_CTRL_PHY_RESET_DIR4 E1000_CTRL_EXT_SDP4_DIR
++#define E1000_CTRL_PHY_RESET4 E1000_CTRL_EXT_SDP4_DATA
++
++#define E1000_CONNSW_ENRGSRC 0x4
++#define E1000_PCS_CFG_PCS_EN 8
++#define E1000_PCS_LCTL_FLV_LINK_UP 1
++#define E1000_PCS_LCTL_FSV_10 0
++#define E1000_PCS_LCTL_FSV_100 2
++#define E1000_PCS_LCTL_FSV_1000 4
++#define E1000_PCS_LCTL_FDV_FULL 8
++#define E1000_PCS_LCTL_FSD 0x10
++#define E1000_PCS_LCTL_FORCE_LINK 0x20
++#define E1000_PCS_LCTL_LOW_LINK_LATCH 0x40
++#define E1000_PCS_LCTL_FORCE_FCTRL 0x80
++#define E1000_PCS_LCTL_AN_ENABLE 0x10000
++#define E1000_PCS_LCTL_AN_RESTART 0x20000
++#define E1000_PCS_LCTL_AN_TIMEOUT 0x40000
++#define E1000_PCS_LCTL_AN_SGMII_BYPASS 0x80000
++#define E1000_PCS_LCTL_AN_SGMII_TRIGGER 0x100000
++#define E1000_PCS_LCTL_FAST_LINK_TIMER 0x1000000
++#define E1000_PCS_LCTL_LINK_OK_FIX 0x2000000
++#define E1000_PCS_LCTL_CRS_ON_NI 0x4000000
++#define E1000_ENABLE_SERDES_LOOPBACK 0x0410
++
++#define E1000_PCS_LSTS_LINK_OK 1
++#define E1000_PCS_LSTS_SPEED_10 0
++#define E1000_PCS_LSTS_SPEED_100 2
++#define E1000_PCS_LSTS_SPEED_1000 4
++#define E1000_PCS_LSTS_DUPLEX_FULL 8
++#define E1000_PCS_LSTS_SYNK_OK 0x10
++#define E1000_PCS_LSTS_AN_COMPLETE 0x10000
++#define E1000_PCS_LSTS_AN_PAGE_RX 0x20000
++#define E1000_PCS_LSTS_AN_TIMED_OUT 0x40000
++#define E1000_PCS_LSTS_AN_REMOTE_FAULT 0x80000
++#define E1000_PCS_LSTS_AN_ERROR_RWS 0x100000
++
++/* Device Status */
++#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */
++#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */
++#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */
++#define E1000_STATUS_FUNC_SHIFT 2
++#define E1000_STATUS_FUNC_0 0x00000000 /* Function 0 */
++#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */
++#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */
++#define E1000_STATUS_TBIMODE 0x00000020 /* TBI mode */
++#define E1000_STATUS_SPEED_MASK 0x000000C0
++#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */
++#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
++#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
++#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion by NVM */
++#define E1000_STATUS_ASDV 0x00000300 /* Auto speed detect value */
++#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */
++#define E1000_STATUS_DOCK_CI 0x00000800 /* Change in Dock/Undock state.
++ * Clear on write '0'. */
++#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Master request status */
++#define E1000_STATUS_MTXCKOK 0x00000400 /* MTX clock running OK */
++#define E1000_STATUS_PCI66 0x00000800 /* In 66Mhz slot */
++#define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */
++#define E1000_STATUS_PCIX_MODE 0x00002000 /* PCI-X mode */
++#define E1000_STATUS_PCIX_SPEED 0x0000C000 /* PCI-X bus speed */
++#define E1000_STATUS_BMC_SKU_0 0x00100000 /* BMC USB redirect disabled */
++#define E1000_STATUS_BMC_SKU_1 0x00200000 /* BMC SRAM disabled */
++#define E1000_STATUS_BMC_SKU_2 0x00400000 /* BMC SDRAM disabled */
++#define E1000_STATUS_BMC_CRYPTO 0x00800000 /* BMC crypto disabled */
++#define E1000_STATUS_BMC_LITE 0x01000000 /* BMC external code execution
++ * disabled */
++#define E1000_STATUS_RGMII_ENABLE 0x02000000 /* RGMII disabled */
++#define E1000_STATUS_FUSE_8 0x04000000
++#define E1000_STATUS_FUSE_9 0x08000000
++#define E1000_STATUS_SERDES0_DIS 0x10000000 /* SERDES disabled on port 0 */
++#define E1000_STATUS_SERDES1_DIS 0x20000000 /* SERDES disabled on port 1 */
++
++/* Constants used to interpret the masked PCI-X bus speed. */
++#define E1000_STATUS_PCIX_SPEED_66 0x00000000 /* PCI-X bus speed 50-66 MHz */
++#define E1000_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus speed 66-100 MHz */
++#define E1000_STATUS_PCIX_SPEED_133 0x00008000 /* PCI-X bus speed 100-133 MHz */
++
++#define SPEED_10 10
++#define SPEED_100 100
++#define SPEED_1000 1000
++#define HALF_DUPLEX 1
++#define FULL_DUPLEX 2
++
++#define PHY_FORCE_TIME 20
++
++#define ADVERTISE_10_HALF 0x0001
++#define ADVERTISE_10_FULL 0x0002
++#define ADVERTISE_100_HALF 0x0004
++#define ADVERTISE_100_FULL 0x0008
++#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */
++#define ADVERTISE_1000_FULL 0x0020
++
++/* 1000/H is not supported, nor spec-compliant. */
++#define E1000_ALL_SPEED_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \
++ ADVERTISE_100_HALF | ADVERTISE_100_FULL | \
++ ADVERTISE_1000_FULL)
++#define E1000_ALL_NOT_GIG (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \
++ ADVERTISE_100_HALF | ADVERTISE_100_FULL)
++#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL)
++#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL)
++#define E1000_ALL_FULL_DUPLEX (ADVERTISE_10_FULL | ADVERTISE_100_FULL | \
++ ADVERTISE_1000_FULL)
++#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF)
++
++#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX
++
++/* LED Control */
++#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F
++#define E1000_LEDCTL_LED0_MODE_SHIFT 0
++#define E1000_LEDCTL_LED0_BLINK_RATE 0x00000020
++#define E1000_LEDCTL_LED0_IVRT 0x00000040
++#define E1000_LEDCTL_LED0_BLINK 0x00000080
++#define E1000_LEDCTL_LED1_MODE_MASK 0x00000F00
++#define E1000_LEDCTL_LED1_MODE_SHIFT 8
++#define E1000_LEDCTL_LED1_BLINK_RATE 0x00002000
++#define E1000_LEDCTL_LED1_IVRT 0x00004000
++#define E1000_LEDCTL_LED1_BLINK 0x00008000
++#define E1000_LEDCTL_LED2_MODE_MASK 0x000F0000
++#define E1000_LEDCTL_LED2_MODE_SHIFT 16
++#define E1000_LEDCTL_LED2_BLINK_RATE 0x00200000
++#define E1000_LEDCTL_LED2_IVRT 0x00400000
++#define E1000_LEDCTL_LED2_BLINK 0x00800000
++#define E1000_LEDCTL_LED3_MODE_MASK 0x0F000000
++#define E1000_LEDCTL_LED3_MODE_SHIFT 24
++#define E1000_LEDCTL_LED3_BLINK_RATE 0x20000000
++#define E1000_LEDCTL_LED3_IVRT 0x40000000
++#define E1000_LEDCTL_LED3_BLINK 0x80000000
++
++#define E1000_LEDCTL_MODE_LINK_10_1000 0x0
++#define E1000_LEDCTL_MODE_LINK_100_1000 0x1
++#define E1000_LEDCTL_MODE_LINK_UP 0x2
++#define E1000_LEDCTL_MODE_ACTIVITY 0x3
++#define E1000_LEDCTL_MODE_LINK_ACTIVITY 0x4
++#define E1000_LEDCTL_MODE_LINK_10 0x5
++#define E1000_LEDCTL_MODE_LINK_100 0x6
++#define E1000_LEDCTL_MODE_LINK_1000 0x7
++#define E1000_LEDCTL_MODE_PCIX_MODE 0x8
++#define E1000_LEDCTL_MODE_FULL_DUPLEX 0x9
++#define E1000_LEDCTL_MODE_COLLISION 0xA
++#define E1000_LEDCTL_MODE_BUS_SPEED 0xB
++#define E1000_LEDCTL_MODE_BUS_SIZE 0xC
++#define E1000_LEDCTL_MODE_PAUSED 0xD
++#define E1000_LEDCTL_MODE_LED_ON 0xE
++#define E1000_LEDCTL_MODE_LED_OFF 0xF
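++
++/*
++ * Example only (ledctl is a hypothetical local copy of the LEDCTL register):
++ * programming LED0 as an activity indicator combines one of the mode values
++ * above with the per-LED field mask and shift, e.g.
++ *
++ * ledctl &= ~E1000_LEDCTL_LED0_MODE_MASK;
++ * ledctl |= E1000_LEDCTL_MODE_ACTIVITY << E1000_LEDCTL_LED0_MODE_SHIFT;
++ */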
++
++/* Transmit Descriptor bit definitions */
++#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */
++#define E1000_TXD_DTYP_C 0x00000000 /* Context Descriptor */
++#define E1000_TXD_POPTS_SHIFT 8 /* POPTS shift */
++#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
++#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
++#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */
++#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
++#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */
++#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */
++#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */
++#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
++#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
++#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */
++#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */
++#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */
++#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */
++#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */
++#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */
++#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */
++#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */
++#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */
++/* Extended desc bits for Linksec and timesync */
++
++/* Transmit Control */
++#define E1000_TCTL_RST 0x00000001 /* software reset */
++#define E1000_TCTL_EN 0x00000002 /* enable tx */
++#define E1000_TCTL_BCE 0x00000004 /* busy check enable */
++#define E1000_TCTL_PSP 0x00000008 /* pad short packets */
++#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */
++#define E1000_TCTL_COLD 0x003ff000 /* collision distance */
++#define E1000_TCTL_SWXOFF 0x00400000 /* SW Xoff transmission */
++#define E1000_TCTL_PBE 0x00800000 /* Packet Burst Enable */
++#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
++#define E1000_TCTL_NRTU 0x02000000 /* No Re-transmit on underrun */
++#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */
++
++/* Transmit Arbitration Count */
++#define E1000_TARC0_ENABLE 0x00000400 /* Enable Tx Queue 0 */
++
++/* SerDes Control */
++#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
++
++/* Receive Checksum Control */
++#define E1000_RXCSUM_PCSS_MASK 0x000000FF /* Packet Checksum Start */
++#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */
++#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */
++#define E1000_RXCSUM_IPV6OFL 0x00000400 /* IPv6 checksum offload */
++#define E1000_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */
++#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */
++#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
++
++/* Header split receive */
++#define E1000_RFCTL_ISCSI_DIS 0x00000001
++#define E1000_RFCTL_ISCSI_DWC_MASK 0x0000003E
++#define E1000_RFCTL_ISCSI_DWC_SHIFT 1
++#define E1000_RFCTL_NFSW_DIS 0x00000040
++#define E1000_RFCTL_NFSR_DIS 0x00000080
++#define E1000_RFCTL_NFS_VER_MASK 0x00000300
++#define E1000_RFCTL_NFS_VER_SHIFT 8
++#define E1000_RFCTL_IPV6_DIS 0x00000400
++#define E1000_RFCTL_IPV6_XSUM_DIS 0x00000800
++#define E1000_RFCTL_ACK_DIS 0x00001000
++#define E1000_RFCTL_ACKD_DIS 0x00002000
++#define E1000_RFCTL_IPFRSP_DIS 0x00004000
++#define E1000_RFCTL_EXTEN 0x00008000
++#define E1000_RFCTL_IPV6_EX_DIS 0x00010000
++#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000
++#define E1000_RFCTL_LEF 0x00040000
++
++/* Collision related configuration parameters */
++#define E1000_COLLISION_THRESHOLD 15
++#define E1000_CT_SHIFT 4
++#define E1000_COLLISION_DISTANCE 63
++#define E1000_COLD_SHIFT 12
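++
++/*
++ * Sketch only (tctl is a hypothetical local copy of the Transmit Control
++ * register): these defaults are intended to be shifted into the TCTL
++ * collision threshold/distance fields defined earlier, e.g.
++ *
++ * tctl &= ~(E1000_TCTL_CT | E1000_TCTL_COLD);
++ * tctl |= E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT;
++ * tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
++ */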
++
++/* Default values for the transmit IPG register */
++#define DEFAULT_82543_TIPG_IPGT_FIBER 9
++#define DEFAULT_82543_TIPG_IPGT_COPPER 8
++
++#define E1000_TIPG_IPGT_MASK 0x000003FF
++#define E1000_TIPG_IPGR1_MASK 0x000FFC00
++#define E1000_TIPG_IPGR2_MASK 0x3FF00000
++
++#define DEFAULT_82543_TIPG_IPGR1 8
++#define E1000_TIPG_IPGR1_SHIFT 10
++
++#define DEFAULT_82543_TIPG_IPGR2 6
++#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7
++#define E1000_TIPG_IPGR2_SHIFT 20
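++
++/*
++ * Sketch only (tipg is a hypothetical local): a copper TIPG value can be
++ * assembled from the defaults and field shifts above:
++ *
++ * u32 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
++ * tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
++ * tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
++ */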
++
++/* Ethertype field values */
++#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */
++
++#define ETHERNET_FCS_SIZE 4
++#define MAX_JUMBO_FRAME_SIZE 0x3F00
++
++/* Extended Configuration Control and Size */
++#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020
++#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001
++#define E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE 0x00000008
++#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020
++#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK 0x00FF0000
++#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT 16
++#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK 0x0FFF0000
++#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT 16
++
++#define E1000_PHY_CTRL_SPD_EN 0x00000001
++#define E1000_PHY_CTRL_D0A_LPLU 0x00000002
++#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004
++#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008
++#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040
++
++#define E1000_KABGTXD_BGSQLBIAS 0x00050000
++
++/* PBA constants */
++#define E1000_PBA_6K 0x0006 /* 6KB */
++#define E1000_PBA_8K 0x0008 /* 8KB */
++#define E1000_PBA_10K 0x000A /* 10KB */
++#define E1000_PBA_12K 0x000C /* 12KB */
++#define E1000_PBA_14K 0x000E /* 14KB */
++#define E1000_PBA_16K 0x0010 /* 16KB */
++#define E1000_PBA_18K 0x0012
++#define E1000_PBA_20K 0x0014
++#define E1000_PBA_22K 0x0016
++#define E1000_PBA_24K 0x0018
++#define E1000_PBA_26K 0x001A
++#define E1000_PBA_30K 0x001E
++#define E1000_PBA_32K 0x0020
++#define E1000_PBA_34K 0x0022
++#define E1000_PBA_35K 0x0023
++#define E1000_PBA_38K 0x0026
++#define E1000_PBA_40K 0x0028
++#define E1000_PBA_48K 0x0030 /* 48KB */
++#define E1000_PBA_64K 0x0040 /* 64KB */
++
++#define E1000_PBS_16K E1000_PBA_16K
++#define E1000_PBS_24K E1000_PBA_24K
++
++#define IFS_MAX 80
++#define IFS_MIN 40
++#define IFS_RATIO 4
++#define IFS_STEP 10
++#define MIN_NUM_XMITS 1000
++
++/* SW Semaphore Register */
++#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
++#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
++#define E1000_SWSM_WMNG 0x00000004 /* Wake MNG Clock */
++#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */
++
++#define E1000_SWSM2_LOCK 0x00000002 /* Secondary driver semaphore bit */
++
++/* Interrupt Cause Read */
++#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */
++#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */
++#define E1000_ICR_LSC 0x00000004 /* Link Status Change */
++#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */
++#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */
++#define E1000_ICR_RXO 0x00000040 /* rx overrun */
++#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */
++#define E1000_ICR_VMMB 0x00000100 /* VM MB event */
++#define E1000_ICR_MDAC 0x00000200 /* MDIO access complete */
++#define E1000_ICR_RXCFG 0x00000400 /* Rx /c/ ordered set */
++#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */
++#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */
++#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */
++#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */
++#define E1000_ICR_TXD_LOW 0x00008000
++#define E1000_ICR_SRPD 0x00010000
++#define E1000_ICR_ACK 0x00020000 /* Receive Ack frame */
++#define E1000_ICR_MNG 0x00040000 /* Manageability event */
++#define E1000_ICR_DOCK 0x00080000 /* Dock/Undock */
++#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver
++ * should claim the interrupt */
++#define E1000_ICR_RXD_FIFO_PAR0 0x00100000 /* Q0 Rx desc FIFO parity error */
++#define E1000_ICR_TXD_FIFO_PAR0 0x00200000 /* Q0 Tx desc FIFO parity error */
++#define E1000_ICR_HOST_ARB_PAR 0x00400000 /* host arb read buffer parity err */
++#define E1000_ICR_PB_PAR 0x00800000 /* packet buffer parity error */
++#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* Q1 Rx desc FIFO parity error */
++#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* Q1 Tx desc FIFO parity error */
++#define E1000_ICR_ALL_PARITY 0x03F00000 /* all parity error bits */
++#define E1000_ICR_DSW 0x00000020 /* FW changed the status of DISSW
++ * bit in the FWSM */
++#define E1000_ICR_PHYINT 0x00001000 /* LAN connected device generates
++ * an interrupt */
++#define E1000_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */
++#define E1000_ICR_EPRST 0x00100000 /* ME hardware reset occurs */
++
++
++/* Extended Interrupt Cause Read */
++#define E1000_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */
++#define E1000_EICR_RX_QUEUE1 0x00000002 /* Rx Queue 1 Interrupt */
++#define E1000_EICR_RX_QUEUE2 0x00000004 /* Rx Queue 2 Interrupt */
++#define E1000_EICR_RX_QUEUE3 0x00000008 /* Rx Queue 3 Interrupt */
++#define E1000_EICR_TX_QUEUE0 0x00000100 /* Tx Queue 0 Interrupt */
++#define E1000_EICR_TX_QUEUE1 0x00000200 /* Tx Queue 1 Interrupt */
++#define E1000_EICR_TX_QUEUE2 0x00000400 /* Tx Queue 2 Interrupt */
++#define E1000_EICR_TX_QUEUE3 0x00000800 /* Tx Queue 3 Interrupt */
++#define E1000_EICR_TCP_TIMER 0x40000000 /* TCP Timer */
++#define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active */
++/* TCP Timer */
++#define E1000_TCPTIMER_KS 0x00000100 /* KickStart */
++#define E1000_TCPTIMER_COUNT_ENABLE 0x00000200 /* Count Enable */
++#define E1000_TCPTIMER_COUNT_FINISH 0x00000400 /* Count finish */
++#define E1000_TCPTIMER_LOOP 0x00000800 /* Loop */
++
++/*
++ * This defines the bits that are set in the Interrupt Mask
++ * Set/Read Register. Each bit is documented below:
++ * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
++ * o RXSEQ = Receive Sequence Error
++ */
++#define POLL_IMS_ENABLE_MASK ( \
++ E1000_IMS_RXDMT0 | \
++ E1000_IMS_RXSEQ)
++
++/*
++ * This defines the bits that are set in the Interrupt Mask
++ * Set/Read Register. Each bit is documented below:
++ * o RXT0 = Receiver Timer Interrupt (ring 0)
++ * o TXDW = Transmit Descriptor Written Back
++ * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
++ * o RXSEQ = Receive Sequence Error
++ * o LSC = Link Status Change
++ */
++#define IMS_ENABLE_MASK ( \
++ E1000_IMS_RXT0 | \
++ E1000_IMS_TXDW | \
++ E1000_IMS_RXDMT0 | \
++ E1000_IMS_RXSEQ | \
++ E1000_IMS_LSC)
++
++/* Interrupt Mask Set */
++#define E1000_IMS_TXDW E1000_ICR_TXDW /* Tx desc written back */
++#define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */
++#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */
++#define E1000_IMS_VMMB E1000_ICR_VMMB /* Mail box activity */
++#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
++#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
++#define E1000_IMS_RXO E1000_ICR_RXO /* rx overrun */
++#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */
++#define E1000_IMS_MDAC E1000_ICR_MDAC /* MDIO access complete */
++#define E1000_IMS_RXCFG E1000_ICR_RXCFG /* Rx /c/ ordered set */
++#define E1000_IMS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */
++#define E1000_IMS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */
++#define E1000_IMS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */
++#define E1000_IMS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */
++#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW
++#define E1000_IMS_SRPD E1000_ICR_SRPD
++#define E1000_IMS_ACK E1000_ICR_ACK /* Receive Ack frame */
++#define E1000_IMS_MNG E1000_ICR_MNG /* Manageability event */
++#define E1000_IMS_DOCK E1000_ICR_DOCK /* Dock/Undock */
++#define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* Q0 Rx desc FIFO
++ * parity error */
++#define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* Q0 Tx desc FIFO
++ * parity error */
++#define E1000_IMS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer
++ * parity error */
++#define E1000_IMS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity
++ * error */
++#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* Q1 Rx desc FIFO
++ * parity error */
++#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* Q1 Tx desc FIFO
++ * parity error */
++#define E1000_IMS_DSW E1000_ICR_DSW
++#define E1000_IMS_PHYINT E1000_ICR_PHYINT
++#define E1000_IMS_DOUTSYNC E1000_ICR_DOUTSYNC /* NIC DMA out of sync */
++#define E1000_IMS_EPRST E1000_ICR_EPRST
++
++/* Extended Interrupt Mask Set */
++#define E1000_EIMS_RX_QUEUE0 E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */
++#define E1000_EIMS_RX_QUEUE1 E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */
++#define E1000_EIMS_RX_QUEUE2 E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */
++#define E1000_EIMS_RX_QUEUE3 E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */
++#define E1000_EIMS_TX_QUEUE0 E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */
++#define E1000_EIMS_TX_QUEUE1 E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */
++#define E1000_EIMS_TX_QUEUE2 E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */
++#define E1000_EIMS_TX_QUEUE3 E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */
++#define E1000_EIMS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */
++#define E1000_EIMS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */
++
++/* Interrupt Cause Set */
++#define E1000_ICS_TXDW E1000_ICR_TXDW /* Tx desc written back */
++#define E1000_ICS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */
++#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */
++#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
++#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
++#define E1000_ICS_RXO E1000_ICR_RXO /* rx overrun */
++#define E1000_ICS_RXT0 E1000_ICR_RXT0 /* rx timer intr */
++#define E1000_ICS_MDAC E1000_ICR_MDAC /* MDIO access complete */
++#define E1000_ICS_RXCFG E1000_ICR_RXCFG /* Rx /c/ ordered set */
++#define E1000_ICS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */
++#define E1000_ICS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */
++#define E1000_ICS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */
++#define E1000_ICS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */
++#define E1000_ICS_TXD_LOW E1000_ICR_TXD_LOW
++#define E1000_ICS_SRPD E1000_ICR_SRPD
++#define E1000_ICS_ACK E1000_ICR_ACK /* Receive Ack frame */
++#define E1000_ICS_MNG E1000_ICR_MNG /* Manageability event */
++#define E1000_ICS_DOCK E1000_ICR_DOCK /* Dock/Undock */
++#define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* Q0 Rx desc FIFO
++ * parity error */
++#define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* Q0 Tx desc FIFO
++ * parity error */
++#define E1000_ICS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer
++ * parity error */
++#define E1000_ICS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity
++ * error */
++#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* Q1 Rx desc FIFO
++ * parity error */
++#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* Q1 Tx desc FIFO
++ * parity error */
++#define E1000_ICS_DSW E1000_ICR_DSW
++#define E1000_ICS_DOUTSYNC E1000_ICR_DOUTSYNC /* NIC DMA out of sync */
++#define E1000_ICS_PHYINT E1000_ICR_PHYINT
++#define E1000_ICS_EPRST E1000_ICR_EPRST
++
++/* Extended Interrupt Cause Set */
++#define E1000_EICS_RX_QUEUE0 E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */
++#define E1000_EICS_RX_QUEUE1 E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */
++#define E1000_EICS_RX_QUEUE2 E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */
++#define E1000_EICS_RX_QUEUE3 E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */
++#define E1000_EICS_TX_QUEUE0 E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */
++#define E1000_EICS_TX_QUEUE1 E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */
++#define E1000_EICS_TX_QUEUE2 E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */
++#define E1000_EICS_TX_QUEUE3 E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */
++#define E1000_EICS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */
++#define E1000_EICS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */
++
++#define E1000_EITR_ITR_INT_MASK 0x0000FFFF
++
++/* Transmit Descriptor Control */
++#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */
++#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */
++#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */
++#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */
++#define E1000_TXDCTL_LWTHRESH 0xFE000000 /* TXDCTL Low Threshold */
++#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
++#define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */
++/* Enable the counting of descriptors still to be processed. */
++#define E1000_TXDCTL_COUNT_DESC 0x00400000
++
++/* Flow Control Constants */
++#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001
++#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
++#define FLOW_CONTROL_TYPE 0x8808
++
++/* 802.1q VLAN Packet Size */
++#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */
++#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
++
++/* Receive Address */
++/*
++ * Number of high/low register pairs in the RAR. The RAR (Receive Address
++ * Registers) holds the directed and multicast addresses that we monitor.
++ * Technically, we have 16 spots. However, we reserve one of these spots
++ * (RAR[15]) for our directed address used by controllers with
++ * manageability enabled, allowing us room for 15 multicast addresses.
++ */
++#define E1000_RAR_ENTRIES 15
++#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */
++#define E1000_RAL_MAC_ADDR_LEN 4
++#define E1000_RAH_MAC_ADDR_LEN 2
++#define E1000_RAH_POOL_MASK 0x03FC0000
++#define E1000_RAH_POOL_1 0x00040000
++
++/* Error Codes */
++#define E1000_SUCCESS 0
++#define E1000_ERR_NVM 1
++#define E1000_ERR_PHY 2
++#define E1000_ERR_CONFIG 3
++#define E1000_ERR_PARAM 4
++#define E1000_ERR_MAC_INIT 5
++#define E1000_ERR_PHY_TYPE 6
++#define E1000_ERR_RESET 9
++#define E1000_ERR_MASTER_REQUESTS_PENDING 10
++#define E1000_ERR_HOST_INTERFACE_COMMAND 11
++#define E1000_BLK_PHY_RESET 12
++#define E1000_ERR_SWFW_SYNC 13
++#define E1000_NOT_IMPLEMENTED 14
++#define E1000_ERR_MBX 15
++
++/* Loop limit on how long we wait for auto-negotiation to complete */
++#define FIBER_LINK_UP_LIMIT 50
++#define COPPER_LINK_UP_LIMIT 10
++#define PHY_AUTO_NEG_LIMIT 45
++#define PHY_FORCE_LIMIT 20
++/* Number of 100-microsecond intervals we wait for PCI Express master disable */
++#define MASTER_DISABLE_TIMEOUT 800
++/* Number of milliseconds we wait for PHY configuration done after MAC reset */
++#define PHY_CFG_TIMEOUT 100
++/* Number of 2-millisecond intervals we wait to acquire MDIO ownership. */
++#define MDIO_OWNERSHIP_TIMEOUT 10
++/* Number of milliseconds for NVM auto read done after MAC reset. */
++#define AUTO_READ_DONE_TIMEOUT 10
++
++/* Flow Control */
++#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */
++#define E1000_FCRTH_XFCE 0x80000000 /* External Flow Control Enable */
++#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */
++#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */
++
++/* Transmit Configuration Word */
++#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */
++#define E1000_TXCW_HD 0x00000040 /* TXCW half duplex */
++#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */
++#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */
++#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */
++#define E1000_TXCW_RF 0x00003000 /* TXCW remote fault */
++#define E1000_TXCW_NP 0x00008000 /* TXCW next page */
++#define E1000_TXCW_CW 0x0000ffff /* TxConfigWord mask */
++#define E1000_TXCW_TXC 0x40000000 /* Transmit Config control */
++#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */
++
++/* Receive Configuration Word */
++#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */
++#define E1000_RXCW_NC 0x04000000 /* Receive config no carrier */
++#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */
++#define E1000_RXCW_CC 0x10000000 /* Receive config change */
++#define E1000_RXCW_C 0x20000000 /* Receive config */
++#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */
++#define E1000_RXCW_ANC 0x80000000 /* Auto-neg complete */
++
++#define E1000_TSYNCTXCTL_VALID 0x00000001 /* tx timestamp valid */
++#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable tx timestamping */
++
++#define E1000_TSYNCRXCTL_VALID 0x00000001 /* rx timestamp valid */
++#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* rx type mask */
++#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00
++#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02
++#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04
++#define E1000_TSYNCRXCTL_TYPE_ALL 0x08
++#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A
++#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable rx timestamping */
++
++#define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF
++#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00
++#define E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01
++#define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE 0x02
++#define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03
++#define E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04
++
++#define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK 0x00000F00
++#define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE 0x0000
++#define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE 0x0100
++#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE 0x0200
++#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE 0x0300
++#define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE 0x0800
++#define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE 0x0900
++#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE 0x0A00
++#define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE 0x0B00
++#define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE 0x0C00
++#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE 0x0D00
++
++#define E1000_TIMINCA_16NS_SHIFT 24
++
++/* PCI Express Control */
++#define E1000_GCR_RXD_NO_SNOOP 0x00000001
++#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002
++#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004
++#define E1000_GCR_TXD_NO_SNOOP 0x00000008
++#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010
++#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020
++#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000
++#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000
++#define E1000_GCR_CMPL_TMOUT_RESEND 0x00010000
++#define E1000_GCR_CAP_VER2 0x00040000
++
++#define PCIE_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \
++ E1000_GCR_RXDSCW_NO_SNOOP | \
++ E1000_GCR_RXDSCR_NO_SNOOP | \
++ E1000_GCR_TXD_NO_SNOOP | \
++ E1000_GCR_TXDSCW_NO_SNOOP | \
++ E1000_GCR_TXDSCR_NO_SNOOP)
++
++/* PHY Control Register */
++#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */
++#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */
++#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
++#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
++#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */
++#define MII_CR_POWER_DOWN 0x0800 /* Power down */
++#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
++#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */
++#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
++#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
++#define MII_CR_SPEED_1000 0x0040
++#define MII_CR_SPEED_100 0x2000
++#define MII_CR_SPEED_10 0x0000
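++
++/*
++ * Example only (mii_ctrl is a hypothetical copy of the PHY_CONTROL register):
++ * forcing 100 Mb/s full duplex with auto-negotiation disabled combines these
++ * bits as follows:
++ *
++ * mii_ctrl &= ~(MII_CR_AUTO_NEG_EN | MII_CR_SPEED_SELECT_MSB |
++ *               MII_CR_SPEED_SELECT_LSB);
++ * mii_ctrl |= MII_CR_SPEED_100 | MII_CR_FULL_DUPLEX;
++ */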
++
++/* PHY Status Register */
++#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */
++#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */
++#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
++#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */
++#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */
++#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
++#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
++#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */
++#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */
++#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */
++#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */
++#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */
++#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */
++#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
++#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
++
++/* Autoneg Advertisement Register */
++#define NWAY_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */
++#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
++#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
++#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
++#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
++#define NWAY_AR_100T4_CAPS 0x0200 /* 100T4 Capable */
++#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */
++#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
++#define NWAY_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */
++#define NWAY_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */
++
++/* Link Partner Ability Register (Base Page) */
++#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */
++#define NWAY_LPAR_10T_HD_CAPS 0x0020 /* LP is 10T Half Duplex Capable */
++#define NWAY_LPAR_10T_FD_CAPS 0x0040 /* LP is 10T Full Duplex Capable */
++#define NWAY_LPAR_100TX_HD_CAPS 0x0080 /* LP is 100TX Half Duplex Capable */
++#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP is 100TX Full Duplex Capable */
++#define NWAY_LPAR_100T4_CAPS 0x0200 /* LP is 100T4 Capable */
++#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */
++#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */
++#define NWAY_LPAR_REMOTE_FAULT 0x2000 /* LP has detected Remote Fault */
++#define NWAY_LPAR_ACKNOWLEDGE 0x4000 /* LP has rx'd link code word */
++#define NWAY_LPAR_NEXT_PAGE 0x8000 /* Next Page ability supported */
++
++/* Autoneg Expansion Register */
++#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */
++#define NWAY_ER_PAGE_RXD 0x0002 /* A new page has been received */
++#define NWAY_ER_NEXT_PAGE_CAPS 0x0004 /* Local device is Next Page able */
++#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP is Next Page able */
++#define NWAY_ER_PAR_DETECT_FAULT 0x0010 /* Parallel Detection Fault occurred */
++
++/* 1000BASE-T Control Register */
++#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */
++#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
++#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */
++#define CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port */
++ /* 0=DTE device */
++#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */
++ /* 0=Configure PHY as Slave */
++#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */
++ /* 0=Automatic Master/Slave config */
++#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */
++#define CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */
++#define CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */
++#define CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */
++#define CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */
++
++/* 1000BASE-T Status Register */
++#define SR_1000T_IDLE_ERROR_CNT 0x00FF /* Num idle errors since last read */
++#define SR_1000T_ASYM_PAUSE_DIR 0x0100 /* LP asymmetric pause direction bit */
++#define SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */
++#define SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */
++#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
++#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */
++#define SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local Tx is Master, 0=Slave */
++#define SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */
++
++#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5
++
++/* PHY 1000 MII Register/Bit Definitions */
++/* PHY Registers defined by IEEE */
++#define PHY_CONTROL 0x00 /* Control Register */
++#define PHY_STATUS 0x01 /* Status Register */
++#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */
++#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */
++#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */
++#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */
++#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */
++#define PHY_NEXT_PAGE_TX 0x07 /* Next Page Tx */
++#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */
++#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */
++#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
++#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */
++
++#define PHY_CONTROL_LB 0x4000 /* PHY Loopback bit */
++
++/* NVM Control */
++#define E1000_EECD_SK 0x00000001 /* NVM Clock */
++#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */
++#define E1000_EECD_DI 0x00000004 /* NVM Data In */
++#define E1000_EECD_DO 0x00000008 /* NVM Data Out */
++#define E1000_EECD_FWE_MASK 0x00000030
++#define E1000_EECD_FWE_DIS 0x00000010 /* Disable FLASH writes */
++#define E1000_EECD_FWE_EN 0x00000020 /* Enable FLASH writes */
++#define E1000_EECD_FWE_SHIFT 4
++#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */
++#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */
++#define E1000_EECD_PRES 0x00000100 /* NVM Present */
++#define E1000_EECD_SIZE 0x00000200 /* NVM Size (0=64 word 1=256 word) */
++/* NVM Addressing bits based on type 0=small, 1=large */
++#define E1000_EECD_ADDR_BITS 0x00000400
++#define E1000_EECD_TYPE 0x00002000 /* NVM Type (1-SPI, 0-Microwire) */
++#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */
++#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */
++#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */
++#define E1000_EECD_SIZE_EX_SHIFT 11
++#define E1000_EECD_NVADDS 0x00018000 /* NVM Address Size */
++#define E1000_EECD_SELSHAD 0x00020000 /* Select Shadow RAM */
++#define E1000_EECD_INITSRAM 0x00040000 /* Initialize Shadow RAM */
++#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */
++#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */
++#define E1000_EECD_SHADV 0x00200000 /* Shadow RAM Data Valid */
++#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */
++#define E1000_EECD_SECVAL_SHIFT 22
++#define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES)
++
++#define E1000_NVM_SWDPIN0 0x0001 /* SWDPIN 0 NVM Value */
++#define E1000_NVM_LED_LOGIC 0x0020 /* Led Logic Word */
++#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM read/write regs */
++#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */
++#define E1000_NVM_RW_REG_START 1 /* Start operation */
++#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
++#define E1000_NVM_POLL_WRITE 1 /* Flag for polling for write complete */
++#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */
++#define E1000_FLASH_UPDATES 2000
++
++/* NVM Word Offsets */
++#define NVM_COMPAT 0x0003
++#define NVM_ID_LED_SETTINGS 0x0004
++#define NVM_VERSION 0x0005
++#define NVM_SERDES_AMPLITUDE 0x0006 /* SERDES output amplitude */
++#define NVM_PHY_CLASS_WORD 0x0007
++#define NVM_INIT_CONTROL1_REG 0x000A
++#define NVM_INIT_CONTROL2_REG 0x000F
++#define NVM_SWDEF_PINS_CTRL_PORT_1 0x0010
++#define NVM_INIT_CONTROL3_PORT_B 0x0014
++#define NVM_INIT_3GIO_3 0x001A
++#define NVM_SWDEF_PINS_CTRL_PORT_0 0x0020
++#define NVM_INIT_CONTROL3_PORT_A 0x0024
++#define NVM_CFG 0x0012
++#define NVM_FLASH_VERSION 0x0032
++#define NVM_ALT_MAC_ADDR_PTR 0x0037
++#define NVM_CHECKSUM_REG 0x003F
++
++#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */
++#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */
++
++/* Mask bits for fields in Word 0x0f of the NVM */
++#define NVM_WORD0F_PAUSE_MASK 0x3000
++#define NVM_WORD0F_PAUSE 0x1000
++#define NVM_WORD0F_ASM_DIR 0x2000
++#define NVM_WORD0F_ANE 0x0800
++#define NVM_WORD0F_SWPDIO_EXT_MASK 0x00F0
++#define NVM_WORD0F_LPLU 0x0001
++
++/* Mask bits for fields in Word 0x1a of the NVM */
++#define NVM_WORD1A_ASPM_MASK 0x000C
++
++/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
++#define NVM_SUM 0xBABA
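++
++/*
++ * Sketch of the checksum rule above (nvm_read_word() and hw are hypothetical
++ * stand-ins for the driver's NVM read routine and hardware handle):
++ *
++ * u16 checksum = 0, word, i;
++ *
++ * for (i = 0; i <= NVM_CHECKSUM_REG; i++) {
++ *         if (nvm_read_word(hw, i, &word))
++ *                 return -E1000_ERR_NVM;
++ *         checksum += word;
++ * }
++ * if (checksum != NVM_SUM)
++ *         return -E1000_ERR_NVM;
++ */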
++
++#define NVM_MAC_ADDR_OFFSET 0
++#define NVM_PBA_OFFSET_0 8
++#define NVM_PBA_OFFSET_1 9
++#define NVM_RESERVED_WORD 0xFFFF
++#define NVM_PHY_CLASS_A 0x8000
++#define NVM_SERDES_AMPLITUDE_MASK 0x000F
++#define NVM_SIZE_MASK 0x1C00
++#define NVM_SIZE_SHIFT 10
++#define NVM_WORD_SIZE_BASE_SHIFT 6
++#define NVM_SWDPIO_EXT_SHIFT 4
++
++/* NVM Commands - SPI */
++#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */
++#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */
++#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */
++#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */
++#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */
++#define NVM_WRDI_OPCODE_SPI 0x04 /* NVM reset Write Enable latch */
++#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */
++#define NVM_WRSR_OPCODE_SPI 0x01 /* NVM write Status register */
++
++/* SPI NVM Status Register */
++#define NVM_STATUS_RDY_SPI 0x01
++#define NVM_STATUS_WEN_SPI 0x02
++#define NVM_STATUS_BP0_SPI 0x04
++#define NVM_STATUS_BP1_SPI 0x08
++#define NVM_STATUS_WPEN_SPI 0x80
++
++/* Word definitions for ID LED Settings */
++#define ID_LED_RESERVED_0000 0x0000
++#define ID_LED_RESERVED_FFFF 0xFFFF
++#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \
++ (ID_LED_OFF1_OFF2 << 8) | \
++ (ID_LED_DEF1_DEF2 << 4) | \
++ (ID_LED_DEF1_DEF2))
++#define ID_LED_DEF1_DEF2 0x1
++#define ID_LED_DEF1_ON2 0x2
++#define ID_LED_DEF1_OFF2 0x3
++#define ID_LED_ON1_DEF2 0x4
++#define ID_LED_ON1_ON2 0x5
++#define ID_LED_ON1_OFF2 0x6
++#define ID_LED_OFF1_DEF2 0x7
++#define ID_LED_OFF1_ON2 0x8
++#define ID_LED_OFF1_OFF2 0x9
++
++#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF
++#define IGP_ACTIVITY_LED_ENABLE 0x0300
++#define IGP_LED3_MODE 0x07000000
++
++/* PCI/PCI-X/PCI-EX Config space */
++#define PCI_HEADER_TYPE_REGISTER 0x0E
++#define PCIE_LINK_STATUS 0x12
++#define PCIE_DEVICE_CONTROL2 0x28
++
++#define PCI_HEADER_TYPE_MULTIFUNC 0x80
++#define PCIE_LINK_WIDTH_MASK 0x3F0
++#define PCIE_LINK_WIDTH_SHIFT 4
++#define PCIE_DEVICE_CONTROL2_16ms 0x0005
++
++#ifndef ETH_ADDR_LEN
++#define ETH_ADDR_LEN 6
++#endif
++
++#define PHY_REVISION_MASK 0xFFFFFFF0
++#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
++#define MAX_PHY_MULTI_PAGE_REG 0xF
++
++/* Bit definitions for valid PHY IDs. */
++/*
++ * I = Integrated
++ * E = External
++ */
++#define M88E1000_E_PHY_ID 0x01410C50
++#define M88E1000_I_PHY_ID 0x01410C30
++#define M88E1011_I_PHY_ID 0x01410C20
++#define IGP01E1000_I_PHY_ID 0x02A80380
++#define M88E1011_I_REV_4 0x04
++#define M88E1111_I_PHY_ID 0x01410CC0
++#define GG82563_E_PHY_ID 0x01410CA0
++#define IGP03E1000_E_PHY_ID 0x02A80390
++#define IFE_E_PHY_ID 0x02A80330
++#define IFE_PLUS_E_PHY_ID 0x02A80320
++#define IFE_C_E_PHY_ID 0x02A80310
++#define IGP04E1000_E_PHY_ID 0x02A80391
++#define M88_VENDOR 0x0141
++
++/* M88E1000 Specific Registers */
++#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */
++#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */
++#define M88E1000_INT_ENABLE 0x12 /* Interrupt Enable Register */
++#define M88E1000_INT_STATUS 0x13 /* Interrupt Status Register */
++#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */
++#define M88E1000_RX_ERR_CNTR 0x15 /* Receive Error Counter */
++
++#define M88E1000_PHY_EXT_CTRL 0x1A /* PHY extend control register */
++#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */
++#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */
++#define M88E1000_PHY_VCO_REG_BIT8 0x100 /* Bits 8 & 11 are adjusted for */
++#define M88E1000_PHY_VCO_REG_BIT11 0x800 /* improved BER performance */
++
++/* M88E1000 PHY Specific Control Register */
++#define M88E1000_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */
++#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reverse enabled */
++#define M88E1000_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */
++/* 1=CLK125 low, 0=CLK125 toggling */
++#define M88E1000_PSCR_CLK125_DISABLE 0x0010
++#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */
++ /* Manual MDI configuration */
++#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */
++/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */
++#define M88E1000_PSCR_AUTO_X_1000T 0x0040
++/* Auto crossover enabled all speeds */
++#define M88E1000_PSCR_AUTO_X_MODE 0x0060
++/*
++ * 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold)
++ * 0=Normal 10BASE-T Rx Threshold
++ */
++#define M88E1000_PSCR_EN_10BT_EXT_DIST 0x0080
++/* 1=5-bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */
++#define M88E1000_PSCR_MII_5BIT_ENABLE 0x0100
++#define M88E1000_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler disable */
++#define M88E1000_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */
++#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Tx */
++
++/* M88E1000 PHY Specific Status Register */
++#define M88E1000_PSSR_JABBER 0x0001 /* 1=Jabber */
++#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */
++#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */
++#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */
++/*
++ * 0 = <50M
++ * 1 = 50-80M
++ * 2 = 80-110M
++ * 3 = 110-140M
++ * 4 = >140M
++ */
++#define M88E1000_PSSR_CABLE_LENGTH 0x0380
++#define M88E1000_PSSR_LINK 0x0400 /* 1=Link up, 0=Link down */
++#define M88E1000_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */
++#define M88E1000_PSSR_PAGE_RCVD 0x1000 /* 1=Page received */
++#define M88E1000_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */
++#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */
++#define M88E1000_PSSR_10MBS 0x0000 /* 00=10Mbs */
++#define M88E1000_PSSR_100MBS 0x4000 /* 01=100Mbs */
++#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */
++
++#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
++
++/* M88E1000 Extended PHY Specific Control Register */
++#define M88E1000_EPSCR_FIBER_LOOPBACK 0x4000 /* 1=Fiber loopback */
++/*
++ * 1 = Lost lock detect enabled.
++ * Will assert lost lock and bring
++ * link down if idle not seen
++ * within 1ms in 1000BASE-T
++ */
++#define M88E1000_EPSCR_DOWN_NO_IDLE 0x8000
++/*
++ * Number of times we will attempt to autonegotiate before downshifting if we
++ * are the master
++ */
++#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
++#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000
++#define M88E1000_EPSCR_MASTER_DOWNSHIFT_2X 0x0400
++#define M88E1000_EPSCR_MASTER_DOWNSHIFT_3X 0x0800
++#define M88E1000_EPSCR_MASTER_DOWNSHIFT_4X 0x0C00
++/*
++ * Number of times we will attempt to autonegotiate before downshifting if we
++ * are the slave
++ */
++#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300
++#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_DIS 0x0000
++#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100
++#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_2X 0x0200
++#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_3X 0x0300
++#define M88E1000_EPSCR_TX_CLK_2_5 0x0060 /* 2.5 MHz TX_CLK */
++#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */
++#define M88E1000_EPSCR_TX_CLK_0 0x0000 /* NO TX_CLK */
++
++/* M88EC018 Rev 2 specific DownShift settings */
++#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00
++#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_1X 0x0000
++#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_2X 0x0200
++#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_3X 0x0400
++#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_4X 0x0600
++#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800
++#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_6X 0x0A00
++#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_7X 0x0C00
++#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_8X 0x0E00
++
++/*
++ * Bits...
++ * 15-5: page
++ * 4-0: register offset
++ */
++#define GG82563_PAGE_SHIFT 5
++#define GG82563_REG(page, reg) \
++ (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))
++#define GG82563_MIN_ALT_REG 30
++
++/* GG82563 Specific Registers */
++#define GG82563_PHY_SPEC_CTRL \
++ GG82563_REG(0, 16) /* PHY Specific Control */
++#define GG82563_PHY_SPEC_STATUS \
++ GG82563_REG(0, 17) /* PHY Specific Status */
++#define GG82563_PHY_INT_ENABLE \
++ GG82563_REG(0, 18) /* Interrupt Enable */
++#define GG82563_PHY_SPEC_STATUS_2 \
++ GG82563_REG(0, 19) /* PHY Specific Status 2 */
++#define GG82563_PHY_RX_ERR_CNTR \
++ GG82563_REG(0, 21) /* Receive Error Counter */
++#define GG82563_PHY_PAGE_SELECT \
++ GG82563_REG(0, 22) /* Page Select */
++#define GG82563_PHY_SPEC_CTRL_2 \
++ GG82563_REG(0, 26) /* PHY Specific Control 2 */
++#define GG82563_PHY_PAGE_SELECT_ALT \
++ GG82563_REG(0, 29) /* Alternate Page Select */
++#define GG82563_PHY_TEST_CLK_CTRL \
++ GG82563_REG(0, 30) /* Test Clock Control (use reg. 29 to select) */
++
++#define GG82563_PHY_MAC_SPEC_CTRL \
++ GG82563_REG(2, 21) /* MAC Specific Control Register */
++#define GG82563_PHY_MAC_SPEC_CTRL_2 \
++ GG82563_REG(2, 26) /* MAC Specific Control 2 */
++
++#define GG82563_PHY_DSP_DISTANCE \
++ GG82563_REG(5, 26) /* DSP Distance */
++
++/* Page 193 - Port Control Registers */
++#define GG82563_PHY_KMRN_MODE_CTRL \
++ GG82563_REG(193, 16) /* Kumeran Mode Control */
++#define GG82563_PHY_PORT_RESET \
++ GG82563_REG(193, 17) /* Port Reset */
++#define GG82563_PHY_REVISION_ID \
++ GG82563_REG(193, 18) /* Revision ID */
++#define GG82563_PHY_DEVICE_ID \
++ GG82563_REG(193, 19) /* Device ID */
++#define GG82563_PHY_PWR_MGMT_CTRL \
++ GG82563_REG(193, 20) /* Power Management Control */
++#define GG82563_PHY_RATE_ADAPT_CTRL \
++ GG82563_REG(193, 25) /* Rate Adaptation Control */
++
++/* Page 194 - KMRN Registers */
++#define GG82563_PHY_KMRN_FIFO_CTRL_STAT \
++ GG82563_REG(194, 16) /* FIFO's Control/Status */
++#define GG82563_PHY_KMRN_CTRL \
++ GG82563_REG(194, 17) /* Control */
++#define GG82563_PHY_INBAND_CTRL \
++ GG82563_REG(194, 18) /* Inband Control */
++#define GG82563_PHY_KMRN_DIAGNOSTIC \
++ GG82563_REG(194, 19) /* Diagnostic */
++#define GG82563_PHY_ACK_TIMEOUTS \
++ GG82563_REG(194, 20) /* Acknowledge Timeouts */
++#define GG82563_PHY_ADV_ABILITY \
++ GG82563_REG(194, 21) /* Advertised Ability */
++#define GG82563_PHY_LINK_PARTNER_ADV_ABILITY \
++ GG82563_REG(194, 23) /* Link Partner Advertised Ability */
++#define GG82563_PHY_ADV_NEXT_PAGE \
++ GG82563_REG(194, 24) /* Advertised Next Page */
++#define GG82563_PHY_LINK_PARTNER_ADV_NEXT_PAGE \
++ GG82563_REG(194, 25) /* Link Partner Advertised Next page */
++#define GG82563_PHY_KMRN_MISC \
++ GG82563_REG(194, 26) /* Misc. */
++
++/* MDI Control */
++#define E1000_MDIC_DATA_MASK 0x0000FFFF
++#define E1000_MDIC_REG_MASK 0x001F0000
++#define E1000_MDIC_REG_SHIFT 16
++#define E1000_MDIC_PHY_MASK 0x03E00000
++#define E1000_MDIC_PHY_SHIFT 21
++#define E1000_MDIC_OP_WRITE 0x04000000
++#define E1000_MDIC_OP_READ 0x08000000
++#define E1000_MDIC_READY 0x10000000
++#define E1000_MDIC_INT_EN 0x20000000
++#define E1000_MDIC_ERROR 0x40000000
++
++/* SerDes Control */
++#define E1000_GEN_CTL_READY 0x80000000
++#define E1000_GEN_CTL_ADDRESS_SHIFT 8
++#define E1000_GEN_POLL_TIMEOUT 640
++
++/* LinkSec register fields */
++#define E1000_LSECTXCAP_SUM_MASK 0x00FF0000
++#define E1000_LSECTXCAP_SUM_SHIFT 16
++#define E1000_LSECRXCAP_SUM_MASK 0x00FF0000
++#define E1000_LSECRXCAP_SUM_SHIFT 16
++
++#define E1000_LSECTXCTRL_EN_MASK 0x00000003
++#define E1000_LSECTXCTRL_DISABLE 0x0
++#define E1000_LSECTXCTRL_AUTH 0x1
++#define E1000_LSECTXCTRL_AUTH_ENCRYPT 0x2
++#define E1000_LSECTXCTRL_AISCI 0x00000020
++#define E1000_LSECTXCTRL_PNTHRSH_MASK 0xFFFFFF00
++#define E1000_LSECTXCTRL_RSV_MASK 0x000000D8
++
++#define E1000_LSECRXCTRL_EN_MASK 0x0000000C
++#define E1000_LSECRXCTRL_EN_SHIFT 2
++#define E1000_LSECRXCTRL_DISABLE 0x0
++#define E1000_LSECRXCTRL_CHECK 0x1
++#define E1000_LSECRXCTRL_STRICT 0x2
++#define E1000_LSECRXCTRL_DROP 0x3
++#define E1000_LSECRXCTRL_PLSH 0x00000040
++#define E1000_LSECRXCTRL_RP 0x00000080
++#define E1000_LSECRXCTRL_RSV_MASK 0xFFFFFF33
++
++
++
++#endif /* _E1000_DEFINES_H_ */
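As a point of reference for the NVM words defined above, the checksum rule (all words from offset 0 through NVM_CHECKSUM_REG summing to NVM_SUM) can be exercised with a small stand-alone sketch. The helper name and the in-memory word buffer are illustrative only; the driver itself fetches each word through its NVM ops rather than from a flat array.

#include <stdint.h>
#include <stdbool.h>

#define NVM_CHECKSUM_REG 0x003F
#define NVM_SUM          0xBABA

/* Returns true if the 64 words up to and including the checksum word
 * sum (with 16-bit wrap-around) to the expected signature 0xBABA. */
static bool nvm_checksum_is_valid(const uint16_t *nvm_words)
{
	uint16_t sum = 0;
	int i;

	for (i = 0; i <= NVM_CHECKSUM_REG; i++)
		sum += nvm_words[i];

	return sum == NVM_SUM;
}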
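The GG82563_REG() macro above packs a PHY page number into bits 15:5 and a register offset into bits 4:0 of a single address. A small, self-contained example (the main() wrapper is illustrative) shows the round trip:

#include <stdint.h>
#include <stdio.h>

#define MAX_PHY_REG_ADDRESS 0x1F
#define GG82563_PAGE_SHIFT  5
#define GG82563_REG(page, reg) \
	(((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))

int main(void)
{
	/* Page 193, register 16 (Kumeran Mode Control) packs to 0x1830. */
	uint32_t addr = GG82563_REG(193, 16);

	printf("page=%u reg=%u -> 0x%04X\n",
	       (unsigned)(addr >> GG82563_PAGE_SHIFT),
	       (unsigned)(addr & MAX_PHY_REG_ADDRESS),
	       (unsigned)addr);
	return 0;
}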
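The E1000_MDIC_* fields above describe how a single 32-bit MDI command word is laid out. A hedged sketch of assembling a PHY read command follows; the helper is illustrative, and the real access routine (which also polls E1000_MDIC_READY and checks E1000_MDIC_ERROR before extracting the low 16 data bits) lives in the PHY code elsewhere in this patch.

#include <stdint.h>

#define E1000_MDIC_REG_SHIFT 16
#define E1000_MDIC_PHY_SHIFT 21
#define E1000_MDIC_OP_READ   0x08000000

/* Build the MDIC command word that asks the PHY at 'phy_addr' to return
 * the contents of register 'offset'. */
static uint32_t mdic_read_cmd(uint32_t phy_addr, uint32_t offset)
{
	return (offset << E1000_MDIC_REG_SHIFT) |
	       (phy_addr << E1000_MDIC_PHY_SHIFT) |
	       E1000_MDIC_OP_READ;
}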
+Index: linux-2.6.22/drivers/net/igb/e1000_hw.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.22/drivers/net/igb/e1000_hw.h 2009-12-18 12:39:22.000000000 -0500
+@@ -0,0 +1,692 @@
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2009 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ You should have received a copy of the GNU General Public License along with
++ this program; if not, write to the Free Software Foundation, Inc.,
++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
++
++#ifndef _E1000_HW_H_
++#define _E1000_HW_H_
++
++#include "e1000_osdep.h"
++#include "e1000_regs.h"
++#include "e1000_defines.h"
++
++struct e1000_hw;
++
++#define E1000_DEV_ID_82576 0x10C9
++#define E1000_DEV_ID_82576_FIBER 0x10E6
++#define E1000_DEV_ID_82576_SERDES 0x10E7
++#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8
++#define E1000_DEV_ID_82576_NS 0x150A
++#define E1000_DEV_ID_82576_NS_SERDES 0x1518
++#define E1000_DEV_ID_82576_SERDES_QUAD 0x150D
++#define E1000_DEV_ID_82575EB_COPPER 0x10A7
++#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9
++#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6
++#define E1000_REVISION_0 0
++#define E1000_REVISION_1 1
++#define E1000_REVISION_2 2
++#define E1000_REVISION_3 3
++#define E1000_REVISION_4 4
++
++#define E1000_FUNC_0 0
++#define E1000_FUNC_1 1
++
++#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0
++#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3
++
++enum e1000_mac_type {
++ e1000_undefined = 0,
++ e1000_82575,
++ e1000_82576,
++ e1000_num_macs /* List is 1-based, so subtract 1 for true count. */
++};
++
++enum e1000_media_type {
++ e1000_media_type_unknown = 0,
++ e1000_media_type_copper = 1,
++ e1000_media_type_fiber = 2,
++ e1000_media_type_internal_serdes = 3,
++ e1000_num_media_types
++};
++
++enum e1000_nvm_type {
++ e1000_nvm_unknown = 0,
++ e1000_nvm_none,
++ e1000_nvm_eeprom_spi,
++ e1000_nvm_flash_hw,
++ e1000_nvm_flash_sw
++};
++
++enum e1000_nvm_override {
++ e1000_nvm_override_none = 0,
++ e1000_nvm_override_spi_small,
++ e1000_nvm_override_spi_large,
++};
++
++enum e1000_phy_type {
++ e1000_phy_unknown = 0,
++ e1000_phy_none,
++ e1000_phy_m88,
++ e1000_phy_igp,
++ e1000_phy_igp_2,
++ e1000_phy_gg82563,
++ e1000_phy_igp_3,
++ e1000_phy_ife,
++ e1000_phy_vf,
++};
++
++enum e1000_bus_type {
++ e1000_bus_type_unknown = 0,
++ e1000_bus_type_pci,
++ e1000_bus_type_pcix,
++ e1000_bus_type_pci_express,
++ e1000_bus_type_reserved
++};
++
++enum e1000_bus_speed {
++ e1000_bus_speed_unknown = 0,
++ e1000_bus_speed_33,
++ e1000_bus_speed_66,
++ e1000_bus_speed_100,
++ e1000_bus_speed_120,
++ e1000_bus_speed_133,
++ e1000_bus_speed_2500,
++ e1000_bus_speed_5000,
++ e1000_bus_speed_reserved
++};
++
++enum e1000_bus_width {
++ e1000_bus_width_unknown = 0,
++ e1000_bus_width_pcie_x1,
++ e1000_bus_width_pcie_x2,
++ e1000_bus_width_pcie_x4 = 4,
++ e1000_bus_width_pcie_x8 = 8,
++ e1000_bus_width_32,
++ e1000_bus_width_64,
++ e1000_bus_width_reserved
++};
++
++enum e1000_1000t_rx_status {
++ e1000_1000t_rx_status_not_ok = 0,
++ e1000_1000t_rx_status_ok,
++ e1000_1000t_rx_status_undefined = 0xFF
++};
++
++enum e1000_rev_polarity {
++ e1000_rev_polarity_normal = 0,
++ e1000_rev_polarity_reversed,
++ e1000_rev_polarity_undefined = 0xFF
++};
++
++enum e1000_fc_mode {
++ e1000_fc_none = 0,
++ e1000_fc_rx_pause,
++ e1000_fc_tx_pause,
++ e1000_fc_full,
++ e1000_fc_default = 0xFF
++};
++
++enum e1000_ms_type {
++ e1000_ms_hw_default = 0,
++ e1000_ms_force_master,
++ e1000_ms_force_slave,
++ e1000_ms_auto
++};
++
++enum e1000_smart_speed {
++ e1000_smart_speed_default = 0,
++ e1000_smart_speed_on,
++ e1000_smart_speed_off
++};
++
++enum e1000_serdes_link_state {
++ e1000_serdes_link_down = 0,
++ e1000_serdes_link_autoneg_progress,
++ e1000_serdes_link_autoneg_complete,
++ e1000_serdes_link_forced_up
++};
++
++/* Receive Descriptor */
++struct e1000_rx_desc {
++ __le64 buffer_addr; /* Address of the descriptor's data buffer */
++ __le16 length; /* Length of data DMAed into data buffer */
++ __le16 csum; /* Packet checksum */
++ u8 status; /* Descriptor status */
++ u8 errors; /* Descriptor Errors */
++ __le16 special;
++};
++
++/* Receive Descriptor - Extended */
++union e1000_rx_desc_extended {
++ struct {
++ __le64 buffer_addr;
++ __le64 reserved;
++ } read;
++ struct {
++ struct {
++ __le32 mrq; /* Multiple Rx Queues */
++ union {
++ __le32 rss; /* RSS Hash */
++ struct {
++ __le16 ip_id; /* IP id */
++ __le16 csum; /* Packet Checksum */
++ } csum_ip;
++ } hi_dword;
++ } lower;
++ struct {
++ __le32 status_error; /* ext status/error */
++ __le16 length;
++ __le16 vlan; /* VLAN tag */
++ } upper;
++ } wb; /* writeback */
++};
++
++#define MAX_PS_BUFFERS 4
++/* Receive Descriptor - Packet Split */
++union e1000_rx_desc_packet_split {
++ struct {
++ /* one buffer for protocol header(s), three data buffers */
++ __le64 buffer_addr[MAX_PS_BUFFERS];
++ } read;
++ struct {
++ struct {
++ __le32 mrq; /* Multiple Rx Queues */
++ union {
++ __le32 rss; /* RSS Hash */
++ struct {
++ __le16 ip_id; /* IP id */
++ __le16 csum; /* Packet Checksum */
++ } csum_ip;
++ } hi_dword;
++ } lower;
++ struct {
++ __le32 status_error; /* ext status/error */
++ __le16 length0; /* length of buffer 0 */
++ __le16 vlan; /* VLAN tag */
++ } middle;
++ struct {
++ __le16 header_status;
++ __le16 length[3]; /* length of buffers 1-3 */
++ } upper;
++ __le64 reserved;
++ } wb; /* writeback */
++};
++
++/* Transmit Descriptor */
++struct e1000_tx_desc {
++ __le64 buffer_addr; /* Address of the descriptor's data buffer */
++ union {
++ __le32 data;
++ struct {
++ __le16 length; /* Data buffer length */
++ u8 cso; /* Checksum offset */
++ u8 cmd; /* Descriptor control */
++ } flags;
++ } lower;
++ union {
++ __le32 data;
++ struct {
++ u8 status; /* Descriptor status */
++ u8 css; /* Checksum start */
++ __le16 special;
++ } fields;
++ } upper;
++};
++
++/* Offload Context Descriptor */
++struct e1000_context_desc {
++ union {
++ __le32 ip_config;
++ struct {
++ u8 ipcss; /* IP checksum start */
++ u8 ipcso; /* IP checksum offset */
++ __le16 ipcse; /* IP checksum end */
++ } ip_fields;
++ } lower_setup;
++ union {
++ __le32 tcp_config;
++ struct {
++ u8 tucss; /* TCP checksum start */
++ u8 tucso; /* TCP checksum offset */
++ __le16 tucse; /* TCP checksum end */
++ } tcp_fields;
++ } upper_setup;
++ __le32 cmd_and_length;
++ union {
++ __le32 data;
++ struct {
++ u8 status; /* Descriptor status */
++ u8 hdr_len; /* Header length */
++ __le16 mss; /* Maximum segment size */
++ } fields;
++ } tcp_seg_setup;
++};
++
++/* Offload data descriptor */
++struct e1000_data_desc {
++ __le64 buffer_addr; /* Address of the descriptor's buffer address */
++ union {
++ __le32 data;
++ struct {
++ __le16 length; /* Data buffer length */
++ u8 typ_len_ext;
++ u8 cmd;
++ } flags;
++ } lower;
++ union {
++ __le32 data;
++ struct {
++ u8 status; /* Descriptor status */
++ u8 popts; /* Packet Options */
++ __le16 special;
++ } fields;
++ } upper;
++};
++
++/* Statistics counters collected by the MAC */
++struct e1000_hw_stats {
++ u64 crcerrs;
++ u64 algnerrc;
++ u64 symerrs;
++ u64 rxerrc;
++ u64 mpc;
++ u64 scc;
++ u64 ecol;
++ u64 mcc;
++ u64 latecol;
++ u64 colc;
++ u64 dc;
++ u64 tncrs;
++ u64 sec;
++ u64 cexterr;
++ u64 rlec;
++ u64 xonrxc;
++ u64 xontxc;
++ u64 xoffrxc;
++ u64 xofftxc;
++ u64 fcruc;
++ u64 prc64;
++ u64 prc127;
++ u64 prc255;
++ u64 prc511;
++ u64 prc1023;
++ u64 prc1522;
++ u64 gprc;
++ u64 bprc;
++ u64 mprc;
++ u64 gptc;
++ u64 gorc;
++ u64 gotc;
++ u64 rnbc;
++ u64 ruc;
++ u64 rfc;
++ u64 roc;
++ u64 rjc;
++ u64 mgprc;
++ u64 mgpdc;
++ u64 mgptc;
++ u64 tor;
++ u64 tot;
++ u64 tpr;
++ u64 tpt;
++ u64 ptc64;
++ u64 ptc127;
++ u64 ptc255;
++ u64 ptc511;
++ u64 ptc1023;
++ u64 ptc1522;
++ u64 mptc;
++ u64 bptc;
++ u64 tsctc;
++ u64 tsctfc;
++ u64 iac;
++ u64 icrxptc;
++ u64 icrxatc;
++ u64 ictxptc;
++ u64 ictxatc;
++ u64 ictxqec;
++ u64 ictxqmtc;
++ u64 icrxdmtc;
++ u64 icrxoc;
++ u64 cbtmpc;
++ u64 htdpmc;
++ u64 cbrdpc;
++ u64 cbrmpc;
++ u64 rpthc;
++ u64 hgptc;
++ u64 htcbdpc;
++ u64 hgorc;
++ u64 hgotc;
++ u64 lenerrs;
++ u64 scvpc;
++ u64 hrmpc;
++ u64 doosync;
++};
++
++
++struct e1000_phy_stats {
++ u32 idle_errors;
++ u32 receive_errors;
++};
++
++struct e1000_host_mng_dhcp_cookie {
++ u32 signature;
++ u8 status;
++ u8 reserved0;
++ u16 vlan_id;
++ u32 reserved1;
++ u16 reserved2;
++ u8 reserved3;
++ u8 checksum;
++};
++
++/* Host Interface "Rev 1" */
++struct e1000_host_command_header {
++ u8 command_id;
++ u8 command_length;
++ u8 command_options;
++ u8 checksum;
++};
++
++#define E1000_HI_MAX_DATA_LENGTH 252
++struct e1000_host_command_info {
++ struct e1000_host_command_header command_header;
++ u8 command_data[E1000_HI_MAX_DATA_LENGTH];
++};
++
++/* Host Interface "Rev 2" */
++struct e1000_host_mng_command_header {
++ u8 command_id;
++ u8 checksum;
++ u16 reserved1;
++ u16 reserved2;
++ u16 command_length;
++};
++
++#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8
++struct e1000_host_mng_command_info {
++ struct e1000_host_mng_command_header command_header;
++ u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH];
++};
++
++#include "e1000_mac.h"
++#include "e1000_phy.h"
++#include "e1000_nvm.h"
++#include "e1000_manage.h"
++#include "e1000_mbx.h"
++
++struct e1000_mac_operations {
++ /* Function pointers for the MAC. */
++ s32 (*init_params)(struct e1000_hw *);
++ s32 (*id_led_init)(struct e1000_hw *);
++ s32 (*blink_led)(struct e1000_hw *);
++ s32 (*check_for_link)(struct e1000_hw *);
++ bool (*check_mng_mode)(struct e1000_hw *hw);
++ s32 (*cleanup_led)(struct e1000_hw *);
++ void (*clear_hw_cntrs)(struct e1000_hw *);
++ void (*clear_vfta)(struct e1000_hw *);
++ s32 (*get_bus_info)(struct e1000_hw *);
++ void (*set_lan_id)(struct e1000_hw *);
++ s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *);
++ s32 (*led_on)(struct e1000_hw *);
++ s32 (*led_off)(struct e1000_hw *);
++ void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32);
++ s32 (*reset_hw)(struct e1000_hw *);
++ s32 (*init_hw)(struct e1000_hw *);
++ void (*shutdown_serdes)(struct e1000_hw *);
++ s32 (*setup_link)(struct e1000_hw *);
++ s32 (*setup_physical_interface)(struct e1000_hw *);
++ s32 (*setup_led)(struct e1000_hw *);
++ void (*write_vfta)(struct e1000_hw *, u32, u32);
++ void (*mta_set)(struct e1000_hw *, u32);
++ void (*config_collision_dist)(struct e1000_hw *);
++ void (*rar_set)(struct e1000_hw *, u8*, u32);
++ s32 (*read_mac_addr)(struct e1000_hw *);
++ s32 (*validate_mdi_setting)(struct e1000_hw *);
++ s32 (*mng_host_if_write)(struct e1000_hw *, u8*, u16, u16, u8*);
++ s32 (*mng_write_cmd_header)(struct e1000_hw *hw,
++ struct e1000_host_mng_command_header*);
++ s32 (*mng_enable_host_if)(struct e1000_hw *);
++ s32 (*wait_autoneg)(struct e1000_hw *);
++};
++
++struct e1000_phy_operations {
++ s32 (*init_params)(struct e1000_hw *);
++ s32 (*acquire)(struct e1000_hw *);
++ s32 (*check_polarity)(struct e1000_hw *);
++ s32 (*check_reset_block)(struct e1000_hw *);
++ s32 (*commit)(struct e1000_hw *);
++ s32 (*force_speed_duplex)(struct e1000_hw *);
++ s32 (*get_cfg_done)(struct e1000_hw *hw);
++ s32 (*get_cable_length)(struct e1000_hw *);
++ s32 (*get_info)(struct e1000_hw *);
++ s32 (*read_reg)(struct e1000_hw *, u32, u16 *);
++ s32 (*read_reg_locked)(struct e1000_hw *, u32, u16 *);
++ void (*release)(struct e1000_hw *);
++ s32 (*reset)(struct e1000_hw *);
++ s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
++ s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
++ s32 (*write_reg)(struct e1000_hw *, u32, u16);
++ s32 (*write_reg_locked)(struct e1000_hw *, u32, u16);
++ void (*power_up)(struct e1000_hw *);
++ void (*power_down)(struct e1000_hw *);
++};
++
++struct e1000_nvm_operations {
++ s32 (*init_params)(struct e1000_hw *);
++ s32 (*acquire)(struct e1000_hw *);
++ s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
++ void (*release)(struct e1000_hw *);
++ void (*reload)(struct e1000_hw *);
++ s32 (*update)(struct e1000_hw *);
++ s32 (*valid_led_default)(struct e1000_hw *, u16 *);
++ s32 (*validate)(struct e1000_hw *);
++ s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
++};
++
++struct e1000_mac_info {
++ struct e1000_mac_operations ops;
++ u8 addr[6];
++ u8 perm_addr[6];
++
++ enum e1000_mac_type type;
++
++ u32 collision_delta;
++ u32 ledctl_default;
++ u32 ledctl_mode1;
++ u32 ledctl_mode2;
++ u32 mc_filter_type;
++ u32 tx_packet_delta;
++ u32 txcw;
++
++ u16 current_ifs_val;
++ u16 ifs_max_val;
++ u16 ifs_min_val;
++ u16 ifs_ratio;
++ u16 ifs_step_size;
++ u16 mta_reg_count;
++ u16 uta_reg_count;
++
++ /* Maximum size of the MTA register table in all supported adapters */
++ #define MAX_MTA_REG 128
++ u32 mta_shadow[MAX_MTA_REG];
++ u16 rar_entry_count;
++
++ u8 forced_speed_duplex;
++
++ bool adaptive_ifs;
++ bool arc_subsystem_valid;
++ bool asf_firmware_present;
++ bool autoneg;
++ bool autoneg_failed;
++ bool get_link_status;
++ bool in_ifs_mode;
++ enum e1000_serdes_link_state serdes_link_state;
++ bool serdes_has_link;
++ bool tx_pkt_filtering;
++};
++
++struct e1000_phy_info {
++ struct e1000_phy_operations ops;
++ enum e1000_phy_type type;
++
++ enum e1000_1000t_rx_status local_rx;
++ enum e1000_1000t_rx_status remote_rx;
++ enum e1000_ms_type ms_type;
++ enum e1000_ms_type original_ms_type;
++ enum e1000_rev_polarity cable_polarity;
++ enum e1000_smart_speed smart_speed;
++
++ u32 addr;
++ u32 id;
++ u32 reset_delay_us; /* in usec */
++ u32 revision;
++
++ enum e1000_media_type media_type;
++
++ u16 autoneg_advertised;
++ u16 autoneg_mask;
++ u16 cable_length;
++ u16 max_cable_length;
++ u16 min_cable_length;
++
++ u8 mdix;
++
++ bool disable_polarity_correction;
++ bool is_mdix;
++ bool polarity_correction;
++ bool reset_disable;
++ bool speed_downgraded;
++ bool autoneg_wait_to_complete;
++};
++
++struct e1000_nvm_info {
++ struct e1000_nvm_operations ops;
++ enum e1000_nvm_type type;
++ enum e1000_nvm_override override;
++
++ u32 flash_bank_size;
++ u32 flash_base_addr;
++
++ u16 word_size;
++ u16 delay_usec;
++ u16 address_bits;
++ u16 opcode_bits;
++ u16 page_size;
++};
++
++struct e1000_bus_info {
++ enum e1000_bus_type type;
++ enum e1000_bus_speed speed;
++ enum e1000_bus_width width;
++
++ u16 func;
++ u16 pci_cmd_word;
++};
++
++struct e1000_fc_info {
++ u32 high_water; /* Flow control high-water mark */
++ u32 low_water; /* Flow control low-water mark */
++ u16 pause_time; /* Flow control pause timer */
++ bool send_xon; /* Flow control send XON */
++ bool strict_ieee; /* Strict IEEE mode */
++ enum e1000_fc_mode current_mode; /* FC mode in effect */
++ enum e1000_fc_mode requested_mode; /* FC mode requested by caller */
++};
++
++struct e1000_mbx_operations {
++ s32 (*init_params)(struct e1000_hw *hw);
++ s32 (*read)(struct e1000_hw *, u32 *, u16, u16);
++ s32 (*write)(struct e1000_hw *, u32 *, u16, u16);
++ s32 (*read_posted)(struct e1000_hw *, u32 *, u16, u16);
++ s32 (*write_posted)(struct e1000_hw *, u32 *, u16, u16);
++ s32 (*check_for_msg)(struct e1000_hw *, u16);
++ s32 (*check_for_ack)(struct e1000_hw *, u16);
++ s32 (*check_for_rst)(struct e1000_hw *, u16);
++};
++
++struct e1000_mbx_stats {
++ u32 msgs_tx;
++ u32 msgs_rx;
++
++ u32 acks;
++ u32 reqs;
++ u32 rsts;
++};
++
++struct e1000_mbx_info {
++ struct e1000_mbx_operations ops;
++ struct e1000_mbx_stats stats;
++ u32 timeout;
++ u32 usec_delay;
++ u16 size;
++};
++
++struct e1000_dev_spec_82575 {
++ bool sgmii_active;
++ bool global_device_reset;
++};
++
++struct e1000_dev_spec_vf {
++ u32 vf_number;
++ u32 v2p_mailbox;
++};
++
++
++struct e1000_hw {
++ void *back;
++
++ u8 __iomem *hw_addr;
++ u8 __iomem *flash_address;
++ unsigned long io_base;
++
++ struct e1000_mac_info mac;
++ struct e1000_fc_info fc;
++ struct e1000_phy_info phy;
++ struct e1000_nvm_info nvm;
++ struct e1000_bus_info bus;
++ struct e1000_mbx_info mbx;
++ struct e1000_host_mng_dhcp_cookie mng_cookie;
++
++ union {
++ struct e1000_dev_spec_82575 _82575;
++ struct e1000_dev_spec_vf vf;
++ } dev_spec;
++
++ u16 device_id;
++ u16 subsystem_vendor_id;
++ u16 subsystem_device_id;
++ u16 vendor_id;
++
++ u8 revision_id;
++};
++
++#include "e1000_82575.h"
++
++/* These functions must be implemented by drivers */
++s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
++s32 e1000_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
++
++#endif
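To show how the function-pointer tables in struct e1000_hw are meant to be consumed, here is a minimal sketch of a caller going through the PHY ops. The helper name is illustrative, and it assumes the ops pointers have already been populated by the device-specific initialization code before it runs.

#include "e1000_hw.h"

/* Read MII register 2 (PHY identifier, high word) using the acquire/
 * read_reg/release contract defined by e1000_phy_operations. */
static s32 example_read_phy_id1(struct e1000_hw *hw, u16 *id1)
{
	s32 ret_val;

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	ret_val = hw->phy.ops.read_reg(hw, 2, id1);

	hw->phy.ops.release(hw);
	return ret_val;
}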
+Index: linux-2.6.22/drivers/net/igb/e1000_mac.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.22/drivers/net/igb/e1000_mac.c 2009-12-18 12:39:22.000000000 -0500
+@@ -0,0 +1,1985 @@
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2009 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ You should have received a copy of the GNU General Public License along with
++ this program; if not, write to the Free Software Foundation, Inc.,
++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
++
++#include "e1000_api.h"
++
++static s32 e1000_set_default_fc_generic(struct e1000_hw *hw);
++static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw);
++static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw);
++static s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw);
++static void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
++
++/**
++ * e1000_init_mac_ops_generic - Initialize MAC function pointers
++ * @hw: pointer to the HW structure
++ *
++ * Sets up the MAC function pointers to their generic implementations
++ **/
++void e1000_init_mac_ops_generic(struct e1000_hw *hw)
++{
++ struct e1000_mac_info *mac = &hw->mac;
++ DEBUGFUNC("e1000_init_mac_ops_generic");
++
++ /* General Setup */
++ mac->ops.set_lan_id = e1000_set_lan_id_multi_port_pcie;
++ mac->ops.read_mac_addr = e1000_read_mac_addr_generic;
++ mac->ops.config_collision_dist = e1000_config_collision_dist_generic;
++ /* LINK */
++ mac->ops.wait_autoneg = e1000_wait_autoneg_generic;
++ /* Management */
++ mac->ops.mng_host_if_write = e1000_mng_host_if_write_generic;
++ mac->ops.mng_write_cmd_header = e1000_mng_write_cmd_header_generic;
++ mac->ops.mng_enable_host_if = e1000_mng_enable_host_if_generic;
++ /* VLAN, MC, etc. */
++ mac->ops.rar_set = e1000_rar_set_generic;
++ mac->ops.validate_mdi_setting = e1000_validate_mdi_setting_generic;
++}
++
++/**
++ * e1000_get_bus_info_pcie_generic - Get PCIe bus information
++ * @hw: pointer to the HW structure
++ *
++ * Determines and stores the system bus information for a particular
++ * network interface. The following bus information is determined and stored:
++ * bus speed, bus width, type (PCIe), and PCIe function.
++ **/
++s32 e1000_get_bus_info_pcie_generic(struct e1000_hw *hw)
++{
++ struct e1000_mac_info *mac = &hw->mac;
++ struct e1000_bus_info *bus = &hw->bus;
++
++ s32 ret_val;
++ u16 pcie_link_status;
++
++ DEBUGFUNC("e1000_get_bus_info_pcie_generic");
++
++ bus->type = e1000_bus_type_pci_express;
++ bus->speed = e1000_bus_speed_2500;
++
++ ret_val = e1000_read_pcie_cap_reg(hw,
++ PCIE_LINK_STATUS,
++ &pcie_link_status);
++ if (ret_val)
++ bus->width = e1000_bus_width_unknown;
++ else
++ bus->width = (enum e1000_bus_width)((pcie_link_status &
++ PCIE_LINK_WIDTH_MASK) >>
++ PCIE_LINK_WIDTH_SHIFT);
++
++ mac->ops.set_lan_id(hw);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
++ *
++ * @hw: pointer to the HW structure
++ *
++ * Determines the LAN function id by reading memory-mapped registers
++ * and swaps the port value if requested.
++ **/
++static void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw)
++{
++ struct e1000_bus_info *bus = &hw->bus;
++ u32 reg;
++
++ /*
++ * The status register reports the correct function number
++ * for the device regardless of function swap state.
++ */
++ reg = E1000_READ_REG(hw, E1000_STATUS);
++ bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
++}
++
++/**
++ * e1000_set_lan_id_single_port - Set LAN id for a single port device
++ * @hw: pointer to the HW structure
++ *
++ * Sets the LAN function id to zero for a single port device.
++ **/
++void e1000_set_lan_id_single_port(struct e1000_hw *hw)
++{
++ struct e1000_bus_info *bus = &hw->bus;
++
++ bus->func = 0;
++}
++
++/**
++ * e1000_clear_vfta_generic - Clear VLAN filter table
++ * @hw: pointer to the HW structure
++ *
++ * Clears the register array which contains the VLAN filter table by
++ * setting all the values to 0.
++ **/
++void e1000_clear_vfta_generic(struct e1000_hw *hw)
++{
++ u32 offset;
++
++ DEBUGFUNC("e1000_clear_vfta_generic");
++
++ for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
++ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0);
++ E1000_WRITE_FLUSH(hw);
++ }
++}
++
++/**
++ * e1000_write_vfta_generic - Write value to VLAN filter table
++ * @hw: pointer to the HW structure
++ * @offset: register offset in VLAN filter table
++ * @value: register value written to VLAN filter table
++ *
++ * Writes value at the given offset in the register array which stores
++ * the VLAN filter table.
++ **/
++void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value)
++{
++ DEBUGFUNC("e1000_write_vfta_generic");
++
++ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
++ E1000_WRITE_FLUSH(hw);
++}
++
++/**
++ * e1000_init_rx_addrs_generic - Initialize receive addresses
++ * @hw: pointer to the HW structure
++ * @rar_count: receive address registers
++ *
++ * Sets up the receive address registers by setting the base receive address
++ * register to the device's MAC address and clearing all the other receive
++ * address registers to 0.
++ **/
++void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count)
++{
++ u32 i;
++ u8 mac_addr[ETH_ADDR_LEN] = {0};
++
++ DEBUGFUNC("e1000_init_rx_addrs_generic");
++
++ /* Setup the receive address */
++ DEBUGOUT("Programming MAC Address into RAR[0]\n");
++
++ hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
++
++ /* Zero out the other (rar_entry_count - 1) receive addresses */
++ DEBUGOUT1("Clearing RAR[1-%u]\n", rar_count-1);
++ for (i = 1; i < rar_count; i++)
++ hw->mac.ops.rar_set(hw, mac_addr, i);
++}
++
++/**
++ * e1000_check_alt_mac_addr_generic - Check for alternate MAC addr
++ * @hw: pointer to the HW structure
++ *
++ * Checks the NVM for an alternate MAC address. An alternate MAC address
++ * can be set up by pre-boot software; it must be treated like a permanent
++ * address and must override the actual permanent MAC address. If an
++ * alternate MAC address is found it is programmed into RAR0, replacing
++ * the permanent address that was installed into RAR0 by the Si on reset.
++ * This function will return SUCCESS unless it encounters an error while
++ * reading the EEPROM.
++ **/
++s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
++{
++ u32 i;
++ s32 ret_val = E1000_SUCCESS;
++ u16 offset, nvm_alt_mac_addr_offset, nvm_data;
++ u8 alt_mac_addr[ETH_ADDR_LEN];
++
++ DEBUGFUNC("e1000_check_alt_mac_addr_generic");
++
++ ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1,
++ &nvm_alt_mac_addr_offset);
++ if (ret_val) {
++ DEBUGOUT("NVM Read Error\n");
++ goto out;
++ }
++
++ if (nvm_alt_mac_addr_offset == 0xFFFF) {
++ /* There is no Alternate MAC Address */
++ goto out;
++ }
++
++ if (hw->bus.func == E1000_FUNC_1)
++ nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
++ for (i = 0; i < ETH_ADDR_LEN; i += 2) {
++ offset = nvm_alt_mac_addr_offset + (i >> 1);
++ ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
++ if (ret_val) {
++ DEBUGOUT("NVM Read Error\n");
++ goto out;
++ }
++
++ alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
++ alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
++ }
++
++ /* if multicast bit is set, the alternate address will not be used */
++ if (alt_mac_addr[0] & 0x01) {
++ DEBUGOUT("Ignoring Alternate Mac Address with MC bit set\n");
++ goto out;
++ }
++
++ /*
++ * We have a valid alternate MAC address, and we want to treat it the
++ * same as the normal permanent MAC address stored by the HW into the
++ * RAR. Do this by mapping this address into RAR0.
++ */
++ hw->mac.ops.rar_set(hw, alt_mac_addr, 0);
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_rar_set_generic - Set receive address register
++ * @hw: pointer to the HW structure
++ * @addr: pointer to the receive address
++ * @index: receive address array register
++ *
++ * Sets the receive address array register at index to the address passed
++ * in by addr.
++ **/
++void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
++{
++ u32 rar_low, rar_high;
++
++ DEBUGFUNC("e1000_rar_set_generic");
++
++ /*
++ * HW expects these in little endian so we reverse the byte order
++ * from network order (big endian) to little endian
++ */
++ rar_low = ((u32) addr[0] |
++ ((u32) addr[1] << 8) |
++ ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
++
++ rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
++
++ /* If MAC address zero, no need to set the AV bit */
++ if (rar_low || rar_high)
++ rar_high |= E1000_RAH_AV;
++
++ /*
++ * Some bridges will combine consecutive 32-bit writes into
++ * a single burst write, which will malfunction on some parts.
++ * The flushes avoid this.
++ */
++ E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
++ E1000_WRITE_FLUSH(hw);
++ E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
++ E1000_WRITE_FLUSH(hw);
++}
++
++/**
++ * e1000_mta_set_generic - Set multicast filter table address
++ * @hw: pointer to the HW structure
++ * @hash_value: determines the MTA register and bit to set
++ *
++ * The multicast table address is a register array of 32-bit registers.
++ * The hash_value is used to determine what register the bit is in, the
++ * current value is read, the new bit is OR'd in and the new value is
++ * written back into the register.
++ **/
++void e1000_mta_set_generic(struct e1000_hw *hw, u32 hash_value)
++{
++ u32 hash_bit, hash_reg, mta;
++
++ DEBUGFUNC("e1000_mta_set_generic");
++ /*
++ * The MTA is a register array of 32-bit registers. It is
++ * treated like an array of (32*mta_reg_count) bits. We want to
++ * set bit BitArray[hash_value]. So we figure out what register
++ * the bit is in, read it, OR in the new bit, then write
++ * back the new value. The (hw->mac.mta_reg_count - 1) serves as a
++ * mask to bits 31:5 of the hash value which gives us the
++ * register we're modifying. The hash bit within that register
++ * is determined by the lower 5 bits of the hash value.
++ */
++ hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
++ hash_bit = hash_value & 0x1F;
++
++ mta = E1000_READ_REG_ARRAY(hw, E1000_MTA, hash_reg);
++
++ mta |= (1 << hash_bit);
++
++ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, hash_reg, mta);
++ E1000_WRITE_FLUSH(hw);
++}
++
++/**
++ * e1000_update_mc_addr_list_generic - Update Multicast addresses
++ * @hw: pointer to the HW structure
++ * @mc_addr_list: array of multicast addresses to program
++ * @mc_addr_count: number of multicast addresses to program
++ *
++ * Updates entire Multicast Table Array.
++ * The caller must have a packed mc_addr_list of multicast addresses.
++ **/
++void e1000_update_mc_addr_list_generic(struct e1000_hw *hw,
++ u8 *mc_addr_list, u32 mc_addr_count)
++{
++ u32 hash_value, hash_bit, hash_reg;
++ int i;
++
++ DEBUGFUNC("e1000_update_mc_addr_list_generic");
++
++ /* clear mta_shadow */
++ memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
++
++ /* update mta_shadow from mc_addr_list */
++ for (i = 0; (u32) i < mc_addr_count; i++) {
++ hash_value = e1000_hash_mc_addr_generic(hw, mc_addr_list);
++
++ hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
++ hash_bit = hash_value & 0x1F;
++
++ hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
++ mc_addr_list += (ETH_ADDR_LEN);
++ }
++
++ /* replace the entire MTA table */
++ for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
++ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]);
++ E1000_WRITE_FLUSH(hw);
++}
++
++/**
++ * e1000_hash_mc_addr_generic - Generate a multicast hash value
++ * @hw: pointer to the HW structure
++ * @mc_addr: pointer to a multicast address
++ *
++ * Generates a multicast address hash value which is used to determine
++ * the multicast filter table array address and new table value. See
++ * e1000_mta_set_generic()
++ **/
++u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr)
++{
++ u32 hash_value, hash_mask;
++ u8 bit_shift = 0;
++
++ DEBUGFUNC("e1000_hash_mc_addr_generic");
++
++ /* Register count multiplied by bits per register */
++ hash_mask = (hw->mac.mta_reg_count * 32) - 1;
++
++ /*
++ * For a mc_filter_type of 0, bit_shift is the number of left-shifts
++ * where 0xFF would still fall within the hash mask.
++ */
++ while (hash_mask >> bit_shift != 0xFF)
++ bit_shift++;
++
++ /*
++ * The portion of the address that is used for the hash table
++ * is determined by the mc_filter_type setting.
++ * The algorithm is such that there is a total of 8 bits of shifting.
++ * The bit_shift for a mc_filter_type of 0 represents the number of
++ * left-shifts where the MSB of mc_addr[5] would still fall within
++ * the hash_mask. Case 0 does this exactly. Since there are a total
++ * of 8 bits of shifting, then mc_addr[4] will shift right the
++ * remaining number of bits. Thus 8 - bit_shift. The rest of the
++ * cases are a variation of this algorithm...essentially raising the
++ * number of bits to shift mc_addr[5] left, while still keeping the
++ * 8-bit shifting total.
++ *
++ * For example, given the following Destination MAC Address and an
++ * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
++ * we can see that the bit_shift for case 0 is 4. These are the hash
++ * values resulting from each mc_filter_type...
++ * [0] [1] [2] [3] [4] [5]
++ * 01 AA 00 12 34 56
++ * LSB MSB
++ *
++ * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
++ * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
++ * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x58D
++ * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
++ */
++ switch (hw->mac.mc_filter_type) {
++ default:
++ case 0:
++ break;
++ case 1:
++ bit_shift += 1;
++ break;
++ case 2:
++ bit_shift += 2;
++ break;
++ case 3:
++ bit_shift += 4;
++ break;
++ }
++
++ hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
++ (((u16) mc_addr[5]) << bit_shift)));
++
++ return hash_value;
++}
++
++/**
++ * e1000_clear_hw_cntrs_base_generic - Clear base hardware counters
++ * @hw: pointer to the HW structure
++ *
++ * Clears the base hardware counters by reading the counter registers.
++ **/
++void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw)
++{
++ DEBUGFUNC("e1000_clear_hw_cntrs_base_generic");
++
++ E1000_READ_REG(hw, E1000_CRCERRS);
++ E1000_READ_REG(hw, E1000_SYMERRS);
++ E1000_READ_REG(hw, E1000_MPC);
++ E1000_READ_REG(hw, E1000_SCC);
++ E1000_READ_REG(hw, E1000_ECOL);
++ E1000_READ_REG(hw, E1000_MCC);
++ E1000_READ_REG(hw, E1000_LATECOL);
++ E1000_READ_REG(hw, E1000_COLC);
++ E1000_READ_REG(hw, E1000_DC);
++ E1000_READ_REG(hw, E1000_SEC);
++ E1000_READ_REG(hw, E1000_RLEC);
++ E1000_READ_REG(hw, E1000_XONRXC);
++ E1000_READ_REG(hw, E1000_XONTXC);
++ E1000_READ_REG(hw, E1000_XOFFRXC);
++ E1000_READ_REG(hw, E1000_XOFFTXC);
++ E1000_READ_REG(hw, E1000_FCRUC);
++ E1000_READ_REG(hw, E1000_GPRC);
++ E1000_READ_REG(hw, E1000_BPRC);
++ E1000_READ_REG(hw, E1000_MPRC);
++ E1000_READ_REG(hw, E1000_GPTC);
++ E1000_READ_REG(hw, E1000_GORCL);
++ E1000_READ_REG(hw, E1000_GORCH);
++ E1000_READ_REG(hw, E1000_GOTCL);
++ E1000_READ_REG(hw, E1000_GOTCH);
++ E1000_READ_REG(hw, E1000_RNBC);
++ E1000_READ_REG(hw, E1000_RUC);
++ E1000_READ_REG(hw, E1000_RFC);
++ E1000_READ_REG(hw, E1000_ROC);
++ E1000_READ_REG(hw, E1000_RJC);
++ E1000_READ_REG(hw, E1000_TORL);
++ E1000_READ_REG(hw, E1000_TORH);
++ E1000_READ_REG(hw, E1000_TOTL);
++ E1000_READ_REG(hw, E1000_TOTH);
++ E1000_READ_REG(hw, E1000_TPR);
++ E1000_READ_REG(hw, E1000_TPT);
++ E1000_READ_REG(hw, E1000_MPTC);
++ E1000_READ_REG(hw, E1000_BPTC);
++}
++
++/**
++ * e1000_check_for_copper_link_generic - Check for link (Copper)
++ * @hw: pointer to the HW structure
++ *
++ * Checks to see if the link status of the hardware has changed. If a
++ * change in link status has been detected, then we read the PHY registers
++ * to get the current speed/duplex if link exists.
++ **/
++s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw)
++{
++ struct e1000_mac_info *mac = &hw->mac;
++ s32 ret_val;
++ bool link;
++
++ DEBUGFUNC("e1000_check_for_copper_link");
++
++ /*
++ * We only want to go out to the PHY registers to see if Auto-Neg
++ * has completed and/or if our link status has changed. The
++ * get_link_status flag is set upon receiving a Link Status
++ * Change or Rx Sequence Error interrupt.
++ */
++ if (!mac->get_link_status) {
++ ret_val = E1000_SUCCESS;
++ goto out;
++ }
++
++ /*
++ * First we want to see if the MII Status Register reports
++ * link. If so, then we want to get the current speed/duplex
++ * of the PHY.
++ */
++ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
++ if (ret_val)
++ goto out;
++
++ if (!link)
++ goto out; /* No link detected */
++
++ mac->get_link_status = false;
++
++ /*
++ * Check if there was DownShift, must be checked
++ * immediately after link-up
++ */
++ e1000_check_downshift_generic(hw);
++
++ /*
++ * If we are forcing speed/duplex, then we simply return since
++ * we have already determined whether we have link or not.
++ */
++ if (!mac->autoneg) {
++ ret_val = -E1000_ERR_CONFIG;
++ goto out;
++ }
++
++ /*
++ * Auto-Neg is enabled. Auto Speed Detection takes care
++ * of MAC speed/duplex configuration. So we only need to
++ * configure Collision Distance in the MAC.
++ */
++ e1000_config_collision_dist_generic(hw);
++
++ /*
++ * Configure Flow Control now that Auto-Neg has completed.
++ * First, we need to restore the desired flow control
++ * settings because we may have had to re-autoneg with a
++ * different link partner.
++ */
++ ret_val = e1000_config_fc_after_link_up_generic(hw);
++ if (ret_val)
++ DEBUGOUT("Error configuring flow control\n");
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_check_for_fiber_link_generic - Check for link (Fiber)
++ * @hw: pointer to the HW structure
++ *
++ * Checks for link up on the hardware. If link is not up and we have
++ * a signal, then we need to force link up.
++ **/
++s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw)
++{
++ struct e1000_mac_info *mac = &hw->mac;
++ u32 rxcw;
++ u32 ctrl;
++ u32 status;
++ s32 ret_val = E1000_SUCCESS;
++
++ DEBUGFUNC("e1000_check_for_fiber_link_generic");
++
++ ctrl = E1000_READ_REG(hw, E1000_CTRL);
++ status = E1000_READ_REG(hw, E1000_STATUS);
++ rxcw = E1000_READ_REG(hw, E1000_RXCW);
++
++ /*
++ * If we don't have link (auto-negotiation failed or link partner
++ * cannot auto-negotiate), the cable is plugged in (we have signal),
++ * and our link partner is not trying to auto-negotiate with us (we
++ * are receiving idles or data), we need to force link up. We also
++ * need to give auto-negotiation time to complete, in case the cable
++ * was just plugged in. The autoneg_failed flag does this.
++ */
++ /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
++ if ((ctrl & E1000_CTRL_SWDPIN1) && (!(status & E1000_STATUS_LU)) &&
++ (!(rxcw & E1000_RXCW_C))) {
++ if (mac->autoneg_failed == 0) {
++ mac->autoneg_failed = 1;
++ goto out;
++ }
++ DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n");
++
++ /* Disable auto-negotiation in the TXCW register */
++ E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE));
++
++ /* Force link-up and also force full-duplex. */
++ ctrl = E1000_READ_REG(hw, E1000_CTRL);
++ ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
++
++ /* Configure Flow Control after forcing link up. */
++ ret_val = e1000_config_fc_after_link_up_generic(hw);
++ if (ret_val) {
++ DEBUGOUT("Error configuring flow control\n");
++ goto out;
++ }
++ } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
++ /*
++ * If we are forcing link and we are receiving /C/ ordered
++ * sets, re-enable auto-negotiation in the TXCW register
++ * and disable forced link in the Device Control register
++ * in an attempt to auto-negotiate with our link partner.
++ */
++ DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n");
++ E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw);
++ E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU));
++
++ mac->serdes_has_link = true;
++ }
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_check_for_serdes_link_generic - Check for link (Serdes)
++ * @hw: pointer to the HW structure
++ *
++ * Checks for link up on the hardware. If link is not up and we have
++ * a signal, then we need to force link up.
++ **/
++s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
++{
++ struct e1000_mac_info *mac = &hw->mac;
++ u32 rxcw;
++ u32 ctrl;
++ u32 status;
++ s32 ret_val = E1000_SUCCESS;
++
++ DEBUGFUNC("e1000_check_for_serdes_link_generic");
++
++ ctrl = E1000_READ_REG(hw, E1000_CTRL);
++ status = E1000_READ_REG(hw, E1000_STATUS);
++ rxcw = E1000_READ_REG(hw, E1000_RXCW);
++
++ /*
++ * If we don't have link (auto-negotiation failed or link partner
++ * cannot auto-negotiate), and our link partner is not trying to
++ * auto-negotiate with us (we are receiving idles or data),
++ * we need to force link up. We also need to give auto-negotiation
++ * time to complete.
++ */
++ /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
++ if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) {
++ if (mac->autoneg_failed == 0) {
++ mac->autoneg_failed = 1;
++ goto out;
++ }
++ DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n");
++
++ /* Disable auto-negotiation in the TXCW register */
++ E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE));
++
++ /* Force link-up and also force full-duplex. */
++ ctrl = E1000_READ_REG(hw, E1000_CTRL);
++ ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
++
++ /* Configure Flow Control after forcing link up. */
++ ret_val = e1000_config_fc_after_link_up_generic(hw);
++ if (ret_val) {
++ DEBUGOUT("Error configuring flow control\n");
++ goto out;
++ }
++ } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
++ /*
++ * If we are forcing link and we are receiving /C/ ordered
++ * sets, re-enable auto-negotiation in the TXCW register
++ * and disable forced link in the Device Control register
++ * in an attempt to auto-negotiate with our link partner.
++ */
++ DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n");
++ E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw);
++ E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU));
++
++ mac->serdes_has_link = true;
++ } else if (!(E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW))) {
++ /*
++ * If we force link for non-auto-negotiation switch, check
++ * link status based on MAC synchronization for internal
++ * serdes media type.
++ */
++ /* SYNCH bit and IV bit are sticky. */
++ usec_delay(10);
++ rxcw = E1000_READ_REG(hw, E1000_RXCW);
++ if (rxcw & E1000_RXCW_SYNCH) {
++ if (!(rxcw & E1000_RXCW_IV)) {
++ mac->serdes_has_link = true;
++ DEBUGOUT("SERDES: Link up - forced.\n");
++ }
++ } else {
++ mac->serdes_has_link = false;
++ DEBUGOUT("SERDES: Link down - force failed.\n");
++ }
++ }
++
++ if (E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW)) {
++ status = E1000_READ_REG(hw, E1000_STATUS);
++ if (status & E1000_STATUS_LU) {
++ /* SYNCH bit and IV bit are sticky, so reread rxcw. */
++ usec_delay(10);
++ rxcw = E1000_READ_REG(hw, E1000_RXCW);
++ if (rxcw & E1000_RXCW_SYNCH) {
++ if (!(rxcw & E1000_RXCW_IV)) {
++ mac->serdes_has_link = true;
++ DEBUGOUT("SERDES: Link up - autoneg "
++ "completed sucessfully.\n");
++ } else {
++ mac->serdes_has_link = false;
++ DEBUGOUT("SERDES: Link down - invalid"
++ "codewords detected in autoneg.\n");
++ }
++ } else {
++ mac->serdes_has_link = false;
++ DEBUGOUT("SERDES: Link down - no sync.\n");
++ }
++ } else {
++ mac->serdes_has_link = false;
++ DEBUGOUT("SERDES: Link down - autoneg failed\n");
++ }
++ }
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_setup_link_generic - Setup flow control and link settings
++ * @hw: pointer to the HW structure
++ *
++ * Determines which flow control settings to use, then configures flow
++ * control. Calls the appropriate media-specific link configuration
++ * function. Assuming the adapter has a valid link partner, a valid link
++ * should be established. Assumes the hardware has previously been reset
++ * and the transmitter and receiver are not enabled.
++ **/
++s32 e1000_setup_link_generic(struct e1000_hw *hw)
++{
++ s32 ret_val = E1000_SUCCESS;
++
++ DEBUGFUNC("e1000_setup_link_generic");
++
++ /*
++ * In the case of the phy reset being blocked, we already have a link.
++ * We do not need to set it up again.
++ */
++ if (hw->phy.ops.check_reset_block)
++ if (hw->phy.ops.check_reset_block(hw))
++ goto out;
++
++ /*
++ * If requested flow control is set to default, set flow control
++ * based on the EEPROM flow control settings.
++ */
++ if (hw->fc.requested_mode == e1000_fc_default) {
++ ret_val = e1000_set_default_fc_generic(hw);
++ if (ret_val)
++ goto out;
++ }
++
++ /*
++ * Save off the requested flow control mode for use later. Depending
++ * on the link partner's capabilities, we may or may not use this mode.
++ */
++ hw->fc.current_mode = hw->fc.requested_mode;
++
++ DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
++ hw->fc.current_mode);
++
++ /* Call the necessary media_type subroutine to configure the link. */
++ ret_val = hw->mac.ops.setup_physical_interface(hw);
++ if (ret_val)
++ goto out;
++
++ /*
++ * Initialize the flow control address, type, and PAUSE timer
++ * registers to their default values. This is done even if flow
++ * control is disabled, because it does not hurt anything to
++ * initialize these registers.
++ */
++ DEBUGOUT("Initializing the Flow Control address, type and timer regs\n");
++ E1000_WRITE_REG(hw, E1000_FCT, FLOW_CONTROL_TYPE);
++ E1000_WRITE_REG(hw, E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
++ E1000_WRITE_REG(hw, E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);
++
++ E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
++
++ ret_val = e1000_set_fc_watermarks_generic(hw);
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_setup_fiber_serdes_link_generic - Setup link for fiber/serdes
++ * @hw: pointer to the HW structure
++ *
++ * Configures collision distance and flow control for fiber and serdes
++ * links. Upon successful setup, polls for link.
++ **/
++s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw)
++{
++ u32 ctrl;
++ s32 ret_val = E1000_SUCCESS;
++
++ DEBUGFUNC("e1000_setup_fiber_serdes_link_generic");
++
++ ctrl = E1000_READ_REG(hw, E1000_CTRL);
++
++ /* Take the link out of reset */
++ ctrl &= ~E1000_CTRL_LRST;
++
++ e1000_config_collision_dist_generic(hw);
++
++ ret_val = e1000_commit_fc_settings_generic(hw);
++ if (ret_val)
++ goto out;
++
++ /*
++ * Since auto-negotiation is enabled, take the link out of reset (the
++ * link will be in reset, because we previously reset the chip). This
++ * will restart auto-negotiation. If auto-negotiation is successful
++ * then the link-up status bit will be set and the flow control enable
++ * bits (RFCE and TFCE) will be set according to their negotiated value.
++ */
++ DEBUGOUT("Auto-negotiation enabled\n");
++
++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
++ E1000_WRITE_FLUSH(hw);
++ msec_delay(1);
++
++ /*
++ * For these adapters, the SW definable pin 1 is set when the optics
++ * detect a signal. If we have a signal, then poll for a "Link-Up"
++ * indication.
++ */
++ if (hw->phy.media_type == e1000_media_type_internal_serdes ||
++ (E1000_READ_REG(hw, E1000_CTRL) & E1000_CTRL_SWDPIN1)) {
++ ret_val = e1000_poll_fiber_serdes_link_generic(hw);
++ } else {
++ DEBUGOUT("No signal detected\n");
++ }
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_config_collision_dist_generic - Configure collision distance
++ * @hw: pointer to the HW structure
++ *
++ * Configures the collision distance to the default value and is used
++ * during link setup. Currently no func pointer exists and all
++ * implementations are handled in the generic version of this function.
++ **/
++void e1000_config_collision_dist_generic(struct e1000_hw *hw)
++{
++ u32 tctl;
++
++ DEBUGFUNC("e1000_config_collision_dist_generic");
++
++ tctl = E1000_READ_REG(hw, E1000_TCTL);
++
++ tctl &= ~E1000_TCTL_COLD;
++ tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
++
++ E1000_WRITE_REG(hw, E1000_TCTL, tctl);
++ E1000_WRITE_FLUSH(hw);
++}
++
++/**
++ * e1000_poll_fiber_serdes_link_generic - Poll for link up
++ * @hw: pointer to the HW structure
++ *
++ * Polls for link up by reading the status register; if link fails to come
++ * up with auto-negotiation, the link is forced if a signal is detected.
++ **/
++static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
++{
++ struct e1000_mac_info *mac = &hw->mac;
++ u32 i, status;
++ s32 ret_val = E1000_SUCCESS;
++
++ DEBUGFUNC("e1000_poll_fiber_serdes_link_generic");
++
++ /*
++ * If we have a signal (the cable is plugged in, or assumed true for
++ * serdes media) then poll for a "Link-Up" indication in the Device
++ * Status Register. Time-out if a link isn't seen in 500 milliseconds
++ * (Auto-negotiation should complete in less than 500 milliseconds
++ * even if the other end is doing it in SW).
++ */
++ for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) {
++ msec_delay(10);
++ status = E1000_READ_REG(hw, E1000_STATUS);
++ if (status & E1000_STATUS_LU)
++ break;
++ }
++ if (i == FIBER_LINK_UP_LIMIT) {
++ DEBUGOUT("Never got a valid link from auto-neg!!!\n");
++ mac->autoneg_failed = 1;
++ /*
++ * AutoNeg failed to achieve a link, so we'll call
++ * mac->check_for_link. This routine will force the
++ * link up if we detect a signal. This will allow us to
++ * communicate with non-autonegotiating link partners.
++ */
++ ret_val = hw->mac.ops.check_for_link(hw);
++ if (ret_val) {
++ DEBUGOUT("Error while checking for link\n");
++ goto out;
++ }
++ mac->autoneg_failed = 0;
++ } else {
++ mac->autoneg_failed = 0;
++ DEBUGOUT("Valid Link Found\n");
++ }
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_commit_fc_settings_generic - Configure flow control
++ * @hw: pointer to the HW structure
++ *
++ * Write the flow control settings to the Transmit Config Word Register (TXCW)
++ * based on the flow control settings in e1000_mac_info.
++ **/
++static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
++{
++ struct e1000_mac_info *mac = &hw->mac;
++ u32 txcw;
++ s32 ret_val = E1000_SUCCESS;
++
++ DEBUGFUNC("e1000_commit_fc_settings_generic");
++
++ /*
++ * Check for a software override of the flow control settings, and
++ * setup the device accordingly. If auto-negotiation is enabled, then
++ * software will have to set the "PAUSE" bits to the correct value in
++ * the Transmit Config Word Register (TXCW) and re-start auto-
++ * negotiation. However, if auto-negotiation is disabled, then
++ * software will have to manually configure the two flow control enable
++ * bits in the CTRL register.
++ *
++ * The possible values of the "fc" parameter are:
++ * 0: Flow control is completely disabled
++ * 1: Rx flow control is enabled (we can receive pause frames,
++ * but not send pause frames).
++ * 2: Tx flow control is enabled (we can send pause frames but we
++ * do not support receiving pause frames).
++ * 3: Both Rx and Tx flow control (symmetric) are enabled.
++ */
++ switch (hw->fc.current_mode) {
++ case e1000_fc_none:
++ /* Flow control completely disabled by a software over-ride. */
++ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
++ break;
++ case e1000_fc_rx_pause:
++ /*
++ * Rx Flow control is enabled and Tx Flow control is disabled
++ * by a software over-ride. Since there really isn't a way to
++ * advertise that we are capable of Rx Pause ONLY, we will
++ * advertise that we support both symmetric and asymmetric RX
++ * PAUSE. Later, we will disable the adapter's ability to send
++ * PAUSE frames.
++ */
++ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
++ break;
++ case e1000_fc_tx_pause:
++ /*
++ * Tx Flow control is enabled, and Rx Flow control is disabled,
++ * by a software over-ride.
++ */
++ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
++ break;
++ case e1000_fc_full:
++ /*
++ * Flow control (both Rx and Tx) is enabled by a software
++ * over-ride.
++ */
++ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
++ break;
++ default:
++ DEBUGOUT("Flow control param set incorrectly\n");
++ ret_val = -E1000_ERR_CONFIG;
++ goto out;
++ break;
++ }
++
++ E1000_WRITE_REG(hw, E1000_TXCW, txcw);
++ mac->txcw = txcw;
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_set_fc_watermarks_generic - Set flow control high/low watermarks
++ * @hw: pointer to the HW structure
++ *
++ * Sets the flow control high/low threshold (watermark) registers. If
++ * flow control XON frame transmission is enabled, then set XON frame
++ * transmission as well.
++ **/
++s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw)
++{
++ s32 ret_val = E1000_SUCCESS;
++ u32 fcrtl = 0, fcrth = 0;
++
++ DEBUGFUNC("e1000_set_fc_watermarks_generic");
++
++ /*
++ * Set the flow control receive threshold registers. Normally,
++ * these registers will be set to a default threshold that may be
++ * adjusted later by the driver's runtime code. However, if the
++ * ability to transmit pause frames is not enabled, then these
++ * registers will be set to 0.
++ */
++ if (hw->fc.current_mode & e1000_fc_tx_pause) {
++ /*
++ * We need to set up the Receive Threshold high and low water
++ * marks as well as (optionally) enabling the transmission of
++ * XON frames.
++ */
++ fcrtl = hw->fc.low_water;
++ if (hw->fc.send_xon)
++ fcrtl |= E1000_FCRTL_XONE;
++
++ fcrth = hw->fc.high_water;
++ }
++ E1000_WRITE_REG(hw, E1000_FCRTL, fcrtl);
++ E1000_WRITE_REG(hw, E1000_FCRTH, fcrth);
++
++ return ret_val;
++}
++
++/**
++ * e1000_set_default_fc_generic - Set flow control default values
++ * @hw: pointer to the HW structure
++ *
++ * Read the EEPROM for the default values for flow control and store the
++ * values.
++ **/
++static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
++{
++ s32 ret_val = E1000_SUCCESS;
++ u16 nvm_data;
++
++ DEBUGFUNC("e1000_set_default_fc_generic");
++
++ /*
++ * Read and store word 0x0F of the EEPROM. This word contains bits
++ * that determine the hardware's default PAUSE (flow control) mode,
++ * a bit that determines whether the HW defaults to enabling or
++ * disabling auto-negotiation, and the direction of the
++ * SW defined pins. If there is no SW over-ride of the flow
++ * control setting, then the variable hw->fc will
++ * be initialized based on a value in the EEPROM.
++ */
++ ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data);
++
++ if (ret_val) {
++ DEBUGOUT("NVM Read Error\n");
++ goto out;
++ }
++
++ if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
++ hw->fc.requested_mode = e1000_fc_none;
++ else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
++ NVM_WORD0F_ASM_DIR)
++ hw->fc.requested_mode = e1000_fc_tx_pause;
++ else
++ hw->fc.requested_mode = e1000_fc_full;
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_force_mac_fc_generic - Force the MAC's flow control settings
++ * @hw: pointer to the HW structure
++ *
++ * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the
++ * device control register to reflect the adapter settings. TFCE and RFCE
++ * need to be explicitly set by software when a copper PHY is used because
++ * autonegotiation is managed by the PHY rather than the MAC. Software must
++ * also configure these bits when link is forced on a fiber connection.
++ **/
++s32 e1000_force_mac_fc_generic(struct e1000_hw *hw)
++{
++ u32 ctrl;
++ s32 ret_val = E1000_SUCCESS;
++
++ DEBUGFUNC("e1000_force_mac_fc_generic");
++
++ ctrl = E1000_READ_REG(hw, E1000_CTRL);
++
++ /*
++ * Because we didn't get link via the internal auto-negotiation
++ * mechanism (we either forced link or we got link via PHY
++ * auto-neg), we have to manually enable/disable transmit and
++ * receive flow control.
++ *
++ * The "Case" statement below enables/disables flow control
++ * according to the "hw->fc.current_mode" parameter.
++ *
++ * The possible values of the "fc" parameter are:
++ * 0: Flow control is completely disabled
++ * 1: Rx flow control is enabled (we can receive pause
++ * frames but not send pause frames).
++ * 2: Tx flow control is enabled (we can send pause frames
++ * but we do not receive pause frames).
++ * 3: Both Rx and Tx flow control (symmetric) are enabled.
++ * other: No other values should be possible at this point.
++ */
++ DEBUGOUT1("hw->fc.current_mode = %u\n", hw->fc.current_mode);
++
++ switch (hw->fc.current_mode) {
++ case e1000_fc_none:
++ ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
++ break;
++ case e1000_fc_rx_pause:
++ ctrl &= (~E1000_CTRL_TFCE);
++ ctrl |= E1000_CTRL_RFCE;
++ break;
++ case e1000_fc_tx_pause:
++ ctrl &= (~E1000_CTRL_RFCE);
++ ctrl |= E1000_CTRL_TFCE;
++ break;
++ case e1000_fc_full:
++ ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
++ break;
++ default:
++ DEBUGOUT("Flow control param set incorrectly\n");
++ ret_val = -E1000_ERR_CONFIG;
++ goto out;
++ }
++
++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_config_fc_after_link_up_generic - Configures flow control after link
++ * @hw: pointer to the HW structure
++ *
++ * Checks the status of auto-negotiation after link up to ensure that the
++ * speed and duplex were not forced. If the link needed to be forced, then
++ * flow control needs to be forced also. If auto-negotiation is enabled
++ * and did not fail, then we configure flow control based on our link
++ * partner.
++ **/
++s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw)
++{
++ struct e1000_mac_info *mac = &hw->mac;
++ s32 ret_val = E1000_SUCCESS;
++ u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
++ u16 speed, duplex;
++
++ DEBUGFUNC("e1000_config_fc_after_link_up_generic");
++
++ /*
++ * Check for the case where we have fiber media and auto-neg failed
++ * so we had to force link. In this case, we need to force the
++ * configuration of the MAC to match the "fc" parameter.
++ */
++ if (mac->autoneg_failed) {
++ if (hw->phy.media_type == e1000_media_type_fiber ||
++ hw->phy.media_type == e1000_media_type_internal_serdes)
++ ret_val = e1000_force_mac_fc_generic(hw);
++ } else {
++ if (hw->phy.media_type == e1000_media_type_copper)
++ ret_val = e1000_force_mac_fc_generic(hw);
++ }
++
++ if (ret_val) {
++ DEBUGOUT("Error forcing flow control settings\n");
++ goto out;
++ }
++
++ /*
++ * Check for the case where we have copper media and auto-neg is
++ * enabled. In this case, we need to check and see if Auto-Neg
++ * has completed, and if so, how the PHY and link partner has
++ * flow control configured.
++ */
++ if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
++ /*
++ * Read the MII Status Register and check to see if AutoNeg
++ * has completed. We read this twice because this reg has
++ * some "sticky" (latched) bits.
++ */
++ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg);
++ if (ret_val)
++ goto out;
++ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg);
++ if (ret_val)
++ goto out;
++
++ if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
++ DEBUGOUT("Copper PHY and Auto Neg "
++ "has not completed.\n");
++ goto out;
++ }
++
++ /*
++ * The AutoNeg process has completed, so we now need to
++ * read both the Auto Negotiation Advertisement
++ * Register (Address 4) and the Auto_Negotiation Base
++ * Page Ability Register (Address 5) to determine how
++ * flow control was negotiated.
++ */
++ ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
++ &mii_nway_adv_reg);
++ if (ret_val)
++ goto out;
++ ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
++ &mii_nway_lp_ability_reg);
++ if (ret_val)
++ goto out;
++
++ /*
++ * Two bits in the Auto Negotiation Advertisement Register
++ * (Address 4) and two bits in the Auto Negotiation Base
++ * Page Ability Register (Address 5) determine flow control
++ * for both the PHY and the link partner. The following
++ * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
++ * 1999, describes these PAUSE resolution bits and how flow
++ * control is determined based upon these settings.
++ * NOTE: DC = Don't Care
++ *
++ * LOCAL DEVICE | LINK PARTNER
++ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
++ *-------|---------|-------|---------|--------------------
++ * 0 | 0 | DC | DC | e1000_fc_none
++ * 0 | 1 | 0 | DC | e1000_fc_none
++ * 0 | 1 | 1 | 0 | e1000_fc_none
++ * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
++ * 1 | 0 | 0 | DC | e1000_fc_none
++ * 1 | DC | 1 | DC | e1000_fc_full
++ * 1 | 1 | 0 | 0 | e1000_fc_none
++ * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
++ *
++ * Are both PAUSE bits set to 1? If so, this implies
++ * Symmetric Flow Control is enabled at both ends. The
++ * ASM_DIR bits are irrelevant per the spec.
++ *
++ * For Symmetric Flow Control:
++ *
++ * LOCAL DEVICE | LINK PARTNER
++ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
++ *-------|---------|-------|---------|--------------------
++ * 1 | DC | 1 | DC | E1000_fc_full
++ *
++ */
++ if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
++ (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
++ /*
++ * Now we need to check if the user selected Rx ONLY
++ * for pause frames. In this case, we had to advertise
++ * FULL flow control because we could not advertise RX
++ * ONLY. Hence, we must now check to see if we need to
++ * turn OFF the TRANSMISSION of PAUSE frames.
++ */
++ if (hw->fc.requested_mode == e1000_fc_full) {
++ hw->fc.current_mode = e1000_fc_full;
++ DEBUGOUT("Flow Control = FULL.\r\n");
++ } else {
++ hw->fc.current_mode = e1000_fc_rx_pause;
++ DEBUGOUT("Flow Control = "
++ "RX PAUSE frames only.\r\n");
++ }
++ }
++ /*
++ * For receiving PAUSE frames ONLY.
++ *
++ * LOCAL DEVICE | LINK PARTNER
++ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
++ *-------|---------|-------|---------|--------------------
++ * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
++ */
++ else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
++ (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
++ (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
++ (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
++ hw->fc.current_mode = e1000_fc_tx_pause;
++ DEBUGOUT("Flow Control = TX PAUSE frames only.\r\n");
++ }
++ /*
++ * For transmitting PAUSE frames ONLY.
++ *
++ * LOCAL DEVICE | LINK PARTNER
++ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
++ *-------|---------|-------|---------|--------------------
++ * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
++ */
++ else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
++ (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
++ !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
++ (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
++ hw->fc.current_mode = e1000_fc_rx_pause;
++ DEBUGOUT("Flow Control = RX PAUSE frames only.\r\n");
++ } else {
++ /*
++ * Per the IEEE spec, at this point flow control
++ * should be disabled.
++ */
++ hw->fc.current_mode = e1000_fc_none;
++ DEBUGOUT("Flow Control = NONE.\r\n");
++ }
++
++ /*
++ * Now we need to do one last check... If we auto-
++ * negotiated to HALF DUPLEX, flow control should not be
++ * enabled per IEEE 802.3 spec.
++ */
++ ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex);
++ if (ret_val) {
++ DEBUGOUT("Error getting link speed and duplex\n");
++ goto out;
++ }
++
++ if (duplex == HALF_DUPLEX)
++ hw->fc.current_mode = e1000_fc_none;
++
++ /*
++ * Now we call a subroutine to actually force the MAC
++ * controller to use the correct flow control settings.
++ */
++ ret_val = e1000_force_mac_fc_generic(hw);
++ if (ret_val) {
++ DEBUGOUT("Error forcing flow control settings\n");
++ goto out;
++ }
++ }
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_get_speed_and_duplex_copper_generic - Retrieve current speed/duplex
++ * @hw: pointer to the HW structure
++ * @speed: stores the current speed
++ * @duplex: stores the current duplex
++ *
++ * Read the status register for the current speed/duplex and store the current
++ * speed and duplex for copper connections.
++ **/
++s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed,
++ u16 *duplex)
++{
++ u32 status;
++
++ DEBUGFUNC("e1000_get_speed_and_duplex_copper_generic");
++
++ status = E1000_READ_REG(hw, E1000_STATUS);
++ if (status & E1000_STATUS_SPEED_1000) {
++ *speed = SPEED_1000;
++ DEBUGOUT("1000 Mb/s, ");
++ } else if (status & E1000_STATUS_SPEED_100) {
++ *speed = SPEED_100;
++ DEBUGOUT("100 Mb/s, ");
++ } else {
++ *speed = SPEED_10;
++ DEBUGOUT("10 Mb/s, ");
++ }
++
++ if (status & E1000_STATUS_FD) {
++ *duplex = FULL_DUPLEX;
++ DEBUGOUT("Full Duplex\n");
++ } else {
++ *duplex = HALF_DUPLEX;
++ DEBUGOUT("Half Duplex\n");
++ }
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_get_speed_and_duplex_fiber_serdes_generic - Retrieve current speed/duplex
++ * @hw: pointer to the HW structure
++ * @speed: stores the current speed
++ * @duplex: stores the current duplex
++ *
++ * Sets the speed and duplex to gigabit full duplex (the only possible option)
++ * for fiber/serdes links.
++ **/
++s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw *hw,
++ u16 *speed, u16 *duplex)
++{
++ DEBUGFUNC("e1000_get_speed_and_duplex_fiber_serdes_generic");
++
++ *speed = SPEED_1000;
++ *duplex = FULL_DUPLEX;
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_get_hw_semaphore_generic - Acquire hardware semaphore
++ * @hw: pointer to the HW structure
++ *
++ * Acquire the HW semaphore to access the PHY or NVM
++ **/
++s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw)
++{
++ u32 swsm;
++ s32 ret_val = E1000_SUCCESS;
++ s32 timeout = hw->nvm.word_size + 1;
++ s32 i = 0;
++
++ DEBUGFUNC("e1000_get_hw_semaphore_generic");
++
++ /* Get the SW semaphore */
++ while (i < timeout) {
++ swsm = E1000_READ_REG(hw, E1000_SWSM);
++ if (!(swsm & E1000_SWSM_SMBI))
++ break;
++
++ usec_delay(50);
++ i++;
++ }
++
++ if (i == timeout) {
++ DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
++ ret_val = -E1000_ERR_NVM;
++ goto out;
++ }
++
++ /* Get the FW semaphore. */
++ for (i = 0; i < timeout; i++) {
++ swsm = E1000_READ_REG(hw, E1000_SWSM);
++ E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
++
++ /* Semaphore acquired if bit latched */
++ if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI)
++ break;
++
++ usec_delay(50);
++ }
++
++ if (i == timeout) {
++ /* Release semaphores */
++ e1000_put_hw_semaphore_generic(hw);
++ DEBUGOUT("Driver can't access the NVM\n");
++ ret_val = -E1000_ERR_NVM;
++ goto out;
++ }
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_put_hw_semaphore_generic - Release hardware semaphore
++ * @hw: pointer to the HW structure
++ *
++ * Release hardware semaphore used to access the PHY or NVM
++ **/
++void e1000_put_hw_semaphore_generic(struct e1000_hw *hw)
++{
++ u32 swsm;
++
++ DEBUGFUNC("e1000_put_hw_semaphore_generic");
++
++ swsm = E1000_READ_REG(hw, E1000_SWSM);
++
++ swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
++
++ E1000_WRITE_REG(hw, E1000_SWSM, swsm);
++}
++
++/**
++ * e1000_get_auto_rd_done_generic - Check for auto read completion
++ * @hw: pointer to the HW structure
++ *
++ * Check EEPROM for Auto Read done bit.
++ **/
++s32 e1000_get_auto_rd_done_generic(struct e1000_hw *hw)
++{
++ s32 i = 0;
++ s32 ret_val = E1000_SUCCESS;
++
++ DEBUGFUNC("e1000_get_auto_rd_done_generic");
++
++ while (i < AUTO_READ_DONE_TIMEOUT) {
++ if (E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_AUTO_RD)
++ break;
++ msec_delay(1);
++ i++;
++ }
++
++ if (i == AUTO_READ_DONE_TIMEOUT) {
++ DEBUGOUT("Auto read by HW from NVM has not completed.\n");
++ ret_val = -E1000_ERR_RESET;
++ goto out;
++ }
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_valid_led_default_generic - Verify a valid default LED config
++ * @hw: pointer to the HW structure
++ * @data: pointer to the NVM (EEPROM)
++ *
++ * Read the EEPROM for the current default LED configuration. If the
++ * LED configuration is not valid, set to a valid LED configuration.
++ **/
++s32 e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data)
++{
++ s32 ret_val;
++
++ DEBUGFUNC("e1000_valid_led_default_generic");
++
++ ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
++ if (ret_val) {
++ DEBUGOUT("NVM Read Error\n");
++ goto out;
++ }
++
++ if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
++ *data = ID_LED_DEFAULT;
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_id_led_init_generic - Initialize ID LED config from NVM
++ * @hw: pointer to the HW structure
++ *
++ * Reads the default LED configuration from the NVM and builds the LEDCTL
++ * values (ledctl_mode1/ledctl_mode2) used to turn the LEDs on and off.
++ **/
++s32 e1000_id_led_init_generic(struct e1000_hw *hw)
++{
++ struct e1000_mac_info *mac = &hw->mac;
++ s32 ret_val;
++ const u32 ledctl_mask = 0x000000FF;
++ const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
++ const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
++ u16 data, i, temp;
++ const u16 led_mask = 0x0F;
++
++ DEBUGFUNC("e1000_id_led_init_generic");
++
++ ret_val = hw->nvm.ops.valid_led_default(hw, &data);
++ if (ret_val)
++ goto out;
++
++ mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
++ mac->ledctl_mode1 = mac->ledctl_default;
++ mac->ledctl_mode2 = mac->ledctl_default;
++
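++ /*
++ * Each 4-bit field of the NVM LED word selects the behavior of one
++ * LED; the corresponding byte of LEDCTL holds that LED's mode.
++ */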
++ for (i = 0; i < 4; i++) {
++ temp = (data >> (i << 2)) & led_mask;
++ switch (temp) {
++ case ID_LED_ON1_DEF2:
++ case ID_LED_ON1_ON2:
++ case ID_LED_ON1_OFF2:
++ mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
++ mac->ledctl_mode1 |= ledctl_on << (i << 3);
++ break;
++ case ID_LED_OFF1_DEF2:
++ case ID_LED_OFF1_ON2:
++ case ID_LED_OFF1_OFF2:
++ mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
++ mac->ledctl_mode1 |= ledctl_off << (i << 3);
++ break;
++ default:
++ /* Do nothing */
++ break;
++ }
++ switch (temp) {
++ case ID_LED_DEF1_ON2:
++ case ID_LED_ON1_ON2:
++ case ID_LED_OFF1_ON2:
++ mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
++ mac->ledctl_mode2 |= ledctl_on << (i << 3);
++ break;
++ case ID_LED_DEF1_OFF2:
++ case ID_LED_ON1_OFF2:
++ case ID_LED_OFF1_OFF2:
++ mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
++ mac->ledctl_mode2 |= ledctl_off << (i << 3);
++ break;
++ default:
++ /* Do nothing */
++ break;
++ }
++ }
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_setup_led_generic - Configures SW controllable LED
++ * @hw: pointer to the HW structure
++ *
++ * This prepares the SW controllable LED for use and saves the current state
++ * of the LED so it can be restored later.
++ **/
++s32 e1000_setup_led_generic(struct e1000_hw *hw)
++{
++ u32 ledctl;
++ s32 ret_val = E1000_SUCCESS;
++
++ DEBUGFUNC("e1000_setup_led_generic");
++
++ if (hw->mac.ops.setup_led != e1000_setup_led_generic) {
++ ret_val = -E1000_ERR_CONFIG;
++ goto out;
++ }
++
++ if (hw->phy.media_type == e1000_media_type_fiber) {
++ ledctl = E1000_READ_REG(hw, E1000_LEDCTL);
++ hw->mac.ledctl_default = ledctl;
++ /* Turn off LED0 */
++ ledctl &= ~(E1000_LEDCTL_LED0_IVRT |
++ E1000_LEDCTL_LED0_BLINK |
++ E1000_LEDCTL_LED0_MODE_MASK);
++ ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
++ E1000_LEDCTL_LED0_MODE_SHIFT);
++ E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl);
++ } else if (hw->phy.media_type == e1000_media_type_copper) {
++ E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
++ }
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_cleanup_led_generic - Set LED config to default operation
++ * @hw: pointer to the HW structure
++ *
++ * Remove the current LED configuration and set the LED configuration
++ * to the default value, saved from the EEPROM.
++ **/
++s32 e1000_cleanup_led_generic(struct e1000_hw *hw)
++{
++ s32 ret_val = E1000_SUCCESS;
++
++ DEBUGFUNC("e1000_cleanup_led_generic");
++
++ if (hw->mac.ops.cleanup_led != e1000_cleanup_led_generic) {
++ ret_val = -E1000_ERR_CONFIG;
++ goto out;
++ }
++
++ E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_blink_led_generic - Blink LED
++ * @hw: pointer to the HW structure
++ *
++ * Blink the LEDs which are set to be on.
++ **/
++s32 e1000_blink_led_generic(struct e1000_hw *hw)
++{
++ u32 ledctl_blink = 0;
++ u32 i;
++
++ DEBUGFUNC("e1000_blink_led_generic");
++
++ if (hw->phy.media_type == e1000_media_type_fiber) {
++ /* always blink LED0 for PCI-E fiber */
++ ledctl_blink = E1000_LEDCTL_LED0_BLINK |
++ (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
++ } else {
++ /*
++ * set the blink bit for each LED that's "on" (0x0E)
++ * in ledctl_mode2
++ */
++ ledctl_blink = hw->mac.ledctl_mode2;
++ for (i = 0; i < 4; i++)
++ if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
++ E1000_LEDCTL_MODE_LED_ON)
++ ledctl_blink |= (E1000_LEDCTL_LED0_BLINK <<
++ (i * 8));
++ }
++
++ E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl_blink);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_led_on_generic - Turn LED on
++ * @hw: pointer to the HW structure
++ *
++ * Turn LED on.
++ **/
++s32 e1000_led_on_generic(struct e1000_hw *hw)
++{
++ u32 ctrl;
++
++ DEBUGFUNC("e1000_led_on_generic");
++
++ switch (hw->phy.media_type) {
++ case e1000_media_type_fiber:
++ ctrl = E1000_READ_REG(hw, E1000_CTRL);
++ ctrl &= ~E1000_CTRL_SWDPIN0;
++ ctrl |= E1000_CTRL_SWDPIO0;
++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
++ break;
++ case e1000_media_type_copper:
++ E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
++ break;
++ default:
++ break;
++ }
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_led_off_generic - Turn LED off
++ * @hw: pointer to the HW structure
++ *
++ * Turn LED off.
++ **/
++s32 e1000_led_off_generic(struct e1000_hw *hw)
++{
++ u32 ctrl;
++
++ DEBUGFUNC("e1000_led_off_generic");
++
++ switch (hw->phy.media_type) {
++ case e1000_media_type_fiber:
++ ctrl = E1000_READ_REG(hw, E1000_CTRL);
++ ctrl |= E1000_CTRL_SWDPIN0;
++ ctrl |= E1000_CTRL_SWDPIO0;
++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
++ break;
++ case e1000_media_type_copper:
++ E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
++ break;
++ default:
++ break;
++ }
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_set_pcie_no_snoop_generic - Set PCI-express capabilities
++ * @hw: pointer to the HW structure
++ * @no_snoop: bitmap of snoop events
++ *
++ * Set the PCI-Express no-snoop bits for the events enabled in 'no_snoop'.
++ **/
++void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop)
++{
++ u32 gcr;
++
++ DEBUGFUNC("e1000_set_pcie_no_snoop_generic");
++
++ if (hw->bus.type != e1000_bus_type_pci_express)
++ goto out;
++
++ if (no_snoop) {
++ gcr = E1000_READ_REG(hw, E1000_GCR);
++ gcr &= ~(PCIE_NO_SNOOP_ALL);
++ gcr |= no_snoop;
++ E1000_WRITE_REG(hw, E1000_GCR, gcr);
++ }
++out:
++ return;
++}
++
++/**
++ * e1000_disable_pcie_master_generic - Disables PCI-express master access
++ * @hw: pointer to the HW structure
++ *
++ * Returns 0 (E1000_SUCCESS) if successful, else returns -10
++ * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused
++ * the master requests to be disabled.
++ *
++ * Disables PCI-Express master access and verifies there are no pending
++ * requests.
++ **/
++s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw)
++{
++ u32 ctrl;
++ s32 timeout = MASTER_DISABLE_TIMEOUT;
++ s32 ret_val = E1000_SUCCESS;
++
++ DEBUGFUNC("e1000_disable_pcie_master_generic");
++
++ if (hw->bus.type != e1000_bus_type_pci_express)
++ goto out;
++
++ ctrl = E1000_READ_REG(hw, E1000_CTRL);
++ ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
++
++ while (timeout) {
++ if (!(E1000_READ_REG(hw, E1000_STATUS) &
++ E1000_STATUS_GIO_MASTER_ENABLE))
++ break;
++ usec_delay(100);
++ timeout--;
++ }
++
++ if (!timeout) {
++ DEBUGOUT("Master requests are pending.\n");
++ ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING;
++ goto out;
++ }
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_reset_adaptive_generic - Reset Adaptive Interframe Spacing
++ * @hw: pointer to the HW structure
++ *
++ * Reset the Adaptive Interframe Spacing throttle to default values.
++ **/
++void e1000_reset_adaptive_generic(struct e1000_hw *hw)
++{
++ struct e1000_mac_info *mac = &hw->mac;
++
++ DEBUGFUNC("e1000_reset_adaptive_generic");
++
++ if (!mac->adaptive_ifs) {
++ DEBUGOUT("Not in Adaptive IFS mode!\n");
++ goto out;
++ }
++
++ mac->current_ifs_val = 0;
++ mac->ifs_min_val = IFS_MIN;
++ mac->ifs_max_val = IFS_MAX;
++ mac->ifs_step_size = IFS_STEP;
++ mac->ifs_ratio = IFS_RATIO;
++
++ mac->in_ifs_mode = false;
++ E1000_WRITE_REG(hw, E1000_AIT, 0);
++out:
++ return;
++}
++
++/**
++ * e1000_update_adaptive_generic - Update Adaptive Interframe Spacing
++ * @hw: pointer to the HW structure
++ *
++ * Update the Adaptive Interframe Spacing Throttle value based on the
++ * time between transmitted packets and time between collisions.
++ **/
++void e1000_update_adaptive_generic(struct e1000_hw *hw)
++{
++ struct e1000_mac_info *mac = &hw->mac;
++
++ DEBUGFUNC("e1000_update_adaptive_generic");
++
++ if (!mac->adaptive_ifs) {
++ DEBUGOUT("Not in Adaptive IFS mode!\n");
++ goto out;
++ }
++
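++ /*
++ * Increase the IFS throttle when collisions are high relative to
++ * the number of transmitted packets; otherwise back off to zero.
++ */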
++ if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
++ if (mac->tx_packet_delta > MIN_NUM_XMITS) {
++ mac->in_ifs_mode = true;
++ if (mac->current_ifs_val < mac->ifs_max_val) {
++ if (!mac->current_ifs_val)
++ mac->current_ifs_val = mac->ifs_min_val;
++ else
++ mac->current_ifs_val +=
++ mac->ifs_step_size;
++ E1000_WRITE_REG(hw, E1000_AIT, mac->current_ifs_val);
++ }
++ }
++ } else {
++ if (mac->in_ifs_mode &&
++ (mac->tx_packet_delta <= MIN_NUM_XMITS)) {
++ mac->current_ifs_val = 0;
++ mac->in_ifs_mode = false;
++ E1000_WRITE_REG(hw, E1000_AIT, 0);
++ }
++ }
++out:
++ return;
++}
++
++/**
++ * e1000_validate_mdi_setting_generic - Verify MDI/MDIx settings
++ * @hw: pointer to the HW structure
++ *
++ * Verify that when auto-negotiation is not used, MDI/MDIx is correctly
++ * set. Forced operation supports MDI mode only.
++ **/
++static s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw)
++{
++ s32 ret_val = E1000_SUCCESS;
++
++ DEBUGFUNC("e1000_validate_mdi_setting_generic");
++
++ if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
++ DEBUGOUT("Invalid MDI setting detected\n");
++ hw->phy.mdix = 1;
++ ret_val = -E1000_ERR_CONFIG;
++ goto out;
++ }
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_write_8bit_ctrl_reg_generic - Write an 8-bit CTRL register
++ * @hw: pointer to the HW structure
++ * @reg: 32bit register offset such as E1000_SCTL
++ * @offset: register offset to write to
++ * @data: data to write at register offset
++ *
++ * Writes an address/data control type register. There are several of these
++ * and they all have the format address << 8 | data and bit 31 is polled for
++ * completion.
++ **/
++s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg,
++ u32 offset, u8 data)
++{
++ u32 i, regvalue = 0;
++ s32 ret_val = E1000_SUCCESS;
++
++ DEBUGFUNC("e1000_write_8bit_ctrl_reg_generic");
++
++ /* Set up the address and data */
++ regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT);
++ E1000_WRITE_REG(hw, reg, regvalue);
++
++ /* Poll the ready bit to see if the write completed */
++ for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
++ usec_delay(5);
++ regvalue = E1000_READ_REG(hw, reg);
++ if (regvalue & E1000_GEN_CTL_READY)
++ break;
++ }
++ if (!(regvalue & E1000_GEN_CTL_READY)) {
++ DEBUGOUT1("Reg %08x did not indicate ready\n", reg);
++ ret_val = -E1000_ERR_PHY;
++ goto out;
++ }
++
++out:
++ return ret_val;
++}
+Index: linux-2.6.22/drivers/net/igb/e1000_mac.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.22/drivers/net/igb/e1000_mac.h 2009-12-18 12:39:22.000000000 -0500
+@@ -0,0 +1,80 @@
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2009 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ You should have received a copy of the GNU General Public License along with
++ this program; if not, write to the Free Software Foundation, Inc.,
++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
++
++#ifndef _E1000_MAC_H_
++#define _E1000_MAC_H_
++
++/*
++ * Functions that should not be called directly from drivers but can be used
++ * by other files in this 'shared code'
++ */
++void e1000_init_mac_ops_generic(struct e1000_hw *hw);
++s32 e1000_blink_led_generic(struct e1000_hw *hw);
++s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw);
++s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw);
++s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw);
++s32 e1000_cleanup_led_generic(struct e1000_hw *hw);
++s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw);
++s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw);
++s32 e1000_force_mac_fc_generic(struct e1000_hw *hw);
++s32 e1000_get_auto_rd_done_generic(struct e1000_hw *hw);
++s32 e1000_get_bus_info_pcie_generic(struct e1000_hw *hw);
++void e1000_set_lan_id_single_port(struct e1000_hw *hw);
++s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw);
++s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed,
++ u16 *duplex);
++s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw *hw,
++ u16 *speed, u16 *duplex);
++s32 e1000_id_led_init_generic(struct e1000_hw *hw);
++s32 e1000_led_on_generic(struct e1000_hw *hw);
++s32 e1000_led_off_generic(struct e1000_hw *hw);
++void e1000_update_mc_addr_list_generic(struct e1000_hw *hw,
++ u8 *mc_addr_list, u32 mc_addr_count);
++s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw);
++s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw);
++s32 e1000_setup_led_generic(struct e1000_hw *hw);
++s32 e1000_setup_link_generic(struct e1000_hw *hw);
++s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg,
++ u32 offset, u8 data);
++
++u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr);
++
++void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw);
++void e1000_clear_vfta_generic(struct e1000_hw *hw);
++void e1000_config_collision_dist_generic(struct e1000_hw *hw);
++void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count);
++void e1000_mta_set_generic(struct e1000_hw *hw, u32 hash_value);
++void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw);
++void e1000_put_hw_semaphore_generic(struct e1000_hw *hw);
++void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
++s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw);
++void e1000_reset_adaptive_generic(struct e1000_hw *hw);
++void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop);
++void e1000_update_adaptive_generic(struct e1000_hw *hw);
++void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
++
++#endif
+Index: linux-2.6.22/drivers/net/igb/e1000_manage.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.22/drivers/net/igb/e1000_manage.c 2009-12-18 12:39:22.000000000 -0500
+@@ -0,0 +1,383 @@
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2009 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ You should have received a copy of the GNU General Public License along with
++ this program; if not, write to the Free Software Foundation, Inc.,
++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
++
++#include "e1000_api.h"
++
++static u8 e1000_calculate_checksum(u8 *buffer, u32 length);
++
++/**
++ * e1000_calculate_checksum - Calculate checksum for buffer
++ * @buffer: pointer to EEPROM
++ * @length: size of EEPROM to calculate a checksum for
++ *
++ * Calculates the checksum of the given buffer over the specified length.
++ * The calculated checksum is returned.
++ **/
++static u8 e1000_calculate_checksum(u8 *buffer, u32 length)
++{
++ u32 i;
++ u8 sum = 0;
++
++ DEBUGFUNC("e1000_calculate_checksum");
++
++ if (!buffer)
++ return 0;
++
++ for (i = 0; i < length; i++)
++ sum += buffer[i];
++
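++ /* Return the two's complement so the bytes plus checksum sum to zero */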
++ return (u8) (0 - sum);
++}
++
++/**
++ * e1000_mng_enable_host_if_generic - Checks host interface is enabled
++ * @hw: pointer to the HW structure
++ *
++ * Returns E1000_SUCCESS upon success, else E1000_ERR_HOST_INTERFACE_COMMAND
++ *
++ * This function checks whether the HOST IF is enabled for command operation
++ * and also checks whether the previous command has completed. It busy-waits
++ * if the previous command has not completed.
++ **/
++s32 e1000_mng_enable_host_if_generic(struct e1000_hw *hw)
++{
++ u32 hicr;
++ s32 ret_val = E1000_SUCCESS;
++ u8 i;
++
++ DEBUGFUNC("e1000_mng_enable_host_if_generic");
++
++ /* Check that the host interface is enabled. */
++ hicr = E1000_READ_REG(hw, E1000_HICR);
++ if ((hicr & E1000_HICR_EN) == 0) {
++ DEBUGOUT("E1000_HOST_EN bit disabled.\n");
++ ret_val = -E1000_ERR_HOST_INTERFACE_COMMAND;
++ goto out;
++ }
++ /* check that the previous command has completed */
++ for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) {
++ hicr = E1000_READ_REG(hw, E1000_HICR);
++ if (!(hicr & E1000_HICR_C))
++ break;
++ msec_delay_irq(1);
++ }
++
++ if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
++ DEBUGOUT("Previous command timed out.\n");
++ ret_val = -E1000_ERR_HOST_INTERFACE_COMMAND;
++ goto out;
++ }
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_check_mng_mode_generic - Generic check management mode
++ * @hw: pointer to the HW structure
++ *
++ * Reads the firmware semaphore register and returns true (>0) if
++ * manageability is enabled, else false (0).
++ **/
++bool e1000_check_mng_mode_generic(struct e1000_hw *hw)
++{
++ u32 fwsm;
++
++ DEBUGFUNC("e1000_check_mng_mode_generic");
++
++ fwsm = E1000_READ_REG(hw, E1000_FWSM);
++
++ return (fwsm & E1000_FWSM_MODE_MASK) ==
++ (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT);
++}
++
++/**
++ * e1000_enable_tx_pkt_filtering_generic - Enable packet filtering on TX
++ * @hw: pointer to the HW structure
++ *
++ * Enables packet filtering on transmit packets if manageability is enabled
++ * and host interface is enabled.
++ **/
++bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw)
++{
++ struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie;
++ u32 *buffer = (u32 *)&hw->mng_cookie;
++ u32 offset;
++ s32 ret_val, hdr_csum, csum;
++ u8 i, len;
++ bool tx_filter = true;
++
++ DEBUGFUNC("e1000_enable_tx_pkt_filtering_generic");
++
++ /* No manageability, no filtering */
++ if (!hw->mac.ops.check_mng_mode(hw)) {
++ tx_filter = false;
++ goto out;
++ }
++
++ /*
++ * If we can't read from the host interface for whatever
++ * reason, disable filtering.
++ */
++ ret_val = hw->mac.ops.mng_enable_host_if(hw);
++ if (ret_val != E1000_SUCCESS) {
++ tx_filter = false;
++ goto out;
++ }
++
++ /* Read in the header. Length and offset are in dwords. */
++ len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2;
++ offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2;
++ for (i = 0; i < len; i++) {
++ *(buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw,
++ E1000_HOST_IF,
++ offset + i);
++ }
++ hdr_csum = hdr->checksum;
++ hdr->checksum = 0;
++ csum = e1000_calculate_checksum((u8 *)hdr,
++ E1000_MNG_DHCP_COOKIE_LENGTH);
++ /*
++ * If either the checksums or signature don't match, then
++ * the cookie area isn't considered valid, in which case we
++ * take the safe route of assuming Tx filtering is enabled.
++ */
++ if (hdr_csum != csum)
++ goto out;
++ if (hdr->signature != E1000_IAMT_SIGNATURE)
++ goto out;
++
++ /* Cookie area is valid, make the final check for filtering. */
++ if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING))
++ tx_filter = false;
++
++out:
++ hw->mac.tx_pkt_filtering = tx_filter;
++ return tx_filter;
++}
++
++/**
++ * e1000_mng_write_dhcp_info_generic - Writes DHCP info to host interface
++ * @hw: pointer to the HW structure
++ * @buffer: pointer to the host interface
++ * @length: size of the buffer
++ *
++ * Writes the DHCP information to the host interface.
++ **/
++s32 e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw, u8 *buffer,
++ u16 length)
++{
++ struct e1000_host_mng_command_header hdr;
++ s32 ret_val;
++ u32 hicr;
++
++ DEBUGFUNC("e1000_mng_write_dhcp_info_generic");
++
++ hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
++ hdr.command_length = length;
++ hdr.reserved1 = 0;
++ hdr.reserved2 = 0;
++ hdr.checksum = 0;
++
++ /* Enable the host interface */
++ ret_val = hw->mac.ops.mng_enable_host_if(hw);
++ if (ret_val)
++ goto out;
++
++ /* Populate the host interface with the contents of "buffer". */
++ ret_val = hw->mac.ops.mng_host_if_write(hw, buffer, length,
++ sizeof(hdr), &(hdr.checksum));
++ if (ret_val)
++ goto out;
++
++ /* Write the manageability command header */
++ ret_val = hw->mac.ops.mng_write_cmd_header(hw, &hdr);
++ if (ret_val)
++ goto out;
++
++ /* Tell the ARC a new command is pending. */
++ hicr = E1000_READ_REG(hw, E1000_HICR);
++ E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C);
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_mng_write_cmd_header_generic - Writes manageability command header
++ * @hw: pointer to the HW structure
++ * @hdr: pointer to the host interface command header
++ *
++ * Writes the command header after calculating the checksum.
++ **/
++s32 e1000_mng_write_cmd_header_generic(struct e1000_hw *hw,
++ struct e1000_host_mng_command_header *hdr)
++{
++ u16 i, length = sizeof(struct e1000_host_mng_command_header);
++
++ DEBUGFUNC("e1000_mng_write_cmd_header_generic");
++
++ /* Write the whole command header structure with new checksum. */
++
++ hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length);
++
++ length >>= 2;
++ /* Write the relevant command block into the ram area. */
++ for (i = 0; i < length; i++) {
++ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, i,
++ *((u32 *) hdr + i));
++ E1000_WRITE_FLUSH(hw);
++ }
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_mng_host_if_write_generic - Write to the manageability host interface
++ * @hw: pointer to the HW structure
++ * @buffer: pointer to the host interface buffer
++ * @length: size of the buffer
++ * @offset: location in the buffer to write to
++ * @sum: sum of the data (not checksum)
++ *
++ * Writes the buffer contents to the host interface at the given offset,
++ * handling alignment so the writes are done in the most efficient way.
++ * It also accumulates the sum of the data in the *sum parameter.
++ **/
++s32 e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer,
++ u16 length, u16 offset, u8 *sum)
++{
++ u8 *tmp;
++ u8 *bufptr = buffer;
++ u32 data = 0;
++ s32 ret_val = E1000_SUCCESS;
++ u16 remaining, i, j, prev_bytes;
++
++ DEBUGFUNC("e1000_mng_host_if_write_generic");
++
++ /* 'sum' is a running sum of the data only, not a checksum */
++
++ if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH) {
++ ret_val = -E1000_ERR_PARAM;
++ goto out;
++ }
++
++ tmp = (u8 *)&data;
++ prev_bytes = offset & 0x3;
++ offset >>= 2;
++
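++ /*
++ * If the offset is not dword aligned, read-modify-write the first
++ * dword so the bytes already in the host interface are preserved.
++ */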
++ if (prev_bytes) {
++ data = E1000_READ_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset);
++ for (j = prev_bytes; j < sizeof(u32); j++) {
++ *(tmp + j) = *bufptr++;
++ *sum += *(tmp + j);
++ }
++ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset, data);
++ length -= j - prev_bytes;
++ offset++;
++ }
++
++ remaining = length & 0x3;
++ length -= remaining;
++
++ /* Calculate length in DWORDs */
++ length >>= 2;
++
++ /*
++ * The device driver writes the relevant command block into the
++ * ram area.
++ */
++ for (i = 0; i < length; i++) {
++ for (j = 0; j < sizeof(u32); j++) {
++ *(tmp + j) = *bufptr++;
++ *sum += *(tmp + j);
++ }
++
++ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i,
++ data);
++ }
++ if (remaining) {
++ for (j = 0; j < sizeof(u32); j++) {
++ if (j < remaining)
++ *(tmp + j) = *bufptr++;
++ else
++ *(tmp + j) = 0;
++
++ *sum += *(tmp + j);
++ }
++ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i, data);
++ }
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_enable_mng_pass_thru - Enable processing of ARPs
++ * @hw: pointer to the HW structure
++ *
++ * Verifies whether the hardware needs to allow ARPs to be processed by the
++ * host.
++ **/
++bool e1000_enable_mng_pass_thru(struct e1000_hw *hw)
++{
++ u32 manc;
++ u32 fwsm, factps;
++ bool ret_val = false;
++
++ DEBUGFUNC("e1000_enable_mng_pass_thru");
++
++ if (!hw->mac.asf_firmware_present)
++ goto out;
++
++ manc = E1000_READ_REG(hw, E1000_MANC);
++
++ if (!(manc & E1000_MANC_RCV_TCO_EN) ||
++ !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
++ goto out;
++
++ if (hw->mac.arc_subsystem_valid) {
++ fwsm = E1000_READ_REG(hw, E1000_FWSM);
++ factps = E1000_READ_REG(hw, E1000_FACTPS);
++
++ if (!(factps & E1000_FACTPS_MNGCG) &&
++ ((fwsm & E1000_FWSM_MODE_MASK) ==
++ (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
++ ret_val = true;
++ goto out;
++ }
++ } else {
++ if ((manc & E1000_MANC_SMBUS_EN) &&
++ !(manc & E1000_MANC_ASF_EN)) {
++ ret_val = true;
++ goto out;
++ }
++ }
++
++out:
++ return ret_val;
++}
++
+Index: linux-2.6.22/drivers/net/igb/e1000_manage.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.22/drivers/net/igb/e1000_manage.h 2009-12-18 12:39:22.000000000 -0500
+@@ -0,0 +1,81 @@
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2009 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ You should have received a copy of the GNU General Public License along with
++ this program; if not, write to the Free Software Foundation, Inc.,
++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
++
++#ifndef _E1000_MANAGE_H_
++#define _E1000_MANAGE_H_
++
++bool e1000_check_mng_mode_generic(struct e1000_hw *hw);
++bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw);
++s32 e1000_mng_enable_host_if_generic(struct e1000_hw *hw);
++s32 e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer,
++ u16 length, u16 offset, u8 *sum);
++s32 e1000_mng_write_cmd_header_generic(struct e1000_hw *hw,
++ struct e1000_host_mng_command_header *hdr);
++s32 e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw,
++ u8 *buffer, u16 length);
++bool e1000_enable_mng_pass_thru(struct e1000_hw *hw);
++
++enum e1000_mng_mode {
++ e1000_mng_mode_none = 0,
++ e1000_mng_mode_asf,
++ e1000_mng_mode_pt,
++ e1000_mng_mode_ipmi,
++ e1000_mng_mode_host_if_only
++};
++
++#define E1000_FACTPS_MNGCG 0x20000000
++
++#define E1000_FWSM_MODE_MASK 0xE
++#define E1000_FWSM_MODE_SHIFT 1
++
++#define E1000_MNG_IAMT_MODE 0x3
++#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10
++#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0
++#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10
++#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64
++#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING 0x1
++#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2
++
++#define E1000_VFTA_ENTRY_SHIFT 5
++#define E1000_VFTA_ENTRY_MASK 0x7F
++#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
++
++#define E1000_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */
++#define E1000_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */
++#define E1000_HI_COMMAND_TIMEOUT 500 /* Process HI command limit */
++
++#define E1000_HICR_EN 0x01 /* Enable bit - RO */
++/* Driver sets this bit when done to put command in RAM */
++#define E1000_HICR_C 0x02
++#define E1000_HICR_SV 0x04 /* Status Validity */
++#define E1000_HICR_FW_RESET_ENABLE 0x40
++#define E1000_HICR_FW_RESET 0x80
++
++/* Intel(R) Active Management Technology signature */
++#define E1000_IAMT_SIGNATURE 0x544D4149
++
++#endif
+Index: linux-2.6.22/drivers/net/igb/e1000_mbx.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.22/drivers/net/igb/e1000_mbx.c 2009-12-18 12:39:22.000000000 -0500
+@@ -0,0 +1,491 @@
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2009 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ You should have received a copy of the GNU General Public License along with
++ this program; if not, write to the Free Software Foundation, Inc.,
++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
++
++#include "e1000_mbx.h"
++
++/**
++ * e1000_read_mbx - Reads a message from the mailbox
++ * @hw: pointer to the HW structure
++ * @msg: The message buffer
++ * @size: Length of buffer
++ * @mbx_id: id of mailbox to read
++ *
++ * returns SUCCESS if it successfully read the message from the buffer
++ **/
++s32 e1000_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
++{
++ struct e1000_mbx_info *mbx = &hw->mbx;
++ s32 ret_val = -E1000_ERR_MBX;
++
++ DEBUGFUNC("e1000_read_mbx");
++
++ /* limit read to size of mailbox */
++ if (size > mbx->size)
++ size = mbx->size;
++
++ if (mbx->ops.read)
++ ret_val = mbx->ops.read(hw, msg, size, mbx_id);
++
++ return ret_val;
++}
++
++/**
++ * e1000_write_mbx - Write a message to the mailbox
++ * @hw: pointer to the HW structure
++ * @msg: The message buffer
++ * @size: Length of buffer
++ * @mbx_id: id of mailbox to write
++ *
++ * returns SUCCESS if it successfully copied message into the buffer
++ **/
++s32 e1000_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
++{
++ struct e1000_mbx_info *mbx = &hw->mbx;
++ s32 ret_val = E1000_SUCCESS;
++
++ DEBUGFUNC("e1000_write_mbx");
++
++ if (size > mbx->size)
++ ret_val = -E1000_ERR_MBX;
++
++ else if (mbx->ops.write)
++ ret_val = mbx->ops.write(hw, msg, size, mbx_id);
++
++ return ret_val;
++}
++
++/**
++ * e1000_check_for_msg - checks to see if someone sent us mail
++ * @hw: pointer to the HW structure
++ * @mbx_id: id of mailbox to check
++ *
++ * returns SUCCESS if the Status bit was found or else ERR_MBX
++ **/
++s32 e1000_check_for_msg(struct e1000_hw *hw, u16 mbx_id)
++{
++ struct e1000_mbx_info *mbx = &hw->mbx;
++ s32 ret_val = -E1000_ERR_MBX;
++
++ DEBUGFUNC("e1000_check_for_msg");
++
++ if (mbx->ops.check_for_msg)
++ ret_val = mbx->ops.check_for_msg(hw, mbx_id);
++
++ return ret_val;
++}
++
++/**
++ * e1000_check_for_ack - checks to see if someone sent us ACK
++ * @hw: pointer to the HW structure
++ * @mbx_id: id of mailbox to check
++ *
++ * returns SUCCESS if the Status bit was found or else ERR_MBX
++ **/
++s32 e1000_check_for_ack(struct e1000_hw *hw, u16 mbx_id)
++{
++ struct e1000_mbx_info *mbx = &hw->mbx;
++ s32 ret_val = -E1000_ERR_MBX;
++
++ DEBUGFUNC("e1000_check_for_ack");
++
++ if (mbx->ops.check_for_ack)
++ ret_val = mbx->ops.check_for_ack(hw, mbx_id);
++
++ return ret_val;
++}
++
++/**
++ * e1000_check_for_rst - checks to see if other side has reset
++ * @hw: pointer to the HW structure
++ * @mbx_id: id of mailbox to check
++ *
++ * returns SUCCESS if the Status bit was found or else ERR_MBX
++ **/
++s32 e1000_check_for_rst(struct e1000_hw *hw, u16 mbx_id)
++{
++ struct e1000_mbx_info *mbx = &hw->mbx;
++ s32 ret_val = -E1000_ERR_MBX;
++
++ DEBUGFUNC("e1000_check_for_rst");
++
++ if (mbx->ops.check_for_rst)
++ ret_val = mbx->ops.check_for_rst(hw, mbx_id);
++
++ return ret_val;
++}
++
++/**
++ * e1000_poll_for_msg - Wait for message notification
++ * @hw: pointer to the HW structure
++ * @mbx_id: id of mailbox to write
++ *
++ * returns SUCCESS if it successfully received a message notification
++ **/
++static s32 e1000_poll_for_msg(struct e1000_hw *hw, u16 mbx_id)
++{
++ struct e1000_mbx_info *mbx = &hw->mbx;
++ int countdown = mbx->timeout;
++
++ DEBUGFUNC("e1000_poll_for_msg");
++
++ if (!countdown || !mbx->ops.check_for_msg)
++ goto out;
++
++ while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
++ countdown--;
++ if (!countdown)
++ break;
++ usec_delay(mbx->usec_delay);
++ }
++
++ /* if we failed, all future posted messages fail until reset */
++ if (!countdown)
++ mbx->timeout = 0;
++out:
++ return countdown ? E1000_SUCCESS : -E1000_ERR_MBX;
++}
++
++/**
++ * e1000_poll_for_ack - Wait for message acknowledgement
++ * @hw: pointer to the HW structure
++ * @mbx_id: id of mailbox to write
++ *
++ * returns SUCCESS if it successfully received a message acknowledgement
++ **/
++static s32 e1000_poll_for_ack(struct e1000_hw *hw, u16 mbx_id)
++{
++ struct e1000_mbx_info *mbx = &hw->mbx;
++ int countdown = mbx->timeout;
++
++ DEBUGFUNC("e1000_poll_for_ack");
++
++ if (!countdown || !mbx->ops.check_for_ack)
++ goto out;
++
++ while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
++ countdown--;
++ if (!countdown)
++ break;
++ usec_delay(mbx->usec_delay);
++ }
++
++ /* if we failed, all future posted messages fail until reset */
++ if (!countdown)
++ mbx->timeout = 0;
++out:
++ return countdown ? E1000_SUCCESS : -E1000_ERR_MBX;
++}
++
++/**
++ * e1000_read_posted_mbx - Wait for message notification and receive message
++ * @hw: pointer to the HW structure
++ * @msg: The message buffer
++ * @size: Length of buffer
++ * @mbx_id: id of mailbox to write
++ *
++ * returns SUCCESS if it successfully received a message notification and
++ * copied it into the receive buffer.
++ **/
++s32 e1000_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
++{
++ struct e1000_mbx_info *mbx = &hw->mbx;
++ s32 ret_val = -E1000_ERR_MBX;
++
++ DEBUGFUNC("e1000_read_posted_mbx");
++
++ if (!mbx->ops.read)
++ goto out;
++
++ ret_val = e1000_poll_for_msg(hw, mbx_id);
++
++ /* if a message was received, read it; otherwise we timed out */
++ if (!ret_val)
++ ret_val = mbx->ops.read(hw, msg, size, mbx_id);
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_write_posted_mbx - Write a message to the mailbox, wait for ack
++ * @hw: pointer to the HW structure
++ * @msg: The message buffer
++ * @size: Length of buffer
++ * @mbx_id: id of mailbox to write
++ *
++ * returns SUCCESS if it successfully copied message into the buffer and
++ * received an ack to that message within delay * timeout period
++ **/
++s32 e1000_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
++{
++ struct e1000_mbx_info *mbx = &hw->mbx;
++ s32 ret_val = -E1000_ERR_MBX;
++
++ DEBUGFUNC("e1000_write_posted_mbx");
++
++ /* exit if either we can't write or there isn't a defined timeout */
++ if (!mbx->ops.write || !mbx->timeout)
++ goto out;
++
++ /* send msg */
++ ret_val = mbx->ops.write(hw, msg, size, mbx_id);
++
++ /* if msg sent wait until we receive an ack */
++ if (!ret_val)
++ ret_val = e1000_poll_for_ack(hw, mbx_id);
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_init_mbx_ops_generic - Initialize mailbox function pointers
++ * @hw: pointer to the HW structure
++ *
++ * Sets up the posted read/write mailbox function pointers.
++ **/
++void e1000_init_mbx_ops_generic(struct e1000_hw *hw)
++{
++ struct e1000_mbx_info *mbx = &hw->mbx;
++ mbx->ops.read_posted = e1000_read_posted_mbx;
++ mbx->ops.write_posted = e1000_write_posted_mbx;
++}
++
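++/**
++ * e1000_check_for_bit_pf - check for and clear a mailbox interrupt cause bit
++ * @hw: pointer to the HW structure
++ * @mask: MBVFICR bit(s) to check for
++ *
++ * Returns SUCCESS if any of the bits in @mask are set in MBVFICR and clears
++ * them by writing the mask back, else returns ERR_MBX.
++ **/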
++static s32 e1000_check_for_bit_pf(struct e1000_hw *hw, u32 mask)
++{
++ u32 mbvficr = E1000_READ_REG(hw, E1000_MBVFICR);
++ s32 ret_val = -E1000_ERR_MBX;
++
++ if (mbvficr & mask) {
++ ret_val = E1000_SUCCESS;
++ E1000_WRITE_REG(hw, E1000_MBVFICR, mask);
++ }
++
++ return ret_val;
++}
++
++/**
++ * e1000_check_for_msg_pf - checks to see if the VF has sent mail
++ * @hw: pointer to the HW structure
++ * @vf_number: the VF index
++ *
++ * returns SUCCESS if the VF has set the Request bit or else ERR_MBX
++ **/
++static s32 e1000_check_for_msg_pf(struct e1000_hw *hw, u16 vf_number)
++{
++ s32 ret_val = -E1000_ERR_MBX;
++
++ DEBUGFUNC("e1000_check_for_msg_pf");
++
++ if (!e1000_check_for_bit_pf(hw, E1000_MBVFICR_VFREQ_VF1 << vf_number)) {
++ ret_val = E1000_SUCCESS;
++ hw->mbx.stats.reqs++;
++ }
++
++ return ret_val;
++}
++
++/**
++ * e1000_check_for_ack_pf - checks to see if the VF has ACKed
++ * @hw: pointer to the HW structure
++ * @vf_number: the VF index
++ *
++ * returns SUCCESS if the VF has set the ACK bit or else ERR_MBX
++ **/
++static s32 e1000_check_for_ack_pf(struct e1000_hw *hw, u16 vf_number)
++{
++ s32 ret_val = -E1000_ERR_MBX;
++
++ DEBUGFUNC("e1000_check_for_ack_pf");
++
++ if (!e1000_check_for_bit_pf(hw, E1000_MBVFICR_VFACK_VF1 << vf_number)) {
++ ret_val = E1000_SUCCESS;
++ hw->mbx.stats.acks++;
++ }
++
++ return ret_val;
++}
++
++/**
++ * e1000_check_for_rst_pf - checks to see if the VF has reset
++ * @hw: pointer to the HW structure
++ * @vf_number: the VF index
++ *
++ * returns SUCCESS if the VF has requested a reset or else ERR_MBX
++ **/
++static s32 e1000_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number)
++{
++ u32 vflre = E1000_READ_REG(hw, E1000_VFLRE);
++ s32 ret_val = -E1000_ERR_MBX;
++
++ DEBUGFUNC("e1000_check_for_rst_pf");
++
++ if (vflre & (1 << vf_number)) {
++ ret_val = E1000_SUCCESS;
++ E1000_WRITE_REG(hw, E1000_VFLRE, (1 << vf_number));
++ hw->mbx.stats.rsts++;
++ }
++
++ return ret_val;
++}
++
++/**
++ * e1000_obtain_mbx_lock_pf - obtain mailbox lock
++ * @hw: pointer to the HW structure
++ * @vf_number: the VF index
++ *
++ * return SUCCESS if we obtained the mailbox lock
++ **/
++static s32 e1000_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number)
++{
++ s32 ret_val = -E1000_ERR_MBX;
++ u32 p2v_mailbox;
++
++ DEBUGFUNC("e1000_obtain_mbx_lock_pf");
++
++ /* Take ownership of the buffer */
++ E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU);
++
++ /* reserve mailbox for pf use */
++ p2v_mailbox = E1000_READ_REG(hw, E1000_P2VMAILBOX(vf_number));
++ if (p2v_mailbox & E1000_P2VMAILBOX_PFU)
++ ret_val = E1000_SUCCESS;
++
++ return ret_val;
++}
++
++/**
++ * e1000_write_mbx_pf - Places a message in the mailbox
++ * @hw: pointer to the HW structure
++ * @msg: The message buffer
++ * @size: Length of buffer
++ * @vf_number: the VF index
++ *
++ * returns SUCCESS if it successfully copied message into the buffer
++ **/
++static s32 e1000_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
++ u16 vf_number)
++{
++ s32 ret_val;
++ u16 i;
++
++ DEBUGFUNC("e1000_write_mbx_pf");
++
++ /* lock the mailbox to prevent pf/vf race condition */
++ ret_val = e1000_obtain_mbx_lock_pf(hw, vf_number);
++ if (ret_val)
++ goto out_no_write;
++
++ /* flush msg and acks as we are overwriting the message buffer */
++ e1000_check_for_msg_pf(hw, vf_number);
++ e1000_check_for_ack_pf(hw, vf_number);
++
++ /* copy the caller specified message to the mailbox memory buffer */
++ for (i = 0; i < size; i++)
++ E1000_WRITE_REG_ARRAY(hw, E1000_VMBMEM(vf_number), i, msg[i]);
++
++ /* Interrupt VF to tell it a message has been sent and release buffer */
++ E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_STS);
++
++ /* update stats */
++ hw->mbx.stats.msgs_tx++;
++
++out_no_write:
++ return ret_val;
++}
++
++/**
++ * e1000_read_mbx_pf - Read a message from the mailbox
++ * @hw: pointer to the HW structure
++ * @msg: The message buffer
++ * @size: Length of buffer
++ * @vf_number: the VF index
++ *
++ * This function copies a message from the mailbox buffer to the caller's
++ * memory buffer. The presumption is that the caller knows that there was
++ * a message due to a VF request so no polling for message is needed.
++ **/
++static s32 e1000_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
++ u16 vf_number)
++{
++ s32 ret_val;
++ u16 i;
++
++ DEBUGFUNC("e1000_read_mbx_pf");
++
++ /* lock the mailbox to prevent pf/vf race condition */
++ ret_val = e1000_obtain_mbx_lock_pf(hw, vf_number);
++ if (ret_val)
++ goto out_no_read;
++
++ /* copy the message from the mailbox memory buffer */
++ for (i = 0; i < size; i++)
++ msg[i] = E1000_READ_REG_ARRAY(hw, E1000_VMBMEM(vf_number), i);
++
++ /* Acknowledge the message and release buffer */
++ E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK);
++
++ /* update stats */
++ hw->mbx.stats.msgs_rx++;
++
++out_no_read:
++ return ret_val;
++}
++
++/**
++ * e1000_init_mbx_params_pf - set initial values for pf mailbox
++ * @hw: pointer to the HW structure
++ *
++ * Initializes the hw->mbx struct to correct values for pf mailbox
++ **/
++s32 e1000_init_mbx_params_pf(struct e1000_hw *hw)
++{
++ struct e1000_mbx_info *mbx = &hw->mbx;
++
++ if (hw->mac.type == e1000_82576) {
++ mbx->timeout = 0;
++ mbx->usec_delay = 0;
++
++ mbx->size = E1000_VFMAILBOX_SIZE;
++
++ mbx->ops.read = e1000_read_mbx_pf;
++ mbx->ops.write = e1000_write_mbx_pf;
++ mbx->ops.read_posted = e1000_read_posted_mbx;
++ mbx->ops.write_posted = e1000_write_posted_mbx;
++ mbx->ops.check_for_msg = e1000_check_for_msg_pf;
++ mbx->ops.check_for_ack = e1000_check_for_ack_pf;
++ mbx->ops.check_for_rst = e1000_check_for_rst_pf;
++
++ mbx->stats.msgs_tx = 0;
++ mbx->stats.msgs_rx = 0;
++ mbx->stats.reqs = 0;
++ mbx->stats.acks = 0;
++ mbx->stats.rsts = 0;
++ }
++
++ return E1000_SUCCESS;
++}
++
+Index: linux-2.6.22/drivers/net/igb/e1000_mbx.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.22/drivers/net/igb/e1000_mbx.h 2009-12-18 12:39:22.000000000 -0500
+@@ -0,0 +1,87 @@
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2009 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ You should have received a copy of the GNU General Public License along with
++ this program; if not, write to the Free Software Foundation, Inc.,
++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
++
++#ifndef _E1000_MBX_H_
++#define _E1000_MBX_H_
++
++#include "e1000_api.h"
++
++#define E1000_P2VMAILBOX_STS 0x00000001 /* Initiate message send to VF */
++#define E1000_P2VMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
++#define E1000_P2VMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
++#define E1000_P2VMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
++#define E1000_P2VMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
++
++#define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */
++#define E1000_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
++#define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */
++#define E1000_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
++
++#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
++
++/* If it's a E1000_VF_* msg then it originates in the VF and is sent to the
++ * PF. The reverse is true if it is E1000_PF_*.
++ * Message ACK's are the value or'd with 0xF0000000
++ */
++#define E1000_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
++ * this are the ACK */
++#define E1000_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
++ * this are the NACK */
++#define E1000_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
++ * clear to send requests */
++#define E1000_VT_MSGINFO_SHIFT 16
++/* bits 23:16 are used for extra info for certain messages */
++#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT)
++
++#define E1000_VF_RESET 0x01 /* VF requests reset */
++#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */
++#define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */
++#define E1000_VF_SET_MULTICAST_COUNT_MASK (0x1F << E1000_VT_MSGINFO_SHIFT)
++#define E1000_VF_SET_MULTICAST_OVERFLOW (0x80 << E1000_VT_MSGINFO_SHIFT)
++#define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */
++#define E1000_VF_SET_VLAN_ADD (0x01 << E1000_VT_MSGINFO_SHIFT)
++#define E1000_VF_SET_LPE 0x05 /* VF requests to set VMOLR.LPE */
++#define E1000_VF_SET_PROMISC 0x06 /*VF requests to clear VMOLR.ROPE/MPME*/
++#define E1000_VF_SET_PROMISC_UNICAST (0x01 << E1000_VT_MSGINFO_SHIFT)
++#define E1000_VF_SET_PROMISC_MULTICAST (0x02 << E1000_VT_MSGINFO_SHIFT)
++
++#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */
++
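++/* Illustrative sketch (not part of the driver) of how the values above
++ * compose a message word; "mc_count" is a hypothetical multicast address
++ * count supplied by the VF:
++ *
++ *     msgbuf[0] = E1000_VF_SET_MULTICAST |
++ *                 (mc_count << E1000_VT_MSGINFO_SHIFT);
++ *
++ * and the PF replies by or'ing the result type into the same word:
++ *
++ *     msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
++ */
++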
++#define E1000_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
++#define E1000_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
++
++s32 e1000_read_mbx(struct e1000_hw *, u32 *, u16, u16);
++s32 e1000_write_mbx(struct e1000_hw *, u32 *, u16, u16);
++s32 e1000_read_posted_mbx(struct e1000_hw *, u32 *, u16, u16);
++s32 e1000_write_posted_mbx(struct e1000_hw *, u32 *, u16, u16);
++s32 e1000_check_for_msg(struct e1000_hw *, u16);
++s32 e1000_check_for_ack(struct e1000_hw *, u16);
++s32 e1000_check_for_rst(struct e1000_hw *, u16);
++void e1000_init_mbx_ops_generic(struct e1000_hw *hw);
++s32 e1000_init_mbx_params_pf(struct e1000_hw *);
++
++#endif /* _E1000_MBX_H_ */
+Index: linux-2.6.22/drivers/net/igb/e1000_nvm.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.22/drivers/net/igb/e1000_nvm.c 2009-12-18 12:39:22.000000000 -0500
+@@ -0,0 +1,625 @@
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2009 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ You should have received a copy of the GNU General Public License along with
++ this program; if not, write to the Free Software Foundation, Inc.,
++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
++
++#include "e1000_api.h"
++
++static void e1000_stop_nvm(struct e1000_hw *hw);
++static void e1000_reload_nvm_generic(struct e1000_hw *hw);
++
++/**
++ * e1000_init_nvm_ops_generic - Initialize NVM function pointers
++ * @hw: pointer to the HW structure
++ *
++ * Sets up the generic NVM function pointers
++ **/
++void e1000_init_nvm_ops_generic(struct e1000_hw *hw)
++{
++ struct e1000_nvm_info *nvm = &hw->nvm;
++ DEBUGFUNC("e1000_init_nvm_ops_generic");
++
++ /* Initialize function pointers */
++ nvm->ops.reload = e1000_reload_nvm_generic;
++}
++
++/**
++ * e1000_raise_eec_clk - Raise EEPROM clock
++ * @hw: pointer to the HW structure
++ * @eecd: pointer to the EEPROM
++ *
++ * Enable/Raise the EEPROM clock bit.
++ **/
++static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
++{
++ *eecd = *eecd | E1000_EECD_SK;
++ E1000_WRITE_REG(hw, E1000_EECD, *eecd);
++ E1000_WRITE_FLUSH(hw);
++ usec_delay(hw->nvm.delay_usec);
++}
++
++/**
++ * e1000_lower_eec_clk - Lower EEPROM clock
++ * @hw: pointer to the HW structure
++ * @eecd: pointer to the EEPROM
++ *
++ * Clear/Lower the EEPROM clock bit.
++ **/
++static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
++{
++ *eecd = *eecd & ~E1000_EECD_SK;
++ E1000_WRITE_REG(hw, E1000_EECD, *eecd);
++ E1000_WRITE_FLUSH(hw);
++ usec_delay(hw->nvm.delay_usec);
++}
++
++/**
++ * e1000_shift_out_eec_bits - Shift data bits out to the EEPROM
++ * @hw: pointer to the HW structure
++ * @data: data to send to the EEPROM
++ * @count: number of bits to shift out
++ *
++ * We need to shift 'count' bits out to the EEPROM. So, the value in the
++ * "data" parameter will be shifted out to the EEPROM one bit at a time.
++ * In order to do this, "data" must be broken down into bits.
++ **/
++static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
++{
++ struct e1000_nvm_info *nvm = &hw->nvm;
++ u32 eecd = E1000_READ_REG(hw, E1000_EECD);
++ u32 mask;
++
++ DEBUGFUNC("e1000_shift_out_eec_bits");
++
++ mask = 0x01 << (count - 1);
++ if (nvm->type == e1000_nvm_eeprom_spi)
++ eecd |= E1000_EECD_DO;
++
++ do {
++ eecd &= ~E1000_EECD_DI;
++
++ if (data & mask)
++ eecd |= E1000_EECD_DI;
++
++ E1000_WRITE_REG(hw, E1000_EECD, eecd);
++ E1000_WRITE_FLUSH(hw);
++
++ usec_delay(nvm->delay_usec);
++
++ e1000_raise_eec_clk(hw, &eecd);
++ e1000_lower_eec_clk(hw, &eecd);
++
++ mask >>= 1;
++ } while (mask);
++
++ eecd &= ~E1000_EECD_DI;
++ E1000_WRITE_REG(hw, E1000_EECD, eecd);
++}
++
++/**
++ * e1000_shift_in_eec_bits - Shift data bits in from the EEPROM
++ * @hw: pointer to the HW structure
++ * @count: number of bits to shift in
++ *
++ * In order to read a register from the EEPROM, we need to shift 'count' bits
++ * in from the EEPROM. Bits are "shifted in" by raising the clock input to
++ * the EEPROM (setting the SK bit), and then reading the value of the data out
++ * "DO" bit. During this "shifting in" process the data in "DI" bit should
++ * always be clear.
++ **/
++static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
++{
++ u32 eecd;
++ u32 i;
++ u16 data;
++
++ DEBUGFUNC("e1000_shift_in_eec_bits");
++
++ eecd = E1000_READ_REG(hw, E1000_EECD);
++
++ eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
++ data = 0;
++
++ for (i = 0; i < count; i++) {
++ data <<= 1;
++ e1000_raise_eec_clk(hw, &eecd);
++
++ eecd = E1000_READ_REG(hw, E1000_EECD);
++
++ eecd &= ~E1000_EECD_DI;
++ if (eecd & E1000_EECD_DO)
++ data |= 1;
++
++ e1000_lower_eec_clk(hw, &eecd);
++ }
++
++ return data;
++}
++
++/**
++ * e1000_poll_eerd_eewr_done - Poll for EEPROM read/write completion
++ * @hw: pointer to the HW structure
++ * @ee_reg: EEPROM flag for polling
++ *
++ * Polls the EEPROM status bit for either read or write completion based
++ * upon the value of 'ee_reg'.
++ **/
++s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
++{
++ u32 attempts = 100000;
++ u32 i, reg = 0;
++ s32 ret_val = -E1000_ERR_NVM;
++
++ DEBUGFUNC("e1000_poll_eerd_eewr_done");
++
++ for (i = 0; i < attempts; i++) {
++ if (ee_reg == E1000_NVM_POLL_READ)
++ reg = E1000_READ_REG(hw, E1000_EERD);
++ else
++ reg = E1000_READ_REG(hw, E1000_EEWR);
++
++ if (reg & E1000_NVM_RW_REG_DONE) {
++ ret_val = E1000_SUCCESS;
++ break;
++ }
++
++ usec_delay(5);
++ }
++
++ return ret_val;
++}
++
++/**
++ * e1000_acquire_nvm_generic - Generic request for access to EEPROM
++ * @hw: pointer to the HW structure
++ *
++ * Set the EEPROM access request bit and wait for EEPROM access grant bit.
++ * Return successful if access grant bit set, else clear the request for
++ * EEPROM access and return -E1000_ERR_NVM (-1).
++ **/
++s32 e1000_acquire_nvm_generic(struct e1000_hw *hw)
++{
++ u32 eecd = E1000_READ_REG(hw, E1000_EECD);
++ s32 timeout = E1000_NVM_GRANT_ATTEMPTS;
++ s32 ret_val = E1000_SUCCESS;
++
++ DEBUGFUNC("e1000_acquire_nvm_generic");
++
++ E1000_WRITE_REG(hw, E1000_EECD, eecd | E1000_EECD_REQ);
++ eecd = E1000_READ_REG(hw, E1000_EECD);
++
++ while (timeout) {
++ if (eecd & E1000_EECD_GNT)
++ break;
++ usec_delay(5);
++ eecd = E1000_READ_REG(hw, E1000_EECD);
++ timeout--;
++ }
++
++ if (!timeout) {
++ eecd &= ~E1000_EECD_REQ;
++ E1000_WRITE_REG(hw, E1000_EECD, eecd);
++ DEBUGOUT("Could not acquire NVM grant\n");
++ ret_val = -E1000_ERR_NVM;
++ }
++
++ return ret_val;
++}
++
++/**
++ * e1000_standby_nvm - Return EEPROM to standby state
++ * @hw: pointer to the HW structure
++ *
++ * Return the EEPROM to a standby state.
++ **/
++static void e1000_standby_nvm(struct e1000_hw *hw)
++{
++ struct e1000_nvm_info *nvm = &hw->nvm;
++ u32 eecd = E1000_READ_REG(hw, E1000_EECD);
++
++ DEBUGFUNC("e1000_standby_nvm");
++
++ if (nvm->type == e1000_nvm_eeprom_spi) {
++ /* Toggle CS to flush commands */
++ eecd |= E1000_EECD_CS;
++ E1000_WRITE_REG(hw, E1000_EECD, eecd);
++ E1000_WRITE_FLUSH(hw);
++ usec_delay(nvm->delay_usec);
++ eecd &= ~E1000_EECD_CS;
++ E1000_WRITE_REG(hw, E1000_EECD, eecd);
++ E1000_WRITE_FLUSH(hw);
++ usec_delay(nvm->delay_usec);
++ }
++}
++
++/**
++ * e1000_stop_nvm - Terminate EEPROM command
++ * @hw: pointer to the HW structure
++ *
++ * Terminates the current command by inverting the EEPROM's chip select pin.
++ **/
++static void e1000_stop_nvm(struct e1000_hw *hw)
++{
++ u32 eecd;
++
++ DEBUGFUNC("e1000_stop_nvm");
++
++ eecd = E1000_READ_REG(hw, E1000_EECD);
++ if (hw->nvm.type == e1000_nvm_eeprom_spi) {
++ /* Pull CS high */
++ eecd |= E1000_EECD_CS;
++ e1000_lower_eec_clk(hw, &eecd);
++ }
++}
++
++/**
++ * e1000_release_nvm_generic - Release exclusive access to EEPROM
++ * @hw: pointer to the HW structure
++ *
++ * Stop any current commands to the EEPROM and clear the EEPROM request bit.
++ **/
++void e1000_release_nvm_generic(struct e1000_hw *hw)
++{
++ u32 eecd;
++
++ DEBUGFUNC("e1000_release_nvm_generic");
++
++ e1000_stop_nvm(hw);
++
++ eecd = E1000_READ_REG(hw, E1000_EECD);
++ eecd &= ~E1000_EECD_REQ;
++ E1000_WRITE_REG(hw, E1000_EECD, eecd);
++}
++
++/**
++ * e1000_ready_nvm_eeprom - Prepares EEPROM for read/write
++ * @hw: pointer to the HW structure
++ *
++ * Sets up the EEPROM for reading and writing.
++ **/
++static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
++{
++ struct e1000_nvm_info *nvm = &hw->nvm;
++ u32 eecd = E1000_READ_REG(hw, E1000_EECD);
++ s32 ret_val = E1000_SUCCESS;
++ u16 timeout = 0;
++ u8 spi_stat_reg;
++
++ DEBUGFUNC("e1000_ready_nvm_eeprom");
++
++ if (nvm->type == e1000_nvm_eeprom_spi) {
++ /* Clear SK and CS */
++ eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
++ E1000_WRITE_REG(hw, E1000_EECD, eecd);
++ usec_delay(1);
++ timeout = NVM_MAX_RETRY_SPI;
++
++ /*
++ * Read "Status Register" repeatedly until the LSB is cleared.
++ * The EEPROM will signal that the command has been completed
++ * by clearing bit 0 of the internal status register. If it's
++ * not cleared within 'timeout', then error out.
++ */
++ while (timeout) {
++ e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
++ hw->nvm.opcode_bits);
++ spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8);
++ if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
++ break;
++
++ usec_delay(5);
++ e1000_standby_nvm(hw);
++ timeout--;
++ }
++
++ if (!timeout) {
++ DEBUGOUT("SPI NVM Status error\n");
++ ret_val = -E1000_ERR_NVM;
++ goto out;
++ }
++ }
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_read_nvm_eerd - Reads EEPROM using EERD register
++ * @hw: pointer to the HW structure
++ * @offset: offset of word in the EEPROM to read
++ * @words: number of words to read
++ * @data: word read from the EEPROM
++ *
++ * Reads the requested 16-bit word(s) from the EEPROM using the EERD register.
++ **/
++s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
++{
++ struct e1000_nvm_info *nvm = &hw->nvm;
++ u32 i, eerd = 0;
++ s32 ret_val = E1000_SUCCESS;
++
++ DEBUGFUNC("e1000_read_nvm_eerd");
++
++ /*
++ * A check for invalid values: offset too large, too many words,
++ * too many words for the offset, and not enough words.
++ */
++ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
++ (words == 0)) {
++ DEBUGOUT("nvm parameter(s) out of bounds\n");
++ ret_val = -E1000_ERR_NVM;
++ goto out;
++ }
++
++ for (i = 0; i < words; i++) {
++ eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) +
++ E1000_NVM_RW_REG_START;
++
++ E1000_WRITE_REG(hw, E1000_EERD, eerd);
++ ret_val = e1000_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
++ if (ret_val)
++ break;
++
++ data[i] = (E1000_READ_REG(hw, E1000_EERD) >>
++ E1000_NVM_RW_REG_DATA);
++ }
++
++out:
++ return ret_val;
++}
++
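++/*
++ * Illustrative call (not part of the driver): reading a single EEPROM word,
++ * for example word 3, through the routine above looks like
++ *
++ *     u16 word;
++ *     s32 err = e1000_read_nvm_eerd(hw, 3, 1, &word);
++ */
++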
++/**
++ * e1000_write_nvm_spi - Write to EEPROM using SPI
++ * @hw: pointer to the HW structure
++ * @offset: offset within the EEPROM to be written to
++ * @words: number of words to write
++ * @data: 16 bit word(s) to be written to the EEPROM
++ *
++ * Writes data to EEPROM at offset using SPI interface.
++ *
++ * If e1000_update_nvm_checksum is not called after this function, the
++ * EEPROM will most likely contain an invalid checksum.
++ **/
++s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
++{
++ struct e1000_nvm_info *nvm = &hw->nvm;
++ s32 ret_val;
++ u16 widx = 0;
++
++ DEBUGFUNC("e1000_write_nvm_spi");
++
++ /*
++ * A check for invalid values: offset too large, too many words,
++ * and not enough words.
++ */
++ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
++ (words == 0)) {
++ DEBUGOUT("nvm parameter(s) out of bounds\n");
++ ret_val = -E1000_ERR_NVM;
++ goto out;
++ }
++
++ ret_val = nvm->ops.acquire(hw);
++ if (ret_val)
++ goto out;
++
++ while (widx < words) {
++ u8 write_opcode = NVM_WRITE_OPCODE_SPI;
++
++ ret_val = e1000_ready_nvm_eeprom(hw);
++ if (ret_val)
++ goto release;
++
++ e1000_standby_nvm(hw);
++
++ /* Send the WRITE ENABLE command (8 bit opcode) */
++ e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
++ nvm->opcode_bits);
++
++ e1000_standby_nvm(hw);
++
++ /*
++ * Some SPI eeproms use the 8th address bit embedded in the
++ * opcode
++ */
++ if ((nvm->address_bits == 8) && (offset >= 128))
++ write_opcode |= NVM_A8_OPCODE_SPI;
++
++ /* Send the Write command (8-bit opcode + addr) */
++ e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
++ e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
++ nvm->address_bits);
++
++ /* Loop to allow for up to whole page write of eeprom */
++ while (widx < words) {
++ u16 word_out = data[widx];
++ word_out = (word_out >> 8) | (word_out << 8);
++ e1000_shift_out_eec_bits(hw, word_out, 16);
++ widx++;
++
++ if ((((offset + widx) * 2) % nvm->page_size) == 0) {
++ e1000_standby_nvm(hw);
++ break;
++ }
++ }
++ }
++
++ msec_delay(10);
++release:
++ nvm->ops.release(hw);
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_read_pba_num_generic - Read device part number
++ * @hw: pointer to the HW structure
++ * @pba_num: pointer to device part number
++ *
++ * Reads the product board assembly (PBA) number from the EEPROM and stores
++ * the value in pba_num.
++ **/
++s32 e1000_read_pba_num_generic(struct e1000_hw *hw, u32 *pba_num)
++{
++ s32 ret_val;
++ u16 nvm_data;
++
++ DEBUGFUNC("e1000_read_pba_num_generic");
++
++ ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
++ if (ret_val) {
++ DEBUGOUT("NVM Read Error\n");
++ goto out;
++ }
++ *pba_num = (u32)(nvm_data << 16);
++
++ ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &nvm_data);
++ if (ret_val) {
++ DEBUGOUT("NVM Read Error\n");
++ goto out;
++ }
++ *pba_num |= nvm_data;
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_read_mac_addr_generic - Read device MAC address
++ * @hw: pointer to the HW structure
++ *
++ * Reads the device MAC address from the EEPROM and stores the value.
++ * Since devices with two ports use the same EEPROM, we increment the
++ * last bit in the MAC address for the second port.
++ **/
++s32 e1000_read_mac_addr_generic(struct e1000_hw *hw)
++{
++ u32 rar_high;
++ u32 rar_low;
++ u16 i;
++
++ rar_high = E1000_READ_REG(hw, E1000_RAH(0));
++ rar_low = E1000_READ_REG(hw, E1000_RAL(0));
++
++ for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++)
++ hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8));
++
++ for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++)
++ hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8));
++
++ for (i = 0; i < ETH_ADDR_LEN; i++)
++ hw->mac.addr[i] = hw->mac.perm_addr[i];
++
++ return E1000_SUCCESS;
++}
++
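++/*
++ * Sketch of the register packing the loops above assume (illustrative, with
++ * a made-up address): RAL(0) carries MAC bytes 0-3 with byte 0 in bits 7:0,
++ * and the low 16 bits of RAH(0) carry bytes 4-5, so for aa:bb:cc:dd:ee:ff
++ *
++ *     rar_low  == 0xddccbbaa
++ *     rar_high == 0x....ffee   (the upper RAH bits hold control flags)
++ */
++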
++/**
++ * e1000_validate_nvm_checksum_generic - Validate EEPROM checksum
++ * @hw: pointer to the HW structure
++ *
++ * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
++ * and then verifies that the sum of the EEPROM is equal to 0xBABA.
++ **/
++s32 e1000_validate_nvm_checksum_generic(struct e1000_hw *hw)
++{
++ s32 ret_val = E1000_SUCCESS;
++ u16 checksum = 0;
++ u16 i, nvm_data;
++
++ DEBUGFUNC("e1000_validate_nvm_checksum_generic");
++
++ for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
++ ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
++ if (ret_val) {
++ DEBUGOUT("NVM Read Error\n");
++ goto out;
++ }
++ checksum += nvm_data;
++ }
++
++ if (checksum != (u16) NVM_SUM) {
++ DEBUGOUT("NVM Checksum Invalid\n");
++ ret_val = -E1000_ERR_NVM;
++ goto out;
++ }
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_update_nvm_checksum_generic - Update EEPROM checksum
++ * @hw: pointer to the HW structure
++ *
++ * Updates the EEPROM checksum by reading/adding each word of the EEPROM
++ * up to the checksum. Then calculates the EEPROM checksum and writes the
++ * value to the EEPROM.
++ **/
++s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw)
++{
++ s32 ret_val;
++ u16 checksum = 0;
++ u16 i, nvm_data;
++
++ DEBUGFUNC("e1000_update_nvm_checksum");
++
++ for (i = 0; i < NVM_CHECKSUM_REG; i++) {
++ ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
++ if (ret_val) {
++ DEBUGOUT("NVM Read Error while updating checksum.\n");
++ goto out;
++ }
++ checksum += nvm_data;
++ }
++ checksum = (u16) NVM_SUM - checksum;
++ ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum);
++ if (ret_val)
++ DEBUGOUT("NVM Write Error while updating checksum.\n");
++
++out:
++ return ret_val;
++}
++
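++/*
++ * Worked example of the checksum rule used above (illustrative numbers): if
++ * the words at offsets 0 through NVM_CHECKSUM_REG - 1 sum to 0x1234, the
++ * checksum word written is
++ *
++ *     (u16)NVM_SUM - 0x1234 = 0xBABA - 0x1234 = 0xA886
++ *
++ * so that the 16-bit sum over offsets 0 through NVM_CHECKSUM_REG comes back
++ * to 0xBABA, which is what e1000_validate_nvm_checksum_generic() verifies.
++ */
++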
++/**
++ * e1000_reload_nvm_generic - Reloads EEPROM
++ * @hw: pointer to the HW structure
++ *
++ * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
++ * extended control register.
++ **/
++static void e1000_reload_nvm_generic(struct e1000_hw *hw)
++{
++ u32 ctrl_ext;
++
++ DEBUGFUNC("e1000_reload_nvm_generic");
++
++ usec_delay(10);
++ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
++ ctrl_ext |= E1000_CTRL_EXT_EE_RST;
++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
++ E1000_WRITE_FLUSH(hw);
++}
++
+Index: linux-2.6.22/drivers/net/igb/e1000_nvm.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.22/drivers/net/igb/e1000_nvm.h 2009-12-18 12:39:22.000000000 -0500
+@@ -0,0 +1,50 @@
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2009 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ You should have received a copy of the GNU General Public License along with
++ this program; if not, write to the Free Software Foundation, Inc.,
++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
++
++#ifndef _E1000_NVM_H_
++#define _E1000_NVM_H_
++
++void e1000_init_nvm_ops_generic(struct e1000_hw *hw);
++s32 e1000_acquire_nvm_generic(struct e1000_hw *hw);
++
++s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg);
++s32 e1000_read_mac_addr_generic(struct e1000_hw *hw);
++s32 e1000_read_pba_num_generic(struct e1000_hw *hw, u32 *pba_num);
++s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words,
++ u16 *data);
++s32 e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data);
++s32 e1000_validate_nvm_checksum_generic(struct e1000_hw *hw);
++s32 e1000_write_nvm_eewr(struct e1000_hw *hw, u16 offset,
++ u16 words, u16 *data);
++s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words,
++ u16 *data);
++s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw);
++void e1000_release_nvm_generic(struct e1000_hw *hw);
++
++#define E1000_STM_OPCODE 0xDB00
++
++#endif
+Index: linux-2.6.22/drivers/net/igb/e1000_osdep.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.22/drivers/net/igb/e1000_osdep.h 2009-12-18 12:39:22.000000000 -0500
+@@ -0,0 +1,122 @@
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2009 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ You should have received a copy of the GNU General Public License along with
++ this program; if not, write to the Free Software Foundation, Inc.,
++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
++
++
++/* glue for the OS independent part of e1000
++ * includes register access macros
++ */
++
++#ifndef _E1000_OSDEP_H_
++#define _E1000_OSDEP_H_
++
++#include <linux/pci.h>
++#include <linux/delay.h>
++#include <linux/interrupt.h>
++#include <linux/if_ether.h>
++#include <linux/sched.h>
++#include "kcompat.h"
++
++#define usec_delay(x) udelay(x)
++#ifndef msec_delay
++#define msec_delay(x) do { \
++ /* Don't mdelay in interrupt context! */ \
++ if (in_interrupt()) \
++ BUG(); \
++ else \
++ msleep(x); \
++} while (0)
++
++/* Some workarounds require millisecond delays and are run during interrupt
++ * context. Most notably, when establishing link, the phy may need tweaking
++ * but cannot process phy register reads/writes faster than millisecond
++ * intervals...and we establish link due to a "link status change" interrupt.
++ */
++#define msec_delay_irq(x) mdelay(x)
++#endif
++
++#define PCI_COMMAND_REGISTER PCI_COMMAND
++#define CMD_MEM_WRT_INVALIDATE PCI_COMMAND_INVALIDATE
++#define ETH_ADDR_LEN ETH_ALEN
++
++#ifdef __BIG_ENDIAN
++#define E1000_BIG_ENDIAN __BIG_ENDIAN
++#endif
++
++
++#define DEBUGOUT(S)
++#define DEBUGOUT1(S, A...)
++
++#define DEBUGFUNC(F) DEBUGOUT(F "\n")
++#define DEBUGOUT2 DEBUGOUT1
++#define DEBUGOUT3 DEBUGOUT2
++#define DEBUGOUT7 DEBUGOUT3
++
++#define E1000_REGISTER(a, reg) reg
++
++#define E1000_WRITE_REG(a, reg, value) ( \
++ writel((value), ((a)->hw_addr + E1000_REGISTER(a, reg))))
++
++#define E1000_READ_REG(a, reg) (readl((a)->hw_addr + E1000_REGISTER(a, reg)))
++
++#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) ( \
++ writel((value), ((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 2))))
++
++#define E1000_READ_REG_ARRAY(a, reg, offset) ( \
++ readl((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 2)))
++
++#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY
++#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY
++
++#define E1000_WRITE_REG_ARRAY_WORD(a, reg, offset, value) ( \
++ writew((value), ((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 1))))
++
++#define E1000_READ_REG_ARRAY_WORD(a, reg, offset) ( \
++ readw((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 1)))
++
++#define E1000_WRITE_REG_ARRAY_BYTE(a, reg, offset, value) ( \
++ writeb((value), ((a)->hw_addr + E1000_REGISTER(a, reg) + (offset))))
++
++#define E1000_READ_REG_ARRAY_BYTE(a, reg, offset) ( \
++ readb((a)->hw_addr + E1000_REGISTER(a, reg) + (offset)))
++
++#define E1000_WRITE_REG_IO(a, reg, offset) do { \
++ outl(reg, ((a)->io_base)); \
++ outl(offset, ((a)->io_base + 4)); } while (0)
++
++#define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, E1000_STATUS)
++
++#define E1000_WRITE_FLASH_REG(a, reg, value) ( \
++ writel((value), ((a)->flash_address + reg)))
++
++#define E1000_WRITE_FLASH_REG16(a, reg, value) ( \
++ writew((value), ((a)->flash_address + reg)))
++
++#define E1000_READ_FLASH_REG(a, reg) (readl((a)->flash_address + reg))
++
++#define E1000_READ_FLASH_REG16(a, reg) (readw((a)->flash_address + reg))
++
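++/*
++ * Illustrative expansion (not part of the driver): with the macros above, a
++ * call such as E1000_WRITE_REG_ARRAY(hw, reg, 3, val) reduces to
++ *
++ *     writel(val, hw->hw_addr + reg + (3 << 2));
++ *
++ * i.e. the "offset" argument indexes 32-bit words past the base register.
++ */
++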
++#endif /* _E1000_OSDEP_H_ */
+Index: linux-2.6.22/drivers/net/igb/e1000_phy.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.22/drivers/net/igb/e1000_phy.c 2009-12-18 12:39:22.000000000 -0500
+@@ -0,0 +1,2445 @@
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2009 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ You should have received a copy of the GNU General Public License along with
++ this program; if not, write to the Free Software Foundation, Inc.,
++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
++
++#include "e1000_api.h"
++
++static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw);
++/* Cable length tables */
++static const u16 e1000_m88_cable_length_table[] =
++ { 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
++#define M88E1000_CABLE_LENGTH_TABLE_SIZE \
++ (sizeof(e1000_m88_cable_length_table) / \
++ sizeof(e1000_m88_cable_length_table[0]))
++
++static const u16 e1000_igp_2_cable_length_table[] =
++ { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21,
++ 0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41,
++ 6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61,
++ 21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82,
++ 40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104,
++ 60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121,
++ 83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124,
++ 104, 109, 114, 118, 121, 124};
++#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
++ (sizeof(e1000_igp_2_cable_length_table) / \
++ sizeof(e1000_igp_2_cable_length_table[0]))
++
++/**
++ * e1000_check_reset_block_generic - Check if PHY reset is blocked
++ * @hw: pointer to the HW structure
++ *
++ * Read the PHY management control register and check whether a PHY reset
++ * is blocked. If a reset is not blocked return E1000_SUCCESS, otherwise
++ * return E1000_BLK_PHY_RESET (12).
++ **/
++s32 e1000_check_reset_block_generic(struct e1000_hw *hw)
++{
++ u32 manc;
++
++ DEBUGFUNC("e1000_check_reset_block");
++
++ manc = E1000_READ_REG(hw, E1000_MANC);
++
++ return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
++ E1000_BLK_PHY_RESET : E1000_SUCCESS;
++}
++
++/**
++ * e1000_get_phy_id - Retrieve the PHY ID and revision
++ * @hw: pointer to the HW structure
++ *
++ * Reads the PHY registers and stores the PHY ID and possibly the PHY
++ * revision in the hardware structure.
++ **/
++s32 e1000_get_phy_id(struct e1000_hw *hw)
++{
++ struct e1000_phy_info *phy = &hw->phy;
++ s32 ret_val = E1000_SUCCESS;
++ u16 phy_id;
++
++ DEBUGFUNC("e1000_get_phy_id");
++
++ if (!(phy->ops.read_reg))
++ goto out;
++
++ ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id);
++ if (ret_val)
++ goto out;
++
++ phy->id = (u32)(phy_id << 16);
++ usec_delay(20);
++ ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id);
++ if (ret_val)
++ goto out;
++
++ phy->id |= (u32)(phy_id & PHY_REVISION_MASK);
++ phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
++
++out:
++ return ret_val;
++}
++
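++/*
++ * Sketch of the ID assembly above with hypothetical register values
++ * (illustrative only): PHY_ID1 supplies the upper 16 bits and PHY_ID2 the
++ * lower 16 bits, with the low nibble split off as the revision.  For
++ * example, PHY_ID1 == 0x0141 and PHY_ID2 == 0x0CC2 would yield
++ *
++ *     phy->id       == 0x01410CC0
++ *     phy->revision == 0x2
++ */
++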
++/**
++ * e1000_phy_reset_dsp_generic - Reset PHY DSP
++ * @hw: pointer to the HW structure
++ *
++ * Reset the digital signal processor.
++ **/
++s32 e1000_phy_reset_dsp_generic(struct e1000_hw *hw)
++{
++ s32 ret_val = E1000_SUCCESS;
++
++ DEBUGFUNC("e1000_phy_reset_dsp_generic");
++
++ if (!(hw->phy.ops.write_reg))
++ goto out;
++
++ ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1);
++ if (ret_val)
++ goto out;
++
++ ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0);
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_read_phy_reg_mdic - Read MDI control register
++ * @hw: pointer to the HW structure
++ * @offset: register offset to be read
++ * @data: pointer to the read data
++ *
++ * Reads the MDI control register in the PHY at offset and stores the
++ * information read to data.
++ **/
++s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
++{
++ struct e1000_phy_info *phy = &hw->phy;
++ u32 i, mdic = 0;
++ s32 ret_val = E1000_SUCCESS;
++
++ DEBUGFUNC("e1000_read_phy_reg_mdic");
++
++ /*
++ * Set up Op-code, Phy Address, and register offset in the MDI
++ * Control register. The MAC will take care of interfacing with the
++ * PHY to retrieve the desired data.
++ */
++ mdic = ((offset << E1000_MDIC_REG_SHIFT) |
++ (phy->addr << E1000_MDIC_PHY_SHIFT) |
++ (E1000_MDIC_OP_READ));
++
++ E1000_WRITE_REG(hw, E1000_MDIC, mdic);
++
++ /*
++ * Poll the ready bit to see if the MDI read completed.  The
++ * timeout was increased because testing showed failures with
++ * the lower timeout.
++ */
++ for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
++ usec_delay(50);
++ mdic = E1000_READ_REG(hw, E1000_MDIC);
++ if (mdic & E1000_MDIC_READY)
++ break;
++ }
++ if (!(mdic & E1000_MDIC_READY)) {
++ DEBUGOUT("MDI Read did not complete\n");
++ ret_val = -E1000_ERR_PHY;
++ goto out;
++ }
++ if (mdic & E1000_MDIC_ERROR) {
++ DEBUGOUT("MDI Error\n");
++ ret_val = -E1000_ERR_PHY;
++ goto out;
++ }
++ *data = (u16) mdic;
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_write_phy_reg_mdic - Write MDI control register
++ * @hw: pointer to the HW structure
++ * @offset: register offset to write to
++ * @data: data to write to register at offset
++ *
++ * Writes data to MDI control register in the PHY at offset.
++ **/
++s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
++{
++ struct e1000_phy_info *phy = &hw->phy;
++ u32 i, mdic = 0;
++ s32 ret_val = E1000_SUCCESS;
++
++ DEBUGFUNC("e1000_write_phy_reg_mdic");
++
++ /*
++ * Set up Op-code, Phy Address, and register offset in the MDI
++ * Control register. The MAC will take care of interfacing with the
++ * PHY to write the desired data.
++ */
++ mdic = (((u32)data) |
++ (offset << E1000_MDIC_REG_SHIFT) |
++ (phy->addr << E1000_MDIC_PHY_SHIFT) |
++ (E1000_MDIC_OP_WRITE));
++
++ E1000_WRITE_REG(hw, E1000_MDIC, mdic);
++
++ /*
++ * Poll the ready bit to see if the MDI write completed.  The
++ * timeout was increased because testing showed failures with
++ * the lower timeout.
++ */
++ for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
++ usec_delay(50);
++ mdic = E1000_READ_REG(hw, E1000_MDIC);
++ if (mdic & E1000_MDIC_READY)
++ break;
++ }
++ if (!(mdic & E1000_MDIC_READY)) {
++ DEBUGOUT("MDI Write did not complete\n");
++ ret_val = -E1000_ERR_PHY;
++ goto out;
++ }
++ if (mdic & E1000_MDIC_ERROR) {
++ DEBUGOUT("MDI Error\n");
++ ret_val = -E1000_ERR_PHY;
++ goto out;
++ }
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_read_phy_reg_i2c - Read PHY register using i2c
++ * @hw: pointer to the HW structure
++ * @offset: register offset to be read
++ * @data: pointer to the read data
++ *
++ * Reads the PHY register at offset using the i2c interface and stores the
++ * retrieved information in data.
++ **/
++s32 e1000_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data)
++{
++ struct e1000_phy_info *phy = &hw->phy;
++ u32 i, i2ccmd = 0;
++
++ DEBUGFUNC("e1000_read_phy_reg_i2c");
++
++ /*
++ * Set up Op-code, Phy Address, and register address in the I2CCMD
++ * register. The MAC will take care of interfacing with the
++ * PHY to retrieve the desired data.
++ */
++ i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
++ (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
++ (E1000_I2CCMD_OPCODE_READ));
++
++ E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
++
++ /* Poll the ready bit to see if the I2C read completed */
++ for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
++ usec_delay(50);
++ i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD);
++ if (i2ccmd & E1000_I2CCMD_READY)
++ break;
++ }
++ if (!(i2ccmd & E1000_I2CCMD_READY)) {
++ DEBUGOUT("I2CCMD Read did not complete\n");
++ return -E1000_ERR_PHY;
++ }
++ if (i2ccmd & E1000_I2CCMD_ERROR) {
++ DEBUGOUT("I2CCMD Error bit set\n");
++ return -E1000_ERR_PHY;
++ }
++
++ /* Need to byte-swap the 16-bit value. */
++ *data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_write_phy_reg_i2c - Write PHY register using i2c
++ * @hw: pointer to the HW structure
++ * @offset: register offset to write to
++ * @data: data to write at register offset
++ *
++ * Writes the data to PHY register at the offset using the i2c interface.
++ **/
++s32 e1000_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data)
++{
++ struct e1000_phy_info *phy = &hw->phy;
++ u32 i, i2ccmd = 0;
++ u16 phy_data_swapped;
++
++ DEBUGFUNC("e1000_write_phy_reg_i2c");
++
++ /* Swap the data bytes for the I2C interface */
++ phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00);
++
++ /*
++ * Set up Op-code, Phy Address, and register address in the I2CCMD
++ * register. The MAC will take care of interfacing with the
++ * PHY to write the desired data.
++ */
++ i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
++ (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
++ E1000_I2CCMD_OPCODE_WRITE |
++ phy_data_swapped);
++
++ E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
++
++ /* Poll the ready bit to see if the I2C write completed */
++ for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
++ usec_delay(50);
++ i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD);
++ if (i2ccmd & E1000_I2CCMD_READY)
++ break;
++ }
++ if (!(i2ccmd & E1000_I2CCMD_READY)) {
++ DEBUGOUT("I2CCMD Write did not complete\n");
++ return -E1000_ERR_PHY;
++ }
++ if (i2ccmd & E1000_I2CCMD_ERROR) {
++ DEBUGOUT("I2CCMD Error bit set\n");
++ return -E1000_ERR_PHY;
++ }
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_read_phy_reg_m88 - Read m88 PHY register
++ * @hw: pointer to the HW structure
++ * @offset: register offset to be read
++ * @data: pointer to the read data
++ *
++ * Acquires semaphore, if necessary, then reads the PHY register at offset
++ * and stores the retrieved information in data. Release any acquired
++ * semaphores before exiting.
++ **/
++s32 e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data)
++{
++ s32 ret_val = E1000_SUCCESS;
++
++ DEBUGFUNC("e1000_read_phy_reg_m88");
++
++ if (!(hw->phy.ops.acquire))
++ goto out;
++
++ ret_val = hw->phy.ops.acquire(hw);
++ if (ret_val)
++ goto out;
++
++ ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
++ data);
++
++ hw->phy.ops.release(hw);
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_write_phy_reg_m88 - Write m88 PHY register
++ * @hw: pointer to the HW structure
++ * @offset: register offset to write to
++ * @data: data to write at register offset
++ *
++ * Acquires semaphore, if necessary, then writes the data to PHY register
++ * at the offset. Release any acquired semaphores before exiting.
++ **/
++s32 e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data)
++{
++ s32 ret_val = E1000_SUCCESS;
++
++ DEBUGFUNC("e1000_write_phy_reg_m88");
++
++ if (!(hw->phy.ops.acquire))
++ goto out;
++
++ ret_val = hw->phy.ops.acquire(hw);
++ if (ret_val)
++ goto out;
++
++ ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
++ data);
++
++ hw->phy.ops.release(hw);
++
++out:
++ return ret_val;
++}
++
++/**
++ * __e1000_read_phy_reg_igp - Read igp PHY register
++ * @hw: pointer to the HW structure
++ * @offset: register offset to be read
++ * @data: pointer to the read data
++ * @locked: semaphore has already been acquired or not
++ *
++ * Acquires semaphore, if necessary, then reads the PHY register at offset
++ * and stores the retrieved information in data. Release any acquired
++ * semaphores before exiting.
++ **/
++static s32 __e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data,
++ bool locked)
++{
++ s32 ret_val = E1000_SUCCESS;
++
++ DEBUGFUNC("__e1000_read_phy_reg_igp");
++
++ if (!locked) {
++ if (!(hw->phy.ops.acquire))
++ goto out;
++
++ ret_val = hw->phy.ops.acquire(hw);
++ if (ret_val)
++ goto out;
++ }
++
++ if (offset > MAX_PHY_MULTI_PAGE_REG) {
++ ret_val = e1000_write_phy_reg_mdic(hw,
++ IGP01E1000_PHY_PAGE_SELECT,
++ (u16)offset);
++ if (ret_val)
++ goto release;
++ }
++
++ ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
++ data);
++
++release:
++ if (!locked)
++ hw->phy.ops.release(hw);
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_read_phy_reg_igp - Read igp PHY register
++ * @hw: pointer to the HW structure
++ * @offset: register offset to be read
++ * @data: pointer to the read data
++ *
++ * Acquires semaphore then reads the PHY register at offset and stores the
++ * retrieved information in data.
++ * Release the acquired semaphore before exiting.
++ **/
++s32 e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
++{
++ return __e1000_read_phy_reg_igp(hw, offset, data, false);
++}
++
++/**
++ * e1000_read_phy_reg_igp_locked - Read igp PHY register
++ * @hw: pointer to the HW structure
++ * @offset: register offset to be read
++ * @data: pointer to the read data
++ *
++ * Reads the PHY register at offset and stores the retrieved information
++ * in data. Assumes semaphore already acquired.
++ **/
++s32 e1000_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data)
++{
++ return __e1000_read_phy_reg_igp(hw, offset, data, true);
++}
++
++/**
++ * __e1000_write_phy_reg_igp - Write igp PHY register
++ * @hw: pointer to the HW structure
++ * @offset: register offset to write to
++ * @data: data to write at register offset
++ * @locked: semaphore has already been acquired or not
++ *
++ * Acquires semaphore, if necessary, then writes the data to PHY register
++ * at the offset. Release any acquired semaphores before exiting.
++ **/
++static s32 __e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data,
++ bool locked)
++{
++ s32 ret_val = E1000_SUCCESS;
++
++ DEBUGFUNC("e1000_write_phy_reg_igp");
++
++ if (!locked) {
++ if (!(hw->phy.ops.acquire))
++ goto out;
++
++ ret_val = hw->phy.ops.acquire(hw);
++ if (ret_val)
++ goto out;
++ }
++
++ if (offset > MAX_PHY_MULTI_PAGE_REG) {
++ ret_val = e1000_write_phy_reg_mdic(hw,
++ IGP01E1000_PHY_PAGE_SELECT,
++ (u16)offset);
++ if (ret_val)
++ goto release;
++ }
++
++ ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
++ data);
++
++release:
++ if (!locked)
++ hw->phy.ops.release(hw);
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_write_phy_reg_igp - Write igp PHY register
++ * @hw: pointer to the HW structure
++ * @offset: register offset to write to
++ * @data: data to write at register offset
++ *
++ * Acquires semaphore then writes the data to PHY register
++ * at the offset. Release any acquired semaphores before exiting.
++ **/
++s32 e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
++{
++ return __e1000_write_phy_reg_igp(hw, offset, data, false);
++}
++
++/**
++ * e1000_write_phy_reg_igp_locked - Write igp PHY register
++ * @hw: pointer to the HW structure
++ * @offset: register offset to write to
++ * @data: data to write at register offset
++ *
++ * Writes the data to PHY register at the offset.
++ * Assumes semaphore already acquired.
++ **/
++s32 e1000_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data)
++{
++ return __e1000_write_phy_reg_igp(hw, offset, data, true);
++}
++
++/**
++ * __e1000_read_kmrn_reg - Read kumeran register
++ * @hw: pointer to the HW structure
++ * @offset: register offset to be read
++ * @data: pointer to the read data
++ * @locked: semaphore has already been acquired or not
++ *
++ * Acquires semaphore, if necessary. Then reads the PHY register at offset
++ * using the kumeran interface. The information retrieved is stored in data.
++ * Release any acquired semaphores before exiting.
++ **/
++static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data,
++ bool locked)
++{
++ u32 kmrnctrlsta;
++ s32 ret_val = E1000_SUCCESS;
++
++ DEBUGFUNC("__e1000_read_kmrn_reg");
++
++ if (!locked) {
++ if (!(hw->phy.ops.acquire))
++ goto out;
++
++ ret_val = hw->phy.ops.acquire(hw);
++ if (ret_val)
++ goto out;
++ }
++
++ kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
++ E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
++ E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta);
++
++ usec_delay(2);
++
++ kmrnctrlsta = E1000_READ_REG(hw, E1000_KMRNCTRLSTA);
++ *data = (u16)kmrnctrlsta;
++
++ if (!locked)
++ hw->phy.ops.release(hw);
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_read_kmrn_reg_generic - Read kumeran register
++ * @hw: pointer to the HW structure
++ * @offset: register offset to be read
++ * @data: pointer to the read data
++ *
++ * Acquires semaphore then reads the PHY register at offset using the
++ * kumeran interface. The information retrieved is stored in data.
++ * Release the acquired semaphore before exiting.
++ **/
++s32 e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data)
++{
++ return __e1000_read_kmrn_reg(hw, offset, data, false);
++}
++
++/**
++ * e1000_read_kmrn_reg_locked - Read kumeran register
++ * @hw: pointer to the HW structure
++ * @offset: register offset to be read
++ * @data: pointer to the read data
++ *
++ * Reads the PHY register at offset using the kumeran interface. The
++ * information retrieved is stored in data.
++ * Assumes semaphore already acquired.
++ **/
++s32 e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data)
++{
++ return __e1000_read_kmrn_reg(hw, offset, data, true);
++}
++
++/**
++ * __e1000_write_kmrn_reg - Write kumeran register
++ * @hw: pointer to the HW structure
++ * @offset: register offset to write to
++ * @data: data to write at register offset
++ * @locked: semaphore has already been acquired or not
++ *
++ * Acquires semaphore, if necessary. Then write the data to PHY register
++ * at the offset using the kumeran interface. Release any acquired semaphores
++ * before exiting.
++ **/
++static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data,
++ bool locked)
++{
++ u32 kmrnctrlsta;
++ s32 ret_val = E1000_SUCCESS;
++
++ DEBUGFUNC("e1000_write_kmrn_reg_generic");
++
++ if (!locked) {
++ if (!(hw->phy.ops.acquire))
++ goto out;
++
++ ret_val = hw->phy.ops.acquire(hw);
++ if (ret_val)
++ goto out;
++ }
++
++ kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
++ E1000_KMRNCTRLSTA_OFFSET) | data;
++ E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta);
++
++ usec_delay(2);
++
++ if (!locked)
++ hw->phy.ops.release(hw);
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_write_kmrn_reg_generic - Write kumeran register
++ * @hw: pointer to the HW structure
++ * @offset: register offset to write to
++ * @data: data to write at register offset
++ *
++ * Acquires semaphore then writes the data to the PHY register at the offset
++ * using the kumeran interface. Release the acquired semaphore before exiting.
++ **/
++s32 e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data)
++{
++ return __e1000_write_kmrn_reg(hw, offset, data, false);
++}
++
++/**
++ * e1000_write_kmrn_reg_locked - Write kumeran register
++ * @hw: pointer to the HW structure
++ * @offset: register offset to write to
++ * @data: data to write at register offset
++ *
++ * Write the data to PHY register at the offset using the kumeran interface.
++ * Assumes semaphore already acquired.
++ **/
++s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data)
++{
++ return __e1000_write_kmrn_reg(hw, offset, data, true);
++}
++
++/**
++ * e1000_copper_link_setup_m88 - Setup m88 PHYs for copper link
++ * @hw: pointer to the HW structure
++ *
++ * Sets up MDI/MDI-X and polarity for m88 PHYs. If necessary, the transmit
++ * clock and downshift values are also set.
++ **/
++s32 e1000_copper_link_setup_m88(struct e1000_hw *hw)
++{
++ struct e1000_phy_info *phy = &hw->phy;
++ s32 ret_val;
++ u16 phy_data;
++
++ DEBUGFUNC("e1000_copper_link_setup_m88");
++
++ if (phy->reset_disable) {
++ ret_val = E1000_SUCCESS;
++ goto out;
++ }
++
++ /* Enable CRS on TX. This must be set for half-duplex operation. */
++ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
++ if (ret_val)
++ goto out;
++
++ phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
++
++ /*
++ * Options:
++ * MDI/MDI-X = 0 (default)
++ * 0 - Auto for all speeds
++ * 1 - MDI mode
++ * 2 - MDI-X mode
++ * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
++ */
++ phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
++
++ switch (phy->mdix) {
++ case 1:
++ phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
++ break;
++ case 2:
++ phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
++ break;
++ case 3:
++ phy_data |= M88E1000_PSCR_AUTO_X_1000T;
++ break;
++ case 0:
++ default:
++ phy_data |= M88E1000_PSCR_AUTO_X_MODE;
++ break;
++ }
++
++ /*
++ * Options:
++ * disable_polarity_correction = 0 (default)
++ * Automatic Correction for Reversed Cable Polarity
++ * 0 - Disabled
++ * 1 - Enabled
++ */
++ phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
++ if (phy->disable_polarity_correction == 1)
++ phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
++
++ ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
++ if (ret_val)
++ goto out;
++
++ if (phy->revision < E1000_REVISION_4) {
++ /*
++ * Force TX_CLK in the Extended PHY Specific Control Register
++ * to 25MHz clock.
++ */
++ ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
++ &phy_data);
++ if (ret_val)
++ goto out;
++
++ phy_data |= M88E1000_EPSCR_TX_CLK_25;
++
++ if ((phy->revision == E1000_REVISION_2) &&
++ (phy->id == M88E1111_I_PHY_ID)) {
++ /* 82573L PHY - set the downshift counter to 5x. */
++ phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK;
++ phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X;
++ } else {
++ /* Configure Master and Slave downshift values */
++ phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK |
++ M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK);
++ phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
++ M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
++ }
++ ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
++ phy_data);
++ if (ret_val)
++ goto out;
++ }
++
++ /* Commit the changes. */
++ ret_val = phy->ops.commit(hw);
++ if (ret_val) {
++ DEBUGOUT("Error committing the PHY changes\n");
++ goto out;
++ }
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_copper_link_setup_igp - Setup igp PHY's for copper link
++ * @hw: pointer to the HW structure
++ *
++ * Sets up LPLU, MDI/MDI-X, polarity, SmartSpeed and Master/Slave config for
++ * igp PHYs.
++ **/
++s32 e1000_copper_link_setup_igp(struct e1000_hw *hw)
++{
++ struct e1000_phy_info *phy = &hw->phy;
++ s32 ret_val;
++ u16 data;
++
++ DEBUGFUNC("e1000_copper_link_setup_igp");
++
++ if (phy->reset_disable) {
++ ret_val = E1000_SUCCESS;
++ goto out;
++ }
++
++ ret_val = hw->phy.ops.reset(hw);
++ if (ret_val) {
++ DEBUGOUT("Error resetting the PHY.\n");
++ goto out;
++ }
++
++ /*
++ * Wait 100ms for MAC to configure PHY from NVM settings, to avoid
++ * timeout issues when LFS is enabled.
++ */
++ msec_delay(100);
++
++ /*
++ * The NVM settings will configure LPLU in D3 for
++ * non-IGP1 PHYs.
++ */
++ if (phy->type == e1000_phy_igp) {
++ /* disable lplu d3 during driver init */
++ ret_val = hw->phy.ops.set_d3_lplu_state(hw, false);
++ if (ret_val) {
++ DEBUGOUT("Error Disabling LPLU D3\n");
++ goto out;
++ }
++ }
++
++ /* disable lplu d0 during driver init */
++ if (hw->phy.ops.set_d0_lplu_state) {
++ ret_val = hw->phy.ops.set_d0_lplu_state(hw, false);
++ if (ret_val) {
++ DEBUGOUT("Error Disabling LPLU D0\n");
++ goto out;
++ }
++ }
++ /* Configure mdi-mdix settings */
++ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &data);
++ if (ret_val)
++ goto out;
++
++ data &= ~IGP01E1000_PSCR_AUTO_MDIX;
++
++ switch (phy->mdix) {
++ case 1:
++ data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
++ break;
++ case 2:
++ data |= IGP01E1000_PSCR_FORCE_MDI_MDIX;
++ break;
++ case 0:
++ default:
++ data |= IGP01E1000_PSCR_AUTO_MDIX;
++ break;
++ }
++ ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, data);
++ if (ret_val)
++ goto out;
++
++ /* set auto-master slave resolution settings */
++ if (hw->mac.autoneg) {
++ /*
++ * when autonegotiation advertisement is only 1000Mbps then we
++ * should disable SmartSpeed and enable Auto MasterSlave
++ * resolution as hardware default.
++ */
++ if (phy->autoneg_advertised == ADVERTISE_1000_FULL) {
++ /* Disable SmartSpeed */
++ ret_val = phy->ops.read_reg(hw,
++ IGP01E1000_PHY_PORT_CONFIG,
++ &data);
++ if (ret_val)
++ goto out;
++
++ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
++ ret_val = phy->ops.write_reg(hw,
++ IGP01E1000_PHY_PORT_CONFIG,
++ data);
++ if (ret_val)
++ goto out;
++
++ /* Set auto Master/Slave resolution process */
++ ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data);
++ if (ret_val)
++ goto out;
++
++ data &= ~CR_1000T_MS_ENABLE;
++ ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data);
++ if (ret_val)
++ goto out;
++ }
++
++ ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data);
++ if (ret_val)
++ goto out;
++
++ /* load defaults for future use */
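++ /* MS_ENABLE = manual master/slave config; MS_VALUE picks master (1) or slave (0) */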
++ phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ?
++ ((data & CR_1000T_MS_VALUE) ?
++ e1000_ms_force_master :
++ e1000_ms_force_slave) :
++ e1000_ms_auto;
++
++ switch (phy->ms_type) {
++ case e1000_ms_force_master:
++ data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
++ break;
++ case e1000_ms_force_slave:
++ data |= CR_1000T_MS_ENABLE;
++ data &= ~(CR_1000T_MS_VALUE);
++ break;
++ case e1000_ms_auto:
++ data &= ~CR_1000T_MS_ENABLE;
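++ /* fall through */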
++ default:
++ break;
++ }
++ ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data);
++ if (ret_val)
++ goto out;
++ }
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_copper_link_autoneg - Setup/Enable autoneg for copper link
++ * @hw: pointer to the HW structure
++ *
++ * Performs initial bounds checking on the autoneg advertisement parameter, then
++ * configures the PHY to advertise the full capability if none was specified.
++ * Sets up the PHY for autoneg and restarts the negotiation process with the
++ * link partner. If autoneg_wait_to_complete is set, waits for autoneg to
++ * complete before exiting.
++ **/
++s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
++{
++ struct e1000_phy_info *phy = &hw->phy;
++ s32 ret_val;
++ u16 phy_ctrl;
++
++ DEBUGFUNC("e1000_copper_link_autoneg");
++
++ /*
++ * Perform some bounds checking on the autoneg advertisement
++ * parameter.
++ */
++ phy->autoneg_advertised &= phy->autoneg_mask;
++
++ /*
++ * If autoneg_advertised is zero, we assume it was not defaulted
++ * by the calling code so we set to advertise full capability.
++ */
++ if (phy->autoneg_advertised == 0)
++ phy->autoneg_advertised = phy->autoneg_mask;
++
++ DEBUGOUT("Reconfiguring auto-neg advertisement params\n");
++ ret_val = e1000_phy_setup_autoneg(hw);
++ if (ret_val) {
++ DEBUGOUT("Error Setting up Auto-Negotiation\n");
++ goto out;
++ }
++ DEBUGOUT("Restarting Auto-Neg\n");
++
++ /*
++ * Restart auto-negotiation by setting the Auto Neg Enable bit and
++ * the Auto Neg Restart bit in the PHY control register.
++ */
++ ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
++ if (ret_val)
++ goto out;
++
++ phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
++ ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
++ if (ret_val)
++ goto out;
++
++ /*
++ * Does the user want to wait for Auto-Neg to complete here, or
++ * check at a later time (for example, callback routine).
++ */
++ if (phy->autoneg_wait_to_complete) {
++ ret_val = hw->mac.ops.wait_autoneg(hw);
++ if (ret_val) {
++ DEBUGOUT("Error while waiting for "
++ "autoneg to complete\n");
++ goto out;
++ }
++ }
++
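++ /* Link status must be re-checked now that auto-negotiation has restarted */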
++ hw->mac.get_link_status = true;
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_phy_setup_autoneg - Configure PHY for auto-negotiation
++ * @hw: pointer to the HW structure
++ *
++ * Reads the MII auto-neg advertisement register and/or the 1000T control
++ * register. If the PHY is already set up for auto-negotiation, returns
++ * successfully. Otherwise, sets up advertisement and flow control to the
++ * appropriate values for the requested auto-negotiation.
++ **/
++static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
++{
++ struct e1000_phy_info *phy = &hw->phy;
++ s32 ret_val;
++ u16 mii_autoneg_adv_reg;
++ u16 mii_1000t_ctrl_reg = 0;
++
++ DEBUGFUNC("e1000_phy_setup_autoneg");
++
++ phy->autoneg_advertised &= phy->autoneg_mask;
++
++ /* Read the MII Auto-Neg Advertisement Register (Address 4). */
++ ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
++ if (ret_val)
++ goto out;
++
++ if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
++ /* Read the MII 1000Base-T Control Register (Address 9). */
++ ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL,
++ &mii_1000t_ctrl_reg);
++ if (ret_val)
++ goto out;
++ }
++
++ /*
++ * Need to parse both autoneg_advertised and fc and set up
++ * the appropriate PHY registers. First we will parse for
++ * autoneg_advertised software override. Since we can advertise
++ * a plethora of combinations, we need to check each bit
++ * individually.
++ */
++
++ /*
++ * First we clear all the 10/100 mb speed bits in the Auto-Neg
++ * Advertisement Register (Address 4) and the 1000 mb speed bits in
++ * the 1000Base-T Control Register (Address 9).
++ */
++ mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS |
++ NWAY_AR_100TX_HD_CAPS |
++ NWAY_AR_10T_FD_CAPS |
++ NWAY_AR_10T_HD_CAPS);
++ mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS);
++
++ DEBUGOUT1("autoneg_advertised %x\n", phy->autoneg_advertised);
++
++ /* Do we want to advertise 10 Mb Half Duplex? */
++ if (phy->autoneg_advertised & ADVERTISE_10_HALF) {
++ DEBUGOUT("Advertise 10mb Half duplex\n");
++ mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
++ }
++
++ /* Do we want to advertise 10 Mb Full Duplex? */
++ if (phy->autoneg_advertised & ADVERTISE_10_FULL) {
++ DEBUGOUT("Advertise 10mb Full duplex\n");
++ mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
++ }
++
++ /* Do we want to advertise 100 Mb Half Duplex? */
++ if (phy->autoneg_advertised & ADVERTISE_100_HALF) {
++ DEBUGOUT("Advertise 100mb Half duplex\n");
++ mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
++ }
++
++ /* Do we want to advertise 100 Mb Full Duplex? */
++ if (phy->autoneg_advertised & ADVERTISE_100_FULL) {
++ DEBUGOUT("Advertise 100mb Full duplex\n");
++ mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
++ }
++
++ /* We do not allow the Phy to advertise 1000 Mb Half Duplex */
++ if (phy->autoneg_advertised & ADVERTISE_1000_HALF)
++ DEBUGOUT("Advertise 1000mb Half duplex request denied!\n");
++
++ /* Do we want to advertise 1000 Mb Full Duplex? */
++ if (phy->autoneg_advertised & ADVERTISE_1000_FULL) {
++ DEBUGOUT("Advertise 1000mb Full duplex\n");
++ mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
++ }
++
++ /*
++ * Check for a software override of the flow control settings, and
++ * setup the PHY advertisement registers accordingly. If
++ * auto-negotiation is enabled, then software will have to set the
++ * "PAUSE" bits to the correct value in the Auto-Negotiation
++ * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-
++ * negotiation.
++ *
++ * The possible values of the "fc" parameter are:
++ * 0: Flow control is completely disabled
++ * 1: Rx flow control is enabled (we can receive pause frames
++ * but not send pause frames).
++ * 2: Tx flow control is enabled (we can send pause frames
++ * but we do not support receiving pause frames).
++ * 3: Both Rx and Tx flow control (symmetric) are enabled.
++ * other: No software override. The flow control configuration
++ * in the EEPROM is used.
++ */
++ switch (hw->fc.current_mode) {
++ case e1000_fc_none:
++ /*
++ * Flow control (Rx & Tx) is completely disabled by a
++ * software over-ride.
++ */
++ mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
++ break;
++ case e1000_fc_rx_pause:
++ /*
++ * Rx Flow control is enabled, and Tx Flow control is
++ * disabled, by a software over-ride.
++ *
++ * Since there really isn't a way to advertise that we are
++ * capable of Rx Pause ONLY, we will advertise that we
++ * support both symmetric and asymmetric Rx PAUSE. Later
++ * (in e1000_config_fc_after_link_up) we will disable the
++ * hw's ability to send PAUSE frames.
++ */
++ mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
++ break;
++ case e1000_fc_tx_pause:
++ /*
++ * Tx Flow control is enabled, and Rx Flow control is
++ * disabled, by a software over-ride.
++ */
++ mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
++ mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
++ break;
++ case e1000_fc_full:
++ /*
++ * Flow control (both Rx and Tx) is enabled by a software
++ * over-ride.
++ */
++ mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
++ break;
++ default:
++ DEBUGOUT("Flow control param set incorrectly\n");
++ ret_val = -E1000_ERR_CONFIG;
++ goto out;
++ }
++
++ ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
++ if (ret_val)
++ goto out;
++
++ DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
++
++ if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
++ ret_val = phy->ops.write_reg(hw,
++ PHY_1000T_CTRL,
++ mii_1000t_ctrl_reg);
++ if (ret_val)
++ goto out;
++ }
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_setup_copper_link_generic - Configure copper link settings
++ * @hw: pointer to the HW structure
++ *
++ * Calls the appropriate function to configure the link for auto-neg or forced
++ * speed and duplex. Then we check for link; once link is established, the
++ * collision distance and flow control are configured. If link is not
++ * established, we return -E1000_ERR_PHY (-2).
++ **/
++s32 e1000_setup_copper_link_generic(struct e1000_hw *hw)
++{
++ s32 ret_val;
++ bool link;
++
++ DEBUGFUNC("e1000_setup_copper_link_generic");
++
++ if (hw->mac.autoneg) {
++ /*
++ * Setup autoneg and flow control advertisement and perform
++ * autonegotiation.
++ */
++ ret_val = e1000_copper_link_autoneg(hw);
++ if (ret_val)
++ goto out;
++ } else {
++ /*
++ * PHY will be set to 10H, 10F, 100H or 100F
++ * depending on user settings.
++ */
++ DEBUGOUT("Forcing Speed and Duplex\n");
++ ret_val = hw->phy.ops.force_speed_duplex(hw);
++ if (ret_val) {
++ DEBUGOUT("Error Forcing Speed and Duplex\n");
++ goto out;
++ }
++ }
++
++ /*
++ * Check link status. Wait up to 100 microseconds for link to become
++ * valid.
++ */
++ ret_val = e1000_phy_has_link_generic(hw,
++ COPPER_LINK_UP_LIMIT,
++ 10,
++ &link);
++ if (ret_val)
++ goto out;
++
++ if (link) {
++ DEBUGOUT("Valid link established!!!\n");
++ e1000_config_collision_dist_generic(hw);
++ ret_val = e1000_config_fc_after_link_up_generic(hw);
++ } else {
++ DEBUGOUT("Unable to establish link!!!\n");
++ }
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY
++ * @hw: pointer to the HW structure
++ *
++ * Calls the PHY setup function to force speed and duplex. Clears the
++ * auto-crossover to force MDI manually. Waits for link and returns
++ * successfully if link is established, else -E1000_ERR_PHY (-2).
++ **/
++s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw)
++{
++ struct e1000_phy_info *phy = &hw->phy;
++ s32 ret_val;
++ u16 phy_data;
++ bool link;
++
++ DEBUGFUNC("e1000_phy_force_speed_duplex_igp");
++
++ ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
++ if (ret_val)
++ goto out;
++
++ e1000_phy_force_speed_duplex_setup(hw, &phy_data);
++
++ ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
++ if (ret_val)
++ goto out;
++
++ /*
++ * Clear Auto-Crossover to force MDI manually. IGP requires MDI
++ * forced whenever speed and duplex are forced.
++ */
++ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
++ if (ret_val)
++ goto out;
++
++ phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
++ phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
++
++ ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
++ if (ret_val)
++ goto out;
++
++ DEBUGOUT1("IGP PSCR: %X\n", phy_data);
++
++ usec_delay(1);
++
++ if (phy->autoneg_wait_to_complete) {
++ DEBUGOUT("Waiting for forced speed/duplex link on IGP phy.\n");
++
++ ret_val = e1000_phy_has_link_generic(hw,
++ PHY_FORCE_LIMIT,
++ 100000,
++ &link);
++ if (ret_val)
++ goto out;
++
++ if (!link)
++ DEBUGOUT("Link taking longer than expected.\n");
++
++ /* Try once more */
++ ret_val = e1000_phy_has_link_generic(hw,
++ PHY_FORCE_LIMIT,
++ 100000,
++ &link);
++ if (ret_val)
++ goto out;
++ }
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY
++ * @hw: pointer to the HW structure
++ *
++ * Calls the PHY setup function to force speed and duplex. Clears the
++ * auto-crossover to force MDI manually. Resets the PHY to commit the
++ * changes. If time expires while waiting for link up, we reset the DSP.
++ * After reset, TX_CLK and CRS on Tx must be set. Returns success upon
++ * successful completion, else the corresponding error code.
++ **/
++s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw)
++{
++ struct e1000_phy_info *phy = &hw->phy;
++ s32 ret_val;
++ u16 phy_data;
++ bool link;
++
++ DEBUGFUNC("e1000_phy_force_speed_duplex_m88");
++
++ /*
++ * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
++ * forced whenever speed and duplex are forced.
++ */
++ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
++ if (ret_val)
++ goto out;
++
++ phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
++ ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
++ if (ret_val)
++ goto out;
++
++ DEBUGOUT1("M88E1000 PSCR: %X\n", phy_data);
++
++ ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
++ if (ret_val)
++ goto out;
++
++ e1000_phy_force_speed_duplex_setup(hw, &phy_data);
++
++ ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
++ if (ret_val)
++ goto out;
++
++ /* Reset the phy to commit changes. */
++ ret_val = hw->phy.ops.commit(hw);
++ if (ret_val)
++ goto out;
++
++ if (phy->autoneg_wait_to_complete) {
++ DEBUGOUT("Waiting for forced speed/duplex link on M88 phy.\n");
++
++ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
++ 100000, &link);
++ if (ret_val)
++ goto out;
++
++ if (!link) {
++ /*
++ * We didn't get link.
++ * Reset the DSP and cross our fingers.
++ */
++ ret_val = phy->ops.write_reg(hw,
++ M88E1000_PHY_PAGE_SELECT,
++ 0x001d);
++ if (ret_val)
++ goto out;
++ ret_val = e1000_phy_reset_dsp_generic(hw);
++ if (ret_val)
++ goto out;
++ }
++
++ /* Try once more */
++ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
++ 100000, &link);
++ if (ret_val)
++ goto out;
++ }
++
++ ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
++ if (ret_val)
++ goto out;
++
++ /*
++ * Resetting the phy means we need to re-force TX_CLK in the
++ * Extended PHY Specific Control Register to 25MHz clock from
++ * the reset value of 2.5MHz.
++ */
++ phy_data |= M88E1000_EPSCR_TX_CLK_25;
++ ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
++ if (ret_val)
++ goto out;
++
++ /*
++ * In addition, we must re-enable CRS on Tx for both half and full
++ * duplex.
++ */
++ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
++ if (ret_val)
++ goto out;
++
++ phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
++ ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_phy_force_speed_duplex_ife - Force PHY speed & duplex
++ * @hw: pointer to the HW structure
++ *
++ * Forces the speed and duplex settings of the PHY.
++ * This is a function pointer entry point only called by
++ * PHY setup routines.
++ **/
++s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw)
++{
++ struct e1000_phy_info *phy = &hw->phy;
++ s32 ret_val;
++ u16 data;
++ bool link;
++
++ DEBUGFUNC("e1000_phy_force_speed_duplex_ife");
++
++ if (phy->type != e1000_phy_ife) {
++ ret_val = e1000_phy_force_speed_duplex_igp(hw);
++ goto out;
++ }
++
++ ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &data);
++ if (ret_val)
++ goto out;
++
++ e1000_phy_force_speed_duplex_setup(hw, &data);
++
++ ret_val = phy->ops.write_reg(hw, PHY_CONTROL, data);
++ if (ret_val)
++ goto out;
++
++ /* Disable MDI-X support for 10/100 */
++ ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data);
++ if (ret_val)
++ goto out;
++
++ data &= ~IFE_PMC_AUTO_MDIX;
++ data &= ~IFE_PMC_FORCE_MDIX;
++
++ ret_val = phy->ops.write_reg(hw, IFE_PHY_MDIX_CONTROL, data);
++ if (ret_val)
++ goto out;
++
++ DEBUGOUT1("IFE PMC: %X\n", data);
++
++ usec_delay(1);
++
++ if (phy->autoneg_wait_to_complete) {
++ DEBUGOUT("Waiting for forced speed/duplex link on IFE phy.\n");
++
++ ret_val = e1000_phy_has_link_generic(hw,
++ PHY_FORCE_LIMIT,
++ 100000,
++ &link);
++ if (ret_val)
++ goto out;
++
++ if (!link)
++ DEBUGOUT("Link taking longer than expected.\n");
++
++ /* Try once more */
++ ret_val = e1000_phy_has_link_generic(hw,
++ PHY_FORCE_LIMIT,
++ 100000,
++ &link);
++ if (ret_val)
++ goto out;
++ }
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex
++ * @hw: pointer to the HW structure
++ * @phy_ctrl: pointer to current value of PHY_CONTROL
++ *
++ * Forces speed and duplex on the PHY by doing the following: disable flow
++ * control, force speed/duplex on the MAC, disable auto speed detection,
++ * disable auto-negotiation, configure duplex, configure speed, configure
++ * the collision distance, write configuration to CTRL register. The
++ * caller must write to the PHY_CONTROL register for these settings to
++ * take effect.
++ **/
++void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl)
++{
++ struct e1000_mac_info *mac = &hw->mac;
++ u32 ctrl;
++
++ DEBUGFUNC("e1000_phy_force_speed_duplex_setup");
++
++ /* Turn off flow control when forcing speed/duplex */
++ hw->fc.current_mode = e1000_fc_none;
++
++ /* Force speed/duplex on the mac */
++ ctrl = E1000_READ_REG(hw, E1000_CTRL);
++ ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
++ ctrl &= ~E1000_CTRL_SPD_SEL;
++
++ /* Disable Auto Speed Detection */
++ ctrl &= ~E1000_CTRL_ASDE;
++
++ /* Disable autoneg on the phy */
++ *phy_ctrl &= ~MII_CR_AUTO_NEG_EN;
++
++ /* Forcing Full or Half Duplex? */
++ if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) {
++ ctrl &= ~E1000_CTRL_FD;
++ *phy_ctrl &= ~MII_CR_FULL_DUPLEX;
++ DEBUGOUT("Half Duplex\n");
++ } else {
++ ctrl |= E1000_CTRL_FD;
++ *phy_ctrl |= MII_CR_FULL_DUPLEX;
++ DEBUGOUT("Full Duplex\n");
++ }
++
++ /* Forcing 10mb or 100mb? */
++ if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) {
++ ctrl |= E1000_CTRL_SPD_100;
++ *phy_ctrl |= MII_CR_SPEED_100;
++ *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
++ DEBUGOUT("Forcing 100mb\n");
++ } else {
++ ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
++ *phy_ctrl |= MII_CR_SPEED_10;
++ *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
++ DEBUGOUT("Forcing 10mb\n");
++ }
++
++ e1000_config_collision_dist_generic(hw);
++
++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
++}
++
++/**
++ * e1000_set_d3_lplu_state_generic - Sets low power link up state for D3
++ * @hw: pointer to the HW structure
++ * @active: boolean used to enable/disable lplu
++ *
++ * Success returns 0, Failure returns 1
++ *
++ * The low power link up (lplu) state is set to the power management level D3
++ * and SmartSpeed is disabled when active is true; otherwise, lplu for D3 is
++ * cleared and SmartSpeed is enabled. LPLU and SmartSpeed are mutually
++ * exclusive. LPLU is used during Dx states where power conservation is most
++ * important. During driver activity, SmartSpeed should be enabled so
++ * performance is maintained.
++ **/
++s32 e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active)
++{
++ struct e1000_phy_info *phy = &hw->phy;
++ s32 ret_val = E1000_SUCCESS;
++ u16 data;
++
++ DEBUGFUNC("e1000_set_d3_lplu_state_generic");
++
++ if (!(hw->phy.ops.read_reg))
++ goto out;
++
++ ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
++ if (ret_val)
++ goto out;
++
++ if (!active) {
++ data &= ~IGP02E1000_PM_D3_LPLU;
++ ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
++ data);
++ if (ret_val)
++ goto out;
++ /*
++ * LPLU and SmartSpeed are mutually exclusive. LPLU is used
++ * during Dx states where the power conservation is most
++ * important. During driver activity we should enable
++ * SmartSpeed, so performance is maintained.
++ */
++ if (phy->smart_speed == e1000_smart_speed_on) {
++ ret_val = phy->ops.read_reg(hw,
++ IGP01E1000_PHY_PORT_CONFIG,
++ &data);
++ if (ret_val)
++ goto out;
++
++ data |= IGP01E1000_PSCFR_SMART_SPEED;
++ ret_val = phy->ops.write_reg(hw,
++ IGP01E1000_PHY_PORT_CONFIG,
++ data);
++ if (ret_val)
++ goto out;
++ } else if (phy->smart_speed == e1000_smart_speed_off) {
++ ret_val = phy->ops.read_reg(hw,
++ IGP01E1000_PHY_PORT_CONFIG,
++ &data);
++ if (ret_val)
++ goto out;
++
++ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
++ ret_val = phy->ops.write_reg(hw,
++ IGP01E1000_PHY_PORT_CONFIG,
++ data);
++ if (ret_val)
++ goto out;
++ }
++ } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
++ (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
++ (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
++ data |= IGP02E1000_PM_D3_LPLU;
++ ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
++ data);
++ if (ret_val)
++ goto out;
++
++ /* When LPLU is enabled, we should disable SmartSpeed */
++ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
++ &data);
++ if (ret_val)
++ goto out;
++
++ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
++ ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
++ data);
++ }
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_check_downshift_generic - Checks whether a downshift in speed occurred
++ * @hw: pointer to the HW structure
++ *
++ * Success returns 0, Failure returns 1
++ *
++ * A downshift is detected by querying the PHY link health.
++ **/
++s32 e1000_check_downshift_generic(struct e1000_hw *hw)
++{
++ struct e1000_phy_info *phy = &hw->phy;
++ s32 ret_val;
++ u16 phy_data, offset, mask;
++
++ DEBUGFUNC("e1000_check_downshift_generic");
++
++ switch (phy->type) {
++ case e1000_phy_m88:
++ case e1000_phy_gg82563:
++ offset = M88E1000_PHY_SPEC_STATUS;
++ mask = M88E1000_PSSR_DOWNSHIFT;
++ break;
++ case e1000_phy_igp_2:
++ case e1000_phy_igp:
++ case e1000_phy_igp_3:
++ offset = IGP01E1000_PHY_LINK_HEALTH;
++ mask = IGP01E1000_PLHR_SS_DOWNGRADE;
++ break;
++ default:
++ /* speed downshift not supported */
++ phy->speed_downgraded = false;
++ ret_val = E1000_SUCCESS;
++ goto out;
++ }
++
++ ret_val = phy->ops.read_reg(hw, offset, &phy_data);
++
++ if (!ret_val)
++ phy->speed_downgraded = (phy_data & mask) ? true : false;
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_check_polarity_m88 - Checks the polarity.
++ * @hw: pointer to the HW structure
++ *
++ * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
++ *
++ * Polarity is determined based on the PHY specific status register.
++ **/
++s32 e1000_check_polarity_m88(struct e1000_hw *hw)
++{
++ struct e1000_phy_info *phy = &hw->phy;
++ s32 ret_val;
++ u16 data;
++
++ DEBUGFUNC("e1000_check_polarity_m88");
++
++ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &data);
++
++ if (!ret_val)
++ phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY)
++ ? e1000_rev_polarity_reversed
++ : e1000_rev_polarity_normal;
++
++ return ret_val;
++}
++
++/**
++ * e1000_check_polarity_igp - Checks the polarity.
++ * @hw: pointer to the HW structure
++ *
++ * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
++ *
++ * Polarity is determined based on the PHY port status register, and the
++ * current speed (since there is no polarity at 100Mbps).
++ **/
++s32 e1000_check_polarity_igp(struct e1000_hw *hw)
++{
++ struct e1000_phy_info *phy = &hw->phy;
++ s32 ret_val;
++ u16 data, offset, mask;
++
++ DEBUGFUNC("e1000_check_polarity_igp");
++
++ /*
++ * Polarity is determined based on the speed of
++ * our connection.
++ */
++ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
++ if (ret_val)
++ goto out;
++
++ if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
++ IGP01E1000_PSSR_SPEED_1000MBPS) {
++ offset = IGP01E1000_PHY_PCS_INIT_REG;
++ mask = IGP01E1000_PHY_POLARITY_MASK;
++ } else {
++ /*
++ * This really only applies to 10Mbps since
++ * there is no polarity for 100Mbps (always 0).
++ */
++ offset = IGP01E1000_PHY_PORT_STATUS;
++ mask = IGP01E1000_PSSR_POLARITY_REVERSED;
++ }
++
++ ret_val = phy->ops.read_reg(hw, offset, &data);
++
++ if (!ret_val)
++ phy->cable_polarity = (data & mask)
++ ? e1000_rev_polarity_reversed
++ : e1000_rev_polarity_normal;
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_check_polarity_ife - Check cable polarity for IFE PHY
++ * @hw: pointer to the HW structure
++ *
++ * Polarity is determined based on the polarity reversal feature being enabled.
++ **/
++s32 e1000_check_polarity_ife(struct e1000_hw *hw)
++{
++ struct e1000_phy_info *phy = &hw->phy;
++ s32 ret_val;
++ u16 phy_data, offset, mask;
++
++ DEBUGFUNC("e1000_check_polarity_ife");
++
++ /*
++ * Polarity is determined based on the reversal feature being enabled.
++ */
++ if (phy->polarity_correction) {
++ offset = IFE_PHY_EXTENDED_STATUS_CONTROL;
++ mask = IFE_PESC_POLARITY_REVERSED;
++ } else {
++ offset = IFE_PHY_SPECIAL_CONTROL;
++ mask = IFE_PSC_FORCE_POLARITY;
++ }
++
++ ret_val = phy->ops.read_reg(hw, offset, &phy_data);
++
++ if (!ret_val)
++ phy->cable_polarity = (phy_data & mask)
++ ? e1000_rev_polarity_reversed
++ : e1000_rev_polarity_normal;
++
++ return ret_val;
++}
++
++/**
++ * e1000_wait_autoneg_generic - Wait for auto-neg completion
++ * @hw: pointer to the HW structure
++ *
++ * Waits for auto-negotiation to complete or for the auto-negotiation time
++ * limit to expire, whichever happens first.
++ **/
++s32 e1000_wait_autoneg_generic(struct e1000_hw *hw)
++{
++ s32 ret_val = E1000_SUCCESS;
++ u16 i, phy_status;
++
++ DEBUGFUNC("e1000_wait_autoneg_generic");
++
++ if (!(hw->phy.ops.read_reg))
++ return E1000_SUCCESS;
++
++ /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */
++ for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) {
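++ /* Read PHY_STATUS twice; some PHYs latch status bits, so only the second read is current */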
++ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
++ if (ret_val)
++ break;
++ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
++ if (ret_val)
++ break;
++ if (phy_status & MII_SR_AUTONEG_COMPLETE)
++ break;
++ msec_delay(100);
++ }
++
++ /*
++ * PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
++ * has completed.
++ */
++ return ret_val;
++}
++
++/**
++ * e1000_phy_has_link_generic - Polls PHY for link
++ * @hw: pointer to the HW structure
++ * @iterations: number of times to poll for link
++ * @usec_interval: delay between polling attempts
++ * @success: pointer to whether polling was successful or not
++ *
++ * Polls the PHY status register for link, 'iterations' number of times.
++ **/
++s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
++ u32 usec_interval, bool *success)
++{
++ s32 ret_val = E1000_SUCCESS;
++ u16 i, phy_status;
++
++ DEBUGFUNC("e1000_phy_has_link_generic");
++
++ if (!(hw->phy.ops.read_reg))
++ return E1000_SUCCESS;
++
++ for (i = 0; i < iterations; i++) {
++ /*
++ * Some PHYs require the PHY_STATUS register to be read
++ * twice due to the link bit being sticky. No harm doing
++ * it across the board.
++ */
++ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
++ if (ret_val) {
++ /*
++ * If the first read fails, another entity may have
++ * ownership of the resources, wait and try again to
++ * see if they have relinquished the resources yet.
++ */
++ usec_delay(usec_interval);
++ }
++ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
++ if (ret_val)
++ break;
++ if (phy_status & MII_SR_LINK_STATUS)
++ break;
++ if (usec_interval >= 1000)
++ msec_delay_irq(usec_interval/1000);
++ else
++ usec_delay(usec_interval);
++ }
++
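++ /* Link is up only if the loop exited before exhausting all iterations */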
++ *success = (i < iterations) ? true : false;
++
++ return ret_val;
++}
++
++/**
++ * e1000_get_cable_length_m88 - Determine cable length for m88 PHY
++ * @hw: pointer to the HW structure
++ *
++ * Reads the PHY specific status register to retrieve the cable length
++ * information. The cable length is determined by averaging the minimum and
++ * maximum values to get the "average" cable length. The m88 PHY reports
++ * one of the following cable length values:
++ * Register Value Cable Length
++ * 0 < 50 meters
++ * 1 50 - 80 meters
++ * 2 80 - 110 meters
++ * 3 110 - 140 meters
++ * 4 > 140 meters
++ **/
++s32 e1000_get_cable_length_m88(struct e1000_hw *hw)
++{
++ struct e1000_phy_info *phy = &hw->phy;
++ s32 ret_val;
++ u16 phy_data, index;
++
++ DEBUGFUNC("e1000_get_cable_length_m88");
++
++ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
++ if (ret_val)
++ goto out;
++
++ index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
++ M88E1000_PSSR_CABLE_LENGTH_SHIFT;
++ if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
++ ret_val = -E1000_ERR_PHY;
++ goto out;
++ }
++
++ phy->min_cable_length = e1000_m88_cable_length_table[index];
++ phy->max_cable_length = e1000_m88_cable_length_table[index + 1];
++
++ phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_get_cable_length_igp_2 - Determine cable length for igp2 PHY
++ * @hw: pointer to the HW structure
++ *
++ * The automatic gain control (agc) normalizes the amplitude of the
++ * received signal, adjusting for the attenuation produced by the
++ * cable. By reading the AGC registers, which represent the
++ * combination of coarse and fine gain values, the value can be used as an
++ * index into a lookup table to obtain the approximate cable length
++ * for each channel.
++ **/
++s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw)
++{
++ struct e1000_phy_info *phy = &hw->phy;
++ s32 ret_val = E1000_SUCCESS;
++ u16 phy_data, i, agc_value = 0;
++ u16 cur_agc_index, max_agc_index = 0;
++ u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
++ u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] =
++ {IGP02E1000_PHY_AGC_A,
++ IGP02E1000_PHY_AGC_B,
++ IGP02E1000_PHY_AGC_C,
++ IGP02E1000_PHY_AGC_D};
++
++ DEBUGFUNC("e1000_get_cable_length_igp_2");
++
++ /* Read the AGC registers for all channels */
++ for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
++ ret_val = phy->ops.read_reg(hw, agc_reg_array[i], &phy_data);
++ if (ret_val)
++ goto out;
++
++ /*
++ * Getting bits 15:9, which represent the combination of
++ * coarse and fine gain values. The result is a number
++ * that can be put into the lookup table to obtain the
++ * approximate cable length.
++ */
++ cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
++ IGP02E1000_AGC_LENGTH_MASK;
++
++ /* Array index bound check. */
++ if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) ||
++ (cur_agc_index == 0)) {
++ ret_val = -E1000_ERR_PHY;
++ goto out;
++ }
++
++ /* Remove min & max AGC values from calculation. */
++ if (e1000_igp_2_cable_length_table[min_agc_index] >
++ e1000_igp_2_cable_length_table[cur_agc_index])
++ min_agc_index = cur_agc_index;
++ if (e1000_igp_2_cable_length_table[max_agc_index] <
++ e1000_igp_2_cable_length_table[cur_agc_index])
++ max_agc_index = cur_agc_index;
++
++ agc_value += e1000_igp_2_cable_length_table[cur_agc_index];
++ }
++
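++ /* Drop the shortest and longest channel readings and average the remaining two */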
++ agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] +
++ e1000_igp_2_cable_length_table[max_agc_index]);
++ agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
++
++ /* Calculate cable length with the error range of +/- 10 meters. */
++ phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
++ (agc_value - IGP02E1000_AGC_RANGE) : 0;
++ phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE;
++
++ phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_get_phy_info_m88 - Retrieve PHY information
++ * @hw: pointer to the HW structure
++ *
++ * Valid only for copper links. Read the PHY status register (sticky read)
++ * to verify that link is up. Read the PHY special control register to
++ * determine the polarity and 10base-T extended distance. Read the PHY
++ * special status register to determine MDI/MDIx and current speed. If
++ * speed is 1000, then determine cable length, local and remote receiver.
++ **/
++s32 e1000_get_phy_info_m88(struct e1000_hw *hw)
++{
++ struct e1000_phy_info *phy = &hw->phy;
++ s32 ret_val;
++ u16 phy_data;
++ bool link;
++
++ DEBUGFUNC("e1000_get_phy_info_m88");
++
++ if (phy->media_type != e1000_media_type_copper) {
++ DEBUGOUT("Phy info is only valid for copper media\n");
++ ret_val = -E1000_ERR_CONFIG;
++ goto out;
++ }
++
++ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
++ if (ret_val)
++ goto out;
++
++ if (!link) {
++ DEBUGOUT("Phy info is only valid if link is up\n");
++ ret_val = -E1000_ERR_CONFIG;
++ goto out;
++ }
++
++ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
++ if (ret_val)
++ goto out;
++
++ phy->polarity_correction = (phy_data & M88E1000_PSCR_POLARITY_REVERSAL)
++ ? true : false;
++
++ ret_val = e1000_check_polarity_m88(hw);
++ if (ret_val)
++ goto out;
++
++ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
++ if (ret_val)
++ goto out;
++
++ phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX) ? true : false;
++
++ if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
++ ret_val = hw->phy.ops.get_cable_length(hw);
++ if (ret_val)
++ goto out;
++
++ ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &phy_data);
++ if (ret_val)
++ goto out;
++
++ phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS)
++ ? e1000_1000t_rx_status_ok
++ : e1000_1000t_rx_status_not_ok;
++
++ phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS)
++ ? e1000_1000t_rx_status_ok
++ : e1000_1000t_rx_status_not_ok;
++ } else {
++ /* Set values to "undefined" */
++ phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
++ phy->local_rx = e1000_1000t_rx_status_undefined;
++ phy->remote_rx = e1000_1000t_rx_status_undefined;
++ }
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_get_phy_info_igp - Retrieve igp PHY information
++ * @hw: pointer to the HW structure
++ *
++ * Read PHY status to determine if link is up. If link is up, then
++ * set/determine 10base-T extended distance and polarity correction. Read
++ * PHY port status to determine MDI/MDIx and speed. Based on the speed,
++ * determine the cable length, local and remote receiver.
++ **/
++s32 e1000_get_phy_info_igp(struct e1000_hw *hw)
++{
++ struct e1000_phy_info *phy = &hw->phy;
++ s32 ret_val;
++ u16 data;
++ bool link;
++
++ DEBUGFUNC("e1000_get_phy_info_igp");
++
++ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
++ if (ret_val)
++ goto out;
++
++ if (!link) {
++ DEBUGOUT("Phy info is only valid if link is up\n");
++ ret_val = -E1000_ERR_CONFIG;
++ goto out;
++ }
++
++ phy->polarity_correction = true;
++
++ ret_val = e1000_check_polarity_igp(hw);
++ if (ret_val)
++ goto out;
++
++ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
++ if (ret_val)
++ goto out;
++
++ phy->is_mdix = (data & IGP01E1000_PSSR_MDIX) ? true : false;
++
++ if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
++ IGP01E1000_PSSR_SPEED_1000MBPS) {
++ ret_val = phy->ops.get_cable_length(hw);
++ if (ret_val)
++ goto out;
++
++ ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
++ if (ret_val)
++ goto out;
++
++ phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
++ ? e1000_1000t_rx_status_ok
++ : e1000_1000t_rx_status_not_ok;
++
++ phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
++ ? e1000_1000t_rx_status_ok
++ : e1000_1000t_rx_status_not_ok;
++ } else {
++ phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
++ phy->local_rx = e1000_1000t_rx_status_undefined;
++ phy->remote_rx = e1000_1000t_rx_status_undefined;
++ }
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_phy_sw_reset_generic - PHY software reset
++ * @hw: pointer to the HW structure
++ *
++ * Does a software reset of the PHY by reading the PHY control register and
++ * setting/writing the control register reset bit to the PHY.
++ **/
++s32 e1000_phy_sw_reset_generic(struct e1000_hw *hw)
++{
++ s32 ret_val = E1000_SUCCESS;
++ u16 phy_ctrl;
++
++ DEBUGFUNC("e1000_phy_sw_reset_generic");
++
++ if (!(hw->phy.ops.read_reg))
++ goto out;
++
++ ret_val = hw->phy.ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
++ if (ret_val)
++ goto out;
++
++ phy_ctrl |= MII_CR_RESET;
++ ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
++ if (ret_val)
++ goto out;
++
++ usec_delay(1);
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_phy_hw_reset_generic - PHY hardware reset
++ * @hw: pointer to the HW structure
++ *
++ * Verify the reset block is not blocking us from resetting. Acquire
++ * semaphore (if necessary) and read/set/write the PHY reset bit in the
++ * device control register. Wait the appropriate delay time for the device to
++ * reset and release the semaphore (if necessary).
++ **/
++s32 e1000_phy_hw_reset_generic(struct e1000_hw *hw)
++{
++ struct e1000_phy_info *phy = &hw->phy;
++ s32 ret_val = E1000_SUCCESS;
++ u32 ctrl;
++
++ DEBUGFUNC("e1000_phy_hw_reset_generic");
++
++ ret_val = phy->ops.check_reset_block(hw);
++ if (ret_val) {
++ ret_val = E1000_SUCCESS;
++ goto out;
++ }
++
++ ret_val = phy->ops.acquire(hw);
++ if (ret_val)
++ goto out;
++
++ ctrl = E1000_READ_REG(hw, E1000_CTRL);
++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_PHY_RST);
++ E1000_WRITE_FLUSH(hw);
++
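++ /* Hold the PHY in reset for the device-specific delay, then release it */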
++ usec_delay(phy->reset_delay_us);
++
++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
++ E1000_WRITE_FLUSH(hw);
++
++ usec_delay(150);
++
++ phy->ops.release(hw);
++
++ ret_val = phy->ops.get_cfg_done(hw);
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_get_cfg_done_generic - Generic configuration done
++ * @hw: pointer to the HW structure
++ *
++ * Generic function to wait 10 milliseconds for configuration to complete
++ * and return success.
++ **/
++s32 e1000_get_cfg_done_generic(struct e1000_hw *hw)
++{
++ DEBUGFUNC("e1000_get_cfg_done_generic");
++
++ msec_delay_irq(10);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_phy_init_script_igp3 - Inits the IGP3 PHY
++ * @hw: pointer to the HW structure
++ *
++ * Initializes an Intel Gigabit PHY3 when an EEPROM is not present.
++ **/
++s32 e1000_phy_init_script_igp3(struct e1000_hw *hw)
++{
++ DEBUGOUT("Running IGP 3 PHY init script\n");
++
++ /* PHY init IGP 3 */
++ /* Enable rise/fall, 10-mode work in class-A */
++ hw->phy.ops.write_reg(hw, 0x2F5B, 0x9018);
++ /* Remove all caps from Replica path filter */
++ hw->phy.ops.write_reg(hw, 0x2F52, 0x0000);
++ /* Bias trimming for ADC, AFE and Driver (Default) */
++ hw->phy.ops.write_reg(hw, 0x2FB1, 0x8B24);
++ /* Increase Hybrid poly bias */
++ hw->phy.ops.write_reg(hw, 0x2FB2, 0xF8F0);
++ /* Add 4% to Tx amplitude in Gig mode */
++ hw->phy.ops.write_reg(hw, 0x2010, 0x10B0);
++ /* Disable trimming (TTT) */
++ hw->phy.ops.write_reg(hw, 0x2011, 0x0000);
++ /* Poly DC correction to 94.6% + 2% for all channels */
++ hw->phy.ops.write_reg(hw, 0x20DD, 0x249A);
++ /* ABS DC correction to 95.9% */
++ hw->phy.ops.write_reg(hw, 0x20DE, 0x00D3);
++ /* BG temp curve trim */
++ hw->phy.ops.write_reg(hw, 0x28B4, 0x04CE);
++ /* Increasing ADC OPAMP stage 1 currents to max */
++ hw->phy.ops.write_reg(hw, 0x2F70, 0x29E4);
++ /* Force 1000 (required for enabling PHY regs configuration) */
++ hw->phy.ops.write_reg(hw, 0x0000, 0x0140);
++ /* Set upd_freq to 6 */
++ hw->phy.ops.write_reg(hw, 0x1F30, 0x1606);
++ /* Disable NPDFE */
++ hw->phy.ops.write_reg(hw, 0x1F31, 0xB814);
++ /* Disable adaptive fixed FFE (Default) */
++ hw->phy.ops.write_reg(hw, 0x1F35, 0x002A);
++ /* Enable FFE hysteresis */
++ hw->phy.ops.write_reg(hw, 0x1F3E, 0x0067);
++ /* Fixed FFE for short cable lengths */
++ hw->phy.ops.write_reg(hw, 0x1F54, 0x0065);
++ /* Fixed FFE for medium cable lengths */
++ hw->phy.ops.write_reg(hw, 0x1F55, 0x002A);
++ /* Fixed FFE for long cable lengths */
++ hw->phy.ops.write_reg(hw, 0x1F56, 0x002A);
++ /* Enable Adaptive Clip Threshold */
++ hw->phy.ops.write_reg(hw, 0x1F72, 0x3FB0);
++ /* AHT reset limit to 1 */
++ hw->phy.ops.write_reg(hw, 0x1F76, 0xC0FF);
++ /* Set AHT master delay to 127 msec */
++ hw->phy.ops.write_reg(hw, 0x1F77, 0x1DEC);
++ /* Set scan bits for AHT */
++ hw->phy.ops.write_reg(hw, 0x1F78, 0xF9EF);
++ /* Set AHT Preset bits */
++ hw->phy.ops.write_reg(hw, 0x1F79, 0x0210);
++ /* Change integ_factor of channel A to 3 */
++ hw->phy.ops.write_reg(hw, 0x1895, 0x0003);
++ /* Change prop_factor of channels BCD to 8 */
++ hw->phy.ops.write_reg(hw, 0x1796, 0x0008);
++ /* Change cg_icount + enable integbp for channels BCD */
++ hw->phy.ops.write_reg(hw, 0x1798, 0xD008);
++ /*
++ * Change cg_icount + enable integbp + change prop_factor_master
++ * to 8 for channel A
++ */
++ hw->phy.ops.write_reg(hw, 0x1898, 0xD918);
++ /* Disable AHT in Slave mode on channel A */
++ hw->phy.ops.write_reg(hw, 0x187A, 0x0800);
++ /*
++ * Enable LPLU and disable AN to 1000 in non-D0a states,
++ * Enable SPD+B2B
++ */
++ hw->phy.ops.write_reg(hw, 0x0019, 0x008D);
++ /* Enable restart AN on an1000_dis change */
++ hw->phy.ops.write_reg(hw, 0x001B, 0x2080);
++ /* Enable wh_fifo read clock in 10/100 modes */
++ hw->phy.ops.write_reg(hw, 0x0014, 0x0045);
++ /* Restart AN, Speed selection is 1000 */
++ hw->phy.ops.write_reg(hw, 0x0000, 0x1340);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_get_phy_type_from_id - Get PHY type from id
++ * @phy_id: phy_id read from the phy
++ *
++ * Returns the phy type from the id.
++ **/
++enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id)
++{
++ enum e1000_phy_type phy_type = e1000_phy_unknown;
++
++ switch (phy_id) {
++ case M88E1000_I_PHY_ID:
++ case M88E1000_E_PHY_ID:
++ case M88E1111_I_PHY_ID:
++ case M88E1011_I_PHY_ID:
++ phy_type = e1000_phy_m88;
++ break;
++ case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */
++ phy_type = e1000_phy_igp_2;
++ break;
++ case GG82563_E_PHY_ID:
++ phy_type = e1000_phy_gg82563;
++ break;
++ case IGP03E1000_E_PHY_ID:
++ phy_type = e1000_phy_igp_3;
++ break;
++ case IFE_E_PHY_ID:
++ case IFE_PLUS_E_PHY_ID:
++ case IFE_C_E_PHY_ID:
++ phy_type = e1000_phy_ife;
++ break;
++ default:
++ phy_type = e1000_phy_unknown;
++ break;
++ }
++ return phy_type;
++}
++
++/**
++ * e1000_determine_phy_address - Determines PHY address.
++ * @hw: pointer to the HW structure
++ *
++ * This uses a trial and error method to loop through possible PHY
++ * addresses. It tests each by reading the PHY ID registers and
++ * checking for a match.
++ **/
++s32 e1000_determine_phy_address(struct e1000_hw *hw)
++{
++ s32 ret_val = -E1000_ERR_PHY_TYPE;
++ u32 phy_addr = 0;
++ u32 i;
++ enum e1000_phy_type phy_type = e1000_phy_unknown;
++
++ hw->phy.id = phy_type;
++
++ for (phy_addr = 0; phy_addr < E1000_MAX_PHY_ADDR; phy_addr++) {
++ hw->phy.addr = phy_addr;
++ i = 0;
++
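++ /* Retry the ID read up to 10 times at this address before moving on */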
++ do {
++ e1000_get_phy_id(hw);
++ phy_type = e1000_get_phy_type_from_id(hw->phy.id);
++
++ /*
++ * If phy_type is valid, break - we found our
++ * PHY address
++ */
++ if (phy_type != e1000_phy_unknown) {
++ ret_val = E1000_SUCCESS;
++ goto out;
++ }
++ msec_delay(1);
++ i++;
++ } while (i < 10);
++ }
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_power_up_phy_copper - Restore copper link in case of PHY power down
++ * @hw: pointer to the HW structure
++ *
++ * In the case of a PHY power down to save power, or to turn off link during a
++ * driver unload, or when wake on LAN is not enabled, restore the link to previous
++ * settings.
++ **/
++void e1000_power_up_phy_copper(struct e1000_hw *hw)
++{
++ u16 mii_reg = 0;
++
++ /* The PHY will retain its settings across a power down/up cycle */
++ hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
++ mii_reg &= ~MII_CR_POWER_DOWN;
++ hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
++}
++
++/**
++ * e1000_power_down_phy_copper - Power down copper PHY
++ * @hw: pointer to the HW structure
++ *
++ * Power down the PHY to save power when the interface is down, during a
++ * driver unload, or when wake on LAN is not enabled.
++ **/
++void e1000_power_down_phy_copper(struct e1000_hw *hw)
++{
++ u16 mii_reg = 0;
++
++ /* The PHY will retain its settings across a power down/up cycle */
++ hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
++ mii_reg |= MII_CR_POWER_DOWN;
++ hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
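++ /* Give the PHY a moment to complete the power-down transition */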
++ msec_delay(1);
++}
+Index: linux-2.6.22/drivers/net/igb/e1000_phy.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.22/drivers/net/igb/e1000_phy.h 2009-12-18 12:39:22.000000000 -0500
+@@ -0,0 +1,163 @@
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2009 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ You should have received a copy of the GNU General Public License along with
++ this program; if not, write to the Free Software Foundation, Inc.,
++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
++
++#ifndef _E1000_PHY_H_
++#define _E1000_PHY_H_
++
++void e1000_init_phy_ops_generic(struct e1000_hw *hw);
++s32 e1000_check_downshift_generic(struct e1000_hw *hw);
++s32 e1000_check_polarity_m88(struct e1000_hw *hw);
++s32 e1000_check_polarity_igp(struct e1000_hw *hw);
++s32 e1000_check_polarity_ife(struct e1000_hw *hw);
++s32 e1000_check_reset_block_generic(struct e1000_hw *hw);
++s32 e1000_copper_link_autoneg(struct e1000_hw *hw);
++s32 e1000_copper_link_setup_igp(struct e1000_hw *hw);
++s32 e1000_copper_link_setup_m88(struct e1000_hw *hw);
++s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw);
++s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw);
++s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw);
++s32 e1000_get_cable_length_m88(struct e1000_hw *hw);
++s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw);
++s32 e1000_get_cfg_done_generic(struct e1000_hw *hw);
++s32 e1000_get_phy_id(struct e1000_hw *hw);
++s32 e1000_get_phy_info_igp(struct e1000_hw *hw);
++s32 e1000_get_phy_info_m88(struct e1000_hw *hw);
++s32 e1000_phy_sw_reset_generic(struct e1000_hw *hw);
++void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
++s32 e1000_phy_hw_reset_generic(struct e1000_hw *hw);
++s32 e1000_phy_reset_dsp_generic(struct e1000_hw *hw);
++s32 e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data);
++s32 e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data);
++s32 e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
++s32 e1000_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data);
++s32 e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data);
++s32 e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active);
++s32 e1000_setup_copper_link_generic(struct e1000_hw *hw);
++s32 e1000_wait_autoneg_generic(struct e1000_hw *hw);
++s32 e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data);
++s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data);
++s32 e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
++s32 e1000_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data);
++s32 e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data);
++s32 e1000_phy_reset_dsp(struct e1000_hw *hw);
++s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
++ u32 usec_interval, bool *success);
++s32 e1000_phy_init_script_igp3(struct e1000_hw *hw);
++enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id);
++s32 e1000_determine_phy_address(struct e1000_hw *hw);
++void e1000_power_up_phy_copper(struct e1000_hw *hw);
++void e1000_power_down_phy_copper(struct e1000_hw *hw);
++s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
++s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
++s32 e1000_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data);
++s32 e1000_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data);
++
++#define E1000_MAX_PHY_ADDR 4
++
++/* IGP01E1000 Specific Registers */
++#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */
++#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */
++#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */
++#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */
++#define IGP01E1000_GMII_FIFO 0x14 /* GMII FIFO */
++#define IGP01E1000_PHY_CHANNEL_QUALITY 0x15 /* PHY Channel Quality */
++#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */
++#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */
++#define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */
++#define IGP_PAGE_SHIFT 5
++#define PHY_REG_MASK 0x1F
++
++#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4
++#define IGP01E1000_PHY_POLARITY_MASK 0x0078
++
++#define IGP01E1000_PSCR_AUTO_MDIX 0x1000
++#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */
++
++#define IGP01E1000_PSCFR_SMART_SPEED 0x0080
++
++/* Enable flexible speed on link-up */
++#define IGP01E1000_GMII_FLEX_SPD 0x0010
++#define IGP01E1000_GMII_SPD 0x0020 /* Enable SPD */
++
++#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */
++#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */
++#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */
++
++#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000
++
++#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002
++#define IGP01E1000_PSSR_MDIX 0x0800
++#define IGP01E1000_PSSR_SPEED_MASK 0xC000
++#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000
++
++#define IGP02E1000_PHY_CHANNEL_NUM 4
++#define IGP02E1000_PHY_AGC_A 0x11B1
++#define IGP02E1000_PHY_AGC_B 0x12B1
++#define IGP02E1000_PHY_AGC_C 0x14B1
++#define IGP02E1000_PHY_AGC_D 0x18B1
++
++#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Coarse - 15:13, Fine - 12:9 */
++#define IGP02E1000_AGC_LENGTH_MASK 0x7F
++#define IGP02E1000_AGC_RANGE 15
++
++#define IGP03E1000_PHY_MISC_CTRL 0x1B
++#define IGP03E1000_PHY_MISC_DUPLEX_MANUAL_SET 0x1000 /* Manually Set Duplex */
++
++#define E1000_CABLE_LENGTH_UNDEFINED 0xFF
++
++#define E1000_KMRNCTRLSTA_OFFSET 0x001F0000
++#define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16
++#define E1000_KMRNCTRLSTA_REN 0x00200000
++#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */
++#define E1000_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */
++#define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */
++#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */
++
++#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10
++#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Control */
++#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Control */
++#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control */
++
++/* IFE PHY Extended Status Control */
++#define IFE_PESC_POLARITY_REVERSED 0x0100
++
++/* IFE PHY Special Control */
++#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010
++#define IFE_PSC_FORCE_POLARITY 0x0020
++#define IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN 0x0100
++
++/* IFE PHY Special Control and LED Control */
++#define IFE_PSCL_PROBE_MODE 0x0020
++#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */
++#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */
++
++/* IFE PHY MDIX Control */
++#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */
++#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */
++#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable auto MDI/MDI-X, 0=disable */
++
++#endif
+Index: linux-2.6.22/drivers/net/igb/e1000_regs.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.22/drivers/net/igb/e1000_regs.h 2009-12-18 12:39:22.000000000 -0500
+@@ -0,0 +1,484 @@
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2009 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ You should have received a copy of the GNU General Public License along with
++ this program; if not, write to the Free Software Foundation, Inc.,
++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
++
++#ifndef _E1000_REGS_H_
++#define _E1000_REGS_H_
++
++#define E1000_CTRL 0x00000 /* Device Control - RW */
++#define E1000_CTRL_DUP 0x00004 /* Device Control Duplicate (Shadow) - RW */
++#define E1000_STATUS 0x00008 /* Device Status - RO */
++#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */
++#define E1000_EERD 0x00014 /* EEPROM Read - RW */
++#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */
++#define E1000_FLA 0x0001C /* Flash Access - RW */
++#define E1000_MDIC 0x00020 /* MDI Control - RW */
++#define E1000_SCTL 0x00024 /* SerDes Control - RW */
++#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */
++#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */
++#define E1000_FEXT 0x0002C /* Future Extended - RW */
++#define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */
++#define E1000_FCT 0x00030 /* Flow Control Type - RW */
++#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */
++#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
++#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */
++#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */
++#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */
++#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */
++#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */
++#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */
++#define E1000_RCTL 0x00100 /* Rx Control - RW */
++#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */
++#define E1000_TXCW 0x00178 /* Tx Configuration Word - RW */
++#define E1000_RXCW 0x00180 /* Rx Configuration Word - RO */
++#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */
++#define E1000_EITR(_n) (0x01680 + (0x4 * (_n)))
++#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - WO */
++#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */
++#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */
++#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */
++#define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */
++#define E1000_GPIE 0x01514 /* General Purpose Interrupt Enable - RW */
++#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */
++#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */
++#define E1000_TCTL 0x00400 /* Tx Control - RW */
++#define E1000_TCTL_EXT 0x00404 /* Extended Tx Control - RW */
++#define E1000_TIPG 0x00410 /* Tx Inter-packet gap - RW */
++#define E1000_TBT 0x00448 /* Tx Burst Timer - RW */
++#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */
++#define E1000_LEDCTL 0x00E00 /* LED Control - RW */
++#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */
++#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */
++#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */
++#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
++#define E1000_PBS 0x01008 /* Packet Buffer Size */
++#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
++#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */
++#define E1000_FLASHT 0x01028 /* FLASH Timer Register */
++#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */
++#define E1000_FLSWCTL 0x01030 /* FLASH control register */
++#define E1000_FLSWDATA 0x01034 /* FLASH data register */
++#define E1000_FLSWCNT 0x01038 /* FLASH Access Counter */
++#define E1000_FLOP 0x0103C /* FLASH Opcode Register */
++#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */
++#define E1000_I2CPARAMS 0x0102C /* SFPI2C Parameters Register - RW */
++#define E1000_WDSTP 0x01040 /* Watchdog Setup - RW */
++#define E1000_SWDSTS 0x01044 /* SW Device Status - RW */
++#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */
++#define E1000_TCPTIMER 0x0104C /* TCP Timer - RW */
++#define E1000_VPDDIAG 0x01060 /* VPD Diagnostic - RO */
++#define E1000_ICR_V2 0x01500 /* Interrupt Cause - new location - RC */
++#define E1000_ICS_V2 0x01504 /* Interrupt Cause Set - new location - WO */
++#define E1000_IMS_V2 0x01508 /* Interrupt Mask Set/Read - new location - RW */
++#define E1000_IMC_V2 0x0150C /* Interrupt Mask Clear - new location - WO */
++#define E1000_IAM_V2 0x01510 /* Interrupt Ack Auto Mask - new location - RW */
++#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */
++#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */
++#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */
++#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */
++#define E1000_RDFPCQ(_n) (0x02430 + (0x4 * (_n)))
++#define E1000_PBRTH 0x02458 /* PB Rx Arbitration Threshold - RW */
++#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */
++/* Split and Replication Rx Control - RW */
++#define E1000_RDPUMB 0x025CC /* DMA Rx Descriptor uC Mailbox - RW */
++#define E1000_RDPUAD 0x025D0 /* DMA Rx Descriptor uC Addr Command - RW */
++#define E1000_RDPUWD 0x025D4 /* DMA Rx Descriptor uC Data Write - RW */
++#define E1000_RDPURD 0x025D8 /* DMA Rx Descriptor uC Data Read - RW */
++#define E1000_RDPUCTL 0x025DC /* DMA Rx Descriptor uC Control - RW */
++#define E1000_PBDIAG 0x02458 /* Packet Buffer Diagnostic - RW */
++#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
++#define E1000_RDTR 0x02820 /* Rx Delay Timer - RW */
++#define E1000_RADV 0x0282C /* Rx Interrupt Absolute Delay Timer - RW */
++/*
++ * Convenience macros
++ *
++ * Note: "_n" is the queue number of the register to be written to.
++ *
++ * Example usage:
++ * E1000_RDBAL(current_rx_queue)
++ */
++#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \
++ (0x0C000 + ((_n) * 0x40)))
++#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \
++ (0x0C004 + ((_n) * 0x40)))
++#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \
++ (0x0C008 + ((_n) * 0x40)))
++#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \
++ (0x0C00C + ((_n) * 0x40)))
++#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \
++ (0x0C010 + ((_n) * 0x40)))
++#define E1000_RXCTL(_n) ((_n) < 4 ? (0x02814 + ((_n) * 0x100)) : \
++ (0x0C014 + ((_n) * 0x40)))
++#define E1000_DCA_RXCTRL(_n) E1000_RXCTL(_n)
++#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \
++ (0x0C018 + ((_n) * 0x40)))
++#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \
++ (0x0C028 + ((_n) * 0x40)))
++#define E1000_RQDPC(_n) ((_n) < 4 ? (0x02830 + ((_n) * 0x100)) : \
++ (0x0C030 + ((_n) * 0x40)))
++#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \
++ (0x0E000 + ((_n) * 0x40)))
++#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \
++ (0x0E004 + ((_n) * 0x40)))
++#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \
++ (0x0E008 + ((_n) * 0x40)))
++#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \
++ (0x0E010 + ((_n) * 0x40)))
++#define E1000_TXCTL(_n) ((_n) < 4 ? (0x03814 + ((_n) * 0x100)) : \
++ (0x0E014 + ((_n) * 0x40)))
++#define E1000_DCA_TXCTRL(_n) E1000_TXCTL(_n)
++#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \
++ (0x0E018 + ((_n) * 0x40)))
++#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \
++ (0x0E028 + ((_n) * 0x40)))
++#define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) : \
++ (0x0E038 + ((_n) * 0x40)))
++#define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) : \
++ (0x0E03C + ((_n) * 0x40)))
++#define E1000_TARC(_n) (0x03840 + ((_n) * 0x100))
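++
++/*
++ * Illustrative use of the queue macros above (a sketch, not code quoted
++ * from this patch): Rx queue 2's tail register resolves to
++ * 0x02818 + 2 * 0x100 = 0x02A18, so the tail could be advanced with the
++ * driver's register wrapper, e.g.
++ *
++ *     E1000_WRITE_REG(hw, E1000_RDT(2), ring->next_to_use);
++ */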
++#define E1000_RSRPD 0x02C00 /* Rx Small Packet Detect - RW */
++#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */
++#define E1000_TXDMAC 0x03000 /* Tx DMA Control - RW */
++#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */
++#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4))
++#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
++ (0x054E0 + ((_i - 16) * 8)))
++#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
++ (0x054E4 + ((_i - 16) * 8)))
++#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8))
++#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4))
++#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4))
++#define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8))
++#define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8))
++#define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8))
++#define E1000_PBSLAC 0x03100 /* Packet Buffer Slave Access Control */
++#define E1000_PBSLAD(_n) (0x03110 + (0x4 * (_n))) /* Packet Buffer DWORD (_n) */
++#define E1000_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */
++#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */
++#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
++#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
++#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */
++#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */
++#define E1000_TDPUMB 0x0357C /* DMA Tx Descriptor uC Mail Box - RW */
++#define E1000_TDPUAD 0x03580 /* DMA Tx Descriptor uC Addr Command - RW */
++#define E1000_TDPUWD 0x03584 /* DMA Tx Descriptor uC Data Write - RW */
++#define E1000_TDPURD 0x03588 /* DMA Tx Descriptor uC Data Read - RW */
++#define E1000_TDPUCTL 0x0358C /* DMA Tx Descriptor uC Control - RW */
++#define E1000_DTXCTL 0x03590 /* DMA Tx Control - RW */
++#define E1000_DTXTCPFLGL 0x0359C /* DMA Tx Control flag low - RW */
++#define E1000_DTXTCPFLGH 0x035A0 /* DMA Tx Control flag high - RW */
++#define E1000_DTXMXSZRQ 0x03540 /* DMA Tx Max Total Allow Size Requests - RW */
++#define E1000_TIDV 0x03820 /* Tx Interrupt Delay Value - RW */
++#define E1000_TADV 0x0382C /* Tx Interrupt Absolute Delay Val - RW */
++#define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */
++#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */
++#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
++#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */
++#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */
++#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */
++#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */
++#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */
++#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */
++#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */
++#define E1000_COLC 0x04028 /* Collision Count - R/clr */
++#define E1000_DC 0x04030 /* Defer Count - R/clr */
++#define E1000_TNCRS 0x04034 /* Tx-No CRS - R/clr */
++#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */
++#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */
++#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */
++#define E1000_XONRXC 0x04048 /* XON Rx Count - R/clr */
++#define E1000_XONTXC 0x0404C /* XON Tx Count - R/clr */
++#define E1000_XOFFRXC 0x04050 /* XOFF Rx Count - R/clr */
++#define E1000_XOFFTXC 0x04054 /* XOFF Tx Count - R/clr */
++#define E1000_FCRUC 0x04058 /* Flow Control Rx Unsupported Count - R/clr */
++#define E1000_PRC64 0x0405C /* Packets Rx (64 bytes) - R/clr */
++#define E1000_PRC127 0x04060 /* Packets Rx (65-127 bytes) - R/clr */
++#define E1000_PRC255 0x04064 /* Packets Rx (128-255 bytes) - R/clr */
++#define E1000_PRC511 0x04068 /* Packets Rx (256-511 bytes) - R/clr */
++#define E1000_PRC1023 0x0406C /* Packets Rx (512-1023 bytes) - R/clr */
++#define E1000_PRC1522 0x04070 /* Packets Rx (1024-1522 bytes) - R/clr */
++#define E1000_GPRC 0x04074 /* Good Packets Rx Count - R/clr */
++#define E1000_BPRC 0x04078 /* Broadcast Packets Rx Count - R/clr */
++#define E1000_MPRC 0x0407C /* Multicast Packets Rx Count - R/clr */
++#define E1000_GPTC 0x04080 /* Good Packets Tx Count - R/clr */
++#define E1000_GORCL 0x04088 /* Good Octets Rx Count Low - R/clr */
++#define E1000_GORCH 0x0408C /* Good Octets Rx Count High - R/clr */
++#define E1000_GOTCL 0x04090 /* Good Octets Tx Count Low - R/clr */
++#define E1000_GOTCH 0x04094 /* Good Octets Tx Count High - R/clr */
++#define E1000_RNBC 0x040A0 /* Rx No Buffers Count - R/clr */
++#define E1000_RUC 0x040A4 /* Rx Undersize Count - R/clr */
++#define E1000_RFC 0x040A8 /* Rx Fragment Count - R/clr */
++#define E1000_ROC 0x040AC /* Rx Oversize Count - R/clr */
++#define E1000_RJC 0x040B0 /* Rx Jabber Count - R/clr */
++#define E1000_MGTPRC 0x040B4 /* Management Packets Rx Count - R/clr */
++#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */
++#define E1000_MGTPTC 0x040BC /* Management Packets Tx Count - R/clr */
++#define E1000_TORL 0x040C0 /* Total Octets Rx Low - R/clr */
++#define E1000_TORH 0x040C4 /* Total Octets Rx High - R/clr */
++#define E1000_TOTL 0x040C8 /* Total Octets Tx Low - R/clr */
++#define E1000_TOTH 0x040CC /* Total Octets Tx High - R/clr */
++#define E1000_TPR 0x040D0 /* Total Packets Rx - R/clr */
++#define E1000_TPT 0x040D4 /* Total Packets Tx - R/clr */
++#define E1000_PTC64 0x040D8 /* Packets Tx (64 bytes) - R/clr */
++#define E1000_PTC127 0x040DC /* Packets Tx (65-127 bytes) - R/clr */
++#define E1000_PTC255 0x040E0 /* Packets Tx (128-255 bytes) - R/clr */
++#define E1000_PTC511 0x040E4 /* Packets Tx (256-511 bytes) - R/clr */
++#define E1000_PTC1023 0x040E8 /* Packets Tx (512-1023 bytes) - R/clr */
++#define E1000_PTC1522 0x040EC /* Packets Tx (1024-1522 bytes) - R/clr */
++#define E1000_MPTC 0x040F0 /* Multicast Packets Tx Count - R/clr */
++#define E1000_BPTC 0x040F4 /* Broadcast Packets Tx Count - R/clr */
++#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */
++#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context Tx Fail - R/clr */
++#define E1000_IAC 0x04100 /* Interrupt Assertion Count */
++#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Pkt Timer Expire Count */
++#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Abs Timer Expire Count */
++#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Pkt Timer Expire Count */
++#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Abs Timer Expire Count */
++#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */
++#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Min Thresh Count */
++#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Desc Min Thresh Count */
++#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */
++
++#define E1000_LSECTXUT 0x04300 /* LinkSec Tx Untagged Packet Count - OutPktsUntagged */
++#define E1000_LSECTXPKTE 0x04304 /* LinkSec Encrypted Tx Packets Count - OutPktsEncrypted */
++#define E1000_LSECTXPKTP 0x04308 /* LinkSec Protected Tx Packet Count - OutPktsProtected */
++#define E1000_LSECTXOCTE 0x0430C /* LinkSec Encrypted Tx Octets Count - OutOctetsEncrypted */
++#define E1000_LSECTXOCTP 0x04310 /* LinkSec Protected Tx Octets Count - OutOctetsProtected */
++#define E1000_LSECRXUT 0x04314 /* LinkSec Untagged non-Strict Rx Packet Count - InPktsUntagged/InPktsNoTag */
++#define E1000_LSECRXOCTD 0x0431C /* LinkSec Rx Octets Decrypted Count - InOctetsDecrypted */
++#define E1000_LSECRXOCTV 0x04320 /* LinkSec Rx Octets Validated - InOctetsValidated */
++#define E1000_LSECRXBAD 0x04324 /* LinkSec Rx Bad Tag - InPktsBadTag */
++#define E1000_LSECRXNOSCI 0x04328 /* LinkSec Rx Packet No SCI Count - InPktsNoSci */
++#define E1000_LSECRXUNSCI 0x0432C /* LinkSec Rx Packet Unknown SCI Count - InPktsUnknownSci */
++#define E1000_LSECRXUNCH 0x04330 /* LinkSec Rx Unchecked Packets Count - InPktsUnchecked */
++#define E1000_LSECRXDELAY 0x04340 /* LinkSec Rx Delayed Packet Count - InPktsDelayed */
++#define E1000_LSECRXLATE 0x04350 /* LinkSec Rx Late Packets Count - InPktsLate */
++#define E1000_LSECRXOK(_n) (0x04360 + (0x04 * (_n))) /* LinkSec Rx Packet OK Count - InPktsOk */
++#define E1000_LSECRXINV(_n) (0x04380 + (0x04 * (_n))) /* LinkSec Rx Invalid Count - InPktsInvalid */
++#define E1000_LSECRXNV(_n) (0x043A0 + (0x04 * (_n))) /* LinkSec Rx Not Valid Count - InPktsNotValid */
++#define E1000_LSECRXUNSA 0x043C0 /* LinkSec Rx Unused SA Count - InPktsUnusedSa */
++#define E1000_LSECRXNUSA 0x043D0 /* LinkSec Rx Not Using SA Count - InPktsNotUsingSa */
++#define E1000_LSECTXCAP 0x0B000 /* LinkSec Tx Capabilities Register - RO */
++#define E1000_LSECRXCAP 0x0B300 /* LinkSec Rx Capabilities Register - RO */
++#define E1000_LSECTXCTRL 0x0B004 /* LinkSec Tx Control - RW */
++#define E1000_LSECRXCTRL 0x0B304 /* LinkSec Rx Control - RW */
++#define E1000_LSECTXSCL 0x0B008 /* LinkSec Tx SCI Low - RW */
++#define E1000_LSECTXSCH 0x0B00C /* LinkSec Tx SCI High - RW */
++#define E1000_LSECTXSA 0x0B010 /* LinkSec Tx SA0 - RW */
++#define E1000_LSECTXPN0 0x0B018 /* LinkSec Tx SA PN 0 - RW */
++#define E1000_LSECTXPN1 0x0B01C /* LinkSec Tx SA PN 1 - RW */
++#define E1000_LSECRXSCL 0x0B3D0 /* LinkSec Rx SCI Low - RW */
++#define E1000_LSECRXSCH 0x0B3E0 /* LinkSec Rx SCI High - RW */
++#define E1000_LSECTXKEY0(_n) (0x0B020 + (0x04 * (_n))) /* LinkSec Tx 128-bit Key 0 - WO */
++#define E1000_LSECTXKEY1(_n) (0x0B030 + (0x04 * (_n))) /* LinkSec Tx 128-bit Key 1 - WO */
++#define E1000_LSECRXSA(_n) (0x0B310 + (0x04 * (_n))) /* LinkSec Rx SAs - RW */
++#define E1000_LSECRXPN(_n) (0x0B330 + (0x04 * (_n))) /* LinkSec Rx SAs - RW */
++/*
++ * LinkSec Rx Keys - where _n is the SA no. and _m the 4 dwords of the 128 bit
++ * key - RW.
++ */
++#define E1000_LSECRXKEY(_n, _m) (0x0B350 + (0x10 * (_n)) + (0x04 * (_m)))
++
++#define E1000_SSVPC 0x041A0 /* Switch Security Violation Packet Count */
++#define E1000_IPSCTRL 0xB430 /* IpSec Control Register */
++#define E1000_IPSRXCMD 0x0B408 /* IPSec Rx Command Register - RW */
++#define E1000_IPSRXIDX 0x0B400 /* IPSec Rx Index - RW */
++#define E1000_IPSRXIPADDR(_n) (0x0B420 + (0x04 * (_n))) /* IPSec Rx IPv4/v6 Address - RW */
++#define E1000_IPSRXKEY(_n) (0x0B410 + (0x04 * (_n))) /* IPSec Rx 128-bit Key - RW */
++#define E1000_IPSRXSALT 0x0B404 /* IPSec Rx Salt - RW */
++#define E1000_IPSRXSPI 0x0B40C /* IPSec Rx SPI - RW */
++#define E1000_IPSTXKEY(_n) (0x0B460 + (0x04 * (_n))) /* IPSec Tx 128-bit Key - RW */
++#define E1000_IPSTXSALT 0x0B454 /* IPSec Tx Salt - RW */
++#define E1000_IPSTXIDX 0x0B450 /* IPSec Tx SA IDX - RW */
++#define E1000_PCS_CFG0 0x04200 /* PCS Configuration 0 - RW */
++#define E1000_PCS_LCTL 0x04208 /* PCS Link Control - RW */
++#define E1000_PCS_LSTAT 0x0420C /* PCS Link Status - RO */
++#define E1000_CBTMPC 0x0402C /* Circuit Breaker Tx Packet Count */
++#define E1000_HTDPMC 0x0403C /* Host Transmit Discarded Packets */
++#define E1000_CBRDPC 0x04044 /* Circuit Breaker Rx Dropped Count */
++#define E1000_CBRMPC 0x040FC /* Circuit Breaker Rx Packet Count */
++#define E1000_RPTHC 0x04104 /* Rx Packets To Host */
++#define E1000_HGPTC 0x04118 /* Host Good Packets Tx Count */
++#define E1000_HTCBDPC 0x04124 /* Host Tx Circuit Breaker Dropped Count */
++#define E1000_HGORCL 0x04128 /* Host Good Octets Received Count Low */
++#define E1000_HGORCH 0x0412C /* Host Good Octets Received Count High */
++#define E1000_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */
++#define E1000_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */
++#define E1000_LENERRS 0x04138 /* Length Errors Count */
++#define E1000_SCVPC 0x04228 /* SerDes/SGMII Code Violation Pkt Count */
++#define E1000_HRMPC 0x0A018 /* Header Redirection Missed Packet Count */
++#define E1000_PCS_ANADV 0x04218 /* AN advertisement - RW */
++#define E1000_PCS_LPAB 0x0421C /* Link Partner Ability - RW */
++#define E1000_PCS_NPTX 0x04220 /* AN Next Page Transmit - RW */
++#define E1000_PCS_LPABNP 0x04224 /* Link Partner Ability Next Page - RW */
++#define E1000_1GSTAT_RCV 0x04228 /* 1GSTAT Code Violation Packet Count - RW */
++#define E1000_RXCSUM 0x05000 /* Rx Checksum Control - RW */
++#define E1000_RLPML 0x05004 /* Rx Long Packet Max Length */
++#define E1000_RFCTL 0x05008 /* Receive Filter Control*/
++#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */
++#define E1000_RA 0x05400 /* Receive Address - RW Array */
++#define E1000_RA2 0x054E0 /* 2nd half of receive address array - RW Array */
++#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */
++#define E1000_VT_CTL 0x0581C /* VMDq Control - RW */
++#define E1000_VFQA0 0x0B000 /* VLAN Filter Queue Array 0 - RW Array */
++#define E1000_VFQA1 0x0B200 /* VLAN Filter Queue Array 1 - RW Array */
++#define E1000_WUC 0x05800 /* Wakeup Control - RW */
++#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */
++#define E1000_WUS 0x05810 /* Wakeup Status - RO */
++#define E1000_MANC 0x05820 /* Management Control - RW */
++#define E1000_IPAV 0x05838 /* IP Address Valid - RW */
++#define E1000_IP4AT 0x05840 /* IPv4 Address Table - RW Array */
++#define E1000_IP6AT 0x05880 /* IPv6 Address Table - RW Array */
++#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */
++#define E1000_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */
++#define E1000_PBACL 0x05B68 /* MSIx PBA Clear - Read/Write 1's to clear */
++#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */
++#define E1000_HOST_IF 0x08800 /* Host Interface */
++#define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */
++#define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */
++#define E1000_FHFT(_n) (0x09000 + ((_n) * 0x100)) /* Flexible Host Filter Table */
++#define E1000_FHFT_EXT(_n) (0x09A00 + ((_n) * 0x100)) /* Ext Flexible Host Filter Table */
++
++
++#define E1000_KMRNCTRLSTA 0x00034 /* MAC-PHY interface - RW */
++#define E1000_MDPHYA 0x0003C /* PHY address - RW */
++#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */
++#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */
++#define E1000_CCMCTL 0x05B48 /* CCM Control Register */
++#define E1000_GIOCTL 0x05B44 /* GIO Analog Control Register */
++#define E1000_SCCTL 0x05B4C /* PCIc PLL Configuration Register */
++#define E1000_GCR 0x05B00 /* PCI-Ex Control */
++#define E1000_GCR2 0x05B64 /* PCI-Ex Control #2 */
++#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */
++#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */
++#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */
++#define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */
++#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */
++#define E1000_SWSM 0x05B50 /* SW Semaphore */
++#define E1000_FWSM 0x05B54 /* FW Semaphore */
++#define E1000_SWSM2 0x05B58 /* Driver-only SW semaphore (not used by BOOT agents) */
++#define E1000_DCA_ID 0x05B70 /* DCA Requester ID Information - RO */
++#define E1000_DCA_CTRL 0x05B74 /* DCA Control - RW */
++#define E1000_FFLT_DBG 0x05F04 /* Debug Register */
++#define E1000_HICR 0x08F00 /* Host Interface Control */
++
++/* RSS registers */
++#define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */
++#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */
++#define E1000_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */
++#define E1000_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate Interrupt Ext*/
++#define E1000_IMIRVP 0x05AC0 /* Immediate Interrupt Rx VLAN Priority - RW */
++#define E1000_MSIXBM(_i) (0x01600 + ((_i) * 4)) /* MSI-X Allocation Register
++ * (_i) - RW */
++#define E1000_MSIXTADD(_i) (0x0C000 + ((_i) * 0x10)) /* MSI-X Table entry addr
++ * low reg - RW */
++#define E1000_MSIXTUADD(_i) (0x0C004 + ((_i) * 0x10)) /* MSI-X Table entry addr
++ * upper reg - RW */
++#define E1000_MSIXTMSG(_i) (0x0C008 + ((_i) * 0x10)) /* MSI-X Table entry
++ * message reg - RW */
++#define E1000_MSIXVCTRL(_i) (0x0C00C + ((_i) * 0x10)) /* MSI-X Table entry
++ * vector ctrl reg - RW */
++#define E1000_MSIXPBA 0x0E000 /* MSI-X Pending bit array */
++#define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) /* Redirection Table - RW */
++#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW */
++#define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */
++#define E1000_RSSIR 0x05868 /* RSS Interrupt Request */
++/* VT Registers */
++#define E1000_SWPBS 0x03004 /* Switch Packet Buffer Size - RW */
++#define E1000_MBVFICR 0x00C80 /* Mailbox VF Cause - RWC */
++#define E1000_MBVFIMR 0x00C84 /* Mailbox VF int Mask - RW */
++#define E1000_VFLRE 0x00C88 /* VF Register Events - RWC */
++#define E1000_VFRE 0x00C8C /* VF Receive Enables */
++#define E1000_VFTE 0x00C90 /* VF Transmit Enables */
++#define E1000_QDE 0x02408 /* Queue Drop Enable - RW */
++#define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */
++#define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */
++#define E1000_UTA 0x0A000 /* Unicast Table Array - RW */
++#define E1000_IOVTCL 0x05BBC /* IOV Control Register */
++#define E1000_VMRCTL 0x05D80 /* Virtual Mirror Rule Control */
++/* These act per VF so an array friendly macro is used */
++#define E1000_V2PMAILBOX(_n) (0x00C40 + (4 * (_n)))
++#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n)))
++#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n)))
++#define E1000_VFVMBMEM(_n) (0x00800 + (_n))
++#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n)))
++#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN Virtual Machine
++ * Filter - RW */
++/* Time Sync */
++#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */
++#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */
++#define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */
++#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */
++#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */
++#define E1000_RXSATRL 0x0B62C /* Rx timestamp attribute low - RO */
++#define E1000_RXSATRH 0x0B630 /* Rx timestamp attribute high - RO */
++#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */
++#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */
++#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */
++#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */
++#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */
++
++/* Filtering Registers */
++#define E1000_SAQF(_n) (0x05980 + (4 * (_n))) /* Source Address Queue Fltr */
++#define E1000_DAQF(_n) (0x059A0 + (4 * (_n))) /* Dest Address Queue Fltr */
++#define E1000_SPQF(_n) (0x059C0 + (4 * (_n))) /* Source Port Queue Fltr */
++#define E1000_FTQF(_n) (0x059E0 + (4 * (_n))) /* 5-tuple Queue Fltr */
++#define E1000_TTQF(_n) (0x059E0 + (4 * (_n))) /* 2-tuple Queue Fltr */
++#define E1000_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */
++#define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */
++
++#define E1000_RTTDCS 0x3600 /* Reedtown Tx Desc plane control and status */
++#define E1000_RTTPCS 0x3474 /* Reedtown Tx Packet Plane control and status */
++#define E1000_RTRPCS 0x2474 /* Rx packet plane control and status */
++#define E1000_RTRUP2TC 0x05AC4 /* Rx User Priority to Traffic Class */
++#define E1000_RTTUP2TC 0x0418 /* Transmit User Priority to Traffic Class */
++#define E1000_RTTDTCRC(_n) (0x3610 + ((_n) * 4)) /* Tx Desc plane TC Rate-scheduler config */
++#define E1000_RTTPTCRC(_n) (0x3480 + ((_n) * 4)) /* Tx Packet plane TC Rate-Scheduler Config */
++#define E1000_RTRPTCRC(_n) (0x2480 + ((_n) * 4)) /* Rx Packet plane TC Rate-Scheduler Config */
++#define E1000_RTTDTCRS(_n) (0x3630 + ((_n) * 4)) /* Tx Desc Plane TC Rate-Scheduler Status */
++#define E1000_RTTDTCRM(_n) (0x3650 + ((_n) * 4)) /* Tx Desc Plane TC Rate-Scheduler MMW */
++#define E1000_RTTPTCRS(_n) (0x34A0 + ((_n) * 4)) /* Tx Packet plane TC Rate-Scheduler Status */
++#define E1000_RTTPTCRM(_n) (0x34C0 + ((_n) * 4)) /* Tx Packet plane TC Rate-scheduler MMW */
++#define E1000_RTRPTCRS(_n) (0x24A0 + ((_n) * 4)) /* Rx Packet plane TC Rate-Scheduler Status */
++#define E1000_RTRPTCRM(_n) (0x24C0 + ((_n) * 4)) /* Rx Packet plane TC Rate-Scheduler MMW */
++#define E1000_RTTDVMRM(_n) (0x3670 + ((_n) * 4)) /* Tx Desc plane VM Rate-Scheduler MMW*/
++#define E1000_RTTBCNRM(_n) (0x3690 + ((_n) * 4)) /* Tx BCN Rate-Scheduler MMW */
++#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select */
++#define E1000_RTTDVMRC 0x3608 /* Tx Desc Plane VM Rate-Scheduler Config */
++#define E1000_RTTDVMRS 0x360C /* Tx Desc Plane VM Rate-Scheduler Status */
++#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config */
++#define E1000_RTTBCNRS 0x36B4 /* Tx BCN Rate-Scheduler Status */
++#define E1000_RTTBCNCR 0xB200 /* Tx BCN Control Register */
++#define E1000_RTTBCNTG 0x35A4 /* Tx BCN Tagging */
++#define E1000_RTTBCNCP 0xB208 /* Tx BCN Congestion point */
++#define E1000_RTRBCNCR 0xB20C /* Rx BCN Control Register */
++#define E1000_RTTBCNRD 0x36B8 /* Tx BCN Rate Drift */
++#define E1000_PFCTOP 0x1080 /* Priority Flow Control Type and Opcode */
++#define E1000_RTTBCNIDX 0xB204 /* Tx BCN Congestion Point */
++#define E1000_RTTBCNACH 0x0B214 /* Tx BCN Control High */
++#define E1000_RTTBCNACL 0x0B210 /* Tx BCN Control Low */
++
++#endif
+Index: linux-2.6.22/drivers/net/igb/igb.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.22/drivers/net/igb/igb.h 2009-12-18 12:39:22.000000000 -0500
+@@ -0,0 +1,444 @@
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2009 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ You should have received a copy of the GNU General Public License along with
++ this program; if not, write to the Free Software Foundation, Inc.,
++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
++
++
++/* Linux PRO/1000 Ethernet Driver main header file */
++
++#ifndef _IGB_H_
++#define _IGB_H_
++
++#include <linux/pci.h>
++#include <linux/netdevice.h>
++#include <linux/vmalloc.h>
++
++#ifdef SIOCETHTOOL
++#include <linux/ethtool.h>
++#endif
++
++#ifdef SIOCSHWTSTAMP
++#include <linux/clocksource.h>
++#include <linux/timecompare.h>
++#include <linux/net_tstamp.h>
++#endif
++struct igb_adapter;
++
++#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
++#define IGB_DCA
++#endif
++#ifdef IGB_DCA
++#include <linux/dca.h>
++#endif
++
++
++#ifdef IGB_LRO
++#undef IGB_LRO
++#ifdef NETIF_F_LRO
++#if defined(CONFIG_INET_LRO) || defined(CONFIG_INET_LRO_MODULE)
++#include <linux/inet_lro.h>
++#define MAX_LRO_DESCRIPTORS 8
++#define IGB_LRO
++#endif
++#endif
++#endif /* IGB_LRO */
++
++#include "kcompat.h"
++
++#include "e1000_api.h"
++#include "e1000_82575.h"
++
++#define IGB_ERR(args...) printk(KERN_ERR "igb: " args)
++
++#define PFX "igb: "
++#define DPRINTK(nlevel, klevel, fmt, args...) \
++ (void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
++ printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
++ __FUNCTION__ , ## args))
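++
++/*
++ * Usage sketch for the macro above: DPRINTK(PROBE, ERR, "allocation failed\n")
++ * only prints when NETIF_MSG_PROBE is set in adapter->msg_enable, and the
++ * message goes out at KERN_ERR prefixed with "igb: <netdev name>: <function>: ".
++ * It relies on a local "adapter" pointer being in scope at the call site.
++ */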
++
++/* Interrupt defines */
++#define IGB_START_ITR 648 /* ~6000 ints/sec */
++
++/* Interrupt modes, as used by the IntMode parameter */
++#define IGB_INT_MODE_LEGACY 0
++#define IGB_INT_MODE_MSI 1
++#define IGB_INT_MODE_MSIX 2
++
++#define HW_PERF
++/* TX/RX descriptor defines */
++#define IGB_DEFAULT_TXD 256
++#define IGB_MIN_TXD 80
++#define IGB_MAX_TXD 4096
++
++#define IGB_DEFAULT_RXD 256
++#define IGB_MIN_RXD 80
++#define IGB_MAX_RXD 4096
++
++#define IGB_MIN_ITR_USECS 10 /* 100k irq/sec */
++#define IGB_MAX_ITR_USECS 8191 /* 120 irq/sec */
++
++#define NON_Q_VECTORS 1
++#define MAX_Q_VECTORS 8
++
++/* Transmit and receive queues */
++#define IGB_MAX_RX_QUEUES (adapter->vfs_allocated_count ? 2 : \
++ (hw->mac.type > e1000_82575 ? 8 : 4))
++#define IGB_ABS_MAX_TX_QUEUES 8
++#define IGB_MAX_TX_QUEUES IGB_MAX_RX_QUEUES
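++
++/*
++ * Worked example of the macro above: an 82576 with no VFs allocated gets
++ * 8 Rx queues, an 82575 gets 4, and any configuration with VFs in use is
++ * limited to 2.
++ */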
++
++#define IGB_MAX_VF_MC_ENTRIES 30
++#define IGB_MAX_VF_FUNCTIONS 8
++#define IGB_MAX_VFTA_ENTRIES 128
++#define IGB_MAX_UTA_ENTRIES 128
++#define MAX_EMULATION_MAC_ADDRS 16
++#define OUI_LEN 3
++
++struct vf_data_storage {
++ unsigned char vf_mac_addresses[ETH_ALEN];
++ u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES];
++ u16 num_vf_mc_hashes;
++ u16 default_vf_vlan_id;
++ u16 vlans_enabled;
++ unsigned char em_mac_addresses[MAX_EMULATION_MAC_ADDRS * ETH_ALEN];
++ u32 uta_table_copy[IGB_MAX_UTA_ENTRIES];
++ u32 flags;
++ unsigned long last_nack;
++};
++
++#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */
++#define IGB_VF_FLAG_UNI_PROMISC 0x00000002 /* VF has unicast promisc */
++#define IGB_VF_FLAG_MULTI_PROMISC 0x00000004 /* VF has multicast promisc */
++
++/* RX descriptor control thresholds.
++ * PTHRESH - MAC will consider prefetch if it has fewer than this number of
++ * descriptors available in its onboard memory.
++ * Setting this to 0 disables RX descriptor prefetch.
++ * HTHRESH - MAC will only prefetch if there are at least this many descriptors
++ * available in host memory.
++ * If PTHRESH is 0, this should also be 0.
++ * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back
++ * descriptors until either it has this many to write back, or the
++ * ITR timer expires.
++ */
++#define IGB_RX_PTHRESH (hw->mac.type <= e1000_82576 ? 16 : 8)
++#define IGB_RX_HTHRESH 8
++#define IGB_RX_WTHRESH 1
++#define IGB_TX_PTHRESH 8
++#define IGB_TX_HTHRESH 1
++#define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \
++ adapter->msix_entries) ? 0 : 16)
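++
++/*
++ * Sketch of how these thresholds are typically combined (the field
++ * positions and the E1000_RXDCTL_QUEUE_ENABLE name are assumptions about
++ * the usual RXDCTL layout, not a quote of this driver's code): PTHRESH
++ * occupies the low byte, HTHRESH starts at bit 8 and WTHRESH at bit 16
++ * of the per-queue descriptor control register, e.g.
++ *
++ *     rxdctl = IGB_RX_PTHRESH | (IGB_RX_HTHRESH << 8) |
++ *              (IGB_RX_WTHRESH << 16) | E1000_RXDCTL_QUEUE_ENABLE;
++ *     E1000_WRITE_REG(hw, E1000_RXDCTL(ring->reg_idx), rxdctl);
++ */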
++
++/* this is the size past which hardware will drop packets when setting LPE=0 */
++#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
++
++/* Supported Rx Buffer Sizes */
++#define IGB_RXBUFFER_128 128 /* Used for packet split */
++#define IGB_RXBUFFER_256 256 /* Used for packet split */
++#define IGB_RXBUFFER_512 512
++#define IGB_RXBUFFER_1024 1024
++#define IGB_RXBUFFER_2048 2048
++#define IGB_RXBUFFER_4096 4096
++#define IGB_RXBUFFER_8192 8192
++#define IGB_RXBUFFER_16384 16384
++
++/* Packet Buffer allocations */
++#define IGB_PBA_BYTES_SHIFT 0xA
++#define IGB_TX_HEAD_ADDR_SHIFT 7
++#define IGB_PBA_TX_MASK 0xFFFF0000
++
++#define IGB_FC_PAUSE_TIME 0x0680 /* 858 usec */
++
++/* How many Tx Descriptors do we need to call netif_wake_queue ? */
++#define IGB_TX_QUEUE_WAKE 32
++/* How many Rx Buffers do we bundle into one write to the hardware ? */
++#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */
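++
++/*
++ * Bundling sketch (illustrative only): the Rx clean loop counts consumed
++ * buffers and only refills and bumps the tail once a full bundle has
++ * accumulated, roughly:
++ *
++ *     if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
++ *             igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
++ *             cleaned_count = 0;
++ *     }
++ */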
++
++#define AUTO_ALL_MODES 0
++#define IGB_EEPROM_APME 0x0400
++
++#ifndef IGB_MASTER_SLAVE
++/* Switch to override PHY master/slave setting */
++#define IGB_MASTER_SLAVE e1000_ms_hw_default
++#endif
++
++#define IGB_MNG_VLAN_NONE -1
++
++/* wrapper around a pointer to a socket buffer,
++ * so a DMA handle can be stored along with the buffer */
++struct igb_buffer {
++ struct sk_buff *skb;
++ dma_addr_t dma;
++ dma_addr_t page_dma;
++ union {
++ /* TX */
++ struct {
++ unsigned long time_stamp;
++ u16 length;
++ u16 next_to_watch;
++ };
++
++#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
++ /* RX */
++ struct {
++ unsigned long page_offset;
++ struct page *page;
++ };
++#endif
++ };
++};
++
++struct igb_queue_stats {
++ u64 packets;
++ u64 bytes;
++};
++
++struct igb_q_vector {
++ struct igb_adapter *adapter; /* backlink */
++ struct igb_ring *rx_ring;
++ struct igb_ring *tx_ring;
++ struct napi_struct napi;
++
++ u32 eims_value;
++ u16 cpu;
++
++ u16 itr_val;
++ u8 set_itr;
++ u8 itr_shift;
++ void __iomem *itr_register;
++
++ char name[IFNAMSIZ + 9];
++#ifndef HAVE_NETDEV_NAPI_LIST
++ struct net_device poll_dev;
++#endif
++};
++
++struct igb_ring {
++ struct igb_q_vector *q_vector; /* backlink to q_vector */
++ struct pci_dev *pdev; /* pci device for dma mapping */
++ dma_addr_t dma; /* phys address of the ring */
++ void *desc; /* descriptor ring memory */
++ unsigned int size; /* length of desc. ring in bytes */
++ u16 count; /* number of desc. in the ring */
++ u16 next_to_use;
++ u16 next_to_clean;
++ u8 queue_index;
++ u8 reg_idx;
++ void __iomem *head;
++ void __iomem *tail;
++ struct igb_buffer *buffer_info; /* array of buffer info structs */
++
++ unsigned int total_bytes;
++ unsigned int total_packets;
++
++ struct igb_queue_stats stats;
++
++ union {
++ /* TX */
++ struct {
++ unsigned int restart_queue;
++ u32 ctx_idx;
++ bool detect_tx_hung;
++ };
++ /* RX */
++ struct {
++ u64 hw_csum_err;
++ u64 hw_csum_good;
++ u32 rx_buffer_len;
++ u16 rx_ps_hdr_size;
++ bool rx_csum;
++#ifdef IGB_LRO
++ struct net_lro_mgr lro_mgr;
++ bool lro_used;
++#endif
++ };
++ };
++};
++
++
++#define IGB_ADVTXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)
++
++#define IGB_DESC_UNUSED(R) \
++ ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
++ (R)->next_to_clean - (R)->next_to_use - 1)
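++
++/*
++ * Worked example: with count = 256, next_to_use = 10 and next_to_clean = 5,
++ * IGB_DESC_UNUSED evaluates to 256 + 5 - 10 - 1 = 250; one slot is always
++ * left unused so a completely full ring can be told apart from an empty one.
++ */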
++
++#define E1000_RX_DESC_ADV(R, i) \
++ (&(((union e1000_adv_rx_desc *)((R).desc))[i]))
++#define E1000_TX_DESC_ADV(R, i) \
++ (&(((union e1000_adv_tx_desc *)((R).desc))[i]))
++#define E1000_TX_CTXTDESC_ADV(R, i) \
++ (&(((struct e1000_adv_tx_context_desc *)((R).desc))[i]))
++#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
++#define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc)
++#define E1000_RX_DESC(R, i) E1000_GET_DESC(R, i, e1000_rx_desc)
++
++#define MAX_MSIX_COUNT 10
++/* board specific private data structure */
++
++struct igb_adapter {
++ struct timer_list watchdog_timer;
++ struct timer_list phy_info_timer;
++ struct vlan_group *vlgrp;
++ u16 mng_vlan_id;
++ u32 bd_number;
++ u32 wol;
++ u32 en_mng_pt;
++ u16 link_speed;
++ u16 link_duplex;
++
++ unsigned int total_tx_bytes;
++ unsigned int total_tx_packets;
++ unsigned int total_rx_bytes;
++ unsigned int total_rx_packets;
++ /* Interrupt Throttle Rate */
++ u32 itr;
++ u32 itr_setting;
++ u16 tx_itr;
++ u16 rx_itr;
++
++ struct work_struct reset_task;
++ struct work_struct watchdog_task;
++ bool fc_autoneg;
++ u8 tx_timeout_factor;
++#ifdef ETHTOOL_PHYS_ID
++ struct timer_list blink_timer;
++ unsigned long led_status;
++#endif
++
++ /* TX */
++ struct igb_ring *tx_ring; /* One per active queue */
++ unsigned int restart_queue;
++ unsigned long tx_queue_len;
++ u32 tx_timeout_count;
++
++ /* RX */
++ struct igb_ring *rx_ring; /* One per active queue */
++ int num_tx_queues;
++ int num_rx_queues;
++
++ u64 hw_csum_err;
++ u64 hw_csum_good;
++ u32 alloc_rx_buff_failed;
++ u32 max_frame_size;
++ u32 min_frame_size;
++
++ /* OS defined structs */
++ struct net_device *netdev;
++ struct pci_dev *pdev;
++ struct net_device_stats net_stats;
++#ifdef SIOCSHWTSTAMP
++ struct cyclecounter cycles;
++ struct timecounter clock;
++ struct timecompare compare;
++ struct hwtstamp_config hwtstamp_config;
++#endif
++
++ /* structs defined in e1000_hw.h */
++ struct e1000_hw hw;
++ struct e1000_hw_stats stats;
++ struct e1000_phy_info phy_info;
++ struct e1000_phy_stats phy_stats;
++
++#ifdef ETHTOOL_TEST
++ u32 test_icr;
++ struct igb_ring test_tx_ring;
++ struct igb_ring test_rx_ring;
++#endif
++
++
++ int msg_enable;
++ struct msix_entry *msix_entries;
++ int int_mode;
++ u32 eims_enable_mask;
++ u32 eims_other;
++ u32 lli_port;
++ u32 lli_size;
++ unsigned long state;
++ unsigned int flags;
++ u32 eeprom_wol;
++ u32 *config_space;
++#ifdef HAVE_TX_MQ
++ struct igb_ring *multi_tx_table[IGB_ABS_MAX_TX_QUEUES];
++#endif /* HAVE_TX_MQ */
++#ifdef IGB_LRO
++ unsigned int lro_max_aggr;
++ unsigned int lro_aggregated;
++ unsigned int lro_flushed;
++ unsigned int lro_no_desc;
++#endif
++ u16 tx_ring_count;
++ u16 rx_ring_count;
++ unsigned int vfs_allocated_count;
++ struct vf_data_storage *vf_data;
++ u32 RSS_queues;
++ u32 VMDQ_queues;
++ unsigned int num_q_vectors;
++ struct igb_q_vector *q_vector[MAX_Q_VECTORS];
++};
++
++
++#define IGB_FLAG_HAS_MSI (1 << 0)
++#define IGB_FLAG_MSI_ENABLE (1 << 1)
++#define IGB_FLAG_DCA_ENABLED (1 << 3)
++#define IGB_FLAG_LLI_PUSH (1 << 4)
++#define IGB_FLAG_IN_NETPOLL (1 << 5)
++#define IGB_FLAG_QUAD_PORT_A (1 << 6)
++#define IGB_FLAG_QUEUE_PAIRS (1 << 7)
++
++#define IGB_82576_TSYNC_SHIFT 19
++enum e1000_state_t {
++ __IGB_TESTING,
++ __IGB_RESETTING,
++ __IGB_DOWN
++};
++
++extern char igb_driver_name[];
++extern char igb_driver_version[];
++
++extern int igb_up(struct igb_adapter *);
++extern void igb_down(struct igb_adapter *);
++extern void igb_reinit_locked(struct igb_adapter *);
++extern void igb_reset(struct igb_adapter *);
++extern int igb_set_spd_dplx(struct igb_adapter *, u16);
++extern int igb_setup_tx_resources(struct igb_ring *);
++extern int igb_setup_rx_resources(struct igb_ring *);
++extern void igb_free_tx_resources(struct igb_ring *);
++extern void igb_free_rx_resources(struct igb_ring *);
++extern void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
++extern void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
++extern void igb_setup_tctl(struct igb_adapter *);
++extern void igb_setup_rctl(struct igb_adapter *);
++extern int igb_alloc_rx_buffers_adv(struct igb_ring *, int);
++extern void igb_update_stats(struct igb_adapter *);
++extern void igb_set_ethtool_ops(struct net_device *);
++extern void igb_check_options(struct igb_adapter *);
++#ifdef ETHTOOL_OPS_COMPAT
++extern int ethtool_ioctl(struct ifreq *);
++#endif
++extern int igb_set_vf_mac(struct igb_adapter *adapter,
++ int vf, unsigned char *mac_addr);
++extern s32 igb_vlvf_set(struct igb_adapter *, u32, bool, u32);
++extern void igb_configure_vt_default_pool(struct igb_adapter *adapter);
++
++#endif /* _IGB_H_ */
+Index: linux-2.6.22/drivers/net/igb/igb_ethtool.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.22/drivers/net/igb/igb_ethtool.c 2009-12-18 12:39:22.000000000 -0500
+@@ -0,0 +1,1953 @@
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2009 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ You should have received a copy of the GNU General Public License along with
++ this program; if not, write to the Free Software Foundation, Inc.,
++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
++
++/* ethtool support for igb */
++
++#include <linux/netdevice.h>
++#include <linux/vmalloc.h>
++
++#ifdef SIOCETHTOOL
++#include <linux/ethtool.h>
++
++#include "igb.h"
++#include "igb_regtest.h"
++#include <linux/if_vlan.h>
++
++#ifdef ETHTOOL_OPS_COMPAT
++#include "kcompat_ethtool.c"
++#endif
++
++#ifdef ETHTOOL_GSTATS
++struct igb_stats {
++ char stat_string[ETH_GSTRING_LEN];
++ int sizeof_stat;
++ int stat_offset;
++};
++
++#define IGB_STAT(m) sizeof(((struct igb_adapter *)0)->m), \
++ offsetof(struct igb_adapter, m)
++static const struct igb_stats igb_gstrings_stats[] = {
++ { "rx_packets", IGB_STAT(stats.gprc) },
++ { "tx_packets", IGB_STAT(stats.gptc) },
++ { "rx_bytes", IGB_STAT(stats.gorc) },
++ { "tx_bytes", IGB_STAT(stats.gotc) },
++ { "rx_broadcast", IGB_STAT(stats.bprc) },
++ { "tx_broadcast", IGB_STAT(stats.bptc) },
++ { "rx_multicast", IGB_STAT(stats.mprc) },
++ { "tx_multicast", IGB_STAT(stats.mptc) },
++ { "rx_errors", IGB_STAT(net_stats.rx_errors) },
++ { "tx_errors", IGB_STAT(net_stats.tx_errors) },
++ { "tx_dropped", IGB_STAT(net_stats.tx_dropped) },
++ { "multicast", IGB_STAT(stats.mprc) },
++ { "collisions", IGB_STAT(stats.colc) },
++ { "rx_length_errors", IGB_STAT(net_stats.rx_length_errors) },
++ { "rx_over_errors", IGB_STAT(net_stats.rx_over_errors) },
++ { "rx_crc_errors", IGB_STAT(stats.crcerrs) },
++ { "rx_frame_errors", IGB_STAT(net_stats.rx_frame_errors) },
++ { "rx_no_buffer_count", IGB_STAT(stats.rnbc) },
++ { "rx_missed_errors", IGB_STAT(stats.mpc) },
++ { "tx_aborted_errors", IGB_STAT(stats.ecol) },
++ { "tx_carrier_errors", IGB_STAT(stats.tncrs) },
++ { "tx_fifo_errors", IGB_STAT(net_stats.tx_fifo_errors) },
++ { "tx_heartbeat_errors", IGB_STAT(net_stats.tx_heartbeat_errors) },
++ { "tx_window_errors", IGB_STAT(stats.latecol) },
++ { "tx_abort_late_coll", IGB_STAT(stats.latecol) },
++ { "tx_deferred_ok", IGB_STAT(stats.dc) },
++ { "tx_single_coll_ok", IGB_STAT(stats.scc) },
++ { "tx_multi_coll_ok", IGB_STAT(stats.mcc) },
++ { "tx_timeout_count", IGB_STAT(tx_timeout_count) },
++ { "tx_restart_queue", IGB_STAT(restart_queue) },
++ { "rx_long_length_errors", IGB_STAT(stats.roc) },
++ { "rx_short_length_errors", IGB_STAT(stats.ruc) },
++ { "rx_align_errors", IGB_STAT(stats.algnerrc) },
++ { "tx_tcp_seg_good", IGB_STAT(stats.tsctc) },
++ { "tx_tcp_seg_failed", IGB_STAT(stats.tsctfc) },
++ { "rx_flow_control_xon", IGB_STAT(stats.xonrxc) },
++ { "rx_flow_control_xoff", IGB_STAT(stats.xoffrxc) },
++ { "tx_flow_control_xon", IGB_STAT(stats.xontxc) },
++ { "tx_flow_control_xoff", IGB_STAT(stats.xofftxc) },
++ { "rx_long_byte_count", IGB_STAT(stats.gorc) },
++ { "rx_csum_offload_good", IGB_STAT(hw_csum_good) },
++ { "rx_csum_offload_errors", IGB_STAT(hw_csum_err) },
++ { "tx_dma_out_of_sync", IGB_STAT(stats.doosync) },
++ { "alloc_rx_buff_failed", IGB_STAT(alloc_rx_buff_failed) },
++ { "tx_smbus", IGB_STAT(stats.mgptc) },
++ { "rx_smbus", IGB_STAT(stats.mgprc) },
++ { "dropped_smbus", IGB_STAT(stats.mgpdc) },
++#ifdef IGB_LRO
++ { "lro_aggregated", IGB_STAT(lro_aggregated) },
++ { "lro_flushed", IGB_STAT(lro_flushed) },
++ { "lro_no_desc", IGB_STAT(lro_no_desc) },
++#endif
++};
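++
++/*
++ * The (size, offset) pairs built by IGB_STAT() are consumed when ethtool
++ * requests statistics; a sketch of the usual extraction loop (assumed, not
++ * quoted from this hunk):
++ *
++ *     char *p = (char *)adapter + igb_gstrings_stats[i].stat_offset;
++ *     data[i] = (igb_gstrings_stats[i].sizeof_stat == sizeof(u64)) ?
++ *               *(u64 *)p : *(u32 *)p;
++ */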
++
++#define IGB_QUEUE_STATS_LEN \
++ ((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues + \
++ ((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues) * \
++ (sizeof(struct igb_queue_stats) / sizeof(u64)))
++#define IGB_GLOBAL_STATS_LEN \
++ (sizeof(igb_gstrings_stats) / sizeof(struct igb_stats))
++#define IGB_STATS_LEN (IGB_GLOBAL_STATS_LEN + IGB_QUEUE_STATS_LEN)
++#endif /* ETHTOOL_GSTATS */
++#ifdef ETHTOOL_TEST
++static const char igb_gstrings_test[][ETH_GSTRING_LEN] = {
++ "Register test (offline)", "Eeprom test (offline)",
++ "Interrupt test (offline)", "Loopback test (offline)",
++ "Link test (on/offline)"
++};
++#define IGB_TEST_LEN (sizeof(igb_gstrings_test) / ETH_GSTRING_LEN)
++#endif /* ETHTOOL_TEST */
++
++static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
++{
++ struct igb_adapter *adapter = netdev_priv(netdev);
++ struct e1000_hw *hw = &adapter->hw;
++ u32 status;
++
++ if (hw->phy.media_type == e1000_media_type_copper) {
++
++ ecmd->supported = (SUPPORTED_10baseT_Half |
++ SUPPORTED_10baseT_Full |
++ SUPPORTED_100baseT_Half |
++ SUPPORTED_100baseT_Full |
++ SUPPORTED_1000baseT_Full|
++ SUPPORTED_Autoneg |
++ SUPPORTED_TP);
++ ecmd->advertising = ADVERTISED_TP;
++
++ if (hw->mac.autoneg == 1) {
++ ecmd->advertising |= ADVERTISED_Autoneg;
++ /* the e1000 autoneg seems to match ethtool nicely */
++ ecmd->advertising |= hw->phy.autoneg_advertised;
++ }
++
++ ecmd->port = PORT_TP;
++ ecmd->phy_address = hw->phy.addr;
++ } else {
++ ecmd->supported = (SUPPORTED_1000baseT_Full |
++ SUPPORTED_FIBRE |
++ SUPPORTED_Autoneg);
++
++ ecmd->advertising = (ADVERTISED_1000baseT_Full |
++ ADVERTISED_FIBRE |
++ ADVERTISED_Autoneg);
++
++ ecmd->port = PORT_FIBRE;
++ }
++
++ ecmd->transceiver = XCVR_INTERNAL;
++
++ status = E1000_READ_REG(hw, E1000_STATUS);
++
++ if (status & E1000_STATUS_LU) {
++
++ if ((status & E1000_STATUS_SPEED_1000) ||
++ hw->phy.media_type != e1000_media_type_copper)
++ ecmd->speed = SPEED_1000;
++ else if (status & E1000_STATUS_SPEED_100)
++ ecmd->speed = SPEED_100;
++ else
++ ecmd->speed = SPEED_10;
++
++ if ((status & E1000_STATUS_FD) ||
++ hw->phy.media_type != e1000_media_type_copper)
++ ecmd->duplex = DUPLEX_FULL;
++ else
++ ecmd->duplex = DUPLEX_HALF;
++ } else {
++ ecmd->speed = -1;
++ ecmd->duplex = -1;
++ }
++
++ ecmd->autoneg = hw->mac.autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
++ return 0;
++}
++
++static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
++{
++ struct igb_adapter *adapter = netdev_priv(netdev);
++ struct e1000_hw *hw = &adapter->hw;
++
++ /* When SoL/IDER sessions are active, autoneg/speed/duplex
++ * cannot be changed */
++ if (e1000_check_reset_block(hw)) {
++ DPRINTK(DRV, ERR, "Cannot change link characteristics "
++ "when SoL/IDER is active.\n");
++ return -EINVAL;
++ }
++
++ while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
++ msleep(1);
++
++ if (ecmd->autoneg == AUTONEG_ENABLE) {
++ hw->mac.autoneg = 1;
++ hw->phy.autoneg_advertised = ecmd->advertising |
++ ADVERTISED_TP |
++ ADVERTISED_Autoneg;
++ ecmd->advertising = hw->phy.autoneg_advertised;
++ if (adapter->fc_autoneg)
++ hw->fc.requested_mode = e1000_fc_default;
++ } else {
++ if (igb_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) {
++ clear_bit(__IGB_RESETTING, &adapter->state);
++ return -EINVAL;
++ }
++ }
++
++ /* reset the link */
++ if (netif_running(adapter->netdev)) {
++ igb_down(adapter);
++ igb_up(adapter);
++ } else
++ igb_reset(adapter);
++
++ clear_bit(__IGB_RESETTING, &adapter->state);
++ return 0;
++}
++
++static void igb_get_pauseparam(struct net_device *netdev,
++ struct ethtool_pauseparam *pause)
++{
++ struct igb_adapter *adapter = netdev_priv(netdev);
++ struct e1000_hw *hw = &adapter->hw;
++
++ pause->autoneg =
++ (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
++
++ if (hw->fc.current_mode == e1000_fc_rx_pause)
++ pause->rx_pause = 1;
++ else if (hw->fc.current_mode == e1000_fc_tx_pause)
++ pause->tx_pause = 1;
++ else if (hw->fc.current_mode == e1000_fc_full) {
++ pause->rx_pause = 1;
++ pause->tx_pause = 1;
++ }
++}
++
++static int igb_set_pauseparam(struct net_device *netdev,
++ struct ethtool_pauseparam *pause)
++{
++ struct igb_adapter *adapter = netdev_priv(netdev);
++ struct e1000_hw *hw = &adapter->hw;
++ int retval = 0;
++
++ adapter->fc_autoneg = pause->autoneg;
++
++ while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
++ msleep(1);
++
++ if (adapter->fc_autoneg == AUTONEG_ENABLE) {
++ hw->fc.requested_mode = e1000_fc_default;
++ if (netif_running(adapter->netdev)) {
++ igb_down(adapter);
++ igb_up(adapter);
++ } else {
++ igb_reset(adapter);
++ }
++ } else {
++ if (pause->rx_pause && pause->tx_pause)
++ hw->fc.requested_mode = e1000_fc_full;
++ else if (pause->rx_pause && !pause->tx_pause)
++ hw->fc.requested_mode = e1000_fc_rx_pause;
++ else if (!pause->rx_pause && pause->tx_pause)
++ hw->fc.requested_mode = e1000_fc_tx_pause;
++ else if (!pause->rx_pause && !pause->tx_pause)
++ hw->fc.requested_mode = e1000_fc_none;
++
++ hw->fc.current_mode = hw->fc.requested_mode;
++
++ retval = ((hw->phy.media_type == e1000_media_type_copper) ?
++ e1000_force_mac_fc(hw) : hw->mac.ops.setup_link(hw));
++ }
++
++ clear_bit(__IGB_RESETTING, &adapter->state);
++ return retval;
++}
++
++static u32 igb_get_rx_csum(struct net_device *netdev)
++{
++ struct igb_adapter *adapter = netdev_priv(netdev);
++ return adapter->rx_ring[0].rx_csum;
++}
++
++static int igb_set_rx_csum(struct net_device *netdev, u32 data)
++{
++ struct igb_adapter *adapter = netdev_priv(netdev);
++ int i;
++
++ for (i = 0; i < adapter->num_rx_queues; i++)
++ adapter->rx_ring[i].rx_csum = !!data;
++
++ return 0;
++}
++
++static u32 igb_get_tx_csum(struct net_device *netdev)
++{
++ return (netdev->features & NETIF_F_IP_CSUM) != 0;
++}
++
++static int igb_set_tx_csum(struct net_device *netdev, u32 data)
++{
++ struct igb_adapter *adapter = netdev_priv(netdev);
++
++ if (data) {
++#ifdef NETIF_F_IPV6_CSUM
++ netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
++ if (adapter->hw.mac.type >= e1000_82576)
++ netdev->features |= NETIF_F_SCTP_CSUM;
++ } else {
++ netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
++ NETIF_F_SCTP_CSUM);
++#else
++ netdev->features |= NETIF_F_IP_CSUM;
++ if (adapter->hw.mac.type == e1000_82576)
++ netdev->features |= NETIF_F_SCTP_CSUM;
++ } else {
++ netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_SCTP_CSUM);
++#endif
++ }
++
++ return 0;
++}
++
++#ifdef NETIF_F_TSO
++static int igb_set_tso(struct net_device *netdev, u32 data)
++{
++ struct igb_adapter *adapter = netdev_priv(netdev);
++ int i;
++ struct net_device *v_netdev;
++
++ if (data) {
++ netdev->features |= NETIF_F_TSO;
++#ifdef NETIF_F_TSO6
++ netdev->features |= NETIF_F_TSO6;
++#endif
++ } else {
++ netdev->features &= ~NETIF_F_TSO;
++#ifdef NETIF_F_TSO6
++ netdev->features &= ~NETIF_F_TSO6;
++#endif
++ /* disable TSO on all VLANs if they're present */
++ if (!adapter->vlgrp)
++ goto tso_out;
++ for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
++ v_netdev = vlan_group_get_device(adapter->vlgrp, i);
++ if (!v_netdev)
++ continue;
++
++ v_netdev->features &= ~NETIF_F_TSO;
++#ifdef NETIF_F_TSO6
++ v_netdev->features &= ~NETIF_F_TSO6;
++#endif
++ vlan_group_set_device(adapter->vlgrp, i, v_netdev);
++ }
++ }
++
++tso_out:
++ DPRINTK(PROBE, INFO, "TSO is %s\n", data ? "Enabled" : "Disabled");
++ return 0;
++}
++#endif /* NETIF_F_TSO */
++
++static u32 igb_get_msglevel(struct net_device *netdev)
++{
++ struct igb_adapter *adapter = netdev_priv(netdev);
++ return adapter->msg_enable;
++}
++
++static void igb_set_msglevel(struct net_device *netdev, u32 data)
++{
++ struct igb_adapter *adapter = netdev_priv(netdev);
++ adapter->msg_enable = data;
++}
++
++static int igb_get_regs_len(struct net_device *netdev)
++{
++#define IGB_REGS_LEN 551
++ return IGB_REGS_LEN * sizeof(u32);
++}
++
++static void igb_get_regs(struct net_device *netdev,
++ struct ethtool_regs *regs, void *p)
++{
++ struct igb_adapter *adapter = netdev_priv(netdev);
++ struct e1000_hw *hw = &adapter->hw;
++ u32 *regs_buff = p;
++ u8 i;
++
++ memset(p, 0, IGB_REGS_LEN * sizeof(u32));
++
++ regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;
++
++ /* General Registers */
++ regs_buff[0] = E1000_READ_REG(hw, E1000_CTRL);
++ regs_buff[1] = E1000_READ_REG(hw, E1000_STATUS);
++ regs_buff[2] = E1000_READ_REG(hw, E1000_CTRL_EXT);
++ regs_buff[3] = E1000_READ_REG(hw, E1000_MDIC);
++ regs_buff[4] = E1000_READ_REG(hw, E1000_SCTL);
++ regs_buff[5] = E1000_READ_REG(hw, E1000_CONNSW);
++ regs_buff[6] = E1000_READ_REG(hw, E1000_VET);
++ regs_buff[7] = E1000_READ_REG(hw, E1000_LEDCTL);
++ regs_buff[8] = E1000_READ_REG(hw, E1000_PBA);
++ regs_buff[9] = E1000_READ_REG(hw, E1000_PBS);
++ regs_buff[10] = E1000_READ_REG(hw, E1000_FRTIMER);
++ regs_buff[11] = E1000_READ_REG(hw, E1000_TCPTIMER);
++
++ /* NVM Register */
++ regs_buff[12] = E1000_READ_REG(hw, E1000_EECD);
++
++ /* Interrupt */
++ /* Reading EICS for EICR because they read the
++ * same but EICS does not clear on read */
++ regs_buff[13] = E1000_READ_REG(hw, E1000_EICS);
++ regs_buff[14] = E1000_READ_REG(hw, E1000_EICS);
++ regs_buff[15] = E1000_READ_REG(hw, E1000_EIMS);
++ regs_buff[16] = E1000_READ_REG(hw, E1000_EIMC);
++ regs_buff[17] = E1000_READ_REG(hw, E1000_EIAC);
++ regs_buff[18] = E1000_READ_REG(hw, E1000_EIAM);
++ /* Reading ICS for ICR because they read the
++ * same but ICS does not clear on read */
++ regs_buff[19] = E1000_READ_REG(hw, E1000_ICS);
++ regs_buff[20] = E1000_READ_REG(hw, E1000_ICS);
++ regs_buff[21] = E1000_READ_REG(hw, E1000_IMS);
++ regs_buff[22] = E1000_READ_REG(hw, E1000_IMC);
++ regs_buff[23] = E1000_READ_REG(hw, E1000_IAC);
++ regs_buff[24] = E1000_READ_REG(hw, E1000_IAM);
++ regs_buff[25] = E1000_READ_REG(hw, E1000_IMIRVP);
++
++ /* Flow Control */
++ regs_buff[26] = E1000_READ_REG(hw, E1000_FCAL);
++ regs_buff[27] = E1000_READ_REG(hw, E1000_FCAH);
++ regs_buff[28] = E1000_READ_REG(hw, E1000_FCTTV);
++ regs_buff[29] = E1000_READ_REG(hw, E1000_FCRTL);
++ regs_buff[30] = E1000_READ_REG(hw, E1000_FCRTH);
++ regs_buff[31] = E1000_READ_REG(hw, E1000_FCRTV);
++
++ /* Receive */
++ regs_buff[32] = E1000_READ_REG(hw, E1000_RCTL);
++ regs_buff[33] = E1000_READ_REG(hw, E1000_RXCSUM);
++ regs_buff[34] = E1000_READ_REG(hw, E1000_RLPML);
++ regs_buff[35] = E1000_READ_REG(hw, E1000_RFCTL);
++ regs_buff[36] = E1000_READ_REG(hw, E1000_MRQC);
++ regs_buff[37] = E1000_READ_REG(hw, E1000_VT_CTL);
++
++ /* Transmit */
++ regs_buff[38] = E1000_READ_REG(hw, E1000_TCTL);
++ regs_buff[39] = E1000_READ_REG(hw, E1000_TCTL_EXT);
++ regs_buff[40] = E1000_READ_REG(hw, E1000_TIPG);
++ regs_buff[41] = E1000_READ_REG(hw, E1000_DTXCTL);
++
++ /* Wake Up */
++ regs_buff[42] = E1000_READ_REG(hw, E1000_WUC);
++ regs_buff[43] = E1000_READ_REG(hw, E1000_WUFC);
++ regs_buff[44] = E1000_READ_REG(hw, E1000_WUS);
++ regs_buff[45] = E1000_READ_REG(hw, E1000_IPAV);
++ regs_buff[46] = E1000_READ_REG(hw, E1000_WUPL);
++
++ /* MAC */
++ regs_buff[47] = E1000_READ_REG(hw, E1000_PCS_CFG0);
++ regs_buff[48] = E1000_READ_REG(hw, E1000_PCS_LCTL);
++ regs_buff[49] = E1000_READ_REG(hw, E1000_PCS_LSTAT);
++ regs_buff[50] = E1000_READ_REG(hw, E1000_PCS_ANADV);
++ regs_buff[51] = E1000_READ_REG(hw, E1000_PCS_LPAB);
++ regs_buff[52] = E1000_READ_REG(hw, E1000_PCS_NPTX);
++ regs_buff[53] = E1000_READ_REG(hw, E1000_PCS_LPABNP);
++
++ /* Statistics */
++ regs_buff[54] = adapter->stats.crcerrs;
++ regs_buff[55] = adapter->stats.algnerrc;
++ regs_buff[56] = adapter->stats.symerrs;
++ regs_buff[57] = adapter->stats.rxerrc;
++ regs_buff[58] = adapter->stats.mpc;
++ regs_buff[59] = adapter->stats.scc;
++ regs_buff[60] = adapter->stats.ecol;
++ regs_buff[61] = adapter->stats.mcc;
++ regs_buff[62] = adapter->stats.latecol;
++ regs_buff[63] = adapter->stats.colc;
++ regs_buff[64] = adapter->stats.dc;
++ regs_buff[65] = adapter->stats.tncrs;
++ regs_buff[66] = adapter->stats.sec;
++ regs_buff[67] = adapter->stats.htdpmc;
++ regs_buff[68] = adapter->stats.rlec;
++ regs_buff[69] = adapter->stats.xonrxc;
++ regs_buff[70] = adapter->stats.xontxc;
++ regs_buff[71] = adapter->stats.xoffrxc;
++ regs_buff[72] = adapter->stats.xofftxc;
++ regs_buff[73] = adapter->stats.fcruc;
++ regs_buff[74] = adapter->stats.prc64;
++ regs_buff[75] = adapter->stats.prc127;
++ regs_buff[76] = adapter->stats.prc255;
++ regs_buff[77] = adapter->stats.prc511;
++ regs_buff[78] = adapter->stats.prc1023;
++ regs_buff[79] = adapter->stats.prc1522;
++ regs_buff[80] = adapter->stats.gprc;
++ regs_buff[81] = adapter->stats.bprc;
++ regs_buff[82] = adapter->stats.mprc;
++ regs_buff[83] = adapter->stats.gptc;
++ regs_buff[84] = adapter->stats.gorc;
++ regs_buff[86] = adapter->stats.gotc;
++ regs_buff[88] = adapter->stats.rnbc;
++ regs_buff[89] = adapter->stats.ruc;
++ regs_buff[90] = adapter->stats.rfc;
++ regs_buff[91] = adapter->stats.roc;
++ regs_buff[92] = adapter->stats.rjc;
++ regs_buff[93] = adapter->stats.mgprc;
++ regs_buff[94] = adapter->stats.mgpdc;
++ regs_buff[95] = adapter->stats.mgptc;
++ regs_buff[96] = adapter->stats.tor;
++ regs_buff[98] = adapter->stats.tot;
++ regs_buff[100] = adapter->stats.tpr;
++ regs_buff[101] = adapter->stats.tpt;
++ regs_buff[102] = adapter->stats.ptc64;
++ regs_buff[103] = adapter->stats.ptc127;
++ regs_buff[104] = adapter->stats.ptc255;
++ regs_buff[105] = adapter->stats.ptc511;
++ regs_buff[106] = adapter->stats.ptc1023;
++ regs_buff[107] = adapter->stats.ptc1522;
++ regs_buff[108] = adapter->stats.mptc;
++ regs_buff[109] = adapter->stats.bptc;
++ regs_buff[110] = adapter->stats.tsctc;
++ regs_buff[111] = adapter->stats.iac;
++ regs_buff[112] = adapter->stats.rpthc;
++ regs_buff[113] = adapter->stats.hgptc;
++ regs_buff[114] = adapter->stats.hgorc;
++ regs_buff[116] = adapter->stats.hgotc;
++ regs_buff[118] = adapter->stats.lenerrs;
++ regs_buff[119] = adapter->stats.scvpc;
++ regs_buff[120] = adapter->stats.hrmpc;
++
++ for (i = 0; i < 4; i++)
++ regs_buff[121 + i] = E1000_READ_REG(hw, E1000_SRRCTL(i));
++ for (i = 0; i < 4; i++)
++ regs_buff[125 + i] = E1000_READ_REG(hw, E1000_PSRTYPE(i));
++ for (i = 0; i < 4; i++)
++ regs_buff[129 + i] = E1000_READ_REG(hw, E1000_RDBAL(i));
++ for (i = 0; i < 4; i++)
++ regs_buff[133 + i] = E1000_READ_REG(hw, E1000_RDBAH(i));
++ for (i = 0; i < 4; i++)
++ regs_buff[137 + i] = E1000_READ_REG(hw, E1000_RDLEN(i));
++ for (i = 0; i < 4; i++)
++ regs_buff[141 + i] = E1000_READ_REG(hw, E1000_RDH(i));
++ for (i = 0; i < 4; i++)
++ regs_buff[145 + i] = E1000_READ_REG(hw, E1000_RDT(i));
++ for (i = 0; i < 4; i++)
++ regs_buff[149 + i] = E1000_READ_REG(hw, E1000_RXDCTL(i));
++
++ for (i = 0; i < 10; i++)
++ regs_buff[153 + i] = E1000_READ_REG(hw, E1000_EITR(i));
++ for (i = 0; i < 8; i++)
++ regs_buff[163 + i] = E1000_READ_REG(hw, E1000_IMIR(i));
++ for (i = 0; i < 8; i++)
++ regs_buff[171 + i] = E1000_READ_REG(hw, E1000_IMIREXT(i));
++ for (i = 0; i < 16; i++)
++ regs_buff[179 + i] = E1000_READ_REG(hw, E1000_RAL(i));
++ for (i = 0; i < 16; i++)
++ regs_buff[195 + i] = E1000_READ_REG(hw, E1000_RAH(i));
++
++ for (i = 0; i < 4; i++)
++ regs_buff[211 + i] = E1000_READ_REG(hw, E1000_TDBAL(i));
++ for (i = 0; i < 4; i++)
++ regs_buff[215 + i] = E1000_READ_REG(hw, E1000_TDBAH(i));
++ for (i = 0; i < 4; i++)
++ regs_buff[219 + i] = E1000_READ_REG(hw, E1000_TDLEN(i));
++ for (i = 0; i < 4; i++)
++ regs_buff[223 + i] = E1000_READ_REG(hw, E1000_TDH(i));
++ for (i = 0; i < 4; i++)
++ regs_buff[227 + i] = E1000_READ_REG(hw, E1000_TDT(i));
++ for (i = 0; i < 4; i++)
++ regs_buff[231 + i] = E1000_READ_REG(hw, E1000_TXDCTL(i));
++ for (i = 0; i < 4; i++)
++ regs_buff[235 + i] = E1000_READ_REG(hw, E1000_TDWBAL(i));
++ for (i = 0; i < 4; i++)
++ regs_buff[239 + i] = E1000_READ_REG(hw, E1000_TDWBAH(i));
++ for (i = 0; i < 4; i++)
++ regs_buff[243 + i] = E1000_READ_REG(hw, E1000_DCA_TXCTRL(i));
++
++ for (i = 0; i < 4; i++)
++ regs_buff[247 + i] = E1000_READ_REG(hw, E1000_IP4AT_REG(i));
++ for (i = 0; i < 4; i++)
++ regs_buff[251 + i] = E1000_READ_REG(hw, E1000_IP6AT_REG(i));
++ for (i = 0; i < 32; i++)
++ regs_buff[255 + i] = E1000_READ_REG(hw, E1000_WUPM_REG(i));
++ for (i = 0; i < 128; i++)
++ regs_buff[287 + i] = E1000_READ_REG(hw, E1000_FFMT_REG(i));
++ for (i = 0; i < 128; i++)
++ regs_buff[415 + i] = E1000_READ_REG(hw, E1000_FFVT_REG(i));
++ for (i = 0; i < 4; i++)
++ regs_buff[543 + i] = E1000_READ_REG(hw, E1000_FFLT_REG(i));
++
++ regs_buff[547] = E1000_READ_REG(hw, E1000_TDFH);
++ regs_buff[548] = E1000_READ_REG(hw, E1000_TDFT);
++ regs_buff[549] = E1000_READ_REG(hw, E1000_TDFHS);
++ regs_buff[550] = E1000_READ_REG(hw, E1000_TDFPC);
++
++}
++
++static int igb_get_eeprom_len(struct net_device *netdev)
++{
++ struct igb_adapter *adapter = netdev_priv(netdev);
++ return adapter->hw.nvm.word_size * 2;
++}
++
++static int igb_get_eeprom(struct net_device *netdev,
++ struct ethtool_eeprom *eeprom, u8 *bytes)
++{
++ struct igb_adapter *adapter = netdev_priv(netdev);
++ struct e1000_hw *hw = &adapter->hw;
++ u16 *eeprom_buff;
++ int first_word, last_word;
++ int ret_val = 0;
++ u16 i;
++
++ if (eeprom->len == 0)
++ return -EINVAL;
++
++ eeprom->magic = hw->vendor_id | (hw->device_id << 16);
++
++ first_word = eeprom->offset >> 1;
++ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
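++ /* The NVM is addressed in 16-bit words, so the requested byte range is
++ * widened to an inclusive word range; e.g. offset 3 with len 4 covers
++ * bytes 3-6 and therefore words 1 through 3. */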
++
++ eeprom_buff = kmalloc(sizeof(u16) *
++ (last_word - first_word + 1), GFP_KERNEL);
++ if (!eeprom_buff)
++ return -ENOMEM;
++
++ if (hw->nvm.type == e1000_nvm_eeprom_spi)
++ ret_val = e1000_read_nvm(hw, first_word,
++ last_word - first_word + 1,
++ eeprom_buff);
++ else {
++ for (i = 0; i < last_word - first_word + 1; i++) {
++ ret_val = e1000_read_nvm(hw, first_word + i, 1,
++ &eeprom_buff[i]);
++ if (ret_val)
++ break;
++ }
++ }
++
++ /* Device's eeprom is always little-endian, word addressable */
++ for (i = 0; i < last_word - first_word + 1; i++)
++ le16_to_cpus(&eeprom_buff[i]);
++
++ memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1),
++ eeprom->len);
++ kfree(eeprom_buff);
++
++ return ret_val;
++}
++
++static int igb_set_eeprom(struct net_device *netdev,
++ struct ethtool_eeprom *eeprom, u8 *bytes)
++{
++ struct igb_adapter *adapter = netdev_priv(netdev);
++ struct e1000_hw *hw = &adapter->hw;
++ u16 *eeprom_buff;
++ void *ptr;
++ int max_len, first_word, last_word, ret_val = 0;
++ u16 i;
++
++ if (eeprom->len == 0)
++ return -EOPNOTSUPP;
++
++ if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
++ return -EFAULT;
++
++ max_len = hw->nvm.word_size * 2;
++
++ first_word = eeprom->offset >> 1;
++ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
++ eeprom_buff = kmalloc(max_len, GFP_KERNEL);
++ if (!eeprom_buff)
++ return -ENOMEM;
++
++ ptr = (void *)eeprom_buff;
++
++ if (eeprom->offset & 1) {
++ /* need read/modify/write of first changed EEPROM word */
++ /* only the second byte of the word is being modified */
++ ret_val = e1000_read_nvm(hw, first_word, 1,
++ &eeprom_buff[0]);
++ ptr++;
++ }
++ if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
++ /* need read/modify/write of last changed EEPROM word */
++ /* only the first byte of the word is being modified */
++ ret_val = e1000_read_nvm(hw, last_word, 1,
++ &eeprom_buff[last_word - first_word]);
++ }
++
++ /* Device's eeprom is always little-endian, word addressable */
++ for (i = 0; i < last_word - first_word + 1; i++)
++ le16_to_cpus(&eeprom_buff[i]);
++
++ memcpy(ptr, bytes, eeprom->len);
++
++ for (i = 0; i < last_word - first_word + 1; i++)
++ cpu_to_le16s(&eeprom_buff[i]);
++
++ ret_val = e1000_write_nvm(hw, first_word,
++ last_word - first_word + 1, eeprom_buff);
++
++ /* Update the checksum over the first part of the EEPROM if needed
++ * and flush shadow RAM for 82573 controllers */
++ if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG)))
++ e1000_update_nvm_checksum(hw);
++
++ kfree(eeprom_buff);
++ return ret_val;
++}
++
++static void igb_get_drvinfo(struct net_device *netdev,
++ struct ethtool_drvinfo *drvinfo)
++{
++ struct igb_adapter *adapter = netdev_priv(netdev);
++ u16 eeprom_data;
++
++ strncpy(drvinfo->driver, igb_driver_name, 32);
++ strncpy(drvinfo->version, igb_driver_version, 32);
++
++ /* EEPROM image version # is reported as firmware version # for
++ * 82575 controllers */
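++ /* NVM word 5 packs the version as 4 bits of major, 8 bits of minor
++ * and 4 bits of build number, which the masks below extract. */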
++ e1000_read_nvm(&adapter->hw, 5, 1, &eeprom_data);
++ snprintf(drvinfo->fw_version, 32, "%d.%d-%d",
++ (eeprom_data & 0xF000) >> 12,
++ (eeprom_data & 0x0FF0) >> 4,
++ eeprom_data & 0x000F);
++
++ strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
++ drvinfo->n_stats = IGB_STATS_LEN;
++ drvinfo->testinfo_len = IGB_TEST_LEN;
++ drvinfo->regdump_len = igb_get_regs_len(netdev);
++ drvinfo->eedump_len = igb_get_eeprom_len(netdev);
++}
++
++static void igb_get_ringparam(struct net_device *netdev,
++ struct ethtool_ringparam *ring)
++{
++ struct igb_adapter *adapter = netdev_priv(netdev);
++
++ ring->rx_max_pending = IGB_MAX_RXD;
++ ring->tx_max_pending = IGB_MAX_TXD;
++ ring->rx_mini_max_pending = 0;
++ ring->rx_jumbo_max_pending = 0;
++ ring->rx_pending = adapter->rx_ring_count;
++ ring->tx_pending = adapter->tx_ring_count;
++ ring->rx_mini_pending = 0;
++ ring->rx_jumbo_pending = 0;
++}
++
++static int igb_set_ringparam(struct net_device *netdev,
++ struct ethtool_ringparam *ring)
++{
++ struct igb_adapter *adapter = netdev_priv(netdev);
++ struct igb_ring *temp_ring;
++ int i, err;
++ u16 new_rx_count, new_tx_count;
++
++ if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
++ return -EINVAL;
++
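++ /* The requested counts are clamped to the supported range and rounded
++ * up to the required descriptor alignment before being compared with
++ * the current ring sizes. */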
++ new_rx_count = min(ring->rx_pending, (u32)IGB_MAX_RXD);
++ new_rx_count = max(new_rx_count, (u16)IGB_MIN_RXD);
++ new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);
++
++ new_tx_count = min(ring->tx_pending, (u32)IGB_MAX_TXD);
++ new_tx_count = max(new_tx_count, (u16)IGB_MIN_TXD);
++ new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);
++
++ if ((new_tx_count == adapter->tx_ring_count) &&
++ (new_rx_count == adapter->rx_ring_count)) {
++ /* nothing to do */
++ return 0;
++ }
++
++ if (adapter->num_tx_queues > adapter->num_rx_queues)
++ temp_ring = vmalloc(adapter->num_tx_queues * sizeof(struct igb_ring));
++ else
++ temp_ring = vmalloc(adapter->num_rx_queues * sizeof(struct igb_ring));
++ if (!temp_ring)
++ return -ENOMEM;
++
++ while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
++ msleep(1);
++
++ if (netif_running(adapter->netdev))
++ igb_down(adapter);
++
++ /*
++ * We can't just free everything and then setup again,
++ * because the ISRs in MSI-X mode get passed pointers
++ * to the tx and rx ring structs.
++ */
++ if (new_tx_count != adapter->tx_ring_count) {
++ memcpy(temp_ring, adapter->tx_ring,
++ adapter->num_tx_queues * sizeof(struct igb_ring));
++
++ for (i = 0; i < adapter->num_tx_queues; i++) {
++ temp_ring[i].count = new_tx_count;
++ err = igb_setup_tx_resources(&temp_ring[i]);
++ if (err) {
++ while (i) {
++ i--;
++ igb_free_tx_resources(&temp_ring[i]);
++ }
++ goto err_setup;
++ }
++ }
++
++ for (i = 0; i < adapter->num_tx_queues; i++)
++ igb_free_tx_resources(&adapter->tx_ring[i]);
++
++ memcpy(adapter->tx_ring, temp_ring,
++ adapter->num_tx_queues * sizeof(struct igb_ring));
++
++ adapter->tx_ring_count = new_tx_count;
++ }
++
++ if (new_rx_count != adapter->rx_ring->count) {
++ memcpy(temp_ring, adapter->rx_ring,
++ adapter->num_rx_queues * sizeof(struct igb_ring));
++
++ for (i = 0; i < adapter->num_rx_queues; i++) {
++ temp_ring[i].count = new_rx_count;
++ err = igb_setup_rx_resources(&temp_ring[i]);
++ if (err) {
++ while (i) {
++ i--;
++ igb_free_rx_resources(&temp_ring[i]);
++ }
++ goto err_setup;
++ }
++
++ }
++
++ for (i = 0; i < adapter->num_rx_queues; i++)
++ igb_free_rx_resources(&adapter->rx_ring[i]);
++
++ memcpy(adapter->rx_ring, temp_ring,
++ adapter->num_rx_queues * sizeof(struct igb_ring));
++
++ adapter->rx_ring_count = new_rx_count;
++ }
++
++ err = 0;
++err_setup:
++ if (netif_running(adapter->netdev))
++ igb_up(adapter);
++
++ clear_bit(__IGB_RESETTING, &adapter->state);
++ vfree(temp_ring);
++ return err;
++}
++
++static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
++ int reg, u32 mask, u32 write)
++{
++ struct e1000_hw *hw = &adapter->hw;
++ u32 pat, val;
++ static const u32 _test[] =
++ {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
++ for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {
++ E1000_WRITE_REG(hw, reg, (_test[pat] & write));
++ val = E1000_READ_REG(hw, reg);
++ if (val != (_test[pat] & write & mask)) {
++ DPRINTK(DRV, ERR, "pattern test reg %04X failed: got "
++ "0x%08X expected 0x%08X\n",
++ E1000_REGISTER(hw, reg), val,
++ (_test[pat] & write & mask));
++ *data = E1000_REGISTER(hw, reg);
++ return 1;
++ }
++ }
++
++ return 0;
++}
++
++static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
++ int reg, u32 mask, u32 write)
++{
++ struct e1000_hw *hw = &adapter->hw;
++ u32 val;
++ E1000_WRITE_REG(hw, reg, write & mask);
++ val = E1000_READ_REG(hw, reg);
++ if ((write & mask) != (val & mask)) {
++ DPRINTK(DRV, ERR, "set/check reg %04X test failed: got 0x%08X "
++ "expected 0x%08X\n", reg, (val & mask), (write & mask));
++ *data = E1000_REGISTER(hw, reg);
++ return 1;
++ }
++
++ return 0;
++}
++
++#define REG_PATTERN_TEST(reg, mask, write) \
++ do { \
++ if (reg_pattern_test(adapter, data, reg, mask, write)) \
++ return 1; \
++ } while (0)
++
++#define REG_SET_AND_CHECK(reg, mask, write) \
++ do { \
++ if (reg_set_and_check(adapter, data, reg, mask, write)) \
++ return 1; \
++ } while (0)
++
++static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
++{
++ struct e1000_hw *hw = &adapter->hw;
++ struct igb_reg_test *test;
++ u32 value, before, after;
++ u32 i, toggle;
++
++ switch (adapter->hw.mac.type) {
++ case e1000_82576:
++ test = reg_test_82576;
++ toggle = 0x7FFFF3FF;
++ break;
++ default:
++ test = reg_test_82575;
++ toggle = 0x7FFFF3FF;
++ break;
++ }
++
++ /* Because the status register is such a special case,
++ * we handle it separately from the rest of the register
++ * tests. Some bits are read-only, some toggle, and some
++ * are writable on newer MACs.
++ */
++ before = E1000_READ_REG(hw, E1000_STATUS);
++ value = (E1000_READ_REG(hw, E1000_STATUS) & toggle);
++ E1000_WRITE_REG(hw, E1000_STATUS, toggle);
++ after = E1000_READ_REG(hw, E1000_STATUS) & toggle;
++ if (value != after) {
++ DPRINTK(DRV, ERR, "failed STATUS register test got: "
++ "0x%08X expected: 0x%08X\n", after, value);
++ *data = 1;
++ return 1;
++ }
++ /* restore previous status */
++ E1000_WRITE_REG(hw, E1000_STATUS, before);
++
++ /* Perform the remainder of the register test, looping through
++ * the test table until we either fail or reach the null entry.
++ */
++ while (test->reg) {
++ for (i = 0; i < test->array_len; i++) {
++ switch (test->test_type) {
++ case PATTERN_TEST:
++ REG_PATTERN_TEST(test->reg +
++ (i * test->reg_offset),
++ test->mask,
++ test->write);
++ break;
++ case SET_READ_TEST:
++ REG_SET_AND_CHECK(test->reg +
++ (i * test->reg_offset),
++ test->mask,
++ test->write);
++ break;
++ case WRITE_NO_TEST:
++ writel(test->write,
++ (adapter->hw.hw_addr + test->reg)
++ + (i * test->reg_offset));
++ break;
++ case TABLE32_TEST:
++ REG_PATTERN_TEST(test->reg + (i * 4),
++ test->mask,
++ test->write);
++ break;
++ case TABLE64_TEST_LO:
++ REG_PATTERN_TEST(test->reg + (i * 8),
++ test->mask,
++ test->write);
++ break;
++ case TABLE64_TEST_HI:
++ REG_PATTERN_TEST((test->reg + 4) + (i * 8),
++ test->mask,
++ test->write);
++ break;
++ }
++ }
++ test++;
++ }
++
++ *data = 0;
++ return 0;
++}
++
++static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data)
++{
++ u16 temp;
++ u16 checksum = 0;
++ u16 i;
++
++ *data = 0;
++ /* Read and add up the contents of the EEPROM */
++ for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
++ if ((e1000_read_nvm(&adapter->hw, i, 1, &temp)) < 0) {
++ *data = 1;
++ break;
++ }
++ checksum += temp;
++ }
++
++ /* If the checksum is not correct, return an error; otherwise the test passed */
++ if ((checksum != (u16) NVM_SUM) && !(*data))
++ *data = 2;
++
++ return *data;
++}
++
++static irqreturn_t igb_test_intr(int irq, void *data)
++{
++ struct igb_adapter *adapter = (struct igb_adapter *) data;
++ struct e1000_hw *hw = &adapter->hw;
++
++ adapter->test_icr |= E1000_READ_REG(hw, E1000_ICR);
++
++ return IRQ_HANDLED;
++}
++
++static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
++{
++ struct e1000_hw *hw = &adapter->hw;
++ struct net_device *netdev = adapter->netdev;
++ u32 mask, ics_mask, i = 0, shared_int = TRUE;
++ u32 irq = adapter->pdev->irq;
++
++ *data = 0;
++
++ /* Hook up test interrupt handler just for this test */
++ if (adapter->msix_entries) {
++ if (request_irq(adapter->msix_entries[0].vector,
++ &igb_test_intr, 0, netdev->name, adapter)) {
++ *data = 1;
++ return -1;
++ }
++ } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
++ shared_int = FALSE;
++ if (request_irq(irq, &igb_test_intr, 0, netdev->name, adapter)) {
++ *data = 1;
++ return -1;
++ }
++ } else if (!request_irq(irq, &igb_test_intr, IRQF_PROBE_SHARED,
++ netdev->name, adapter)) {
++ shared_int = FALSE;
++ } else if (request_irq(irq, &igb_test_intr, IRQF_SHARED,
++ netdev->name, adapter)) {
++ *data = 1;
++ return -1;
++ }
++ DPRINTK(HW, INFO, "testing %s interrupt\n",
++ (shared_int ? "shared" : "unshared"));
++
++ /* Disable all the interrupts */
++ E1000_WRITE_REG(hw, E1000_IMC, ~0);
++ msleep(10);
++
++ /* Define all writable bits for ICS */
++ switch (hw->mac.type) {
++ case e1000_82575:
++ ics_mask = 0x37F47EDD;
++ break;
++ case e1000_82576:
++ ics_mask = 0x77D4FBFD;
++ break;
++ default:
++ ics_mask = 0x7FFFFFFF;
++ break;
++ }
++
++ /* Test each interrupt */
++ for (; i < 31; i++) {
++ /* Interrupt to test */
++ mask = 1 << i;
++
++ if (!(mask & ics_mask))
++ continue;
++
++ if (!shared_int) {
++ /* Disable the interrupt to be reported in
++ * the cause register and then force the same
++ * interrupt and see if one gets posted. If
++ * an interrupt was posted to the bus, the
++ * test failed.
++ */
++ adapter->test_icr = 0;
++
++ /* Flush any pending interrupts */
++ E1000_WRITE_REG(hw, E1000_ICR, ~0);
++
++ E1000_WRITE_REG(hw, E1000_IMC, mask);
++ E1000_WRITE_REG(hw, E1000_ICS, mask);
++ msleep(10);
++
++ if (adapter->test_icr & mask) {
++ *data = 3;
++ break;
++ }
++ }
++
++ /* Enable the interrupt to be reported in
++ * the cause register and then force the same
++ * interrupt and see if one gets posted. If
++ * an interrupt was not posted to the bus, the
++ * test failed.
++ */
++ adapter->test_icr = 0;
++
++ /* Flush any pending interrupts */
++ E1000_WRITE_REG(hw, E1000_ICR, ~0);
++
++ E1000_WRITE_REG(hw, E1000_IMS, mask);
++ E1000_WRITE_REG(hw, E1000_ICS, mask);
++ msleep(10);
++
++ if (!(adapter->test_icr & mask)) {
++ *data = 4;
++ break;
++ }
++
++ if (!shared_int) {
++ /* Disable the other interrupts to be reported in
++ * the cause register and then force the other
++ * interrupts and see if any get posted. If
++ * an interrupt was posted to the bus, the
++ * test failed.
++ */
++ adapter->test_icr = 0;
++
++ /* Flush any pending interrupts */
++ E1000_WRITE_REG(hw, E1000_ICR, ~0);
++
++ E1000_WRITE_REG(hw, E1000_IMC, ~mask);
++ E1000_WRITE_REG(hw, E1000_ICS, ~mask);
++ msleep(10);
++
++ if (adapter->test_icr & mask) {
++ *data = 5;
++ break;
++ }
++ }
++ }
++
++ /* Disable all the interrupts */
++ E1000_WRITE_REG(hw, E1000_IMC, ~0);
++ msleep(10);
++
++ /* Unhook test interrupt handler */
++ if (adapter->msix_entries)
++ free_irq(adapter->msix_entries[0].vector, adapter);
++ else
++ free_irq(irq, adapter);
++
++ return *data;
++}
++
++static void igb_free_desc_rings(struct igb_adapter *adapter)
++{
++ igb_free_tx_resources(&adapter->test_tx_ring);
++ igb_free_rx_resources(&adapter->test_rx_ring);
++}
++
++static int igb_setup_desc_rings(struct igb_adapter *adapter)
++{
++ struct igb_ring *tx_ring = &adapter->test_tx_ring;
++ struct igb_ring *rx_ring = &adapter->test_rx_ring;
++ int i, ret_val;
++
++ /* Setup Tx descriptor ring and Tx buffers */
++ tx_ring->count = IGB_DEFAULT_TXD;
++ tx_ring->pdev = adapter->pdev;
++ tx_ring->reg_idx = adapter->vfs_allocated_count;
++
++ if (igb_setup_tx_resources(tx_ring)) {
++ ret_val = 1;
++ goto err_nomem;
++ }
++
++ igb_setup_tctl(adapter);
++ igb_configure_tx_ring(adapter, tx_ring);
++
++ for (i = 0; i < tx_ring->count; i++) {
++ union e1000_adv_tx_desc *tx_desc;
++ unsigned int size = 1024;
++ struct sk_buff *skb = alloc_skb(size, GFP_KERNEL);
++
++ if (!skb) {
++ ret_val = 2;
++ goto err_nomem;
++ }
++ skb_put(skb, size);
++ tx_ring->buffer_info[i].skb = skb;
++ tx_ring->buffer_info[i].length = skb->len;
++ tx_ring->buffer_info[i].dma =
++ pci_map_single(tx_ring->pdev, skb->data, skb->len,
++ PCI_DMA_TODEVICE);
++ tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
++ tx_desc->read.buffer_addr =
++ cpu_to_le64(tx_ring->buffer_info[i].dma);
++ tx_desc->read.olinfo_status =
++ cpu_to_le32(skb->len << E1000_ADVTXD_PAYLEN_SHIFT);
++ tx_desc->read.cmd_type_len = cpu_to_le32(skb->len);
++ tx_desc->read.cmd_type_len |=
++ cpu_to_le32(E1000_ADVTXD_DTYP_DATA |
++ E1000_ADVTXD_DCMD_DEXT);
++ tx_desc->read.cmd_type_len |=
++ cpu_to_le32(IGB_ADVTXD_DCMD |
++ E1000_ADVTXD_DTYP_DATA |
++ E1000_ADVTXD_DCMD_IFCS |
++ E1000_ADVTXD_DCMD_DEXT);
++ }
++
++ /* Setup Rx descriptor ring and Rx buffers */
++ rx_ring->count = IGB_DEFAULT_RXD;
++ rx_ring->pdev = adapter->pdev;
++ rx_ring->rx_buffer_len = IGB_RXBUFFER_2048;
++ rx_ring->reg_idx = adapter->vfs_allocated_count;
++
++ if (igb_setup_rx_resources(rx_ring)) {
++ ret_val = 3;
++ goto err_nomem;
++ }
++
++ /* set the default queue to queue 0 of PF */
++ E1000_WRITE_REG(&adapter->hw, E1000_MRQC,
++ adapter->vfs_allocated_count << 3);
++
++ /* enable receive ring */
++ igb_setup_rctl(adapter);
++ igb_configure_rx_ring(adapter, rx_ring);
++
++ if (igb_alloc_rx_buffers_adv(rx_ring, rx_ring->count)) {
++ ret_val = 4;
++ goto err_nomem;
++ }
++
++
++ return 0;
++
++err_nomem:
++ igb_free_desc_rings(adapter);
++ return ret_val;
++}
++
++static void igb_phy_disable_receiver(struct igb_adapter *adapter)
++{
++ /* Write out to PHY registers 29 and 30 to disable the Receiver. */
++ e1000_write_phy_reg(&adapter->hw, 29, 0x001F);
++ e1000_write_phy_reg(&adapter->hw, 30, 0x8FFC);
++ e1000_write_phy_reg(&adapter->hw, 29, 0x001A);
++ e1000_write_phy_reg(&adapter->hw, 30, 0x8FF0);
++}
++
++static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
++{
++ struct e1000_hw *hw = &adapter->hw;
++ u32 ctrl_reg = 0;
++
++ hw->mac.autoneg = FALSE;
++
++ if (hw->phy.type == e1000_phy_m88) {
++ /* Auto-MDI/MDIX Off */
++ e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
++ /* reset to update Auto-MDI/MDIX */
++ e1000_write_phy_reg(hw, PHY_CONTROL, 0x9140);
++ /* autoneg off */
++ e1000_write_phy_reg(hw, PHY_CONTROL, 0x8140);
++ }
++
++ ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
++
++ /* force 1000, set loopback */
++ e1000_write_phy_reg(hw, PHY_CONTROL, 0x4140);
++
++ /* Now set up the MAC to the same speed/duplex as the PHY. */
++ ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
++ ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
++ ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
++ E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
++ E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
++ E1000_CTRL_FD | /* Force Duplex to FULL */
++ E1000_CTRL_SLU); /* Set link up enable bit */
++
++ if (hw->phy.type == e1000_phy_m88)
++ ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
++
++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
++
++ /* Disable the receiver on the PHY so when a cable is plugged in, the
++ * PHY does not begin to autoneg when a cable is reconnected to the NIC.
++ */
++ if (hw->phy.type == e1000_phy_m88)
++ igb_phy_disable_receiver(adapter);
++
++ udelay(500);
++
++ return 0;
++}
++
++static int igb_set_phy_loopback(struct igb_adapter *adapter)
++{
++ return igb_integrated_phy_loopback(adapter);
++}
++
++static int igb_setup_loopback_test(struct igb_adapter *adapter)
++{
++ struct e1000_hw *hw = &adapter->hw;
++ u32 reg;
++
++ reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
++
++ /* use CTRL_EXT to identify link type as SGMII can appear as copper */
++ if (reg & E1000_CTRL_EXT_LINK_MODE_MASK) {
++ reg = E1000_READ_REG(hw, E1000_RCTL);
++ reg |= E1000_RCTL_LBM_TCVR;
++ E1000_WRITE_REG(hw, E1000_RCTL, reg);
++
++ E1000_WRITE_REG(hw, E1000_SCTL, E1000_ENABLE_SERDES_LOOPBACK);
++
++ reg = E1000_READ_REG(hw, E1000_CTRL);
++ reg &= ~(E1000_CTRL_RFCE |
++ E1000_CTRL_TFCE |
++ E1000_CTRL_LRST);
++ reg |= E1000_CTRL_SLU |
++ E1000_CTRL_FD;
++ E1000_WRITE_REG(hw, E1000_CTRL, reg);
++
++ /* Unset switch control to serdes energy detect */
++ reg = E1000_READ_REG(hw, E1000_CONNSW);
++ reg &= ~E1000_CONNSW_ENRGSRC;
++ E1000_WRITE_REG(hw, E1000_CONNSW, reg);
++
++ /* Set PCS register for forced speed */
++ reg = E1000_READ_REG(hw, E1000_PCS_LCTL);
++ reg &= ~E1000_PCS_LCTL_AN_ENABLE; /* Disable Autoneg*/
++ reg |= E1000_PCS_LCTL_FLV_LINK_UP | /* Force link up */
++ E1000_PCS_LCTL_FSV_1000 | /* Force 1000 */
++ E1000_PCS_LCTL_FDV_FULL | /* SerDes Full duplex */
++ E1000_PCS_LCTL_FSD | /* Force Speed */
++ E1000_PCS_LCTL_FORCE_LINK; /* Force Link */
++ E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg);
++
++ return 0;
++ }
++
++ return igb_set_phy_loopback(adapter);
++}
++
++static void igb_loopback_cleanup(struct igb_adapter *adapter)
++{
++ struct e1000_hw *hw = &adapter->hw;
++ u32 rctl;
++ u16 phy_reg;
++
++ rctl = E1000_READ_REG(hw, E1000_RCTL);
++ rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
++ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
++
++ hw->mac.autoneg = TRUE;
++ e1000_read_phy_reg(hw, PHY_CONTROL, &phy_reg);
++ if (phy_reg & MII_CR_LOOPBACK) {
++ phy_reg &= ~MII_CR_LOOPBACK;
++ e1000_write_phy_reg(hw, PHY_CONTROL, phy_reg);
++ e1000_phy_commit(hw);
++ }
++}
++
++static void igb_create_lbtest_frame(struct sk_buff *skb,
++ unsigned int frame_size)
++{
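++ /* Fill the first half of the frame with 0xFF and most of the second
++ * half with 0xAA, then drop in the 0xBE/0xAF marker bytes that
++ * igb_check_lbtest_frame() looks for on receive. */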
++ memset(skb->data, 0xFF, frame_size);
++ frame_size &= ~1;
++ memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
++ memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
++ memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
++}
++
++static int igb_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
++{
++ frame_size &= ~1;
++ if (*(skb->data + 3) == 0xFF) {
++ if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
++ (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
++ return 0;
++ }
++ }
++ return 13;
++}
++
++static int igb_run_loopback_test(struct igb_adapter *adapter)
++{
++ struct igb_ring *tx_ring = &adapter->test_tx_ring;
++ struct igb_ring *rx_ring = &adapter->test_rx_ring;
++ int i, j, k, l, lc, good_cnt, ret_val = 0;
++ unsigned long time;
++
++ writel(rx_ring->count - 1, rx_ring->tail);
++
++ /* Calculate the loop count based on the largest descriptor ring.
++ * The idea is to wrap the largest ring a number of times using 64
++ * send/receive pairs during each loop.
++ */
++
++ if (rx_ring->count <= tx_ring->count)
++ lc = ((tx_ring->count / 64) * 2) + 1;
++ else
++ lc = ((rx_ring->count / 64) * 2) + 1;
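++ /* For example, 256-entry test rings give lc = ((256 / 64) * 2) + 1 = 9
++ * iterations of 64 frames each. */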
++
++ k = l = 0;
++ for (j = 0; j <= lc; j++) { /* loop count loop */
++ for (i = 0; i < 64; i++) { /* send the packets */
++ igb_create_lbtest_frame(tx_ring->buffer_info[k].skb,
++ 1024);
++ pci_dma_sync_single_for_device(tx_ring->pdev,
++ tx_ring->buffer_info[k].dma,
++ tx_ring->buffer_info[k].length,
++ PCI_DMA_TODEVICE);
++ if (unlikely(++k == tx_ring->count))
++ k = 0;
++ }
++ writel(k, tx_ring->tail);
++ msleep(200);
++
++ time = jiffies; /* set the start time for the receive */
++ good_cnt = 0;
++ do { /* receive the sent packets */
++ pci_dma_sync_single_for_cpu(rx_ring->pdev,
++ rx_ring->buffer_info[l].dma,
++ rx_ring->rx_buffer_len,
++ PCI_DMA_FROMDEVICE);
++
++ ret_val = igb_check_lbtest_frame(
++ rx_ring->buffer_info[l].skb, 1024);
++ if (!ret_val)
++ good_cnt++;
++ if (unlikely(++l == rx_ring->count))
++ l = 0;
++ /* time + 20 msecs (200 msecs on 2.4) is more than
++ * enough time to complete the receives; if it is
++ * exceeded, break out and report an error
++ */
++ } while (good_cnt < 64 && jiffies < (time + 20));
++ if (good_cnt != 64) {
++ ret_val = 13; /* ret_val is the same as mis-compare */
++ break;
++ }
++ if (jiffies >= (time + 20)) {
++ ret_val = 14; /* error code for time out error */
++ break;
++ }
++ } /* end loop count loop */
++ return ret_val;
++}
++
++static int igb_loopback_test(struct igb_adapter *adapter, u64 *data)
++{
++ /* PHY loopback cannot be performed if SoL/IDER
++ * sessions are active */
++ if (e1000_check_reset_block(&adapter->hw)) {
++ DPRINTK(DRV, ERR, "Cannot do PHY loopback test "
++ "when SoL/IDER is active.\n");
++ *data = 0;
++ goto out;
++ }
++ *data = igb_setup_desc_rings(adapter);
++ if (*data)
++ goto out;
++ *data = igb_setup_loopback_test(adapter);
++ if (*data)
++ goto err_loopback;
++ *data = igb_run_loopback_test(adapter);
++ igb_loopback_cleanup(adapter);
++
++err_loopback:
++ igb_free_desc_rings(adapter);
++out:
++ return *data;
++}
++
++static int igb_link_test(struct igb_adapter *adapter, u64 *data)
++{
++ struct e1000_hw *hw = &adapter->hw;
++ *data = 0;
++ if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
++ int i = 0;
++ adapter->hw.mac.serdes_has_link = FALSE;
++
++ /* On some blade server designs, link establishment
++ * could take as long as 2-3 minutes */
++ do {
++ e1000_check_for_link(&adapter->hw);
++ if (adapter->hw.mac.serdes_has_link)
++ return *data;
++ msleep(20);
++ } while (i++ < 3750);
++
++ *data = 1;
++ } else {
++ e1000_check_for_link(&adapter->hw);
++ if (adapter->hw.mac.autoneg)
++ msleep(4000);
++
++ if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU))
++ *data = 1;
++ }
++ return *data;
++}
++
++static void igb_diag_test(struct net_device *netdev,
++ struct ethtool_test *eth_test, u64 *data)
++{
++ struct igb_adapter *adapter = netdev_priv(netdev);
++ u16 autoneg_advertised;
++ u8 forced_speed_duplex, autoneg;
++ bool if_running = netif_running(netdev);
++
++ set_bit(__IGB_TESTING, &adapter->state);
++ if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
++ /* Offline tests */
++
++ /* save speed, duplex, autoneg settings */
++ autoneg_advertised = adapter->hw.phy.autoneg_advertised;
++ forced_speed_duplex = adapter->hw.mac.forced_speed_duplex;
++ autoneg = adapter->hw.mac.autoneg;
++
++ DPRINTK(HW, INFO, "offline testing starting\n");
++
++ /* Link test performed before hardware reset so autoneg doesn't
++ * interfere with test result */
++ if (igb_link_test(adapter, &data[4]))
++ eth_test->flags |= ETH_TEST_FL_FAILED;
++
++ if (if_running)
++ /* indicate we're in test mode */
++ dev_close(netdev);
++ else
++ igb_reset(adapter);
++
++ if (igb_reg_test(adapter, &data[0]))
++ eth_test->flags |= ETH_TEST_FL_FAILED;
++
++ igb_reset(adapter);
++ if (igb_eeprom_test(adapter, &data[1]))
++ eth_test->flags |= ETH_TEST_FL_FAILED;
++
++ igb_reset(adapter);
++ if (igb_intr_test(adapter, &data[2]))
++ eth_test->flags |= ETH_TEST_FL_FAILED;
++
++ igb_reset(adapter);
++ if (igb_loopback_test(adapter, &data[3]))
++ eth_test->flags |= ETH_TEST_FL_FAILED;
++
++ /* restore speed, duplex, autoneg settings */
++ adapter->hw.phy.autoneg_advertised = autoneg_advertised;
++ adapter->hw.mac.forced_speed_duplex = forced_speed_duplex;
++ adapter->hw.mac.autoneg = autoneg;
++
++ /* force this routine to wait until autoneg complete/timeout */
++ adapter->hw.phy.autoneg_wait_to_complete = TRUE;
++ igb_reset(adapter);
++ adapter->hw.phy.autoneg_wait_to_complete = FALSE;
++
++ clear_bit(__IGB_TESTING, &adapter->state);
++ if (if_running)
++ dev_open(netdev);
++ } else {
++ DPRINTK(HW, INFO, "online testing starting\n");
++ /* Online tests */
++ if (igb_link_test(adapter, &data[4]))
++ eth_test->flags |= ETH_TEST_FL_FAILED;
++
++ /* Online tests aren't run; pass by default */
++ data[0] = 0;
++ data[1] = 0;
++ data[2] = 0;
++ data[3] = 0;
++
++ clear_bit(__IGB_TESTING, &adapter->state);
++ }
++ msleep_interruptible(4 * 1000);
++}
++
++static int igb_wol_exclusion(struct igb_adapter *adapter,
++ struct ethtool_wolinfo *wol)
++{
++ struct e1000_hw *hw = &adapter->hw;
++ int retval = 1; /* fail by default */
++
++ switch (hw->device_id) {
++ case E1000_DEV_ID_82575GB_QUAD_COPPER:
++ /* WoL not supported */
++ wol->supported = 0;
++ break;
++ case E1000_DEV_ID_82575EB_FIBER_SERDES:
++ case E1000_DEV_ID_82576_FIBER:
++ case E1000_DEV_ID_82576_SERDES:
++ /* Wake events not supported on port B */
++ if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FUNC_1) {
++ wol->supported = 0;
++ break;
++ }
++ /* return success for non excluded adapter ports */
++ retval = 0;
++ break;
++ case E1000_DEV_ID_82576_QUAD_COPPER:
++ /* quad port adapters only support WoL on port A */
++ if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) {
++ wol->supported = 0;
++ break;
++ }
++ /* return success for non excluded adapter ports */
++ retval = 0;
++ break;
++ default:
++ /* dual port cards now only support WoL on port A, unless it was
++ * enabled in the eeprom for port B, so exclude FUNC_1 ports from
++ * having WoL enabled */
++ if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FUNC_1 &&
++ !adapter->eeprom_wol) {
++ wol->supported = 0;
++ break;
++ }
++
++ retval = 0;
++ }
++
++ return retval;
++}
++
++static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
++{
++ struct igb_adapter *adapter = netdev_priv(netdev);
++
++ wol->supported = WAKE_UCAST | WAKE_MCAST |
++ WAKE_BCAST | WAKE_MAGIC;
++ wol->wolopts = 0;
++
++ /* this function will set ->supported = 0 and return 1 if wol is not
++ * supported by this hardware */
++ if (igb_wol_exclusion(adapter, wol) ||
++ !device_can_wakeup(&adapter->pdev->dev))
++ return;
++
++ /* apply any specific unsupported masks here */
++ switch (adapter->hw.device_id) {
++ default:
++ break;
++ }
++
++ if (adapter->wol & E1000_WUFC_EX)
++ wol->wolopts |= WAKE_UCAST;
++ if (adapter->wol & E1000_WUFC_MC)
++ wol->wolopts |= WAKE_MCAST;
++ if (adapter->wol & E1000_WUFC_BC)
++ wol->wolopts |= WAKE_BCAST;
++ if (adapter->wol & E1000_WUFC_MAG)
++ wol->wolopts |= WAKE_MAGIC;
++
++ return;
++}
++
++static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
++{
++ struct igb_adapter *adapter = netdev_priv(netdev);
++
++ if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
++ return -EOPNOTSUPP;
++
++ if (igb_wol_exclusion(adapter, wol))
++ return wol->wolopts ? -EOPNOTSUPP : 0;
++
++ /* these settings will always override what we currently have */
++ adapter->wol = 0;
++
++ if (wol->wolopts & WAKE_UCAST)
++ adapter->wol |= E1000_WUFC_EX;
++ if (wol->wolopts & WAKE_MCAST)
++ adapter->wol |= E1000_WUFC_MC;
++ if (wol->wolopts & WAKE_BCAST)
++ adapter->wol |= E1000_WUFC_BC;
++ if (wol->wolopts & WAKE_MAGIC)
++ adapter->wol |= E1000_WUFC_MAG;
++ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
++
++ return 0;
++}
++
++/* bit defines for adapter->led_status */
++#define IGB_LED_ON 0
++
++static int igb_phys_id(struct net_device *netdev, u32 data)
++{
++ struct igb_adapter *adapter = netdev_priv(netdev);
++ struct e1000_hw *hw = &adapter->hw;
++ unsigned long timeout;
++
++ timeout = data * 1000;
++
++ /*
++ * msleep_interruptible only accepts an unsigned int, so we are limited
++ * in how long a duration we can wait
++ */
++ if (!timeout || timeout > UINT_MAX)
++ timeout = UINT_MAX;
++
++ e1000_blink_led(hw);
++ msleep_interruptible(timeout);
++
++ e1000_led_off(hw);
++ clear_bit(IGB_LED_ON, &adapter->led_status);
++ e1000_cleanup_led(hw);
++
++ return 0;
++}
++
++static int igb_set_coalesce(struct net_device *netdev,
++ struct ethtool_coalesce *ec)
++{
++ struct igb_adapter *adapter = netdev_priv(netdev);
++ int i;
++
++ if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
++ ((ec->rx_coalesce_usecs > 3) &&
++ (ec->rx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
++ (ec->rx_coalesce_usecs == 2))
++ return -EINVAL;
++
++ /* a setting of 3 or less selects one of the adaptive ITR modes; larger
++ * values are microsecond counts kept shifted left by two, which is how
++ * igb_get_coalesce() recovers them */
++ if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) {
++ adapter->itr = IGB_START_ITR;
++ adapter->itr_setting = ec->rx_coalesce_usecs;
++ } else {
++ adapter->itr = ec->rx_coalesce_usecs << 2;
++ adapter->itr_setting = adapter->itr;
++ }
++
++ for (i = 0; i < adapter->num_q_vectors; i++) {
++ struct igb_q_vector *q_vector = adapter->q_vector[i];
++ q_vector->itr_val = adapter->itr;
++ q_vector->set_itr = 1;
++ }
++
++ return 0;
++}
++
++static int igb_get_coalesce(struct net_device *netdev,
++ struct ethtool_coalesce *ec)
++{
++ struct igb_adapter *adapter = netdev_priv(netdev);
++
++ if (adapter->itr_setting <= 3)
++ ec->rx_coalesce_usecs = adapter->itr_setting;
++ else
++ ec->rx_coalesce_usecs = adapter->itr_setting >> 2;
++
++ return 0;
++}
++
++static int igb_nway_reset(struct net_device *netdev)
++{
++ struct igb_adapter *adapter = netdev_priv(netdev);
++ if (netif_running(netdev))
++ igb_reinit_locked(adapter);
++ return 0;
++}
++
++#ifdef HAVE_ETHTOOL_GET_SSET_COUNT
++static int igb_get_sset_count(struct net_device *netdev, int sset)
++{
++ switch (sset) {
++ case ETH_SS_STATS:
++ return IGB_STATS_LEN;
++ case ETH_SS_TEST:
++ return IGB_TEST_LEN;
++ default:
++ return -ENOTSUPP;
++ }
++}
++#else
++static int igb_get_stats_count(struct net_device *netdev)
++{
++ return IGB_STATS_LEN;
++}
++
++static int igb_diag_test_count(struct net_device *netdev)
++{
++ return IGB_TEST_LEN;
++}
++#endif
++
++static void igb_get_ethtool_stats(struct net_device *netdev,
++ struct ethtool_stats *stats, u64 *data)
++{
++ struct igb_adapter *adapter = netdev_priv(netdev);
++ u64 *queue_stat;
++ int stat_count = sizeof(struct igb_queue_stats) / sizeof(u64);
++ int j;
++ int i;
++ u64 restart_queue = 0, hw_csum_err = 0, hw_csum_good = 0;
++#ifdef IGB_LRO
++ int aggregated = 0, flushed = 0, no_desc = 0;
++#endif
++
++ /* collect tx ring stats */
++ for (i = 0; i < adapter->num_tx_queues; i++)
++ restart_queue += adapter->tx_ring[i].restart_queue;
++ adapter->restart_queue = restart_queue;
++
++
++ for (i = 0; i < adapter->num_rx_queues; i++) {
++ hw_csum_err += adapter->rx_ring[i].hw_csum_err;
++ hw_csum_good += adapter->rx_ring[i].hw_csum_good;
++#ifdef IGB_LRO
++ aggregated += adapter->rx_ring[i].lro_mgr.stats.aggregated;
++ flushed += adapter->rx_ring[i].lro_mgr.stats.flushed;
++ no_desc += adapter->rx_ring[i].lro_mgr.stats.no_desc;
++ }
++ adapter->lro_aggregated = aggregated;
++ adapter->lro_flushed = flushed;
++ adapter->lro_no_desc = no_desc;
++#else
++ }
++#endif
++ adapter->hw_csum_err = hw_csum_err;
++ adapter->hw_csum_good = hw_csum_good;
++
++ igb_update_stats(adapter);
++
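++ /* The output buffer holds the global stats first, followed by the
++ * per-queue counters in the same order that igb_get_strings() names
++ * them. */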
++ for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
++ char *p = (char *)adapter+igb_gstrings_stats[i].stat_offset;
++ data[i] = (igb_gstrings_stats[i].sizeof_stat ==
++ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
++ }
++ for (j = 0; j < adapter->num_tx_queues; j++) {
++ int k;
++ queue_stat = (u64 *)&adapter->tx_ring[j].stats;
++ for (k = 0; k < stat_count; k++)
++ data[i + k] = queue_stat[k];
++ i += k;
++ }
++ for (j = 0; j < adapter->num_rx_queues; j++) {
++ int k;
++ queue_stat = (u64 *)&adapter->rx_ring[j].stats;
++ for (k = 0; k < stat_count; k++)
++ data[i + k] = queue_stat[k];
++ i += k;
++ }
++}
++
++static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
++{
++ struct igb_adapter *adapter = netdev_priv(netdev);
++ u8 *p = data;
++ int i;
++
++ switch (stringset) {
++ case ETH_SS_TEST:
++ memcpy(data, *igb_gstrings_test,
++ IGB_TEST_LEN*ETH_GSTRING_LEN);
++ break;
++ case ETH_SS_STATS:
++ for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
++ memcpy(p, igb_gstrings_stats[i].stat_string,
++ ETH_GSTRING_LEN);
++ p += ETH_GSTRING_LEN;
++ }
++ for (i = 0; i < adapter->num_tx_queues; i++) {
++ sprintf(p, "tx_queue_%u_packets", i);
++ p += ETH_GSTRING_LEN;
++ sprintf(p, "tx_queue_%u_bytes", i);
++ p += ETH_GSTRING_LEN;
++ }
++ for (i = 0; i < adapter->num_rx_queues; i++) {
++ sprintf(p, "rx_queue_%u_packets", i);
++ p += ETH_GSTRING_LEN;
++ sprintf(p, "rx_queue_%u_bytes", i);
++ p += ETH_GSTRING_LEN;
++ }
++/* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */
++ break;
++ }
++}
++
++static struct ethtool_ops igb_ethtool_ops = {
++ .get_settings = igb_get_settings,
++ .set_settings = igb_set_settings,
++ .get_drvinfo = igb_get_drvinfo,
++ .get_regs_len = igb_get_regs_len,
++ .get_regs = igb_get_regs,
++ .get_wol = igb_get_wol,
++ .set_wol = igb_set_wol,
++ .get_msglevel = igb_get_msglevel,
++ .set_msglevel = igb_set_msglevel,
++ .nway_reset = igb_nway_reset,
++ .get_link = ethtool_op_get_link,
++ .get_eeprom_len = igb_get_eeprom_len,
++ .get_eeprom = igb_get_eeprom,
++ .set_eeprom = igb_set_eeprom,
++ .get_ringparam = igb_get_ringparam,
++ .set_ringparam = igb_set_ringparam,
++ .get_pauseparam = igb_get_pauseparam,
++ .set_pauseparam = igb_set_pauseparam,
++ .get_rx_csum = igb_get_rx_csum,
++ .set_rx_csum = igb_set_rx_csum,
++ .get_tx_csum = igb_get_tx_csum,
++ .set_tx_csum = igb_set_tx_csum,
++ .get_sg = ethtool_op_get_sg,
++ .set_sg = ethtool_op_set_sg,
++#ifdef NETIF_F_TSO
++ .get_tso = ethtool_op_get_tso,
++ .set_tso = igb_set_tso,
++#endif
++#ifdef HAVE_ETHTOOL_GET_SSET_COUNT
++ .get_sset_count = igb_get_sset_count,
++#else
++ .get_stats_count = igb_get_stats_count,
++ .self_test_count = igb_diag_test_count,
++#endif
++ .self_test = igb_diag_test,
++ .get_strings = igb_get_strings,
++ .phys_id = igb_phys_id,
++ .get_ethtool_stats = igb_get_ethtool_stats,
++#ifdef ETHTOOL_GPERMADDR
++ .get_perm_addr = ethtool_op_get_perm_addr,
++#endif
++ .get_coalesce = igb_get_coalesce,
++ .set_coalesce = igb_set_coalesce,
++#ifdef NETIF_F_LRO
++ .get_flags = ethtool_op_get_flags,
++ .set_flags = ethtool_op_set_flags,
++#endif
++};
++
++void igb_set_ethtool_ops(struct net_device *netdev)
++{
++ SET_ETHTOOL_OPS(netdev, &igb_ethtool_ops);
++}
++#endif /* SIOCETHTOOL */
+Index: linux-2.6.22/drivers/net/igb/igb_main.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.22/drivers/net/igb/igb_main.c 2009-12-18 12:39:22.000000000 -0500
+@@ -0,0 +1,6250 @@
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2009 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ You should have received a copy of the GNU General Public License along with
++ this program; if not, write to the Free Software Foundation, Inc.,
++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
++
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/init.h>
++#include <linux/vmalloc.h>
++#include <linux/pagemap.h>
++#include <linux/netdevice.h>
++#include <linux/tcp.h>
++#ifdef NETIF_F_TSO
++#include <net/checksum.h>
++#ifdef NETIF_F_TSO6
++#include <linux/ipv6.h>
++#include <net/ip6_checksum.h>
++#endif
++#endif
++#ifdef SIOCGMIIPHY
++#include <linux/mii.h>
++#endif
++#ifdef SIOCETHTOOL
++#include <linux/ethtool.h>
++#endif
++#include <linux/if_vlan.h>
++
++#include "igb.h"
++
++#define DRV_DEBUG
++#define DRV_HW_PERF
++#define VERSION_SUFFIX
++
++#define DRV_VERSION "2.0.6" VERSION_SUFFIX DRV_DEBUG DRV_HW_PERF
++
++char igb_driver_name[] = "igb";
++char igb_driver_version[] = DRV_VERSION;
++static const char igb_driver_string[] =
++ "Intel(R) Gigabit Ethernet Network Driver";
++static const char igb_copyright[] = "Copyright (c) 2007-2009 Intel Corporation.";
++
++static struct pci_device_id igb_pci_tbl[] = {
++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576) },
++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS) },
++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES) },
++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER) },
++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES) },
++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD) },
++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER) },
++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER) },
++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES) },
++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER) },
++ /* required last entry */
++ {0, }
++};
++
++MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
++
++void igb_reset(struct igb_adapter *);
++static int igb_setup_all_tx_resources(struct igb_adapter *);
++static int igb_setup_all_rx_resources(struct igb_adapter *);
++static void igb_free_all_tx_resources(struct igb_adapter *);
++static void igb_free_all_rx_resources(struct igb_adapter *);
++static void igb_setup_mrqc(struct igb_adapter *);
++void igb_update_stats(struct igb_adapter *);
++static int igb_probe(struct pci_dev *, const struct pci_device_id *);
++static void __devexit igb_remove(struct pci_dev *pdev);
++static int igb_sw_init(struct igb_adapter *);
++static int igb_open(struct net_device *);
++static int igb_close(struct net_device *);
++static void igb_configure_tx(struct igb_adapter *);
++static void igb_configure_rx(struct igb_adapter *);
++static void igb_clean_all_tx_rings(struct igb_adapter *);
++static void igb_clean_all_rx_rings(struct igb_adapter *);
++static void igb_clean_tx_ring(struct igb_ring *);
++static void igb_clean_rx_ring(struct igb_ring *);
++static void igb_set_rx_mode(struct net_device *);
++static void igb_update_phy_info(unsigned long);
++static void igb_watchdog(unsigned long);
++static void igb_watchdog_task(struct work_struct *);
++static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *, struct net_device *,
++ struct igb_ring *);
++static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
++static struct net_device_stats *igb_get_stats(struct net_device *);
++static int igb_change_mtu(struct net_device *, int);
++static int igb_set_mac(struct net_device *, void *);
++static void igb_set_uta(struct igb_adapter *adapter);
++static irqreturn_t igb_intr(int irq, void *);
++static irqreturn_t igb_intr_msi(int irq, void *);
++static irqreturn_t igb_msix_other(int irq, void *);
++static irqreturn_t igb_msix_ring(int irq, void *);
++#ifdef IGB_DCA
++static void igb_update_dca(struct igb_q_vector *);
++static void igb_setup_dca(struct igb_adapter *);
++#endif /* IGB_DCA */
++static bool igb_clean_tx_irq(struct igb_q_vector *);
++static int igb_poll(struct napi_struct *, int);
++static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int *, int);
++#ifdef IGB_LRO
++static int igb_get_skb_hdr(struct sk_buff *skb, void **, void **, u64 *, void *);
++#endif
++static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
++static void igb_tx_timeout(struct net_device *);
++static void igb_reset_task(struct work_struct *);
++static void igb_vlan_rx_register(struct net_device *, struct vlan_group *);
++static void igb_vlan_rx_add_vid(struct net_device *, u16);
++static void igb_vlan_rx_kill_vid(struct net_device *, u16);
++static void igb_restore_vlan(struct igb_adapter *);
++static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8);
++static void igb_ping_all_vfs(struct igb_adapter *);
++static void igb_msg_task(struct igb_adapter *);
++static void igb_vmm_control(struct igb_adapter *);
++static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
++static void igb_vf_configuration(struct pci_dev *, unsigned int);
++
++#ifdef CONFIG_PM
++static int igb_suspend(struct pci_dev *, pm_message_t);
++static int igb_resume(struct pci_dev *);
++#endif
++#ifndef USE_REBOOT_NOTIFIER
++static void igb_shutdown(struct pci_dev *);
++#else
++static int igb_notify_reboot(struct notifier_block *, unsigned long, void *);
++static struct notifier_block igb_notifier_reboot = {
++ .notifier_call = igb_notify_reboot,
++ .next = NULL,
++ .priority = 0
++};
++#endif
++#ifdef IGB_DCA
++static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
++static struct notifier_block dca_notifier = {
++ .notifier_call = igb_notify_dca,
++ .next = NULL,
++ .priority = 0
++};
++#endif
++
++#ifdef CONFIG_NET_POLL_CONTROLLER
++/* for netdump / net console */
++static void igb_netpoll (struct net_device *);
++#endif
++
++#ifdef HAVE_PCI_ERS
++static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
++ pci_channel_state_t);
++static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
++static void igb_io_resume(struct pci_dev *);
++
++static struct pci_error_handlers igb_err_handler = {
++ .error_detected = igb_io_error_detected,
++ .slot_reset = igb_io_slot_reset,
++ .resume = igb_io_resume,
++};
++#endif
++
++
++static struct pci_driver igb_driver = {
++ .name = igb_driver_name,
++ .id_table = igb_pci_tbl,
++ .probe = igb_probe,
++ .remove = __devexit_p(igb_remove),
++#ifdef CONFIG_PM
++ /* Power Management Hooks */
++ .suspend = igb_suspend,
++ .resume = igb_resume,
++#endif
++#ifndef USE_REBOOT_NOTIFIER
++ .shutdown = igb_shutdown,
++#endif
++#ifdef HAVE_PCI_ERS
++ .err_handler = &igb_err_handler,
++#endif
++};
++
++MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
++MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
++MODULE_LICENSE("GPL");
++MODULE_VERSION(DRV_VERSION);
++
++static void igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add)
++{
++ struct e1000_host_mng_dhcp_cookie *mng_cookie = &hw->mng_cookie;
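++ /* The VLAN filter table is an array of 32-bit registers: the upper
++ * bits of the vid pick the register and the low five bits pick the
++ * bit within it. */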
++ u32 index = (vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK;
++ u32 mask = 1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
++ u32 vfta;
++
++ /*
++ * if this is the management vlan the only option is to add it in so
++ * that the management pass through will continue to work
++ */
++ if ((mng_cookie->status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
++ (vid == mng_cookie->vlan_id))
++ add = TRUE;
++
++ vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
++ if (add)
++ vfta |= mask;
++ else
++ vfta &= ~mask;
++
++ e1000_write_vfta(hw, index, vfta);
++}
++
++#ifdef SIOCSHWTSTAMP
++/**
++ * igb_read_clock - read raw cycle counter (to be used by time counter)
++ */
++static cycle_t igb_read_clock(const struct cyclecounter *tc)
++{
++ struct igb_adapter *adapter =
++ container_of(tc, struct igb_adapter, cycles);
++ struct e1000_hw *hw = &adapter->hw;
++ u64 stamp = 0;
++ int shift = 0;
++
++ stamp |= (u64)E1000_READ_REG(hw, E1000_SYSTIML) << shift;
++ stamp |= (u64)E1000_READ_REG(hw, E1000_SYSTIMH) << (shift + 32);
++ return stamp;
++}
++
++#endif /* SIOCSHWTSTAMP */
++static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
++module_param(debug, int, 0);
++MODULE_PARM_DESC(debug, "Debug level (0=none, ..., 16=all)");
++
++/**
++ * igb_init_module - Driver Registration Routine
++ *
++ * igb_init_module is the first routine called when the driver is
++ * loaded. All it does is register with the PCI subsystem.
++ **/
++static int __init igb_init_module(void)
++{
++ int ret;
++ printk(KERN_INFO "%s - version %s\n",
++ igb_driver_string, igb_driver_version);
++
++ printk(KERN_INFO "%s\n", igb_copyright);
++
++#ifdef IGB_DCA
++ dca_register_notify(&dca_notifier);
++#endif
++ ret = pci_register_driver(&igb_driver);
++#ifdef USE_REBOOT_NOTIFIER
++ if (ret >= 0) {
++ register_reboot_notifier(&igb_notifier_reboot);
++ }
++#endif
++ return ret;
++}
++
++module_init(igb_init_module);
++
++/**
++ * igb_exit_module - Driver Exit Cleanup Routine
++ *
++ * igb_exit_module is called just before the driver is removed
++ * from memory.
++ **/
++static void __exit igb_exit_module(void)
++{
++#ifdef IGB_DCA
++ dca_unregister_notify(&dca_notifier);
++#endif
++#ifdef USE_REBOOT_NOTIFIER
++ unregister_reboot_notifier(&igb_notifier_reboot);
++#endif
++ pci_unregister_driver(&igb_driver);
++}
++
++module_exit(igb_exit_module);
++
++/**
++ * igb_cache_ring_register - Descriptor ring to register mapping
++ * @adapter: board private structure to initialize
++ *
++ * Once we know the feature-set enabled for the device, we'll cache
++ * the register offset the descriptor ring is assigned to.
++ **/
++static void igb_cache_ring_register(struct igb_adapter *adapter)
++{
++ int i = 0, j = 0;
++ u32 rbase_offset = adapter->vfs_allocated_count;
++
++ switch (adapter->hw.mac.type) {
++ case e1000_82576:
++ /* The queues are allocated for virtualization such that VF 0
++ * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
++ * In order to avoid collision we start at the first free queue
++ * and continue consuming queues in the same sequence
++ */
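++ /* The expression below maps queue i to register index
++ * ((i & 1) << 3) + (i >> 1), so eight RSS queues land on indices
++ * 0, 8, 1, 9, 2, 10, 3, 11 relative to rbase_offset. */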
++ if ((adapter->RSS_queues > 1) && adapter->VMDQ_queues) {
++ for (; i < adapter->RSS_queues; i++)
++ adapter->rx_ring[i].reg_idx = rbase_offset +
++ ((i & 0x1) << 3) + (i >> 1);
++#ifdef HAVE_TX_MQ
++ for (; j < adapter->RSS_queues; j++)
++ adapter->tx_ring[j].reg_idx = rbase_offset +
++ ((j & 0x1) << 3) + (j >> 1);
++#endif
++ }
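++ /* Fall through - any remaining queues use the default linear mapping below */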
++ case e1000_82575:
++ default:
++ for (; i < adapter->num_rx_queues; i++)
++ adapter->rx_ring[i].reg_idx = rbase_offset + i;
++ for (; j < adapter->num_tx_queues; j++)
++ adapter->tx_ring[j].reg_idx = rbase_offset + j;
++ break;
++ }
++}
++
++static void igb_free_queues(struct igb_adapter *adapter)
++{
++ kfree(adapter->tx_ring);
++ kfree(adapter->rx_ring);
++
++ adapter->tx_ring = NULL;
++ adapter->rx_ring = NULL;
++
++ adapter->num_rx_queues = 0;
++ adapter->num_tx_queues = 0;
++
++}
++
++/**
++ * igb_alloc_queues - Allocate memory for all rings
++ * @adapter: board private structure to initialize
++ *
++ * We allocate one ring per queue at run-time since we don't know the
++ * number of queues at compile-time.
++ **/
++static int igb_alloc_queues(struct igb_adapter *adapter)
++{
++ int i;
++
++ adapter->tx_ring = kcalloc(adapter->num_tx_queues,
++ sizeof(struct igb_ring), GFP_KERNEL);
++ if (!adapter->tx_ring)
++ goto err;
++
++ adapter->rx_ring = kcalloc(adapter->num_rx_queues,
++ sizeof(struct igb_ring), GFP_KERNEL);
++ if (!adapter->rx_ring)
++ goto err;
++
++ for (i = 0; i < adapter->num_tx_queues; i++) {
++ struct igb_ring *ring = &(adapter->tx_ring[i]);
++ ring->count = adapter->tx_ring_count;
++ ring->queue_index = i;
++ ring->pdev = adapter->pdev;
++ /* For 82575, context index must be unique per ring. */
++ if (adapter->hw.mac.type == e1000_82575)
++ ring->ctx_idx = i << 4;
++
++ }
++ for (i = 0; i < adapter->num_rx_queues; i++) {
++ struct igb_ring *ring = &(adapter->rx_ring[i]);
++ ring->count = adapter->rx_ring_count;
++ ring->queue_index = i;
++ ring->pdev = adapter->pdev;
++ ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
++ ring->rx_ps_hdr_size = 0; /* disable packet split */
++ ring->rx_csum = true; /* enable rx checksum */
++
++#ifdef IGB_LRO
++ /* Initial LRO Settings */
++ ring->lro_mgr.max_aggr = adapter->lro_max_aggr;
++ ring->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS;
++ ring->lro_mgr.get_skb_header = igb_get_skb_hdr;
++ ring->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
++ ring->lro_mgr.dev = adapter->netdev;
++ ring->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
++ ring->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
++#endif
++ }
++
++ igb_cache_ring_register(adapter);
++
++ return E1000_SUCCESS;
++
++err:
++ igb_free_queues(adapter);
++
++ return -ENOMEM;
++}
++
++static void igb_configure_lli(struct igb_adapter *adapter)
++{
++ struct e1000_hw *hw = &adapter->hw;
++ u16 port;
++
++ /* LLI should only be enabled for MSI-X or MSI interrupts */
++ if (!adapter->msix_entries && !(adapter->flags & IGB_FLAG_HAS_MSI))
++ return;
++
++ if (adapter->lli_port) {
++ /* use filter 0 for port */
++ port = htons((u16)adapter->lli_port);
++ E1000_WRITE_REG(hw, E1000_IMIR(0),
++ (port | E1000_IMIR_PORT_IM_EN));
++ E1000_WRITE_REG(hw, E1000_IMIREXT(0),
++ (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
++ }
++
++ if (adapter->flags & IGB_FLAG_LLI_PUSH) {
++ /* use filter 1 for push flag */
++ E1000_WRITE_REG(hw, E1000_IMIR(1),
++ (E1000_IMIR_PORT_BP | E1000_IMIR_PORT_IM_EN));
++ E1000_WRITE_REG(hw, E1000_IMIREXT(1),
++ (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_PSH));
++ }
++
++ if (adapter->lli_size) {
++ /* use filter 2 for size */
++ E1000_WRITE_REG(hw, E1000_IMIR(2),
++ (E1000_IMIR_PORT_BP | E1000_IMIR_PORT_IM_EN));
++ E1000_WRITE_REG(hw, E1000_IMIREXT(2),
++ (adapter->lli_size | E1000_IMIREXT_CTRL_BP));
++ }
++
++}
++
++#define IGB_N0_QUEUE -1
++static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
++{
++ u32 msixbm = 0;
++ struct igb_adapter *adapter = q_vector->adapter;
++ struct e1000_hw *hw = &adapter->hw;
++ u32 ivar, index;
++ int rx_queue = IGB_N0_QUEUE;
++ int tx_queue = IGB_N0_QUEUE;
++
++ if (q_vector->rx_ring)
++ rx_queue = q_vector->rx_ring->reg_idx;
++ if (q_vector->tx_ring)
++ tx_queue = q_vector->tx_ring->reg_idx;
++
++ switch (hw->mac.type) {
++ case e1000_82575:
++ /* The 82575 assigns vectors using a bitmask, which matches the
++ bitmask for the EICR/EIMS/EIMC registers. To assign one
++ or more queues to a vector, we write the appropriate bits
++ into the MSIXBM register for that vector. */
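++ /* Illustrative example (assuming the usual layout where the RX queue
++ bits start at bit 0 and the TX queue bits at bit 8): mapping Rx
++ queue 2 and Tx queue 2 to one vector gives
++ msixbm = (E1000_EICR_RX_QUEUE0 << 2) | (E1000_EICR_TX_QUEUE0 << 2). */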
++ if (rx_queue > IGB_N0_QUEUE)
++ msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
++ if (tx_queue > IGB_N0_QUEUE)
++ msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
++ E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), msix_vector, msixbm);
++ q_vector->eims_value = msixbm;
++ break;
++ case e1000_82576:
++ /* 82576 uses a table-based method for assigning vectors.
++ Each queue has a single entry in the table to which we write
++ a vector number along with a "valid" bit. Sadly, the layout
++ of the table is somewhat counterintuitive. */
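++ /* As implemented below, each IVAR0 entry covers two queue indices
++ (n = queue & 0x7): byte 0 holds the vector for Rx queue n, byte 1
++ for Tx queue n, byte 2 for Rx queue n + 8 and byte 3 for Tx queue
++ n + 8, each OR'd with E1000_IVAR_VALID. */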
++ if (rx_queue > IGB_N0_QUEUE) {
++ index = (rx_queue & 0x7);
++ ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
++ if (rx_queue < 8) {
++ /* vector goes into low byte of register */
++ ivar = ivar & 0xFFFFFF00;
++ ivar |= msix_vector | E1000_IVAR_VALID;
++ } else {
++ /* vector goes into third byte of register */
++ ivar = ivar & 0xFF00FFFF;
++ ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
++ }
++ E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
++ }
++ if (tx_queue > IGB_N0_QUEUE) {
++ index = (tx_queue & 0x7);
++ ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
++ if (tx_queue < 8) {
++ /* vector goes into second byte of register */
++ ivar = ivar & 0xFFFF00FF;
++ ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
++ } else {
++ /* vector goes into high byte of register */
++ ivar = ivar & 0x00FFFFFF;
++ ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
++ }
++ E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
++ }
++ q_vector->eims_value = 1 << msix_vector;
++ break;
++ default:
++ BUG();
++ break;
++ }
++}
++
++/**
++ * igb_configure_msix - Configure MSI-X hardware
++ *
++ * igb_configure_msix sets up the hardware to properly
++ * generate MSI-X interrupts.
++ **/
++static void igb_configure_msix(struct igb_adapter *adapter)
++{
++ u32 tmp;
++ int i, vector = 0;
++ struct e1000_hw *hw = &adapter->hw;
++
++ adapter->eims_enable_mask = 0;
++
++ /* set vector for other causes, i.e. link changes */
++ switch (hw->mac.type) {
++ case e1000_82575:
++ tmp = E1000_READ_REG(hw, E1000_CTRL_EXT);
++ /* enable MSI-X PBA support */
++ tmp |= E1000_CTRL_EXT_PBA_CLR;
++
++ /* Auto-Mask interrupts upon ICR read. */
++ tmp |= E1000_CTRL_EXT_EIAME;
++ tmp |= E1000_CTRL_EXT_IRCA;
++
++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmp);
++
++ /* enable msix_other interrupt */
++ E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), vector++,
++ E1000_EIMS_OTHER);
++ adapter->eims_other = E1000_EIMS_OTHER;
++
++ break;
++
++ case e1000_82576:
++ /* Turn on MSI-X capability first, or our settings
++ * won't stick. And it will take days to debug. */
++ E1000_WRITE_REG(hw, E1000_GPIE, E1000_GPIE_MSIX_MODE |
++ E1000_GPIE_PBA | E1000_GPIE_EIAME |
++ E1000_GPIE_NSICR);
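++ /* Roughly: MSIX_MODE selects multiple MSI-X vectors, PBA enables
++ * MSI-X PBA support, EIAME auto-masks extended interrupts on
++ * assertion and NSICR selects non-selective interrupt clear on
++ * read; see the 82576 datasheet for the authoritative bit
++ * definitions. */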
++
++ /* enable msix_other interrupt */
++ adapter->eims_other = 1 << vector;
++ tmp = (vector++ | E1000_IVAR_VALID) << 8;
++
++ E1000_WRITE_REG(hw, E1000_IVAR_MISC, tmp);
++ break;
++ default:
++ /* do nothing, since nothing else supports MSI-X */
++ break;
++ } /* switch (hw->mac.type) */
++
++ adapter->eims_enable_mask |= adapter->eims_other;
++
++ for (i = 0; i < adapter->num_q_vectors; i++) {
++ struct igb_q_vector *q_vector = adapter->q_vector[i];
++ igb_assign_vector(q_vector, vector++);
++ adapter->eims_enable_mask |= q_vector->eims_value;
++ }
++
++ E1000_WRITE_FLUSH(hw);
++}
++
++/**
++ * igb_request_msix - Initialize MSI-X interrupts
++ *
++ * igb_request_msix allocates MSI-X vectors and requests interrupts from the
++ * kernel.
++ **/
++static int igb_request_msix(struct igb_adapter *adapter)
++{
++ struct net_device *netdev = adapter->netdev;
++ struct e1000_hw *hw = &adapter->hw;
++ int i, err = 0, vector = 0;
++
++ err = request_irq(adapter->msix_entries[vector].vector,
++ &igb_msix_other, 0, netdev->name, adapter);
++ if (err)
++ goto out;
++ vector++;
++
++ for (i = 0; i < adapter->num_q_vectors; i++) {
++ struct igb_q_vector *q_vector = adapter->q_vector[i];
++
++ q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);
++
++ if (q_vector->rx_ring && q_vector->tx_ring)
++ sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
++ q_vector->rx_ring->queue_index);
++ else if (q_vector->tx_ring)
++ sprintf(q_vector->name, "%s-tx-%u", netdev->name,
++ q_vector->tx_ring->queue_index);
++ else if (q_vector->rx_ring)
++ sprintf(q_vector->name, "%s-rx-%u", netdev->name,
++ q_vector->rx_ring->queue_index);
++ else
++ sprintf(q_vector->name, "%s-unused", netdev->name);
++
++ err = request_irq(adapter->msix_entries[vector].vector,
++ &igb_msix_ring, 0, q_vector->name,
++ q_vector);
++ if (err)
++ goto out;
++ vector++;
++ }
++
++ igb_configure_msix(adapter);
++ return 0;
++out:
++ return err;
++}
++
++static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
++{
++ if (adapter->msix_entries) {
++ pci_disable_msix(adapter->pdev);
++ kfree(adapter->msix_entries);
++ adapter->msix_entries = NULL;
++ } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
++ pci_disable_msi(adapter->pdev);
++ }
++
++ adapter->num_rx_queues = 0;
++ adapter->num_tx_queues = 0;
++
++ return;
++}
++
++/**
++ * igb_free_q_vectors - Free memory allocated for interrupt vectors
++ * @adapter: board private structure to initialize
++ *
++ * This function frees the memory allocated to the q_vectors. In addition if
++ * NAPI is enabled it will delete any references to the NAPI struct prior
++ * to freeing the q_vector.
++ **/
++static void igb_free_q_vectors(struct igb_adapter *adapter)
++{
++ int v_idx;
++
++ for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
++ struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
++ adapter->q_vector[v_idx] = NULL;
++ netif_napi_del(&q_vector->napi);
++ kfree(q_vector);
++ }
++ adapter->num_q_vectors = 0;
++}
++
++/**
++ * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
++ *
++ * This function resets the device so that it has 0 rx queues, tx queues, and
++ * MSI-X interrupts allocated.
++ */
++static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
++{
++ igb_free_queues(adapter);
++ igb_free_q_vectors(adapter);
++ igb_reset_interrupt_capability(adapter);
++}
++
++/**
++ * igb_set_interrupt_capability - set MSI or MSI-X if supported
++ *
++ * Attempt to configure interrupts using the best available
++ * capabilities of the hardware and kernel.
++ **/
++static void igb_set_interrupt_capability(struct igb_adapter *adapter)
++{
++ int err;
++ int numvecs, i;
++
++ /* Number of supported queues. */
++ adapter->num_rx_queues = adapter->RSS_queues;
++
++ if (adapter->VMDQ_queues > 1)
++ adapter->num_rx_queues += adapter->VMDQ_queues - 1;
++
++#ifdef HAVE_TX_MQ
++ adapter->num_tx_queues = adapter->num_rx_queues;
++#else
++ adapter->num_tx_queues = max_t(u32, 1, adapter->VMDQ_queues);
++#endif
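++ /* Example of the counts above: with RSS_queues = 4 and
++ * VMDQ_queues = 2, num_rx_queues = 4 + (2 - 1) = 5, and
++ * num_tx_queues matches it when the stack supports multiple Tx
++ * queues (HAVE_TX_MQ). */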
++
++ switch (adapter->int_mode) {
++ case IGB_INT_MODE_MSIX:
++ /* start with one vector for every rx queue */
++ numvecs = adapter->num_rx_queues;
++
++ /* if the tx handler is separate, add 1 for every tx queue */
++ if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
++ numvecs += adapter->num_tx_queues;
++
++ /* store the number of vectors reserved for queues */
++ adapter->num_q_vectors = numvecs;
++
++ /* add 1 vector for link status interrupts */
++ numvecs++;
++ adapter->msix_entries = kcalloc(numvecs,
++ sizeof(struct msix_entry),
++ GFP_KERNEL);
++ if (adapter->msix_entries) {
++ for (i = 0; i < numvecs; i++)
++ adapter->msix_entries[i].entry = i;
++
++ err = pci_enable_msix(adapter->pdev,
++ adapter->msix_entries, numvecs);
++ if (err == 0)
++ break;
++ }
++ /* MSI-X failed, so fall through and try MSI */
++ DPRINTK(PROBE, WARNING, "Failed to initialize MSI-X interrupts."
++ " Falling back to MSI interrupts.\n");
++ igb_reset_interrupt_capability(adapter);
++ case IGB_INT_MODE_MSI:
++ if (!pci_enable_msi(adapter->pdev))
++ adapter->flags |= IGB_FLAG_HAS_MSI;
++ else
++ DPRINTK(PROBE, WARNING, "Failed to initialize MSI "
++ "interrupts. Falling back to legacy interrupts.\n");
++ /* Fall through */
++ case IGB_INT_MODE_LEGACY:
++ /* disable advanced features and set number of queues to 1 */
++ adapter->vfs_allocated_count = 0;
++ adapter->VMDQ_queues = 0;
++ adapter->RSS_queues = 1;
++ adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
++ adapter->num_rx_queues = 1;
++ adapter->num_tx_queues = 1;
++ adapter->num_q_vectors = 1;
++ /* Don't do anything; this is system default */
++ break;
++ }
++
++#ifdef HAVE_TX_MQ
++ /* Notify the stack of the (possibly) reduced Tx Queue count. */
++#ifdef CONFIG_NETDEVICES_MULTIQUEUE
++ adapter->netdev->egress_subqueue_count =
++ min_t(u32, adapter->num_tx_queues, adapter->RSS_queues);
++#else
++ adapter->netdev->real_num_tx_queues =
++ min_t(u32, adapter->num_tx_queues, adapter->RSS_queues);
++#endif
++#endif
++
++ return;
++}
++
++/**
++ * igb_alloc_q_vectors - Allocate memory for interrupt vectors
++ * @adapter: board private structure to initialize
++ *
++ * We allocate one q_vector per queue interrupt. If allocation fails we
++ * return -ENOMEM.
++ **/
++static int igb_alloc_q_vectors(struct igb_adapter *adapter)
++{
++ struct igb_q_vector *q_vector;
++ struct e1000_hw *hw = &adapter->hw;
++ int v_idx;
++
++ for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
++ q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL);
++ if (!q_vector)
++ goto err_out;
++ q_vector->adapter = adapter;
++ q_vector->itr_val = adapter->itr;
++ q_vector->itr_shift = (hw->mac.type == e1000_82575) ? 16 : 0;
++ q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
++ q_vector->set_itr = 1;
++ netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
++ adapter->q_vector[v_idx] = q_vector;
++ }
++ return 0;
++
++err_out:
++ while (v_idx) {
++ v_idx--;
++ q_vector = adapter->q_vector[v_idx];
++ netif_napi_del(&q_vector->napi);
++ kfree(q_vector);
++ adapter->q_vector[v_idx] = NULL;
++ }
++ return -ENOMEM;
++}
++
++/**
++ * igb_map_ring_to_vector - maps allocated queues to vectors
++ *
++ * This function maps the recently allocated queues to vectors.
++ **/
++static int igb_map_ring_to_vector(struct igb_adapter *adapter)
++{
++ struct igb_q_vector *q_vector;
++ int i;
++ int v_idx = 0;
++
++ if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
++ (adapter->num_q_vectors < adapter->num_tx_queues))
++ return -ENOMEM;
++
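++ /* Two mapping strategies: if there is one vector per ring, give
++ * every Tx and Rx ring its own vector; otherwise share a vector
++ * between Rx ring i and Tx ring i and map any leftover Tx rings
++ * to the remaining vectors. */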
++ if (adapter->num_q_vectors == (adapter->num_rx_queues + adapter->num_tx_queues)) {
++ for (i = 0; i < adapter->num_tx_queues; i++) {
++ q_vector = adapter->q_vector[v_idx++];
++ adapter->tx_ring[i].q_vector = q_vector;
++ q_vector->tx_ring = &adapter->tx_ring[i];
++ }
++ for (i = 0; i < adapter->num_rx_queues; i++) {
++ q_vector = adapter->q_vector[v_idx++];
++ adapter->rx_ring[i].q_vector = q_vector;
++ q_vector->rx_ring = &adapter->rx_ring[i];
++ q_vector->rx_ring->q_vector = q_vector;
++ }
++ } else {
++ for (i = 0; i < adapter->num_rx_queues; i++) {
++ q_vector = adapter->q_vector[v_idx++];
++ adapter->rx_ring[i].q_vector = q_vector;
++ q_vector->rx_ring = &adapter->rx_ring[i];
++ if (i < adapter->num_tx_queues) {
++ adapter->tx_ring[i].q_vector = q_vector;
++ q_vector->tx_ring = &adapter->tx_ring[i];
++ }
++ }
++ for (; i < adapter->num_tx_queues; i++) {
++ q_vector = adapter->q_vector[v_idx++];
++ adapter->tx_ring[i].q_vector = q_vector;
++ q_vector->tx_ring = &adapter->tx_ring[i];
++ }
++ }
++ return 0;
++}
++
++/**
++ * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
++ *
++ * This function initializes the interrupts and allocates all of the queues.
++ **/
++static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
++{
++ int err;
++
++ igb_set_interrupt_capability(adapter);
++
++ err = igb_alloc_q_vectors(adapter);
++ if (err) {
++ DPRINTK(PROBE, ERR, "Unable to allocate memory for q_vectors\n");
++ goto err_alloc_q_vectors;
++ }
++
++ err = igb_alloc_queues(adapter);
++ if (err) {
++ DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
++ goto err_alloc_queues;
++ }
++
++ err = igb_map_ring_to_vector(adapter);
++ if (err) {
++ DPRINTK(PROBE, ERR, "Invalid q_vector to ring mapping\n");
++ goto err_map_queues;
++ }
++
++
++ return 0;
++err_map_queues:
++ igb_free_queues(adapter);
++err_alloc_queues:
++ igb_free_q_vectors(adapter);
++err_alloc_q_vectors:
++ igb_reset_interrupt_capability(adapter);
++ return err;
++}
++
++/**
++ * igb_request_irq - initialize interrupts
++ *
++ * Attempts to configure interrupts using the best available
++ * capabilities of the hardware and kernel.
++ **/
++static int igb_request_irq(struct igb_adapter *adapter)
++{
++ struct net_device *netdev = adapter->netdev;
++ struct e1000_hw *hw = &adapter->hw;
++ int err = 0;
++
++ if (adapter->msix_entries) {
++ err = igb_request_msix(adapter);
++ if (!err)
++ goto request_done;
++ /* fall back to MSI */
++ igb_clear_interrupt_scheme(adapter);
++ if (!pci_enable_msi(adapter->pdev))
++ adapter->flags |= IGB_FLAG_HAS_MSI;
++ igb_free_all_tx_resources(adapter);
++ igb_free_all_rx_resources(adapter);
++ adapter->num_tx_queues = 1;
++ adapter->num_rx_queues = 1;
++ adapter->num_q_vectors = 1;
++ err = igb_alloc_q_vectors(adapter);
++ if (err) {
++ DPRINTK(PROBE, ERR, "Unable to allocate memory for q_vectors\n");
++ goto request_done;
++ }
++ err = igb_alloc_queues(adapter);
++ if (err) {
++ DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
++ igb_free_q_vectors(adapter);
++ goto request_done;
++ }
++ igb_setup_all_tx_resources(adapter);
++ igb_setup_all_rx_resources(adapter);
++ } else {
++ switch (hw->mac.type) {
++ case e1000_82575:
++ E1000_WRITE_REG(hw, E1000_MSIXBM(0),
++ (E1000_EICR_RX_QUEUE0 |
++ E1000_EICR_TX_QUEUE0 |
++ E1000_EIMS_OTHER));
++ break;
++ case e1000_82576:
++ E1000_WRITE_REG(hw, E1000_IVAR0, E1000_IVAR_VALID);
++ break;
++ default:
++ break;
++ }
++ }
++ if (adapter->flags & IGB_FLAG_HAS_MSI) {
++ err = request_irq(adapter->pdev->irq, &igb_intr_msi, 0,
++ netdev->name, adapter);
++ if (!err)
++ goto request_done;
++
++ /* fall back to legacy interrupts */
++ igb_reset_interrupt_capability(adapter);
++ adapter->flags &= ~IGB_FLAG_HAS_MSI;
++ }
++
++ err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED,
++ netdev->name, adapter);
++
++ if (err) {
++ DPRINTK(PROBE, ERR, "Error %d getting interrupt\n", err);
++ goto request_done;
++ }
++
++request_done:
++ return err;
++}
++
++static void igb_free_irq(struct igb_adapter *adapter)
++{
++ if (adapter->msix_entries) {
++ int vector = 0, i;
++
++ free_irq(adapter->msix_entries[vector++].vector, adapter);
++
++ for (i = 0; i < adapter->num_q_vectors; i++) {
++ struct igb_q_vector *q_vector = adapter->q_vector[i];
++ free_irq(adapter->msix_entries[vector++].vector,
++ q_vector);
++ }
++ } else {
++ free_irq(adapter->pdev->irq, adapter);
++ }
++}
++
++/**
++ * igb_irq_disable - Mask off interrupt generation on the NIC
++ * @adapter: board private structure
++ **/
++static void igb_irq_disable(struct igb_adapter *adapter)
++{
++ struct e1000_hw *hw = &adapter->hw;
++
++ /*
++ * we need to be careful when disabling interrupts. The VFs are also
++ * mapped into these registers, so clearing the bits can cause
++ * issues for the VF drivers; only clear the bits we set ourselves
++ */
++ if (adapter->msix_entries) {
++ u32 regval = E1000_READ_REG(hw, E1000_EIAM);
++ regval &= ~adapter->eims_enable_mask;
++ E1000_WRITE_REG(hw, E1000_EIAM, regval);
++ E1000_WRITE_REG(hw, E1000_EIMC, adapter->eims_enable_mask);
++ regval = E1000_READ_REG(hw, E1000_EIAC);
++ regval &= ~adapter->eims_enable_mask;
++ E1000_WRITE_REG(hw, E1000_EIAC, regval);
++ }
++
++ E1000_WRITE_REG(hw, E1000_IAM, 0);
++ E1000_WRITE_REG(hw, E1000_IMC, ~0);
++ E1000_WRITE_FLUSH(hw);
++
++ synchronize_irq(adapter->pdev->irq);
++}
++
++/**
++ * igb_irq_enable - Enable default interrupt generation settings
++ * @adapter: board private structure
++ **/
++static void igb_irq_enable(struct igb_adapter *adapter)
++{
++ struct e1000_hw *hw = &adapter->hw;
++
++ if (adapter->msix_entries) {
++ u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC;
++ u32 regval = E1000_READ_REG(hw, E1000_EIAC);
++ E1000_WRITE_REG(hw, E1000_EIAC,
++ regval | adapter->eims_enable_mask);
++ regval = E1000_READ_REG(hw, E1000_EIAM);
++ E1000_WRITE_REG(hw, E1000_EIAM,
++ regval | adapter->eims_enable_mask);
++ E1000_WRITE_REG(hw, E1000_EIMS, adapter->eims_enable_mask);
++ if (adapter->vfs_allocated_count) {
++ E1000_WRITE_REG(hw, E1000_MBVFIMR, 0xFF);
++ ims |= E1000_IMS_VMMB;
++ }
++ E1000_WRITE_REG(hw, E1000_IMS, ims);
++ } else {
++ E1000_WRITE_REG(hw, E1000_IMS, IMS_ENABLE_MASK);
++ E1000_WRITE_REG(hw, E1000_IAM, IMS_ENABLE_MASK);
++ }
++}
++
++static void igb_update_mng_vlan(struct igb_adapter *adapter)
++{
++ struct e1000_hw *hw = &adapter->hw;
++ u16 vid = adapter->hw.mng_cookie.vlan_id;
++ u16 old_vid = adapter->mng_vlan_id;
++
++ if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
++ /* add VID to filter table */
++ igb_vfta_set(hw, vid, TRUE);
++ adapter->mng_vlan_id = vid;
++ } else {
++ adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
++ }
++
++ if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
++ (vid != old_vid) &&
++ !vlan_group_get_device(adapter->vlgrp, old_vid)) {
++ /* remove VID from filter table */
++ igb_vfta_set(hw, old_vid, FALSE);
++ }
++}
++
++/**
++ * igb_release_hw_control - release control of the h/w to f/w
++ * @adapter: address of board private structure
++ *
++ * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
++ * For ASF and Pass Through versions of f/w this means that the
++ * driver is no longer loaded.
++ *
++ **/
++static void igb_release_hw_control(struct igb_adapter *adapter)
++{
++ struct e1000_hw *hw = &adapter->hw;
++ u32 ctrl_ext;
++
++ /* Let firmware take over control of h/w */
++ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
++ E1000_WRITE_REG(hw, E1000_CTRL_EXT,
++ ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
++}
++
++/**
++ * igb_get_hw_control - get control of the h/w from f/w
++ * @adapter: address of board private structure
++ *
++ * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
++ * For ASF and Pass Through versions of f/w this means that
++ * the driver is loaded.
++ *
++ **/
++static void igb_get_hw_control(struct igb_adapter *adapter)
++{
++ struct e1000_hw *hw = &adapter->hw;
++ u32 ctrl_ext;
++
++ /* Let firmware know the driver has taken over */
++ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
++ E1000_WRITE_REG(hw, E1000_CTRL_EXT,
++ ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
++}
++
++/**
++ * igb_configure - configure the hardware for RX and TX
++ * @adapter: private board structure
++ **/
++static void igb_configure(struct igb_adapter *adapter)
++{
++ struct net_device *netdev = adapter->netdev;
++ int i;
++
++ igb_get_hw_control(adapter);
++ igb_set_rx_mode(netdev);
++
++ igb_restore_vlan(adapter);
++
++ igb_setup_tctl(adapter);
++ igb_setup_mrqc(adapter);
++ igb_setup_rctl(adapter);
++
++ igb_configure_tx(adapter);
++ igb_configure_rx(adapter);
++
++ e1000_rx_fifo_flush_82575(&adapter->hw);
++#ifdef CONFIG_NETDEVICES_MULTIQUEUE
++ if (adapter->num_tx_queues > 1)
++ netdev->features |= NETIF_F_MULTI_QUEUE;
++ else
++ netdev->features &= ~NETIF_F_MULTI_QUEUE;
++
++#endif
++ /* call IGB_DESC_UNUSED which always leaves
++ * at least 1 descriptor unused to make sure
++ * next_to_use != next_to_clean */
++ for (i = 0; i < adapter->num_rx_queues; i++) {
++ struct igb_ring *ring = &adapter->rx_ring[i];
++ if (igb_alloc_rx_buffers_adv(ring, IGB_DESC_UNUSED(ring)))
++ adapter->alloc_rx_buff_failed++;
++ }
++
++
++ adapter->tx_queue_len = netdev->tx_queue_len;
++}
++
++
++/**
++ * igb_up - Open the interface and prepare it to handle traffic
++ * @adapter: board private structure
++ **/
++int igb_up(struct igb_adapter *adapter)
++{
++ struct e1000_hw *hw = &adapter->hw;
++ int i;
++
++ /* hardware has been reset, we need to reload some things */
++ igb_configure(adapter);
++
++ clear_bit(__IGB_DOWN, &adapter->state);
++
++ for (i = 0; i < adapter->num_q_vectors; i++) {
++ struct igb_q_vector *q_vector = adapter->q_vector[i];
++ napi_enable(&q_vector->napi);
++ }
++ if (adapter->msix_entries)
++ igb_configure_msix(adapter);
++
++ igb_configure_lli(adapter);
++
++ /* Clear any pending interrupts. */
++ E1000_READ_REG(hw, E1000_ICR);
++ igb_irq_enable(adapter);
++
++ /* notify VFs that reset has been completed */
++ if (adapter->vfs_allocated_count) {
++ u32 reg_data = E1000_READ_REG(hw, E1000_CTRL_EXT);
++ reg_data |= E1000_CTRL_EXT_PFRSTD;
++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg_data);
++ }
++
++ /* start the watchdog. */
++ hw->mac.get_link_status = 1;
++ mod_timer(&adapter->watchdog_timer, jiffies + 1);
++
++ return 0;
++}
++
++void igb_down(struct igb_adapter *adapter)
++{
++ struct net_device *netdev = adapter->netdev;
++ struct e1000_hw *hw = &adapter->hw;
++ u32 tctl, rctl;
++ int i;
++
++ /* signal that we're down so the interrupt handler does not
++ * reschedule our watchdog timer */
++ set_bit(__IGB_DOWN, &adapter->state);
++
++ /* disable receives in the hardware */
++ rctl = E1000_READ_REG(hw, E1000_RCTL);
++ E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
++ /* flush and sleep below */
++
++ netif_tx_stop_all_queues(netdev);
++
++ /* disable transmits in the hardware */
++ tctl = E1000_READ_REG(hw, E1000_TCTL);
++ tctl &= ~E1000_TCTL_EN;
++ E1000_WRITE_REG(hw, E1000_TCTL, tctl);
++ /* flush both disables and wait for them to finish */
++ E1000_WRITE_FLUSH(hw);
++ msleep(10);
++
++ for (i = 0; i < adapter->num_q_vectors; i++) {
++ struct igb_q_vector *q_vector = adapter->q_vector[i];
++ napi_disable(&q_vector->napi);
++ }
++
++ igb_irq_disable(adapter);
++
++ del_timer_sync(&adapter->watchdog_timer);
++ del_timer_sync(&adapter->phy_info_timer);
++
++ netdev->tx_queue_len = adapter->tx_queue_len;
++ netif_carrier_off(netdev);
++
++ /* record the stats before reset */
++ igb_update_stats(adapter);
++
++ adapter->link_speed = 0;
++ adapter->link_duplex = 0;
++#ifdef HAVE_PCI_ERS
++ if (!pci_channel_offline(adapter->pdev))
++ igb_reset(adapter);
++#else
++ igb_reset(adapter);
++#endif
++ igb_clean_all_tx_rings(adapter);
++ igb_clean_all_rx_rings(adapter);
++#ifdef IGB_DCA
++
++ /* since we reset the hardware, DCA settings were cleared */
++ igb_setup_dca(adapter);
++#endif
++}
++
++void igb_reinit_locked(struct igb_adapter *adapter)
++{
++ WARN_ON(in_interrupt());
++ while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
++ msleep(1);
++ igb_down(adapter);
++ igb_up(adapter);
++ clear_bit(__IGB_RESETTING, &adapter->state);
++}
++
++void igb_reset(struct igb_adapter *adapter)
++{
++ struct e1000_hw *hw = &adapter->hw;
++ struct e1000_mac_info *mac = &hw->mac;
++ struct e1000_fc_info *fc = &hw->fc;
++ u32 pba = 0, tx_space, min_tx_space, min_rx_space;
++ u16 hwm;
++
++ /* Repartition the PBA for MTUs greater than 9k.
++ * To take effect CTRL.RST is required.
++ */
++ switch (mac->type) {
++ case e1000_82576:
++ pba = E1000_READ_REG(hw, E1000_RXPBS);
++ pba &= E1000_RXPBS_SIZE_MASK_82576;
++ break;
++ case e1000_82575:
++ default:
++ pba = E1000_PBA_34K;
++ break;
++ }
++
++ if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
++ (mac->type < e1000_82576)) {
++ /* adjust PBA for jumbo frames */
++ E1000_WRITE_REG(hw, E1000_PBA, pba);
++
++ /* To maintain wire speed transmits, the Tx FIFO should be
++ * large enough to accommodate two full transmit packets,
++ * rounded up to the next 1KB and expressed in KB. Likewise,
++ * the Rx FIFO should be large enough to accommodate at least
++ * one full receive packet and is similarly rounded up and
++ * expressed in KB. */
++ pba = E1000_READ_REG(hw, E1000_PBA);
++ /* upper 16 bits hold the Tx packet buffer allocation size in KB */
++ tx_space = pba >> 16;
++ /* lower 16 bits hold the Rx packet buffer allocation size in KB */
++ pba &= 0xffff;
++ /* the Tx FIFO also stores 16 bytes of information about the Tx
++ * packet, but we don't count the Ethernet FCS because hardware
++ * appends it */
++ min_tx_space = (adapter->max_frame_size +
++ sizeof(struct e1000_tx_desc) -
++ ETH_FCS_LEN) * 2;
++ min_tx_space = ALIGN(min_tx_space, 1024);
++ min_tx_space >>= 10;
++ /* software strips receive CRC, so leave room for it */
++ min_rx_space = adapter->max_frame_size;
++ min_rx_space = ALIGN(min_rx_space, 1024);
++ min_rx_space >>= 10;
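++ /* Worked example (illustrative, assuming 16-byte legacy Tx
++ * descriptors): a 9018-byte max frame gives min_tx_space =
++ * ALIGN((9018 + 16 - 4) * 2, 1024) >> 10 = 18 KB and
++ * min_rx_space = ALIGN(9018, 1024) >> 10 = 9 KB. */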
++
++ /* If current Tx allocation is less than the min Tx FIFO size,
++ * and the min Tx FIFO size is less than the current Rx FIFO
++ * allocation, take space away from current Rx allocation */
++ if (tx_space < min_tx_space &&
++ ((min_tx_space - tx_space) < pba)) {
++ pba = pba - (min_tx_space - tx_space);
++
++ /* if short on rx space, rx wins and must trump tx
++ * adjustment */
++ if (pba < min_rx_space)
++ pba = min_rx_space;
++ }
++ E1000_WRITE_REG(hw, E1000_PBA, pba);
++ }
++
++ /* flow control settings */
++ /* The high water mark must be low enough to fit one full frame
++ * (or the size used for early receive) above it in the Rx FIFO.
++ * Set it to the lower of:
++ * - 90% of the Rx FIFO size, or
++ * - the full Rx FIFO size minus one full frame */
++ hwm = min(((pba << 10) * 9 / 10),
++ ((pba << 10) - 2 * adapter->max_frame_size));
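++ /* Illustrative example (assuming E1000_PBA_34K is 34): on 82575
++ * with a 1518-byte max frame, hwm = min(34816 * 9 / 10,
++ * 34816 - 2 * 1518) = 31334, which the 8-byte masking below
++ * rounds down to 31328. */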
++
++ if (mac->type < e1000_82576) {
++ fc->high_water = hwm & 0xFFF8; /* 8-byte granularity */
++ fc->low_water = fc->high_water - 8;
++ } else {
++ fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
++ fc->low_water = fc->high_water - 16;
++ }
++ fc->pause_time = 0xFFFF;
++ fc->send_xon = 1;
++ fc->current_mode = fc->requested_mode;
++
++ /* disable receive for all VFs and wait one second */
++ if (adapter->vfs_allocated_count) {
++ int i;
++ for (i = 0 ; i < adapter->vfs_allocated_count; i++)
++ adapter->vf_data[i].flags = 0;
++
++ /* ping all the active vfs to let them know we are going down */
++ igb_ping_all_vfs(adapter);
++
++ /* disable transmits and receives */
++ E1000_WRITE_REG(hw, E1000_VFRE, 0);
++ E1000_WRITE_REG(hw, E1000_VFTE, 0);
++ }
++
++ /* Allow time for pending master requests to run */
++ e1000_reset_hw(hw);
++ E1000_WRITE_REG(hw, E1000_WUC, 0);
++
++ if (e1000_init_hw(hw))
++ DPRINTK(PROBE, ERR, "Hardware Error\n");
++
++ igb_update_mng_vlan(adapter);
++
++ /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
++ E1000_WRITE_REG(hw, E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
++
++ e1000_get_phy_info(hw);
++}
++
++#ifdef HAVE_NET_DEVICE_OPS
++static const struct net_device_ops igb_netdev_ops = {
++ .ndo_open = igb_open,
++ .ndo_stop = igb_close,
++ .ndo_start_xmit = igb_xmit_frame_adv,
++ .ndo_get_stats = igb_get_stats,
++ .ndo_set_rx_mode = igb_set_rx_mode,
++ .ndo_set_multicast_list = igb_set_rx_mode,
++ .ndo_set_mac_address = igb_set_mac,
++ .ndo_change_mtu = igb_change_mtu,
++ .ndo_do_ioctl = igb_ioctl,
++ .ndo_tx_timeout = igb_tx_timeout,
++ .ndo_validate_addr = eth_validate_addr,
++ .ndo_vlan_rx_register = igb_vlan_rx_register,
++ .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
++ .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
++#ifdef CONFIG_NET_POLL_CONTROLLER
++ .ndo_poll_controller = igb_netpoll,
++#endif
++};
++#endif /* HAVE_NET_DEVICE_OPS */
++
++/**
++ * igb_probe - Device Initialization Routine
++ * @pdev: PCI device information struct
++ * @ent: entry in igb_pci_tbl
++ *
++ * Returns 0 on success, negative on failure
++ *
++ * igb_probe initializes an adapter identified by a pci_dev structure.
++ * The OS initialization, configuring of the adapter private structure,
++ * and a hardware reset occur.
++ **/
++static int __devinit igb_probe(struct pci_dev *pdev,
++ const struct pci_device_id *ent)
++{
++ struct net_device *netdev;
++ struct igb_adapter *adapter;
++ struct e1000_hw *hw;
++ int i, err, pci_using_dac;
++ u16 eeprom_data = 0;
++ static int cards_found;
++ static int global_quad_port_a; /* global quad port a indication */
++
++ err = pci_enable_device_mem(pdev);
++ if (err)
++ return err;
++
++ pci_using_dac = 0;
++ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
++ if (!err) {
++ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
++ if (!err)
++ pci_using_dac = 1;
++ } else {
++ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
++ if (err) {
++ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
++ if (err) {
++ IGB_ERR("No usable DMA configuration, "
++ "aborting\n");
++ goto err_dma;
++ }
++ }
++ }
++
++#ifndef HAVE_ASPM_QUIRKS
++ /* 82575 requires that the pci-e link partner disable the L0s state */
++ switch (pdev->device) {
++ case E1000_DEV_ID_82575EB_COPPER:
++ case E1000_DEV_ID_82575EB_FIBER_SERDES:
++ case E1000_DEV_ID_82575GB_QUAD_COPPER:
++ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);
++ default:
++ break;
++ }
++
++#endif /* HAVE_ASPM_QUIRKS */
++ err = pci_request_selected_regions(pdev,
++ pci_select_bars(pdev,
++ IORESOURCE_MEM),
++ igb_driver_name);
++ if (err)
++ goto err_pci_reg;
++
++ pci_enable_pcie_error_reporting(pdev);
++
++ pci_set_master(pdev);
++
++ err = -ENOMEM;
++#ifdef HAVE_TX_MQ
++ netdev = alloc_etherdev_mq(sizeof(struct igb_adapter), IGB_ABS_MAX_TX_QUEUES);
++#else
++ netdev = alloc_etherdev(sizeof(struct igb_adapter));
++#endif /* HAVE_TX_MQ */
++ if (!netdev)
++ goto err_alloc_etherdev;
++
++ SET_MODULE_OWNER(netdev);
++ SET_NETDEV_DEV(netdev, &pdev->dev);
++
++ pci_set_drvdata(pdev, netdev);
++ adapter = netdev_priv(netdev);
++ adapter->netdev = netdev;
++ adapter->pdev = pdev;
++ hw = &adapter->hw;
++ hw->back = adapter;
++ adapter->msg_enable = (1 << debug) - 1;
++
++#ifdef HAVE_PCI_ERS
++ err = pci_save_state(pdev);
++ if (err)
++ goto err_ioremap;
++#endif
++ err = -EIO;
++ hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
++ pci_resource_len(pdev, 0));
++ if (!hw->hw_addr)
++ goto err_ioremap;
++
++#ifdef HAVE_NET_DEVICE_OPS
++ netdev->netdev_ops = &igb_netdev_ops;
++#else /* HAVE_NET_DEVICE_OPS */
++ netdev->open = &igb_open;
++ netdev->stop = &igb_close;
++ netdev->get_stats = &igb_get_stats;
++#ifdef HAVE_SET_RX_MODE
++ netdev->set_rx_mode = &igb_set_rx_mode;
++#endif
++ netdev->set_multicast_list = &igb_set_rx_mode;
++ netdev->set_mac_address = &igb_set_mac;
++ netdev->change_mtu = &igb_change_mtu;
++ netdev->do_ioctl = &igb_ioctl;
++#ifdef HAVE_TX_TIMEOUT
++ netdev->tx_timeout = &igb_tx_timeout;
++#endif
++ netdev->vlan_rx_register = igb_vlan_rx_register;
++ netdev->vlan_rx_add_vid = igb_vlan_rx_add_vid;
++ netdev->vlan_rx_kill_vid = igb_vlan_rx_kill_vid;
++#ifdef CONFIG_NET_POLL_CONTROLLER
++ netdev->poll_controller = igb_netpoll;
++#endif
++ netdev->hard_start_xmit = &igb_xmit_frame_adv;
++#endif /* HAVE_NET_DEVICE_OPS */
++ igb_set_ethtool_ops(netdev);
++#ifdef HAVE_TX_TIMEOUT
++ netdev->watchdog_timeo = 5 * HZ;
++#endif
++
++ strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
++
++ adapter->bd_number = cards_found;
++
++ /* setup the private structure */
++ err = igb_sw_init(adapter);
++ if (err)
++ goto err_sw_init;
++
++ e1000_get_bus_info(hw);
++
++ hw->phy.autoneg_wait_to_complete = FALSE;
++ hw->mac.adaptive_ifs = FALSE;
++
++ /* Copper options */
++ if (hw->phy.media_type == e1000_media_type_copper) {
++ hw->phy.mdix = AUTO_ALL_MODES;
++ hw->phy.disable_polarity_correction = FALSE;
++ hw->phy.ms_type = e1000_ms_hw_default;
++ }
++
++ if (e1000_check_reset_block(hw))
++ DPRINTK(PROBE, INFO,
++ "PHY reset is blocked due to SOL/IDER session.\n");
++
++ netdev->features = NETIF_F_SG |
++ NETIF_F_IP_CSUM |
++ NETIF_F_HW_VLAN_TX |
++ NETIF_F_HW_VLAN_RX |
++ NETIF_F_HW_VLAN_FILTER;
++
++#ifdef NETIF_F_IPV6_CSUM
++ netdev->features |= NETIF_F_IPV6_CSUM;
++#endif
++#ifdef NETIF_F_TSO
++ netdev->features |= NETIF_F_TSO;
++#ifdef NETIF_F_TSO6
++ netdev->features |= NETIF_F_TSO6;
++#endif
++#endif /* NETIF_F_TSO */
++
++#ifdef IGB_LRO
++ netdev->features |= NETIF_F_LRO;
++#endif
++#ifdef NETIF_F_GRO
++ netdev->features |= NETIF_F_GRO;
++#endif
++
++#ifdef HAVE_NETDEV_VLAN_FEATURES
++ netdev->vlan_features |= NETIF_F_TSO;
++ netdev->vlan_features |= NETIF_F_TSO6;
++ netdev->vlan_features |= NETIF_F_IP_CSUM;
++ netdev->vlan_features |= NETIF_F_IPV6_CSUM;
++ netdev->vlan_features |= NETIF_F_SG;
++
++#endif
++ if (pci_using_dac)
++ netdev->features |= NETIF_F_HIGHDMA;
++
++ if (hw->mac.type >= e1000_82576)
++ netdev->features |= NETIF_F_SCTP_CSUM;
++
++ adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
++
++ /* before reading the NVM, reset the controller to put the device in a
++ * known good starting state */
++ e1000_reset_hw(hw);
++
++ /* make sure the NVM is good */
++ if (e1000_validate_nvm_checksum(hw) < 0) {
++ DPRINTK(PROBE, ERR, "The NVM Checksum Is Not Valid\n");
++ err = -EIO;
++ goto err_eeprom;
++ }
++
++ /* copy the MAC address out of the NVM */
++ if (e1000_read_mac_addr(hw))
++ DPRINTK(PROBE, ERR, "NVM Read Error\n");
++ memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
++#ifdef ETHTOOL_GPERMADDR
++ memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);
++
++ if (!is_valid_ether_addr(netdev->perm_addr)) {
++#else
++ if (!is_valid_ether_addr(netdev->dev_addr)) {
++#endif
++ DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
++ err = -EIO;
++ goto err_eeprom;
++ }
++
++ init_timer(&adapter->watchdog_timer);
++ adapter->watchdog_timer.function = &igb_watchdog;
++ adapter->watchdog_timer.data = (unsigned long) adapter;
++
++ init_timer(&adapter->phy_info_timer);
++ adapter->phy_info_timer.function = &igb_update_phy_info;
++ adapter->phy_info_timer.data = (unsigned long) adapter;
++
++ INIT_WORK(&adapter->reset_task, igb_reset_task);
++ INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
++
++ /* Initialize link properties that are user-changeable */
++ adapter->fc_autoneg = true;
++ hw->mac.autoneg = true;
++ hw->phy.autoneg_advertised = 0x2f;
++
++ hw->fc.requested_mode = e1000_fc_default;
++ hw->fc.current_mode = e1000_fc_default;
++
++ e1000_validate_mdi_setting(hw);
++
++ /* Initial Wake on LAN setting. If APM wake is enabled in the EEPROM,
++ * enable the ACPI Magic Packet filter.
++ */
++
++ if (hw->bus.func == 0)
++ e1000_read_nvm(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
++ else if (hw->bus.func == 1)
++ e1000_read_nvm(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
++
++ if (eeprom_data & IGB_EEPROM_APME)
++ adapter->eeprom_wol |= E1000_WUFC_MAG;
++
++ /* now that we have the eeprom settings, apply the special cases where
++ * the eeprom may be wrong or the board simply won't support wake on
++ * lan on a particular port */
++ switch (pdev->device) {
++ case E1000_DEV_ID_82575GB_QUAD_COPPER:
++ adapter->eeprom_wol = 0;
++ break;
++ case E1000_DEV_ID_82575EB_FIBER_SERDES:
++ case E1000_DEV_ID_82576_FIBER:
++ case E1000_DEV_ID_82576_SERDES:
++ /* Wake events only supported on port A for dual fiber
++ * regardless of eeprom setting */
++ if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FUNC_1)
++ adapter->eeprom_wol = 0;
++ break;
++ case E1000_DEV_ID_82576_QUAD_COPPER:
++ /* if quad port adapter, disable WoL on all but port A */
++ if (global_quad_port_a != 0)
++ adapter->eeprom_wol = 0;
++ else
++ adapter->flags |= IGB_FLAG_QUAD_PORT_A;
++ /* Reset for multiple quad port adapters */
++ if (++global_quad_port_a == 4)
++ global_quad_port_a = 0;
++ break;
++ }
++
++ /* initialize the wol settings based on the eeprom settings */
++ adapter->wol = adapter->eeprom_wol;
++ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
++
++ /* reset the hardware with the new settings */
++ igb_reset(adapter);
++
++ /* let the f/w know that the h/w is now under the control of the
++ * driver. */
++ igb_get_hw_control(adapter);
++
++ /* tell the stack to leave us alone until igb_open() is called */
++ netif_carrier_off(netdev);
++ netif_tx_stop_all_queues(netdev);
++
++ strncpy(netdev->name, "eth%d", IFNAMSIZ);
++ err = register_netdev(netdev);
++ if (err)
++ goto err_register;
++
++#ifdef IGB_DCA
++ if (dca_add_requester(&pdev->dev) == E1000_SUCCESS) {
++ adapter->flags |= IGB_FLAG_DCA_ENABLED;
++ DPRINTK(PROBE, INFO, "DCA enabled\n");
++ igb_setup_dca(adapter);
++ }
++
++#endif
++#ifdef SIOCSHWTSTAMP
++ switch (hw->mac.type) {
++ case e1000_82576:
++ /*
++ * Initialize the hardware timer: we keep it running just in case
++ * some program needs it later on.
++ */
++ memset(&adapter->cycles, 0, sizeof(adapter->cycles));
++ adapter->cycles.read = igb_read_clock;
++ adapter->cycles.mask = CLOCKSOURCE_MASK(64);
++ adapter->cycles.mult = 1;
++ /**
++ * Scale the NIC clock cycle by a large factor so that
++ * relatively small clock corrections can be added or
++ * subtracted at each clock tick. The drawbacks of a large
++ * factor are a) that the clock register overflows more quickly
++ * (not such a big deal) and b) that the increment per tick has
++ * to fit into 24 bits. As a result we need to use a shift of
++ * 19 so we can fit a value of 16 into the TIMINCA register.
++ */
++ adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
++ E1000_WRITE_REG(hw, E1000_TIMINCA,
++ (1 << E1000_TIMINCA_16NS_SHIFT) |
++ (16 << IGB_82576_TSYNC_SHIFT));
++
++ /* Set registers so that rollover occurs soon to test this. */
++ E1000_WRITE_REG(hw, E1000_SYSTIML, 0x00000000);
++ E1000_WRITE_REG(hw, E1000_SYSTIMH, 0xFF800000);
++ E1000_WRITE_FLUSH(hw);
++
++ timecounter_init(&adapter->clock,
++ &adapter->cycles,
++ ktime_to_ns(ktime_get_real()));
++ /*
++ * Synchronize our NIC clock against the system wall clock. Reading
++ * the NIC time stamp takes ~3us per sample, and samples are stable
++ * even under load, so 10 samples per offset comparison are enough.
++ */
++ memset(&adapter->compare, 0, sizeof(adapter->compare));
++ adapter->compare.source = &adapter->clock;
++ adapter->compare.target = ktime_get_real;
++ adapter->compare.num_samples = 10;
++ timecompare_update(&adapter->compare, 0);
++ break;
++ case e1000_82575:
++ /* 82575 does not support timesync */
++ default:
++ break;
++ }
++
++#endif /* SIOCSHWTSTAMP */
++ DPRINTK(PROBE, INFO, "Intel(R) Gigabit Ethernet Network Connection\n");
++ /* print bus type/speed/width info */
++ DPRINTK(PROBE, INFO, "(PCIe:%s:%s) ",
++ ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" : "unknown"),
++ ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
++ (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
++ (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
++ "unknown"));
++
++ for (i = 0; i < 6; i++)
++ printk("%2.2x%c", netdev->dev_addr[i], i == 5 ? '\n' : ':');
++
++ for (i = 0; i < adapter->vfs_allocated_count; i++)
++ igb_vf_configuration(pdev, (i | 0x10000000));
++
++ DPRINTK(PROBE, INFO,
++ "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
++ adapter->msix_entries ? "MSI-X" :
++ adapter->flags & IGB_FLAG_HAS_MSI ? "MSI" :
++ "legacy",
++ adapter->num_rx_queues, adapter->num_tx_queues);
++
++ cards_found++;
++ return 0;
++
++err_register:
++ igb_release_hw_control(adapter);
++err_eeprom:
++ if (!e1000_check_reset_block(hw))
++ e1000_phy_hw_reset(hw);
++
++ if (hw->flash_address)
++ iounmap(hw->flash_address);
++err_sw_init:
++ igb_clear_interrupt_scheme(adapter);
++ iounmap(hw->hw_addr);
++err_ioremap:
++ free_netdev(netdev);
++err_alloc_etherdev:
++ pci_release_selected_regions(pdev,
++ pci_select_bars(pdev, IORESOURCE_MEM));
++err_pci_reg:
++err_dma:
++ pci_disable_device(pdev);
++ return err;
++}
++
++/**
++ * igb_remove - Device Removal Routine
++ * @pdev: PCI device information struct
++ *
++ * igb_remove is called by the PCI subsystem to alert the driver
++ * that it should release a PCI device. This could be caused by a
++ * Hot-Plug event, or because the driver is going to be removed from
++ * memory.
++ **/
++static void __devexit igb_remove(struct pci_dev *pdev)
++{
++ struct net_device *netdev = pci_get_drvdata(pdev);
++ struct igb_adapter *adapter = netdev_priv(netdev);
++ struct e1000_hw *hw = &adapter->hw;
++
++ /* flush_scheduled_work() may reschedule our watchdog task, so
++ * explicitly disable the watchdog task from being rescheduled */
++ set_bit(__IGB_DOWN, &adapter->state);
++ del_timer_sync(&adapter->watchdog_timer);
++ del_timer_sync(&adapter->phy_info_timer);
++
++ flush_scheduled_work();
++
++
++#ifdef IGB_DCA
++ if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
++ DPRINTK(PROBE, INFO, "DCA disabled\n");
++ dca_remove_requester(&pdev->dev);
++ adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
++ E1000_WRITE_REG(hw, E1000_DCA_CTRL, 1);
++ }
++#endif
++
++ /* Release control of h/w to f/w. If f/w is AMT enabled, this
++ * would have already happened in close and is redundant. */
++ igb_release_hw_control(adapter);
++
++ unregister_netdev(netdev);
++
++ if (!e1000_check_reset_block(hw))
++ e1000_phy_hw_reset(hw);
++
++ igb_clear_interrupt_scheme(adapter);
++
++#ifdef CONFIG_PCI_IOV
++ if (adapter->vf_data) {
++ /* disable iov and allow time for transactions to clear */
++ pci_disable_sriov(pdev);
++ msleep(500);
++
++ kfree(adapter->vf_data);
++ adapter->vf_data = NULL;
++ E1000_WRITE_REG(&adapter->hw, E1000_IOVCTL,
++ E1000_IOVCTL_REUSE_VFQ);
++ msleep(100);
++ dev_info(&adapter->pdev->dev, "IOV Disabled\n");
++ }
++#endif
++
++ iounmap(hw->hw_addr);
++ if (hw->flash_address)
++ iounmap(adapter->hw.flash_address);
++ pci_release_selected_regions(pdev,
++ pci_select_bars(pdev, IORESOURCE_MEM));
++
++ free_netdev(netdev);
++
++ pci_disable_pcie_error_reporting(pdev);
++
++ pci_disable_device(pdev);
++}
++
++/**
++ * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
++ * @adapter: board private structure to initialize
++ *
++ * This function initializes the VF-specific data storage and then attempts to
++ * allocate the VFs. It is ordered this way because it is much more expensive,
++ * time-wise, to disable SR-IOV than it is to allocate and free the memory
++ * for the VFs.
++ **/
++static void __devinit igb_probe_vfs(struct igb_adapter *adapter)
++{
++#ifdef CONFIG_PCI_IOV
++ struct pci_dev *pdev = adapter->pdev;
++
++ if (adapter->vfs_allocated_count) {
++ adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
++ sizeof(struct vf_data_storage),
++ GFP_KERNEL);
++ /* if allocation failed then we do not support SR-IOV */
++ if (!adapter->vf_data) {
++ adapter->vfs_allocated_count = 0;
++ dev_err(&pdev->dev, "Unable to allocate memory for VF "
++ "Data Storage\n");
++ }
++ }
++
++ if (pci_enable_sriov(pdev, adapter->vfs_allocated_count)) {
++ kfree(adapter->vf_data);
++ adapter->vf_data = NULL;
++#endif /* CONFIG_PCI_IOV */
++ adapter->vfs_allocated_count = 0;
++#ifdef CONFIG_PCI_IOV
++ } else {
++ dev_info(&pdev->dev, "IOV1 VFs enabled := %d\n",
++ adapter->vfs_allocated_count);
++ }
++
++#endif /* CONFIG_PCI_IOV */
++}
++/**
++ * igb_sw_init - Initialize general software structures (struct igb_adapter)
++ * @adapter: board private structure to initialize
++ *
++ * igb_sw_init initializes the Adapter private data structure.
++ * Fields are initialized based on PCI device information and
++ * OS network device settings (MTU size).
++ **/
++static int __devinit igb_sw_init(struct igb_adapter *adapter)
++{
++ struct e1000_hw *hw = &adapter->hw;
++ struct net_device *netdev = adapter->netdev;
++ struct pci_dev *pdev = adapter->pdev;
++
++ /* PCI config space info */
++
++ hw->vendor_id = pdev->vendor;
++ hw->device_id = pdev->device;
++ hw->subsystem_vendor_id = pdev->subsystem_vendor;
++ hw->subsystem_device_id = pdev->subsystem_device;
++
++ pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
++
++ pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
++
++ adapter->tx_ring_count = IGB_DEFAULT_TXD;
++ adapter->rx_ring_count = IGB_DEFAULT_RXD;
++ adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
++ adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
++
++ /* Initialize the hardware-specific values */
++ if (e1000_setup_init_funcs(hw, TRUE)) {
++ DPRINTK(PROBE, ERR, "Hardware Initialization Failure\n");
++ return -EIO;
++ }
++
++ igb_check_options(adapter);
++
++ if (igb_init_interrupt_scheme(adapter)) {
++ DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
++ return -ENOMEM;
++ }
++
++ igb_probe_vfs(adapter);
++
++ /* Explicitly disable IRQ since the NIC can be in any state. */
++ igb_irq_disable(adapter);
++
++ set_bit(__IGB_DOWN, &adapter->state);
++ return 0;
++}
++
++/**
++ * igb_open - Called when a network interface is made active
++ * @netdev: network interface device structure
++ *
++ * Returns 0 on success, negative value on failure
++ *
++ * The open entry point is called when a network interface is made
++ * active by the system (IFF_UP). At this point all resources needed
++ * for transmit and receive operations are allocated, the interrupt
++ * handler is registered with the OS, the watchdog timer is started,
++ * and the stack is notified that the interface is ready.
++ **/
++static int igb_open(struct net_device *netdev)
++{
++ struct igb_adapter *adapter = netdev_priv(netdev);
++ struct e1000_hw *hw = &adapter->hw;
++ int err;
++ int i;
++
++ /* disallow open during test */
++ if (test_bit(__IGB_TESTING, &adapter->state))
++ return -EBUSY;
++
++ /* allocate transmit descriptors */
++ err = igb_setup_all_tx_resources(adapter);
++ if (err)
++ goto err_setup_tx;
++
++ /* allocate receive descriptors */
++ err = igb_setup_all_rx_resources(adapter);
++ if (err)
++ goto err_setup_rx;
++
++ /* e1000_power_up_phy(adapter); */
++
++ /* before we allocate an interrupt, we must be ready to handle it.
++ * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
++ * as soon as we call pci_request_irq, so we have to setup our
++ * clean_rx handler before we do so. */
++ igb_configure(adapter);
++
++ err = igb_request_irq(adapter);
++ if (err)
++ goto err_req_irq;
++
++ /* From here on the code is the same as igb_up() */
++ clear_bit(__IGB_DOWN, &adapter->state);
++
++ for (i = 0; i < adapter->num_q_vectors; i++) {
++ struct igb_q_vector *q_vector = adapter->q_vector[i];
++ napi_enable(&q_vector->napi);
++ }
++ igb_configure_lli(adapter);
++
++ /* Clear any pending interrupts. */
++ E1000_READ_REG(hw, E1000_ICR);
++
++ igb_irq_enable(adapter);
++
++ /* notify VFs that reset has been completed */
++ if (adapter->vfs_allocated_count) {
++ u32 reg_data = E1000_READ_REG(hw, E1000_CTRL_EXT);
++ reg_data |= E1000_CTRL_EXT_PFRSTD;
++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg_data);
++ }
++
++ netif_tx_start_all_queues(netdev);
++
++ /* start the watchdog. */
++ hw->mac.get_link_status = 1;
++ mod_timer(&adapter->watchdog_timer, jiffies + 1);
++
++ return E1000_SUCCESS;
++
++err_req_irq:
++ igb_release_hw_control(adapter);
++ /* e1000_power_down_phy(adapter); */
++ igb_free_all_rx_resources(adapter);
++err_setup_rx:
++ igb_free_all_tx_resources(adapter);
++err_setup_tx:
++ igb_reset(adapter);
++
++ return err;
++}
++
++/**
++ * igb_close - Disables a network interface
++ * @netdev: network interface device structure
++ *
++ * Returns 0, this is not allowed to fail
++ *
++ * The close entry point is called when an interface is de-activated
++ * by the OS. The hardware is still under the driver's control, but
++ * needs to be disabled. A global MAC reset is issued to stop the
++ * hardware, and all transmit and receive resources are freed.
++ **/
++static int igb_close(struct net_device *netdev)
++{
++ struct igb_adapter *adapter = netdev_priv(netdev);
++
++ WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
++ igb_down(adapter);
++
++ igb_free_irq(adapter);
++
++ igb_free_all_tx_resources(adapter);
++ igb_free_all_rx_resources(adapter);
++
++ return 0;
++}
++
++/**
++ * igb_setup_tx_resources - allocate Tx resources (Descriptors)
++ * @tx_ring: tx descriptor ring (for a specific queue) to setup
++ *
++ * Return 0 on success, negative on failure
++ **/
++int igb_setup_tx_resources(struct igb_ring *tx_ring)
++{
++ struct pci_dev *pdev = tx_ring->pdev;
++ int size;
++
++ size = sizeof(struct igb_buffer) * tx_ring->count;
++ tx_ring->buffer_info = vmalloc(size);
++ if (!tx_ring->buffer_info)
++ goto err;
++ memset(tx_ring->buffer_info, 0, size);
++
++ /* round up to nearest 4K */
++ tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
++ tx_ring->size = ALIGN(tx_ring->size, 4096);
++
++ tx_ring->desc = pci_alloc_consistent(pdev,
++ tx_ring->size,
++ &tx_ring->dma);
++
++ if (!tx_ring->desc)
++ goto err;
++
++ tx_ring->next_to_use = 0;
++ tx_ring->next_to_clean = 0;
++ return 0;
++
++err:
++ vfree(tx_ring->buffer_info);
++ dev_err(&pdev->dev, "Unable to allocate memory for the "
++ "transmit descriptor ring\n");
++ return -ENOMEM;
++}
++
++/**
++ * igb_setup_all_tx_resources - wrapper to allocate Tx resources
++ * (Descriptors) for all queues
++ * @adapter: board private structure
++ *
++ * Return 0 on success, negative on failure
++ **/
++static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
++{
++ int i, err = 0;
++
++ for (i = 0; i < adapter->num_tx_queues; i++) {
++ err = igb_setup_tx_resources(&adapter->tx_ring[i]);
++ if (err) {
++ DPRINTK(PROBE, ERR,
++ "Allocation for Tx Queue %u failed\n", i);
++ for (i--; i >= 0; i--)
++ igb_free_tx_resources(&adapter->tx_ring[i]);
++ break;
++ }
++ }
++
++#ifdef HAVE_TX_MQ
++ for (i = 0; i < IGB_ABS_MAX_TX_QUEUES; i++) {
++#ifdef CONFIG_NETDEVICES_MULTIQUEUE
++ int r_idx = i % adapter->netdev->egress_subqueue_count;
++#else
++ int r_idx = i % adapter->netdev->real_num_tx_queues;
++#endif
++ adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx];
++ }
++#endif
++ return err;
++}
++
++/**
++ * igb_setup_tctl - configure the transmit control registers
++ * @adapter: Board private structure
++ **/
++void igb_setup_tctl(struct igb_adapter *adapter)
++{
++ struct e1000_hw *hw = &adapter->hw;
++ u32 tctl;
++
++ /* disable queue 0 which is enabled by default on 82575 and 82576 */
++ E1000_WRITE_REG(hw, E1000_TXDCTL(0), 0);
++
++ /* Program the Transmit Control Register */
++ tctl = E1000_READ_REG(hw, E1000_TCTL);
++ tctl &= ~E1000_TCTL_CT;
++ tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
++ (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
++
++ e1000_config_collision_dist(hw);
++
++ /* Enable transmits */
++ tctl |= E1000_TCTL_EN;
++
++ E1000_WRITE_REG(hw, E1000_TCTL, tctl);
++}
++
++/**
++ * igb_configure_tx_ring - Configure transmit ring after Reset
++ * @adapter: board private structure
++ * @ring: tx ring to configure
++ *
++ * Configure a transmit ring after a reset.
++ **/
++void igb_configure_tx_ring(struct igb_adapter *adapter,
++ struct igb_ring *ring)
++{
++ struct e1000_hw *hw = &adapter->hw;
++ u32 txdctl;
++ u64 tdba = ring->dma;
++ int reg_idx = ring->reg_idx;
++
++ /* disable the queue */
++ txdctl = E1000_READ_REG(hw, E1000_TXDCTL(reg_idx));
++ E1000_WRITE_REG(hw, E1000_TXDCTL(reg_idx),
++ txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
++ E1000_WRITE_FLUSH(hw);
++ mdelay(10);
++
++ E1000_WRITE_REG(hw, E1000_TDLEN(reg_idx),
++ ring->count * sizeof(struct e1000_tx_desc));
++ E1000_WRITE_REG(hw, E1000_TDBAL(reg_idx),
++ tdba & 0x00000000ffffffffULL);
++ E1000_WRITE_REG(hw, E1000_TDBAH(reg_idx), tdba >> 32);
++
++ ring->head = hw->hw_addr + E1000_TDH(reg_idx);
++ ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
++ writel(0, ring->head);
++ writel(0, ring->tail);
++
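++ /* PTHRESH, HTHRESH and WTHRESH are the prefetch, host and
++ * write-back thresholds (in descriptors), ORed in at bit offsets
++ * 0, 8 and 16 of TXDCTL (descriptive note). */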
++ txdctl |= IGB_TX_PTHRESH;
++ txdctl |= IGB_TX_HTHRESH << 8;
++ txdctl |= IGB_TX_WTHRESH << 16;
++
++ txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
++ E1000_WRITE_REG(hw, E1000_TXDCTL(reg_idx), txdctl);
++}
++
++/**
++ * igb_configure_tx - Configure transmit Unit after Reset
++ * @adapter: board private structure
++ *
++ * Configure the Tx unit of the MAC after a reset.
++ **/
++static void igb_configure_tx(struct igb_adapter *adapter)
++{
++ int i;
++
++ for (i = 0; i < adapter->num_tx_queues; i++)
++ igb_configure_tx_ring(adapter, &adapter->tx_ring[i]);
++
++}
++
++/**
++ * igb_setup_rx_resources - allocate Rx resources (Descriptors)
++ * @rx_ring: rx descriptor ring (for a specific queue) to setup
++ *
++ * Returns 0 on success, negative on failure
++ **/
++int igb_setup_rx_resources(struct igb_ring *rx_ring)
++{
++ struct pci_dev *pdev = rx_ring->pdev;
++ int size, desc_len;
++
++#ifdef IGB_LRO
++ size = sizeof(struct net_lro_desc) * MAX_LRO_DESCRIPTORS;
++ rx_ring->lro_mgr.lro_arr = vmalloc(size);
++ if (!rx_ring->lro_mgr.lro_arr)
++ goto err;
++ memset(rx_ring->lro_mgr.lro_arr, 0, size);
++#endif /* IGB_LRO */
++
++ size = sizeof(struct igb_buffer) * rx_ring->count;
++ rx_ring->buffer_info = vmalloc(size);
++ if (!rx_ring->buffer_info)
++ goto err;
++ memset(rx_ring->buffer_info, 0, size);
++
++ desc_len = sizeof(union e1000_adv_rx_desc);
++
++ /* Round up to nearest 4K */
++ rx_ring->size = rx_ring->count * desc_len;
++ rx_ring->size = ALIGN(rx_ring->size, 4096);
++
++ rx_ring->desc = pci_alloc_consistent(pdev,
++ rx_ring->size,
++ &rx_ring->dma);
++
++ if (!rx_ring->desc)
++ goto err;
++
++ rx_ring->next_to_clean = 0;
++ rx_ring->next_to_use = 0;
++
++
++ return 0;
++
++err:
++#ifdef IGB_LRO
++ vfree(rx_ring->lro_mgr.lro_arr);
++ rx_ring->lro_mgr.lro_arr = NULL;
++#endif
++ vfree(rx_ring->buffer_info);
++ rx_ring->buffer_info = NULL;
++ dev_err(&pdev->dev, "Unable to allocate memory for the "
++ "receive descriptor ring\n");
++ return -ENOMEM;
++}
++
++/**
++ * igb_setup_all_rx_resources - wrapper to allocate Rx resources
++ * (Descriptors) for all queues
++ * @adapter: board private structure
++ *
++ * Return 0 on success, negative on failure
++ **/
++static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
++{
++ int i, err = 0;
++
++ for (i = 0; i < adapter->num_rx_queues; i++) {
++ err = igb_setup_rx_resources(&adapter->rx_ring[i]);
++ if (err) {
++ DPRINTK(PROBE, ERR,
++ "Allocation for Rx Queue %u failed\n", i);
++ for (i--; i >= 0; i--)
++ igb_free_rx_resources(&adapter->rx_ring[i]);
++ break;
++ }
++ }
++
++ return err;
++}
++
++/**
++ * igb_setup_mrqc - configure the multiple receive queue control registers
++ * @adapter: Board private structure
++ **/
++static void igb_setup_mrqc(struct igb_adapter *adapter)
++{
++ struct e1000_hw *hw = &adapter->hw;
++ u32 mrqc, rxcsum;
++ u32 j, num_rx_queues, shift = 0, shift2 = 0;
++ union e1000_reta {
++ u32 dword;
++ u8 bytes[4];
++ } reta;
++ static const u8 rsshash[40] = {
++ 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
++ 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
++ 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
++ 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
++
++ /* Fill out hash function seeds */
++ for (j = 0; j < 10; j++) {
++ u32 rsskey = rsshash[(j * 4)];
++ rsskey |= rsshash[(j * 4) + 1] << 8;
++ rsskey |= rsshash[(j * 4) + 2] << 16;
++ rsskey |= rsshash[(j * 4) + 3] << 24;
++ E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), j, rsskey);
++ }
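++ /* The 40-byte key above ends up in the ten 32-bit RSSRK registers,
++ * four bytes per register, least-significant byte first. */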
++
++ num_rx_queues = adapter->RSS_queues;
++
++ if (adapter->vfs_allocated_count || adapter->VMDQ_queues) {
++ /* 82575 and 82576 support 2 RSS queues for VMDq */
++ switch (hw->mac.type) {
++ case e1000_82576:
++ shift = 3;
++ num_rx_queues = 2;
++ break;
++ case e1000_82575:
++ shift = 2;
++ shift2 = 6;
++ default:
++ break;
++ }
++ } else {
++ if (hw->mac.type == e1000_82575)
++ shift = 6;
++ }
++
++ for (j = 0; j < (32 * 4); j++) {
++ reta.bytes[j & 3] = (j % num_rx_queues) << shift;
++ if (shift2)
++ reta.bytes[j & 3] |= num_rx_queues << shift2;
++ if ((j & 3) == 3)
++ E1000_WRITE_REG(hw, E1000_RETA(j >> 2), reta.dword);
++ }
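++ /* Example of the redirection table fill: with four RSS queues and
++ * no VMDq on 82576 (shift = 0), the 128 RETA entries cycle through
++ * queue indices 0, 1, 2, 3, 0, 1, ... and every four entries are
++ * flushed out as one RETA dword. */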
++
++ /*
++ * Disable raw packet checksumming so that RSS hash is placed in
++ * descriptor on writeback. No need to enable TCP/UDP/IP checksum
++ * offloads as they are enabled by default
++ */
++ rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
++ rxcsum |= E1000_RXCSUM_PCSD;
++
++ if (adapter->hw.mac.type >= e1000_82576)
++ /* Enable Receive Checksum Offload for SCTP */
++ rxcsum |= E1000_RXCSUM_CRCOFL;
++
++ /* Don't need to set TUOFL or IPOFL, they default to 1 */
++ E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
++
++ /* If VMDq is enabled then we set the appropriate mode for that, else
++ * we default to RSS so that an RSS hash is calculated per packet even
++ * if we are only using one queue */
++ if (adapter->vfs_allocated_count || adapter->VMDQ_queues) {
++ if (hw->mac.type > e1000_82575) {
++ /* Set the default pool for the PF's first queue */
++ u32 vtctl = E1000_READ_REG(hw, E1000_VT_CTL);
++ vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
++ E1000_VT_CTL_DISABLE_DEF_POOL);
++ vtctl |= adapter->vfs_allocated_count <<
++ E1000_VT_CTL_DEFAULT_POOL_SHIFT;
++ E1000_WRITE_REG(hw, E1000_VT_CTL, vtctl);
++ } else if (adapter->RSS_queues > 1) {
++ /* set default queue for pool 1 to queue 2 */
++ E1000_WRITE_REG(hw, E1000_VT_CTL,
++ adapter->RSS_queues << 7);
++ }
++ if (adapter->RSS_queues > 1)
++ mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
++ else
++ mrqc = E1000_MRQC_ENABLE_VMDQ;
++ } else {
++ mrqc = E1000_MRQC_ENABLE_RSS_4Q;
++ }
++ igb_vmm_control(adapter);
++
++ mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
++ E1000_MRQC_RSS_FIELD_IPV4_TCP);
++ mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
++ E1000_MRQC_RSS_FIELD_IPV6_TCP);
++ mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
++ E1000_MRQC_RSS_FIELD_IPV6_UDP);
++ mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
++ E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
++
++ E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
++}
++
++/**
++ * igb_setup_rctl - configure the receive control registers
++ * @adapter: Board private structure
++ **/
++void igb_setup_rctl(struct igb_adapter *adapter)
++{
++ struct e1000_hw *hw = &adapter->hw;
++ u32 rctl;
++
++ rctl = E1000_READ_REG(hw, E1000_RCTL);
++
++ rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
++ rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
++
++ rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
++ (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
++
++ /*
++ * enable stripping of CRC. It's unlikely this will break BMC
++ * redirection as it did with e1000. Newer features require
++ * that the HW strips the CRC.
++ */
++ rctl |= E1000_RCTL_SECRC;
++
++
++ /* disable store bad packets and clear size bits. */
++ rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
++
++ /* enable LPE to prevent packets larger than max_frame_size */
++ rctl |= E1000_RCTL_LPE;
++
++ /* disable rx queue 0 which is enabled by default on 82575 and 82576 */
++ E1000_WRITE_REG(hw, E1000_RXDCTL(0), 0);
++
++ /* Attention!!! For SR-IOV PF driver operations you must enable
++ * queue drop for all VF and PF queues to prevent head of line blocking
++ * if an un-trusted VF does not provide descriptors to hardware.
++ */
++ if (adapter->vfs_allocated_count) {
++ /* set all queue drop enable bits */
++ E1000_WRITE_REG(hw, E1000_QDE, 0xFF);
++
++ }
++
++ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
++}
++
++static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size, int vfn)
++{
++ struct e1000_hw *hw = &adapter->hw;
++ u32 vmolr;
++
++ /* if this is a VF pool (not the PF) and it has VLANs enabled,
++ * increase the size to leave room for a VLAN tag */
++ if (vfn < adapter->vfs_allocated_count &&
++ adapter->vf_data[vfn].vlans_enabled)
++ size += VLAN_TAG_SIZE;
++
++ vmolr = E1000_READ_REG(hw, E1000_VMOLR(vfn));
++ vmolr &= ~E1000_VMOLR_RLPML_MASK;
++ vmolr |= size | E1000_VMOLR_LPE;
++ E1000_WRITE_REG(hw, E1000_VMOLR(vfn), vmolr);
++
++ return 0;
++}
++
++
++/**
++ * igb_set_rlpml - set receive large packet maximum length
++ * @adapter: board private structure
++ *
++ * Configure the maximum size of packets that will be received
++ */
++static void igb_set_rlpml(struct igb_adapter *adapter)
++{
++ int max_frame_size = adapter->max_frame_size;
++ struct e1000_hw *hw = &adapter->hw;
++ u16 pf_id = adapter->vfs_allocated_count;
++
++ if (adapter->vlgrp)
++ max_frame_size += VLAN_TAG_SIZE;
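++ /*
++ * with VMDq each pool gets its own limit via VMOLR.RLPML below, and
++ * the global RLPML is opened up to the maximum so it does not
++ * override the per-pool values
++ */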
++ if (adapter->VMDQ_queues) {
++ int i;
++ for (i = 0; i < adapter->VMDQ_queues; i++)
++ igb_set_vf_rlpml(adapter, max_frame_size, pf_id + i);
++ max_frame_size = MAX_JUMBO_FRAME_SIZE;
++ }
++ E1000_WRITE_REG(hw, E1000_RLPML, max_frame_size);
++
++}
++
++static inline void igb_set_vmolr(struct igb_adapter *adapter, int vfn)
++{
++ struct e1000_hw *hw = &adapter->hw;
++ u32 vmolr;
++
++ /*
++ * VMOLR exists only on the 82576 and newer, so on older hardware
++ * simply return without touching it
++ */
++ if (hw->mac.type < e1000_82576)
++ return;
++
++ vmolr = E1000_READ_REG(hw, E1000_VMOLR(vfn));
++ vmolr |= E1000_VMOLR_AUPE | /* Accept untagged packets */
++ E1000_VMOLR_STRVLAN; /* Strip vlan tags */
++
++ /* clear all bits that might not be set */
++ vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
++
++ if (adapter->RSS_queues > 1 && vfn == adapter->vfs_allocated_count)
++ vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
++ /*
++ * for VMDq only allow the VFs and pool 0 to accept broadcast and
++ * multicast packets
++ */
++ if (vfn <= adapter->vfs_allocated_count)
++ vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
++
++ E1000_WRITE_REG(hw, E1000_VMOLR(vfn), vmolr);
++}
++
++/**
++ * igb_configure_rx_ring - Configure a receive ring after Reset
++ * @adapter: board private structure
++ * @ring: receive ring to be configured
++ *
++ * Configure the Rx unit of the MAC after a reset.
++ **/
++void igb_configure_rx_ring(struct igb_adapter *adapter,
++ struct igb_ring *ring)
++{
++ struct e1000_hw *hw = &adapter->hw;
++ u64 rdba = ring->dma;
++ int reg_idx = ring->reg_idx;
++ u32 srrctl, rxdctl;
++
++ /* disable the queue */
++ rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(reg_idx));
++ E1000_WRITE_REG(hw, E1000_RXDCTL(reg_idx),
++ rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
++
++ /* Set DMA base address registers */
++ E1000_WRITE_REG(hw, E1000_RDBAL(reg_idx),
++ rdba & 0x00000000ffffffffULL);
++ E1000_WRITE_REG(hw, E1000_RDBAH(reg_idx), rdba >> 32);
++ E1000_WRITE_REG(hw, E1000_RDLEN(reg_idx),
++ ring->count * sizeof(union e1000_adv_rx_desc));
++
++ /* initialize head and tail */
++ ring->head = hw->hw_addr + E1000_RDH(reg_idx);
++ ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
++ writel(0, ring->head);
++ writel(0, ring->tail);
++
++ /* set descriptor configuration */
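++ /*
++ * SRRCTL expresses the packet buffer size in 1 KB units and the
++ * header buffer size in 64 byte units; the ALIGN/shift below perform
++ * that conversion
++ */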
++ srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
++ E1000_SRRCTL_BSIZEPKT_SHIFT;
++#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
++ srrctl |= ALIGN(ring->rx_ps_hdr_size, 64) <<
++ E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
++ if (ring->rx_ps_hdr_size)
++ srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
++ else
++#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
++ srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
++
++ E1000_WRITE_REG(hw, E1000_SRRCTL(reg_idx), srrctl);
++
++ /* set filtering for VMDQ pools */
++ igb_set_vmolr(adapter, reg_idx & 0x7);
++
++ /* enable receive descriptor fetching */
++ rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(reg_idx));
++ rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
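++ /* clear the old threshold fields, then program the prefetch, host
++ * and write-back thresholds */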
++ rxdctl &= 0xFFF00000;
++ rxdctl |= IGB_RX_PTHRESH;
++ rxdctl |= IGB_RX_HTHRESH << 8;
++ rxdctl |= IGB_RX_WTHRESH << 16;
++ E1000_WRITE_REG(hw, E1000_RXDCTL(reg_idx), rxdctl);
++}
++
++static inline void igb_set_vlan_stripping(struct igb_adapter *adapter)
++{
++ struct e1000_hw *hw = &adapter->hw;
++ u32 reg;
++
++ /* enable replication vlan tag stripping */
++ reg = E1000_READ_REG(hw, E1000_RPLOLR);
++ reg |= E1000_RPLOLR_STRVLAN;
++ E1000_WRITE_REG(hw, E1000_RPLOLR, reg);
++
++ /* notify HW that the MAC is adding vlan tags */
++ reg = E1000_READ_REG(hw, E1000_DTXCTL);
++ reg |= E1000_DTXCTL_VLAN_ADDED;
++ E1000_WRITE_REG(hw, E1000_DTXCTL, reg);
++}
++
++/**
++ * igb_configure_rx - Configure receive Unit after Reset
++ * @adapter: board private structure
++ *
++ * Configure the Rx unit of the MAC after a reset.
++ **/
++static void igb_configure_rx(struct igb_adapter *adapter)
++{
++ int i;
++
++ /* enable vlan tag stripping for replicated packets */
++ igb_set_vlan_stripping(adapter);
++
++ /* set UTA to appropriate mode */
++ igb_set_uta(adapter);
++
++ /* set the correct pool for the PF default MAC address in entry 0 */
++ igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
++ adapter->vfs_allocated_count);
++
++ /* Setup the HW Rx Head and Tail Descriptor Pointers and
++ * the Base and Length of the Rx Descriptor Ring */
++ for (i = 0; i < adapter->num_rx_queues; i++)
++ igb_configure_rx_ring(adapter, &adapter->rx_ring[i]);
++}
++
++/**
++ * igb_free_tx_resources - Free Tx Resources per Queue
++ * @tx_ring: Tx descriptor ring for a specific queue
++ *
++ * Free all transmit software resources
++ **/
++void igb_free_tx_resources(struct igb_ring *tx_ring)
++{
++ igb_clean_tx_ring(tx_ring);
++
++ vfree(tx_ring->buffer_info);
++ tx_ring->buffer_info = NULL;
++
++ /* if not set, then don't free */
++ if (!tx_ring->desc)
++ return;
++
++ pci_free_consistent(tx_ring->pdev, tx_ring->size,
++ tx_ring->desc, tx_ring->dma);
++
++ tx_ring->desc = NULL;
++}
++
++/**
++ * igb_free_all_tx_resources - Free Tx Resources for All Queues
++ * @adapter: board private structure
++ *
++ * Free all transmit software resources
++ **/
++static void igb_free_all_tx_resources(struct igb_adapter *adapter)
++{
++ int i;
++
++ for (i = 0; i < adapter->num_tx_queues; i++)
++ igb_free_tx_resources(&adapter->tx_ring[i]);
++}
++
++static void igb_unmap_and_free_tx_resource(struct pci_dev *pdev,
++ struct igb_buffer *buffer_info)
++{
++ if (buffer_info->page_dma) {
++ pci_unmap_page(pdev,
++ buffer_info->page_dma,
++ buffer_info->length,
++ PCI_DMA_TODEVICE);
++ buffer_info->page_dma = 0;
++ }
++ if (buffer_info->dma) {
++ pci_unmap_single(pdev,
++ buffer_info->dma,
++ buffer_info->length,
++ PCI_DMA_TODEVICE);
++ buffer_info->dma = 0;
++ }
++ if (buffer_info->skb) {
++ dev_kfree_skb_any(buffer_info->skb);
++ buffer_info->skb = NULL;
++ }
++ buffer_info->time_stamp = 0;
++ buffer_info->next_to_watch = 0;
++ /* buffer_info must be completely set up in the transmit path */
++}
++
++/**
++ * igb_clean_tx_ring - Free Tx Buffers
++ * @tx_ring: ring to be cleaned
++ **/
++static void igb_clean_tx_ring(struct igb_ring *tx_ring)
++{
++ struct igb_buffer *buffer_info;
++ unsigned long size;
++ unsigned int i;
++
++ if (!tx_ring->buffer_info)
++ return;
++ /* Free all the Tx ring sk_buffs */
++
++ for (i = 0; i < tx_ring->count; i++) {
++ buffer_info = &tx_ring->buffer_info[i];
++ igb_unmap_and_free_tx_resource(tx_ring->pdev, buffer_info);
++ }
++
++ size = sizeof(struct igb_buffer) * tx_ring->count;
++ memset(tx_ring->buffer_info, 0, size);
++
++ /* Zero out the descriptor ring */
++ memset(tx_ring->desc, 0, tx_ring->size);
++
++ tx_ring->next_to_use = 0;
++ tx_ring->next_to_clean = 0;
++}
++
++/**
++ * igb_clean_all_tx_rings - Free Tx Buffers for all queues
++ * @adapter: board private structure
++ **/
++static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
++{
++ int i;
++
++ for (i = 0; i < adapter->num_tx_queues; i++)
++ igb_clean_tx_ring(&adapter->tx_ring[i]);
++}
++
++/**
++ * igb_free_rx_resources - Free Rx Resources
++ * @rx_ring: ring to clean the resources from
++ *
++ * Free all receive software resources
++ **/
++void igb_free_rx_resources(struct igb_ring *rx_ring)
++{
++ igb_clean_rx_ring(rx_ring);
++
++ vfree(rx_ring->buffer_info);
++ rx_ring->buffer_info = NULL;
++
++#ifdef IGB_LRO
++ vfree(rx_ring->lro_mgr.lro_arr);
++ rx_ring->lro_mgr.lro_arr = NULL;
++#endif /* IGB_LRO */
++
++ /* if not set, then don't free */
++ if (!rx_ring->desc)
++ return;
++
++ pci_free_consistent(rx_ring->pdev, rx_ring->size,
++ rx_ring->desc, rx_ring->dma);
++
++ rx_ring->desc = NULL;
++}
++
++/**
++ * igb_free_all_rx_resources - Free Rx Resources for All Queues
++ * @adapter: board private structure
++ *
++ * Free all receive software resources
++ **/
++static void igb_free_all_rx_resources(struct igb_adapter *adapter)
++{
++ int i;
++
++ for (i = 0; i < adapter->num_rx_queues; i++)
++ igb_free_rx_resources(&adapter->rx_ring[i]);
++}
++
++/**
++ * igb_clean_rx_ring - Free Rx Buffers per Queue
++ * @rx_ring: ring to free buffers from
++ **/
++static void igb_clean_rx_ring(struct igb_ring *rx_ring)
++{
++ struct igb_buffer *buffer_info;
++ unsigned long size;
++ unsigned int i;
++
++ if (!rx_ring->buffer_info)
++ return;
++
++ /* Free all the Rx ring sk_buffs */
++ for (i = 0; i < rx_ring->count; i++) {
++ buffer_info = &rx_ring->buffer_info[i];
++ if (buffer_info->dma) {
++ if (rx_ring->rx_ps_hdr_size)
++ pci_unmap_single(rx_ring->pdev,
++ buffer_info->dma,
++ rx_ring->rx_ps_hdr_size,
++ PCI_DMA_FROMDEVICE);
++ else
++ pci_unmap_single(rx_ring->pdev,
++ buffer_info->dma,
++ rx_ring->rx_buffer_len,
++ PCI_DMA_FROMDEVICE);
++ buffer_info->dma = 0;
++ }
++
++ if (buffer_info->skb) {
++ dev_kfree_skb(buffer_info->skb);
++ buffer_info->skb = NULL;
++ }
++#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
++ if (buffer_info->page) {
++ if (buffer_info->page_dma)
++ pci_unmap_page(rx_ring->pdev,
++ buffer_info->page_dma,
++ rx_ring->rx_buffer_len,
++ PCI_DMA_FROMDEVICE);
++ put_page(buffer_info->page);
++ buffer_info->page = NULL;
++ buffer_info->page_dma = 0;
++ buffer_info->page_offset = 0;
++ }
++#endif
++ }
++
++ size = sizeof(struct igb_buffer) * rx_ring->count;
++ memset(rx_ring->buffer_info, 0, size);
++
++ /* Zero out the descriptor ring */
++ memset(rx_ring->desc, 0, rx_ring->size);
++
++ rx_ring->next_to_clean = 0;
++ rx_ring->next_to_use = 0;
++}
++
++/**
++ * igb_clean_all_rx_rings - Free Rx Buffers for all queues
++ * @adapter: board private structure
++ **/
++static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
++{
++ int i;
++
++ for (i = 0; i < adapter->num_rx_queues; i++)
++ igb_clean_rx_ring(&adapter->rx_ring[i]);
++}
++
++/**
++ * igb_set_mac - Change the Ethernet Address of the NIC
++ * @netdev: network interface device structure
++ * @p: pointer to an address structure
++ *
++ * Returns 0 on success, negative on failure
++ **/
++static int igb_set_mac(struct net_device *netdev, void *p)
++{
++ struct igb_adapter *adapter = netdev_priv(netdev);
++ struct e1000_hw *hw = &adapter->hw;
++ struct sockaddr *addr = p;
++
++ if (!is_valid_ether_addr(addr->sa_data))
++ return -EADDRNOTAVAIL;
++
++ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
++ memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
++
++ /* set the correct pool for the new PF MAC address in entry 0 */
++ igb_rar_set_qsel(adapter, hw->mac.addr, 0,
++ adapter->vfs_allocated_count);
++
++ return 0;
++}
++
++/**
++ * igb_write_mc_addr_list - write multicast addresses to MTA
++ * @netdev: network interface device structure
++ *
++ * Writes multicast address list to the MTA hash table.
++ * Returns: -ENOMEM on failure
++ * 0 on no addresses written
++ * X on writing X addresses to MTA
++ **/
++static int igb_write_mc_addr_list(struct net_device *netdev)
++{
++ struct igb_adapter *adapter = netdev_priv(netdev);
++ struct e1000_hw *hw = &adapter->hw;
++ struct dev_mc_list *mc_ptr = netdev->mc_list;
++ u8 *mta_list;
++ u32 vmolr = 0;
++ int i;
++
++ if (!netdev->mc_count) {
++ /* nothing to program, so clear mc list */
++ e1000_update_mc_addr_list(hw, NULL, 0);
++ igb_restore_vf_multicasts(adapter);
++ return 0;
++ }
++
++ mta_list = kzalloc(netdev->mc_count * ETH_ALEN, GFP_ATOMIC);
++ if (!mta_list)
++ return -ENOMEM;
++
++ /* set vmolr receive overflow multicast bit */
++ vmolr |= E1000_VMOLR_ROMPE;
++
++ /* The shared function expects a packed array of only addresses. */
++ mc_ptr = netdev->mc_list;
++
++ for (i = 0; i < netdev->mc_count; i++) {
++ if (!mc_ptr)
++ break;
++ memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
++ mc_ptr = mc_ptr->next;
++ }
++ e1000_update_mc_addr_list(hw, mta_list, i);
++ kfree(mta_list);
++
++ return netdev->mc_count;
++}
++
++#ifdef HAVE_SET_RX_MODE
++/**
++ * igb_write_uc_addr_list - write unicast addresses to RAR table
++ * @netdev: network interface device structure
++ *
++ * Writes unicast address list to the RAR table.
++ * Returns: -ENOMEM on failure/insufficient address space
++ * 0 on no addresses written
++ * X on writing X addresses to the RAR table
++ **/
++static int igb_write_uc_addr_list(struct net_device *netdev)
++{
++ struct igb_adapter *adapter = netdev_priv(netdev);
++ struct e1000_hw *hw = &adapter->hw;
++ unsigned int vfn = adapter->vfs_allocated_count;
++ unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
++#ifndef HAVE_NETDEV_HW_ADDR
++ struct dev_mc_list *uc_ptr = netdev->uc_list;
++#endif
++ int count = 0;
++
++ /* return ENOMEM indicating insufficient memory for addresses */
++#ifndef HAVE_NETDEV_HW_ADDR
++ if (netdev->uc_count > rar_entries)
++#else
++ if (netdev->uc.count > rar_entries)
++#endif
++ return -ENOMEM;
++
++#ifdef HAVE_NETDEV_HW_ADDR
++ if (netdev->uc.count && rar_entries) {
++ struct netdev_hw_addr *ha;
++ list_for_each_entry(ha, &netdev->uc.list, list) {
++ if (!rar_entries)
++ break;
++ igb_rar_set_qsel(adapter, ha->addr,
++ rar_entries--,
++ vfn);
++ count++;
++ }
++ }
++#else
++ while (uc_ptr) {
++ igb_rar_set_qsel(adapter, uc_ptr->da_addr,
++ rar_entries--, vfn);
++ uc_ptr = uc_ptr->next;
++ count++;
++ }
++#endif
++ /* write the addresses in reverse order to avoid write combining */
++ for (; rar_entries > 0 ; rar_entries--) {
++ E1000_WRITE_REG(hw, E1000_RAH(rar_entries), 0);
++ E1000_WRITE_REG(hw, E1000_RAL(rar_entries), 0);
++ }
++ E1000_WRITE_FLUSH(hw);
++
++ return count;
++}
++
++#endif
++/**
++ * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
++ * @netdev: network interface device structure
++ *
++ * The set_rx_mode entry point is called whenever the unicast or multicast
++ * address lists or the network interface flags are updated. This routine is
++ * responsible for configuring the hardware for proper unicast, multicast,
++ * promiscuous mode, and all-multi behavior.
++ **/
++static void igb_set_rx_mode(struct net_device *netdev)
++{
++ struct igb_adapter *adapter = netdev_priv(netdev);
++ struct e1000_hw *hw = &adapter->hw;
++ unsigned int vfn = adapter->vfs_allocated_count;
++ u32 rctl, vmolr = 0;
++ int count;
++
++ /* Check for Promiscuous and All Multicast modes */
++ rctl = E1000_READ_REG(hw, E1000_RCTL);
++
++ /* clear the affected bits */
++ rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
++
++ if (netdev->flags & IFF_PROMISC) {
++ rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
++ vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
++ } else {
++ if (netdev->flags & IFF_ALLMULTI) {
++ rctl |= E1000_RCTL_MPE;
++ vmolr |= E1000_VMOLR_MPME;
++ } else {
++ /*
++ * Write addresses to the MTA; if the attempt fails,
++ * fall back to multicast promiscuous mode so that we
++ * can at least receive multicast traffic
++ */
++ count = igb_write_mc_addr_list(netdev);
++ if (count < 0) {
++ rctl |= E1000_RCTL_MPE;
++ vmolr |= E1000_VMOLR_MPME;
++ } else if (count) {
++ vmolr |= E1000_VMOLR_ROMPE;
++ }
++ }
++#ifdef HAVE_SET_RX_MODE
++ /*
++ * Write addresses to the available RAR registers; if there is
++ * not enough space to store all of them, enable unicast
++ * promiscuous mode
++ */
++ count = igb_write_uc_addr_list(netdev);
++ if (count < 0) {
++ rctl |= E1000_RCTL_UPE;
++ vmolr |= E1000_VMOLR_ROPE;
++ }
++#endif
++ rctl |= E1000_RCTL_VFE;
++ }
++ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
++
++ /*
++ * In order to support SR-IOV and eventually VMDq it is necessary to set
++ * the VMOLR to enable the appropriate modes. Without this workaround
++ * we will have issues with VLAN tag stripping not being done for frames
++ * that are only arriving because we are the default pool
++ */
++ if (hw->mac.type < e1000_82576)
++ return;
++
++ vmolr |= E1000_READ_REG(hw, E1000_VMOLR(vfn)) &
++ ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
++ E1000_WRITE_REG(hw, E1000_VMOLR(vfn), vmolr);
++ igb_restore_vf_multicasts(adapter);
++}
++
++/* Need to wait a few seconds after link up to get diagnostic information from
++ * the phy */
++static void igb_update_phy_info(unsigned long data)
++{
++ struct igb_adapter *adapter = (struct igb_adapter *) data;
++ e1000_get_phy_info(&adapter->hw);
++}
++
++/**
++ * igb_has_link - check shared code for link and determine up/down
++ * @adapter: pointer to driver private info
++ **/
++static bool igb_has_link(struct igb_adapter *adapter)
++{
++ struct e1000_hw *hw = &adapter->hw;
++ bool link_active = FALSE;
++ s32 ret_val = 0;
++
++ /* get_link_status is set on LSC (link status) interrupt or
++ * rx sequence error interrupt. get_link_status will stay
++ * false until the e1000_check_for_link establishes link
++ * for copper adapters ONLY
++ */
++ switch (hw->phy.media_type) {
++ case e1000_media_type_copper:
++ if (hw->mac.get_link_status) {
++ ret_val = e1000_check_for_link(hw);
++ link_active = !hw->mac.get_link_status;
++ } else {
++ link_active = TRUE;
++ }
++ break;
++ case e1000_media_type_internal_serdes:
++ ret_val = e1000_check_for_link(hw);
++ link_active = hw->mac.serdes_has_link;
++ break;
++ default:
++ case e1000_media_type_unknown:
++ break;
++ }
++
++ return link_active;
++}
++
++/**
++ * igb_watchdog - Timer Call-back
++ * @data: pointer to adapter cast into an unsigned long
++ **/
++static void igb_watchdog(unsigned long data)
++{
++ struct igb_adapter *adapter = (struct igb_adapter *)data;
++ /* Do the rest outside of interrupt context */
++ schedule_work(&adapter->watchdog_task);
++}
++
++static void igb_watchdog_task(struct work_struct *work)
++{
++ struct igb_adapter *adapter = container_of(work,
++ struct igb_adapter, watchdog_task);
++ struct e1000_hw *hw = &adapter->hw;
++ struct net_device *netdev = adapter->netdev;
++ struct igb_ring *tx_ring = adapter->tx_ring;
++ u32 link;
++ int i;
++
++ link = igb_has_link(adapter);
++
++ if (link) {
++ if (!netif_carrier_ok(netdev)) {
++ u32 ctrl;
++ e1000_get_speed_and_duplex(hw, &adapter->link_speed,
++ &adapter->link_duplex);
++
++ ctrl = E1000_READ_REG(hw, E1000_CTRL);
++ DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s, "
++ "Flow Control: %s\n",
++ adapter->link_speed,
++ adapter->link_duplex == FULL_DUPLEX ?
++ "Full Duplex" : "Half Duplex",
++ ((ctrl & E1000_CTRL_TFCE) && (ctrl &
++ E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
++ E1000_CTRL_RFCE) ? "RX" : ((ctrl &
++ E1000_CTRL_TFCE) ? "TX" : "None")));
++
++ /* tweak tx_queue_len according to speed/duplex and
++ * adjust the timeout factor */
++ netdev->tx_queue_len = adapter->tx_queue_len;
++ adapter->tx_timeout_factor = 1;
++ switch (adapter->link_speed) {
++ case SPEED_10:
++ netdev->tx_queue_len = 10;
++ adapter->tx_timeout_factor = 14;
++ break;
++ case SPEED_100:
++ netdev->tx_queue_len = 100;
++ /* maybe add some timeout factor ? */
++ break;
++ }
++
++ netif_carrier_on(netdev);
++ netif_tx_wake_all_queues(netdev);
++
++ igb_ping_all_vfs(adapter);
++
++ /* link state has changed, schedule phy info update */
++ if (!test_bit(__IGB_DOWN, &adapter->state))
++ mod_timer(&adapter->phy_info_timer,
++ round_jiffies(jiffies + 2 * HZ));
++ }
++ } else {
++ if (netif_carrier_ok(netdev)) {
++ adapter->link_speed = 0;
++ adapter->link_duplex = 0;
++ DPRINTK(LINK, INFO, "NIC Link is Down\n");
++ netif_carrier_off(netdev);
++ netif_tx_stop_all_queues(netdev);
++
++ igb_ping_all_vfs(adapter);
++
++ /* link state has changed, schedule phy info update */
++ if (!test_bit(__IGB_DOWN, &adapter->state))
++ mod_timer(&adapter->phy_info_timer,
++ round_jiffies(jiffies + 2 * HZ));
++ }
++ }
++
++ igb_update_stats(adapter);
++
++ if (!netif_carrier_ok(netdev)) {
++ if (IGB_DESC_UNUSED(tx_ring) + 1 < tx_ring->count) {
++ /* We've lost link, so the controller stops DMA,
++ * but we've got queued Tx work that's never going
++ * to get done, so reset controller to flush Tx.
++ * (Do the reset outside of interrupt context). */
++ adapter->tx_timeout_count++;
++ schedule_work(&adapter->reset_task);
++ }
++ }
++
++ /* Force detection of hung controller every watchdog period */
++ for (i = 0; i < adapter->num_tx_queues; i++)
++ adapter->tx_ring[i].detect_tx_hung = TRUE;
++
++ /* Cause software interrupt to ensure rx ring is cleaned */
++ if (adapter->msix_entries) {
++ u32 eics = 0;
++ for (i = 0; i < adapter->num_q_vectors; i++) {
++ struct igb_q_vector *q_vector = adapter->q_vector[i];
++ eics |= q_vector->eims_value;
++ }
++ E1000_WRITE_REG(hw, E1000_EICS, eics);
++ } else {
++ E1000_WRITE_REG(hw, E1000_ICS, E1000_ICS_RXDMT0);
++ }
++
++ /* Reset the timer */
++ if (!test_bit(__IGB_DOWN, &adapter->state))
++ mod_timer(&adapter->watchdog_timer,
++ round_jiffies(jiffies + 2 * HZ));
++}
++
++enum latency_range {
++ lowest_latency = 0,
++ low_latency = 1,
++ bulk_latency = 2,
++ latency_invalid = 255
++};
++
++
++/**
++ * igb_update_ring_itr - update the dynamic ITR value based on packet size
++ *
++ * Stores a new ITR value based strictly on packet size. This
++ * algorithm is less sophisticated than that used in igb_update_itr,
++ * due to the difficulty of synchronizing statistics across multiple
++ * receive rings. The divisors and thresholds used by this function
++ * were determined based on theoretical maximum wire speed and testing
++ * data, in order to minimize response time while increasing bulk
++ * throughput.
++ * This functionality is controlled by the InterruptThrottleRate module
++ * parameter (see igb_param.c)
++ * NOTE: This function is called only when operating in a multiqueue
++ * receive environment.
++ * @q_vector: pointer to q_vector
++ **/
++static void igb_update_ring_itr(struct igb_q_vector *q_vector)
++{
++ int new_val = q_vector->itr_val;
++ int avg_wire_size = 0;
++ struct igb_adapter *adapter = q_vector->adapter;
++
++ /* For non-gigabit speeds, just fix the interrupt rate at 4000
++ * ints/sec - ITR timer value of 120 ticks.
++ */
++ if (adapter->link_speed != SPEED_1000) {
++ new_val = 976;
++ goto set_itr_val;
++ }
++
++ if (q_vector->rx_ring && q_vector->rx_ring->total_packets) {
++ struct igb_ring *ring = q_vector->rx_ring;
++ avg_wire_size = ring->total_bytes / ring->total_packets;
++ }
++
++ if (q_vector->tx_ring && q_vector->tx_ring->total_packets) {
++ struct igb_ring *ring = q_vector->tx_ring;
++ avg_wire_size = max_t(u32, avg_wire_size,
++ (ring->total_bytes /
++ ring->total_packets));
++ }
++
++ /* if avg_wire_size isn't set no work was done */
++ if (!avg_wire_size)
++ goto clear_counts;
++
++ /* Add 24 bytes to size to account for CRC, preamble, and gap */
++ avg_wire_size += 24;
++
++ /* Don't starve jumbo frames */
++ avg_wire_size = min(avg_wire_size, 3000);
++
++ /* Give a little boost to mid-size frames */
++ if ((avg_wire_size > 300) && (avg_wire_size < 1200))
++ new_val = avg_wire_size / 3;
++ else
++ new_val = avg_wire_size / 2;
++
++set_itr_val:
++ if (new_val != q_vector->itr_val) {
++ q_vector->itr_val = new_val;
++ q_vector->set_itr = 1;
++ }
++clear_counts:
++ if (q_vector->rx_ring) {
++ q_vector->rx_ring->total_bytes = 0;
++ q_vector->rx_ring->total_packets = 0;
++ }
++ if (q_vector->tx_ring) {
++ q_vector->tx_ring->total_bytes = 0;
++ q_vector->tx_ring->total_packets = 0;
++ }
++}
++
++/**
++ * igb_update_itr - update the dynamic ITR value based on statistics
++ * Stores a new ITR value based on packets and byte
++ * counts during the last interrupt. The advantage of per interrupt
++ * computation is faster updates and more accurate ITR for the current
++ * traffic pattern. Constants in this function were computed
++ * based on theoretical maximum wire speed and thresholds were set based
++ * on testing data as well as attempting to minimize response time
++ * while increasing bulk throughput.
++ * This functionality is controlled by the InterruptThrottleRate module
++ * parameter (see igb_param.c)
++ * NOTE: These calculations are only valid when operating in a single-
++ * queue environment.
++ * @adapter: pointer to adapter
++ * @itr_setting: current adapter->itr
++ * @packets: the number of packets during this measurement interval
++ * @bytes: the number of bytes during this measurement interval
++ **/
++static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting,
++ int packets, int bytes)
++{
++ unsigned int retval = itr_setting;
++
++ if (packets == 0)
++ goto update_itr_done;
++
++ switch (itr_setting) {
++ case lowest_latency:
++ /* handle TSO and jumbo frames */
++ if (bytes/packets > 8000)
++ retval = bulk_latency;
++ else if ((packets < 5) && (bytes > 512))
++ retval = low_latency;
++ break;
++ case low_latency: /* 50 usec aka 20000 ints/s */
++ if (bytes > 10000) {
++ /* this if handles the TSO accounting */
++ if (bytes/packets > 8000) {
++ retval = bulk_latency;
++ } else if ((packets < 10) || ((bytes/packets) > 1200)) {
++ retval = bulk_latency;
++ } else if ((packets > 35)) {
++ retval = lowest_latency;
++ }
++ } else if (bytes/packets > 2000) {
++ retval = bulk_latency;
++ } else if (packets <= 2 && bytes < 512) {
++ retval = lowest_latency;
++ }
++ break;
++ case bulk_latency: /* 250 usec aka 4000 ints/s */
++ if (bytes > 25000) {
++ if (packets > 35)
++ retval = low_latency;
++ } else if (bytes < 1500) {
++ retval = low_latency;
++ }
++ break;
++ }
++
++update_itr_done:
++ return retval;
++}
++static void igb_set_itr(struct igb_adapter *adapter)
++{
++ u16 current_itr;
++ u32 new_itr = adapter->itr;
++
++ /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
++ if (adapter->link_speed != SPEED_1000) {
++ current_itr = 0;
++ new_itr = 4000;
++ goto set_itr_now;
++ }
++
++ adapter->rx_itr = igb_update_itr(adapter,
++ adapter->rx_itr,
++ adapter->rx_ring->total_packets,
++ adapter->rx_ring->total_bytes);
++
++ adapter->tx_itr = igb_update_itr(adapter,
++ adapter->tx_itr,
++ adapter->tx_ring->total_packets,
++ adapter->tx_ring->total_bytes);
++ current_itr = max(adapter->rx_itr, adapter->tx_itr);
++
++ /* conservative mode (itr 3) eliminates the lowest_latency setting */
++ if (adapter->itr_setting == 3 && current_itr == lowest_latency)
++ current_itr = low_latency;
++
++ switch (current_itr) {
++ /* counts and packets in update_itr are dependent on these numbers */
++ case lowest_latency:
++ new_itr = 56; /* aka 70,000 ints/sec */
++ break;
++ case low_latency:
++ new_itr = 196; /* aka 20,000 ints/sec */
++ break;
++ case bulk_latency:
++ new_itr = 980; /* aka 4,000 ints/sec */
++ break;
++ default:
++ break;
++ }
++
++set_itr_now:
++ adapter->rx_ring->total_bytes = 0;
++ adapter->rx_ring->total_packets = 0;
++ adapter->tx_ring->total_bytes = 0;
++ adapter->tx_ring->total_packets = 0;
++
++ if (new_itr != adapter->itr) {
++ struct igb_q_vector *q_vector = adapter->q_vector[0];
++ /* this attempts to bias the interrupt rate towards Bulk
++ * by adding intermediate steps when interrupt rate is
++ * increasing */
++ new_itr = new_itr > adapter->itr ?
++ max((new_itr * adapter->itr) /
++ (new_itr + (adapter->itr >> 2)), new_itr) :
++ new_itr;
++ /* Don't write the value here; it resets the adapter's
++ * internal timer, and causes us to delay far longer than
++ * we should between interrupts. Instead, we write the ITR
++ * value at the beginning of the next interrupt so the timing
++ * ends up being correct.
++ */
++ adapter->itr = new_itr;
++ q_vector->itr_val = new_itr;
++ q_vector->set_itr = 1;
++ }
++
++ return;
++}
++
++#define IGB_TX_FLAGS_CSUM 0x00000001
++#define IGB_TX_FLAGS_VLAN 0x00000002
++#define IGB_TX_FLAGS_TSO 0x00000004
++#define IGB_TX_FLAGS_IPV4 0x00000008
++#define IGB_TX_FLAGS_TSTAMP 0x00000010
++#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
++#define IGB_TX_FLAGS_VLAN_SHIFT 16
++
++static inline int igb_tso_adv(struct igb_ring *tx_ring,
++ struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
++{
++#ifdef NETIF_F_TSO
++ struct e1000_adv_tx_context_desc *context_desc;
++ unsigned int i;
++ int err;
++ struct igb_buffer *buffer_info;
++ u32 info = 0, tu_cmd = 0;
++ u32 mss_l4len_idx, l4len;
++ *hdr_len = 0;
++
++ if (skb_header_cloned(skb)) {
++ err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
++ if (err)
++ return err;
++ }
++
++ l4len = tcp_hdrlen(skb);
++ *hdr_len += l4len;
++
++ if (skb->protocol == htons(ETH_P_IP)) {
++ struct iphdr *iph = ip_hdr(skb);
++ iph->tot_len = 0;
++ iph->check = 0;
++ tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
++ iph->daddr, 0,
++ IPPROTO_TCP,
++ 0);
++#ifdef NETIF_F_TSO6
++ } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
++ ipv6_hdr(skb)->payload_len = 0;
++ tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
++ &ipv6_hdr(skb)->daddr,
++ 0, IPPROTO_TCP, 0);
++#endif
++ }
++
++ i = tx_ring->next_to_use;
++
++ buffer_info = &tx_ring->buffer_info[i];
++ context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
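++ /*
++ * build an advanced context descriptor carrying the VLAN tag, MAC/IP
++ * header lengths, MSS and L4 length that the following data
++ * descriptors reference by context index
++ */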
++ /* VLAN MACLEN IPLEN */
++ if (tx_flags & IGB_TX_FLAGS_VLAN)
++ info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
++ info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
++ *hdr_len += skb_network_offset(skb);
++ info |= skb_network_header_len(skb);
++ *hdr_len += skb_network_header_len(skb);
++ context_desc->vlan_macip_lens = cpu_to_le32(info);
++
++ /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
++ tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
++
++ if (skb->protocol == htons(ETH_P_IP))
++ tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
++ tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
++
++ context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
++
++ /* MSS L4LEN IDX */
++ mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
++ mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
++ mss_l4len_idx |= tx_ring->ctx_idx;
++
++ context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
++ context_desc->seqnum_seed = 0;
++
++ buffer_info->time_stamp = jiffies;
++ buffer_info->next_to_watch = i;
++ buffer_info->dma = 0;
++ i++;
++ if (i == tx_ring->count)
++ i = 0;
++
++ tx_ring->next_to_use = i;
++
++ return TRUE;
++#else
++ return FALSE;
++#endif /* NETIF_F_TSO */
++}
++
++static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
++ struct sk_buff *skb, u32 tx_flags)
++{
++ struct e1000_adv_tx_context_desc *context_desc;
++ struct pci_dev *pdev = tx_ring->pdev;
++ struct igb_buffer *buffer_info;
++ u32 info = 0, tu_cmd = 0;
++ unsigned int i;
++
++ if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
++ (tx_flags & IGB_TX_FLAGS_VLAN)) {
++ i = tx_ring->next_to_use;
++ buffer_info = &tx_ring->buffer_info[i];
++ context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
++
++ if (tx_flags & IGB_TX_FLAGS_VLAN)
++ info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
++
++ info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
++ if (skb->ip_summed == CHECKSUM_PARTIAL)
++ info |= skb_network_header_len(skb);
++
++ context_desc->vlan_macip_lens = cpu_to_le32(info);
++
++ tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
++
++ if (skb->ip_summed == CHECKSUM_PARTIAL) {
++ __be16 protocol;
++
++ if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
++ const struct vlan_ethhdr *vhdr =
++ (const struct vlan_ethhdr*)skb->data;
++
++ protocol = vhdr->h_vlan_encapsulated_proto;
++ } else {
++ protocol = skb->protocol;
++ }
++
++ switch (protocol) {
++ case __constant_htons(ETH_P_IP):
++ tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
++ if (ip_hdr(skb)->protocol == IPPROTO_TCP)
++ tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
++ else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
++ tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
++ break;
++#ifdef NETIF_F_IPV6_CSUM
++ case __constant_htons(ETH_P_IPV6):
++ /* XXX what about other V6 headers?? */
++ if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
++ tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
++ else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
++ tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
++ break;
++#endif
++ default:
++ if (unlikely(net_ratelimit())) {
++ dev_warn(&pdev->dev,
++ "partial checksum but proto=%x!\n",
++ skb->protocol);
++ }
++ break;
++ }
++ }
++
++ context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
++ context_desc->seqnum_seed = 0;
++ context_desc->mss_l4len_idx = cpu_to_le32(tx_ring->ctx_idx);
++
++ buffer_info->time_stamp = jiffies;
++ buffer_info->next_to_watch = i;
++ buffer_info->dma = 0;
++
++ i++;
++ if (i == tx_ring->count)
++ i = 0;
++ tx_ring->next_to_use = i;
++
++ return TRUE;
++ }
++ return FALSE;
++}
++
++#define IGB_MAX_TXD_PWR 16
++#define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR)
++
++static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
++ unsigned int first)
++{
++ struct igb_buffer *buffer_info;
++ unsigned int len = skb_headlen(skb);
++ unsigned int count = 0, i;
++ unsigned int f;
++
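++ /* the linear part of the skb is mapped into buffer_info->dma and each
++ * page fragment into buffer_info->page_dma; igb_tx_queue_adv later
++ * uses whichever of the two is set for each descriptor */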
++ i = tx_ring->next_to_use;
++
++ buffer_info = &tx_ring->buffer_info[i];
++ BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
++ buffer_info->length = len;
++ /* set time_stamp *before* dma to help avoid a possible race */
++ buffer_info->time_stamp = jiffies;
++ buffer_info->next_to_watch = i;
++ buffer_info->dma = pci_map_single(tx_ring->pdev, skb->data, len,
++ PCI_DMA_TODEVICE);
++ count++;
++
++ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
++ struct skb_frag_struct *frag;
++
++ frag = &skb_shinfo(skb)->frags[f];
++ len = frag->size;
++
++ i++;
++ if (i == tx_ring->count)
++ i = 0;
++
++ buffer_info = &tx_ring->buffer_info[i];
++ BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
++ buffer_info->length = len;
++ buffer_info->time_stamp = jiffies;
++ buffer_info->next_to_watch = i;
++ buffer_info->page_dma = pci_map_page(tx_ring->pdev,
++ frag->page,
++ frag->page_offset,
++ len,
++ PCI_DMA_TODEVICE);
++
++ count++;
++ }
++
++ tx_ring->buffer_info[i].skb = skb;
++ tx_ring->buffer_info[first].next_to_watch = i;
++
++ return count;
++}
++
++static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
++ int tx_flags, int count, u32 paylen,
++ u8 hdr_len)
++{
++ union e1000_adv_tx_desc *tx_desc;
++ struct igb_buffer *buffer_info;
++ u32 olinfo_status = 0, cmd_type_len;
++ unsigned int i = tx_ring->next_to_use;
++
++ cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
++ E1000_ADVTXD_DCMD_DEXT);
++
++ if (tx_flags & IGB_TX_FLAGS_VLAN)
++ cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
++
++ if (tx_flags & IGB_TX_FLAGS_TSTAMP)
++ cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
++
++ if (tx_flags & IGB_TX_FLAGS_TSO) {
++ cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
++
++ /* insert tcp checksum */
++ olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
++
++ /* insert ip checksum */
++ if (tx_flags & IGB_TX_FLAGS_IPV4)
++ olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
++
++ } else if (tx_flags & IGB_TX_FLAGS_CSUM) {
++ olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
++ }
++
++ if (tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_TSO |
++ IGB_TX_FLAGS_VLAN))
++ olinfo_status |= tx_ring->ctx_idx;
++
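++ /* PAYLEN is the payload length excluding the TSO header bytes
++ * accumulated in hdr_len (hdr_len is 0 for non-TSO frames) */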
++ olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
++
++ do {
++ buffer_info = &tx_ring->buffer_info[i];
++ tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
++ tx_desc->read.buffer_addr = buffer_info->dma ?
++ cpu_to_le64(buffer_info->dma) :
++ cpu_to_le64(buffer_info->page_dma);
++ tx_desc->read.cmd_type_len =
++ cpu_to_le32(cmd_type_len | buffer_info->length);
++ tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
++ count--;
++ i++;
++ if (i == tx_ring->count)
++ i = 0;
++ } while (count > 0);
++
++ tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD);
++ /* Force memory writes to complete before letting h/w
++ * know there are new descriptors to fetch. (Only
++ * applicable for weak-ordered memory model archs,
++ * such as IA-64). */
++ wmb();
++
++ tx_ring->next_to_use = i;
++ writel(i, tx_ring->tail);
++ /* we need this if more than one processor can write to our tail
++ * at a time; it synchronizes I/O on IA64/Altix systems */
++ mmiowb();
++}
++
++static int __igb_maybe_stop_tx(struct net_device *netdev,
++ struct igb_ring *tx_ring, int size)
++{
++ if (netif_is_multiqueue(netdev))
++ netif_stop_subqueue(netdev, tx_ring->queue_index);
++ else
++ netif_stop_queue(netdev);
++
++ /* Herbert's original patch had:
++ * smp_mb__after_netif_stop_queue();
++ * but since that doesn't exist yet, just open code it. */
++ smp_mb();
++
++ /* We need to check again in a case another CPU has just
++ * made room available. */
++ if (IGB_DESC_UNUSED(tx_ring) < size)
++ return -EBUSY;
++
++ /* A reprieve! */
++ if (netif_is_multiqueue(netdev))
++ netif_wake_subqueue(netdev, tx_ring->queue_index);
++ else
++ netif_wake_queue(netdev);
++ ++tx_ring->restart_queue;
++ return 0;
++}
++
++static int igb_maybe_stop_tx(struct net_device *netdev,
++ struct igb_ring *tx_ring, int size)
++{
++ if (IGB_DESC_UNUSED(tx_ring) >= size)
++ return 0;
++ return __igb_maybe_stop_tx(netdev, tx_ring, size);
++}
++
++#define TXD_USE_COUNT(S) (((S) >> (IGB_MAX_TXD_PWR)) + 1)
++
++static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
++ struct net_device *netdev,
++ struct igb_ring *tx_ring)
++{
++ struct igb_adapter *adapter = netdev_priv(netdev);
++ unsigned int first;
++ unsigned int tx_flags = 0;
++ u8 hdr_len = 0;
++ int tso = 0;
++#ifdef SIOCSHWTSTAMP
++ union skb_shared_tx *shtx = skb_tx(skb);
++#endif
++
++ if (test_bit(__IGB_DOWN, &adapter->state)) {
++ dev_kfree_skb_any(skb);
++ return NETDEV_TX_OK;
++ }
++
++ if (skb->len <= 0) {
++ dev_kfree_skb_any(skb);
++ return NETDEV_TX_OK;
++ }
++
++ /* need: 1 descriptor per page,
++ * + 2 desc gap to keep tail from touching head,
++ * + 1 desc for skb->data,
++ * + 1 desc for context descriptor,
++ * otherwise try next time */
++ if (igb_maybe_stop_tx(netdev, tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
++ /* this is a hard error */
++ return NETDEV_TX_BUSY;
++ }
++
++#ifdef SIOCSHWTSTAMP
++ if (unlikely(shtx->hardware)) {
++ shtx->in_progress = 1;
++ tx_flags |= IGB_TX_FLAGS_TSTAMP;
++ }
++
++#endif
++ if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
++ tx_flags |= IGB_TX_FLAGS_VLAN;
++ tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
++ }
++
++ if (skb->protocol == htons(ETH_P_IP))
++ tx_flags |= IGB_TX_FLAGS_IPV4;
++
++ first = tx_ring->next_to_use;
++#ifdef NETIF_F_TSO
++ if (skb_is_gso(skb)) {
++ tso = igb_tso_adv(tx_ring, skb, tx_flags, &hdr_len);
++
++ if (tso < 0) {
++ dev_kfree_skb_any(skb);
++ return NETDEV_TX_OK;
++ }
++ }
++
++#endif
++ if (tso)
++ tx_flags |= IGB_TX_FLAGS_TSO;
++ else if (igb_tx_csum_adv(tx_ring, skb, tx_flags) &&
++ (skb->ip_summed == CHECKSUM_PARTIAL))
++ tx_flags |= IGB_TX_FLAGS_CSUM;
++
++ igb_tx_queue_adv(tx_ring, tx_flags,
++ igb_tx_map_adv(tx_ring, skb, first),
++ skb->len, hdr_len);
++
++ netdev->trans_start = jiffies;
++
++ /* Make sure there is space in the ring for the next send. */
++ igb_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 4);
++
++ return NETDEV_TX_OK;
++}
++
++static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *netdev)
++{
++ struct igb_adapter *adapter = netdev_priv(netdev);
++ struct igb_ring *tx_ring;
++
++#ifdef HAVE_TX_MQ
++ int r_idx = 0;
++ r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1);
++ tx_ring = adapter->multi_tx_table[r_idx];
++#else
++ tx_ring = &adapter->tx_ring[0];
++#endif
++
++ /* This goes back to the question of how to logically map a tx queue
++ * to a flow. Right now, performance is impacted slightly negatively
++ * if using multiple tx queues. If the stack breaks away from a
++ * single qdisc implementation, we can look at this again. */
++ return igb_xmit_frame_ring_adv(skb, netdev, tx_ring);
++}
++
++/**
++ * igb_tx_timeout - Respond to a Tx Hang
++ * @netdev: network interface device structure
++ **/
++static void igb_tx_timeout(struct net_device *netdev)
++{
++ struct igb_adapter *adapter = netdev_priv(netdev);
++ struct e1000_hw *hw = &adapter->hw;
++
++ /* Do the reset outside of interrupt context */
++ adapter->tx_timeout_count++;
++
++ schedule_work(&adapter->reset_task);
++ E1000_WRITE_REG(hw, E1000_EICS,
++ (adapter->eims_enable_mask & ~adapter->eims_other));
++}
++
++static void igb_reset_task(struct work_struct *work)
++{
++ struct igb_adapter *adapter;
++ adapter = container_of(work, struct igb_adapter, reset_task);
++
++ igb_reinit_locked(adapter);
++}
++
++/**
++ * igb_get_stats - Get System Network Statistics
++ * @netdev: network interface device structure
++ *
++ * Returns the address of the device statistics structure.
++ * The statistics are actually updated from the timer callback.
++ **/
++static struct net_device_stats *igb_get_stats(struct net_device *netdev)
++{
++ struct igb_adapter *adapter = netdev_priv(netdev);
++
++ /* only return the current stats */
++ return &adapter->net_stats;
++}
++
++/**
++ * igb_change_mtu - Change the Maximum Transfer Unit
++ * @netdev: network interface device structure
++ * @new_mtu: new value for maximum frame size
++ *
++ * Returns 0 on success, negative on failure
++ **/
++static int igb_change_mtu(struct net_device *netdev, int new_mtu)
++{
++ struct igb_adapter *adapter = netdev_priv(netdev);
++ int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
++ u32 rx_buffer_len, i;
++ u16 rx_ps_hdr_size = 0;
++
++ if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
++ DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
++ return -EINVAL;
++ }
++
++#define MAX_STD_JUMBO_FRAME_SIZE 9234
++ if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
++ DPRINTK(PROBE, ERR, "MTU > 9216 not supported.\n");
++ return -EINVAL;
++ }
++
++ while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
++ msleep(1);
++
++ /* NOTE: dev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
++ * means we reserve 2 more; this pushes us to allocate from the next
++ * larger slab size, e.g. RXBUFFER_2048 --> size-4096 slab
++ */
++
++ /* igb_down has a dependency on max_frame_size */
++ adapter->max_frame_size = max_frame;
++
++ if (max_frame <= IGB_RXBUFFER_1024)
++ rx_buffer_len = IGB_RXBUFFER_1024;
++ else if (max_frame <= IGB_RXBUFFER_2048)
++ rx_buffer_len = IGB_RXBUFFER_2048;
++#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
++ else
++#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
++ rx_buffer_len = IGB_RXBUFFER_16384;
++#else
++ rx_buffer_len = PAGE_SIZE / 2;
++#endif
++#else
++ else if (max_frame <= IGB_RXBUFFER_4096)
++ rx_buffer_len = IGB_RXBUFFER_4096;
++ else if (max_frame <= IGB_RXBUFFER_8192)
++ rx_buffer_len = IGB_RXBUFFER_8192;
++ else
++ rx_buffer_len = IGB_RXBUFFER_16384;
++#endif
++
++ /* adjust allocation if LPE protects us, and we aren't using SBP */
++ if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
++ (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))
++ rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
++
++ if (netif_running(netdev))
++ igb_down(adapter);
++
++ DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n",
++ netdev->mtu, new_mtu);
++ netdev->mtu = new_mtu;
++
++#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
++ /* 82575 and greater support packet-split where the protocol
++ * header is placed in skb->data and the packet data is
++ * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
++ * In the case of a non-split, skb->data is linearly filled,
++ * followed by the page buffers. Therefore, skb->data is
++ * sized to hold the largest protocol header.
++ */
++ /* allocations using alloc_page take too long for regular MTU
++ * so only enable packet split for jumbo frames */
++ if (new_mtu > ETH_DATA_LEN)
++ rx_ps_hdr_size = IGB_RXBUFFER_128;
++#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
++
++ for (i = 0; i < adapter->num_rx_queues; i++) {
++ struct igb_ring *rx_ring = &adapter->rx_ring[i];
++ rx_ring->rx_buffer_len = rx_buffer_len;
++ rx_ring->rx_ps_hdr_size = rx_ps_hdr_size;
++ }
++
++ if (netif_running(netdev))
++ igb_up(adapter);
++ else
++ igb_reset(adapter);
++
++ clear_bit(__IGB_RESETTING, &adapter->state);
++
++ return 0;
++}
++
++/**
++ * igb_update_stats - Update the board statistics counters
++ * @adapter: board private structure
++ **/
++
++void igb_update_stats(struct igb_adapter *adapter)
++{
++ struct e1000_hw *hw = &adapter->hw;
++#ifdef HAVE_PCI_ERS
++ struct pci_dev *pdev = adapter->pdev;
++#endif
++ u16 phy_tmp;
++
++#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
++
++ /*
++ * Prevent stats update while adapter is being reset, or if the pci
++ * connection is down.
++ */
++ if (adapter->link_speed == 0)
++ return;
++#ifdef HAVE_PCI_ERS
++ if (pci_channel_offline(pdev))
++ return;
++#endif
++
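++ /* most of these registers are clear-on-read, so each read below is
++ * accumulated into the driver's running counters */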
++ /* read stats registers */
++ adapter->stats.crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
++ adapter->stats.gprc += E1000_READ_REG(hw, E1000_GPRC);
++ adapter->stats.gorc += E1000_READ_REG(hw, E1000_GORCL);
++ E1000_READ_REG(hw, E1000_GORCH); /* clear GORCL */
++ adapter->stats.bprc += E1000_READ_REG(hw, E1000_BPRC);
++ adapter->stats.mprc += E1000_READ_REG(hw, E1000_MPRC);
++ adapter->stats.roc += E1000_READ_REG(hw, E1000_ROC);
++
++ adapter->stats.prc64 += E1000_READ_REG(hw, E1000_PRC64);
++ adapter->stats.prc127 += E1000_READ_REG(hw, E1000_PRC127);
++ adapter->stats.prc255 += E1000_READ_REG(hw, E1000_PRC255);
++ adapter->stats.prc511 += E1000_READ_REG(hw, E1000_PRC511);
++ adapter->stats.prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
++ adapter->stats.prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
++ adapter->stats.symerrs += E1000_READ_REG(hw, E1000_SYMERRS);
++ adapter->stats.sec += E1000_READ_REG(hw, E1000_SEC);
++
++ adapter->stats.mpc += E1000_READ_REG(hw, E1000_MPC);
++ adapter->stats.scc += E1000_READ_REG(hw, E1000_SCC);
++ adapter->stats.ecol += E1000_READ_REG(hw, E1000_ECOL);
++ adapter->stats.mcc += E1000_READ_REG(hw, E1000_MCC);
++ adapter->stats.latecol += E1000_READ_REG(hw, E1000_LATECOL);
++ adapter->stats.dc += E1000_READ_REG(hw, E1000_DC);
++ adapter->stats.rlec += E1000_READ_REG(hw, E1000_RLEC);
++ adapter->stats.xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
++ adapter->stats.xontxc += E1000_READ_REG(hw, E1000_XONTXC);
++ adapter->stats.xoffrxc += E1000_READ_REG(hw, E1000_XOFFRXC);
++ adapter->stats.xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
++ adapter->stats.fcruc += E1000_READ_REG(hw, E1000_FCRUC);
++ adapter->stats.gptc += E1000_READ_REG(hw, E1000_GPTC);
++ adapter->stats.gotc += E1000_READ_REG(hw, E1000_GOTCL);
++ E1000_READ_REG(hw, E1000_GOTCH); /* clear GOTCL */
++ adapter->stats.rnbc += E1000_READ_REG(hw, E1000_RNBC);
++ adapter->stats.ruc += E1000_READ_REG(hw, E1000_RUC);
++ adapter->stats.rfc += E1000_READ_REG(hw, E1000_RFC);
++ adapter->stats.rjc += E1000_READ_REG(hw, E1000_RJC);
++ adapter->stats.tor += E1000_READ_REG(hw, E1000_TORH);
++ adapter->stats.tot += E1000_READ_REG(hw, E1000_TOTH);
++ adapter->stats.tpr += E1000_READ_REG(hw, E1000_TPR);
++
++ adapter->stats.ptc64 += E1000_READ_REG(hw, E1000_PTC64);
++ adapter->stats.ptc127 += E1000_READ_REG(hw, E1000_PTC127);
++ adapter->stats.ptc255 += E1000_READ_REG(hw, E1000_PTC255);
++ adapter->stats.ptc511 += E1000_READ_REG(hw, E1000_PTC511);
++ adapter->stats.ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
++ adapter->stats.ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
++
++ adapter->stats.mptc += E1000_READ_REG(hw, E1000_MPTC);
++ adapter->stats.bptc += E1000_READ_REG(hw, E1000_BPTC);
++
++ adapter->stats.tpt += E1000_READ_REG(hw, E1000_TPT);
++ adapter->stats.colc += E1000_READ_REG(hw, E1000_COLC);
++
++ adapter->stats.algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
++ adapter->stats.rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
++ adapter->stats.tncrs += E1000_READ_REG(hw, E1000_TNCRS);
++ adapter->stats.tsctc += E1000_READ_REG(hw, E1000_TSCTC);
++ adapter->stats.tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);
++
++ adapter->stats.iac += E1000_READ_REG(hw, E1000_IAC);
++ adapter->stats.icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);
++ adapter->stats.icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
++ adapter->stats.icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
++ adapter->stats.ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
++ adapter->stats.ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
++ adapter->stats.ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
++ adapter->stats.ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
++ adapter->stats.icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
++
++ /* Fill out the OS statistics structure */
++ adapter->net_stats.multicast = adapter->stats.mprc;
++ adapter->net_stats.collisions = adapter->stats.colc;
++
++ /* Rx Errors */
++
++ /* RLEC on some newer hardware can be incorrect so build
++ * our own version based on RUC and ROC */
++ adapter->net_stats.rx_errors = adapter->stats.rxerrc +
++ adapter->stats.crcerrs + adapter->stats.algnerrc +
++ adapter->stats.ruc + adapter->stats.roc +
++ adapter->stats.cexterr;
++ adapter->net_stats.rx_length_errors = adapter->stats.ruc +
++ adapter->stats.roc;
++ adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
++ adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
++ adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
++
++ /* Tx Errors */
++ adapter->net_stats.tx_errors = adapter->stats.ecol +
++ adapter->stats.latecol;
++ adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
++ adapter->net_stats.tx_window_errors = adapter->stats.latecol;
++ adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;
++
++ /* Tx Dropped needs to be maintained elsewhere */
++
++ /* Phy Stats */
++ if (hw->phy.media_type == e1000_media_type_copper) {
++ if ((adapter->link_speed == SPEED_1000) &&
++ (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
++ phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
++ adapter->phy_stats.idle_errors += phy_tmp;
++ }
++ }
++
++ /* Management Stats */
++ adapter->stats.mgptc += E1000_READ_REG(hw, E1000_MGTPTC);
++ adapter->stats.mgprc += E1000_READ_REG(hw, E1000_MGTPRC);
++ adapter->stats.mgpdc += E1000_READ_REG(hw, E1000_MGTPDC);
++}
++
++static irqreturn_t igb_msix_other(int irq, void *data)
++{
++ struct igb_adapter *adapter = data;
++ struct e1000_hw *hw = &adapter->hw;
++ u32 icr = E1000_READ_REG(hw, E1000_ICR);
++ /* reading ICR causes bit 31 of EICR to be cleared */
++
++ if (icr & E1000_ICR_DOUTSYNC) {
++ /* HW is reporting DMA is out of sync */
++ adapter->stats.doosync++;
++ }
++
++ /* Check for a mailbox event */
++ if (icr & E1000_ICR_VMMB)
++ igb_msg_task(adapter);
++
++ if (!(icr & E1000_ICR_LSC))
++ goto no_link_interrupt;
++ hw->mac.get_link_status = 1;
++ /* guard against interrupt when we're going down */
++ if (!test_bit(__IGB_DOWN, &adapter->state))
++ mod_timer(&adapter->watchdog_timer, jiffies + 1);
++
++no_link_interrupt:
++ if (adapter->vfs_allocated_count)
++ E1000_WRITE_REG(hw, E1000_IMS,
++ E1000_IMS_LSC |
++ E1000_IMS_VMMB |
++ E1000_IMS_DOUTSYNC);
++ else
++ E1000_WRITE_REG(hw, E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC);
++ E1000_WRITE_REG(hw, E1000_EIMS, adapter->eims_other);
++
++ return IRQ_HANDLED;
++}
++
++
++static void igb_write_itr(struct igb_q_vector *q_vector)
++{
++ u32 itr_val = q_vector->itr_val & 0x7FFC;
++
++ if (!q_vector->set_itr)
++ return;
++
++ if (!itr_val)
++ itr_val = 0x4;
++
++ if (q_vector->itr_shift)
++ itr_val |= itr_val << q_vector->itr_shift;
++ else
++ itr_val |= 0x8000000;
++
++ writel(itr_val, q_vector->itr_register);
++ q_vector->set_itr = 0;
++}
++
++static irqreturn_t igb_msix_ring(int irq, void *data)
++{
++ struct igb_q_vector *q_vector = data;
++
++ /* Write the ITR value calculated from the previous interrupt. */
++ igb_write_itr(q_vector);
++
++ napi_schedule(&q_vector->napi);
++
++ return IRQ_HANDLED;
++}
++
++#ifdef IGB_DCA
++static void igb_update_dca(struct igb_q_vector *q_vector)
++{
++ struct igb_adapter *adapter = q_vector->adapter;
++ struct e1000_hw *hw = &adapter->hw;
++ int cpu = get_cpu();
++
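++ /* if the vector has migrated to a different CPU, re-tag its descriptor
++ * traffic with that CPU's DCA tag so cache warming targets the core
++ * that will actually process the data */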
++ if (q_vector->cpu == cpu)
++ goto out_no_update;
++
++ if (q_vector->tx_ring) {
++ int q = q_vector->tx_ring->reg_idx;
++ u32 dca_txctrl = E1000_READ_REG(hw, E1000_DCA_TXCTRL(q));
++ if (hw->mac.type == e1000_82575) {
++ dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
++ dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
++ } else {
++ dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
++ dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
++ E1000_DCA_TXCTRL_CPUID_SHIFT_82576;
++ }
++ dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
++ E1000_WRITE_REG(hw, E1000_DCA_TXCTRL(q), dca_txctrl);
++ }
++ if (q_vector->rx_ring) {
++ int q = q_vector->rx_ring->reg_idx;
++ u32 dca_rxctrl = E1000_READ_REG(hw, E1000_DCA_RXCTRL(q));
++ if (hw->mac.type == e1000_82575) {
++ dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
++ dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
++ } else {
++ dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
++ dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
++ E1000_DCA_RXCTRL_CPUID_SHIFT_82576;
++ }
++ dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
++ dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
++ dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
++ E1000_WRITE_REG(hw, E1000_DCA_RXCTRL(q), dca_rxctrl);
++ }
++ q_vector->cpu = cpu;
++out_no_update:
++ put_cpu();
++}
++
++static void igb_setup_dca(struct igb_adapter *adapter)
++{
++ struct e1000_hw *hw = &adapter->hw;
++ int i;
++
++ if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
++ return;
++
++ /* Always use CB2 mode; the difference is masked in the CB driver. */
++ E1000_WRITE_REG(hw, E1000_DCA_CTRL, 2);
++
++ for (i = 0; i < adapter->num_q_vectors; i++) {
++ struct igb_q_vector *q_vector = adapter->q_vector[i];
++ q_vector->cpu = -1;
++ igb_update_dca(q_vector);
++ }
++}
++
++static int __igb_notify_dca(struct device *dev, void *data)
++{
++ struct net_device *netdev = dev_get_drvdata(dev);
++ struct igb_adapter *adapter = netdev_priv(netdev);
++ struct e1000_hw *hw = &adapter->hw;
++ unsigned long event = *(unsigned long *)data;
++
++ switch (event) {
++ case DCA_PROVIDER_ADD:
++ /* if already enabled, don't do it again */
++ if (adapter->flags & IGB_FLAG_DCA_ENABLED)
++ break;
++ if (dca_add_requester(dev) == E1000_SUCCESS) {
++ adapter->flags |= IGB_FLAG_DCA_ENABLED;
++ DPRINTK(PROBE, INFO, "DCA enabled\n");
++ igb_setup_dca(adapter);
++ break;
++ }
++ /* Fall Through since DCA is disabled. */
++ case DCA_PROVIDER_REMOVE:
++ if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
++ /* without this a class_device is left
++ * hanging around in the sysfs model */
++ dca_remove_requester(dev);
++ DPRINTK(PROBE, INFO, "DCA disabled\n");
++ adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
++ E1000_WRITE_REG(hw, E1000_DCA_CTRL, 1);
++ }
++ break;
++ }
++
++ return E1000_SUCCESS;
++}
++
++static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
++ void *p)
++{
++ int ret_val;
++
++ ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
++ __igb_notify_dca);
++
++ return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
++}
++#endif /* IGB_DCA */
++
++static void igb_ping_all_vfs(struct igb_adapter *adapter)
++{
++ struct e1000_hw *hw = &adapter->hw;
++ u32 ping;
++ int i;
++
++ for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
++ ping = E1000_PF_CONTROL_MSG;
++ if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
++ ping |= E1000_VT_MSGTYPE_CTS;
++ e1000_write_mbx(hw, &ping, 1, i);
++ }
++}
++
++static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
++{
++
++ struct e1000_hw *hw = &adapter->hw;
++ u32 vmolr = E1000_READ_REG(hw, E1000_VMOLR(vf));
++ struct vf_data_storage *vf_data = &adapter->vf_data[vf];
++
++ vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
++ IGB_VF_FLAG_MULTI_PROMISC);
++ vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
++
++#ifdef IGB_ENABLE_VF_PROMISC
++ if (*msgbuf & E1000_VF_SET_PROMISC_UNICAST) {
++ vmolr |= E1000_VMOLR_ROPE;
++ vf_data->flags |= IGB_VF_FLAG_UNI_PROMISC;
++ *msgbuf &= ~E1000_VF_SET_PROMISC_UNICAST;
++ }
++#endif
++ if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
++ vmolr |= E1000_VMOLR_MPME;
++ vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
++ *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
++ } else {
++ /*
++ * if we have hashes and we are clearing a multicast promisc
++ * flag we need to write the hashes to the MTA as this step
++ * was previously skipped
++ */
++ if (vf_data->num_vf_mc_hashes > 30) {
++ vmolr |= E1000_VMOLR_MPME;
++ } else if (vf_data->num_vf_mc_hashes) {
++ int j;
++ vmolr |= E1000_VMOLR_ROMPE;
++ for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
++ hw->mac.ops.mta_set(hw,
++ vf_data->vf_mc_hashes[j]);
++ }
++ }
++
++ E1000_WRITE_REG(hw, E1000_VMOLR(vf), vmolr);
++
++ /* there are flags left unprocessed, likely not supported */
++ if (*msgbuf & E1000_VT_MSGINFO_MASK)
++ return -EINVAL;
++
++ return 0;
++
++}
++
++static int igb_set_vf_multicasts(struct igb_adapter *adapter,
++ u32 *msgbuf, u32 vf)
++{
++ int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
++ u16 *hash_list = (u16 *)&msgbuf[1];
++ struct vf_data_storage *vf_data = &adapter->vf_data[vf];
++ int i;
++
++ /* salt away the number of multicast addresses assigned
++ * to this VF for later use to restore when the PF multicast
++ * list changes
++ */
++ vf_data->num_vf_mc_hashes = n;
++
++ /* only up to 30 hash values supported */
++ if (n > 30)
++ n = 30;
++
++ /* store the hashes for later use */
++ for (i = 0; i < n; i++)
++ vf_data->vf_mc_hashes[i] = hash_list[i];
++
++ /* Flush and reset the mta with the new values */
++ igb_set_rx_mode(adapter->netdev);
++
++ return 0;
++}
++
++static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
++{
++ struct e1000_hw *hw = &adapter->hw;
++ struct vf_data_storage *vf_data;
++ int i, j;
++
++ for (i = 0; i < adapter->vfs_allocated_count; i++) {
++ u32 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
++ vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
++
++ vf_data = &adapter->vf_data[i];
++
++ if ((vf_data->num_vf_mc_hashes > 30) ||
++ (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
++ vmolr |= E1000_VMOLR_MPME;
++ } else if (vf_data->num_vf_mc_hashes) {
++ vmolr |= E1000_VMOLR_ROMPE;
++ for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
++ hw->mac.ops.mta_set(hw,
++ vf_data->vf_mc_hashes[j]);
++ }
++ E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
++ }
++}
++
++static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
++{
++ struct e1000_hw *hw = &adapter->hw;
++ u32 pool_mask, reg, vid;
++ u16 vlan_default;
++ int i;
++
++ pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
++
++ /* Find the vlan filter for this id */
++ for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
++ reg = E1000_READ_REG(hw, E1000_VLVF(i));
++
++ /* remove the vf from the pool */
++ reg &= ~pool_mask;
++
++ /* if pool is empty then remove entry from vfta */
++ if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
++ (reg & E1000_VLVF_VLANID_ENABLE)) {
++ vid = reg & E1000_VLVF_VLANID_MASK;
++ igb_vfta_set(hw, vid, FALSE);
++ reg = 0;
++ }
++
++ E1000_WRITE_REG(hw, E1000_VLVF(i), reg);
++ }
++
++ adapter->vf_data[vf].vlans_enabled = 0;
++
++ vlan_default = adapter->vf_data[vf].default_vf_vlan_id;
++ if (vlan_default)
++ igb_vlvf_set(adapter, vlan_default, true, vf);
++}
++
++s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
++{
++ struct e1000_hw *hw = &adapter->hw;
++ u32 reg, i;
++
++ /* The vlvf table only exists on 82576 hardware and newer */
++ if (hw->mac.type < e1000_82576)
++ return -1;
++
++ /* we only need to do this if VMDq is enabled */
++ if (!adapter->VMDQ_queues)
++ return -1;
++
++ /* Find the vlan filter for this id */
++ for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
++ reg = E1000_READ_REG(hw, E1000_VLVF(i));
++ if ((reg & E1000_VLVF_VLANID_ENABLE) &&
++ vid == (reg & E1000_VLVF_VLANID_MASK))
++ break;
++ }
++
++ if (add) {
++ if (i == E1000_VLVF_ARRAY_SIZE) {
++ /* Did not find a matching VLAN ID entry that was
++ * enabled. Search for a free filter entry, i.e.
++ * one without the enable bit set
++ */
++ for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
++ reg = E1000_READ_REG(hw, E1000_VLVF(i));
++ if (!(reg & E1000_VLVF_VLANID_ENABLE))
++ break;
++ }
++ }
++ if (i < E1000_VLVF_ARRAY_SIZE) {
++ /* Found an enabled/available entry */
++ reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
++
++ /* if !enabled we need to set this up in vfta */
++ if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
++ /* add VID to filter table */
++ igb_vfta_set(hw, vid, TRUE);
++ reg |= E1000_VLVF_VLANID_ENABLE;
++ }
++ reg &= ~E1000_VLVF_VLANID_MASK;
++ reg |= vid;
++ E1000_WRITE_REG(hw, E1000_VLVF(i), reg);
++
++ printk(KERN_INFO "VLAN Enabled for vf %d\n", vf);
++ /* do not modify RLPML for PF devices */
++ if (vf >= adapter->vfs_allocated_count)
++ return E1000_SUCCESS;
++
++ if (!adapter->vf_data[vf].vlans_enabled) {
++ u32 size;
++ reg = E1000_READ_REG(hw, E1000_VMOLR(vf));
++ size = reg & E1000_VMOLR_RLPML_MASK;
++ size += 4;
++ reg &= ~E1000_VMOLR_RLPML_MASK;
++ reg |= size;
++ E1000_WRITE_REG(hw, E1000_VMOLR(vf), reg);
++ }
++
++ adapter->vf_data[vf].vlans_enabled++;
++ return E1000_SUCCESS;
++ }
++ } else {
++ if (i < E1000_VLVF_ARRAY_SIZE) {
++ /* remove vf from the pool */
++ reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
++ /* if pool is empty then remove entry from vfta */
++ if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
++ reg = 0;
++ igb_vfta_set(hw, vid, FALSE);
++ }
++ E1000_WRITE_REG(hw, E1000_VLVF(i), reg);
++
++ /* do not modify RLPML for PF devices */
++ if (vf >= adapter->vfs_allocated_count)
++ return E1000_SUCCESS;
++
++ adapter->vf_data[vf].vlans_enabled--;
++ if (!adapter->vf_data[vf].vlans_enabled) {
++ u32 size;
++ reg = E1000_READ_REG(hw, E1000_VMOLR(vf));
++ size = reg & E1000_VMOLR_RLPML_MASK;
++ size -= 4;
++ reg &= ~E1000_VMOLR_RLPML_MASK;
++ reg |= size;
++ E1000_WRITE_REG(hw, E1000_VMOLR(vf), reg);
++ }
++ return E1000_SUCCESS;
++ }
++ }
++ return -1;
++}
++
++static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
++{
++ int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
++ int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
++
++ return igb_vlvf_set(adapter, vid, add, vf);
++}
++
++static inline void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
++{
++ /* clear all flags */
++ adapter->vf_data[vf].flags = 0;
++ adapter->vf_data[vf].last_nack = jiffies;
++
++ /* reset offloads to defaults */
++ igb_set_vmolr(adapter, vf);
++
++ /* reset vlans for device */
++ igb_clear_vf_vfta(adapter, vf);
++
++ /* reset multicast table array for vf */
++ adapter->vf_data[vf].num_vf_mc_hashes = 0;
++
++ /* Flush and reset the mta with the new values */
++ igb_set_rx_mode(adapter->netdev);
++}
++
++static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
++{
++ struct e1000_hw *hw = &adapter->hw;
++ unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
++ int rar_entry = hw->mac.rar_entry_count - (vf + 1);
++ u32 reg, msgbuf[3];
++ u8 *addr = (u8 *)(&msgbuf[1]);
++
++ /* process all the same items cleared in a function level reset */
++ igb_vf_reset_event(adapter, vf);
++
++ /* set vf mac address */
++ igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);
++
++ /* enable transmit and receive for vf */
++ reg = E1000_READ_REG(hw, E1000_VFTE);
++ E1000_WRITE_REG(hw, E1000_VFTE, reg | (1 << vf));
++ reg = E1000_READ_REG(hw, E1000_VFRE);
++ E1000_WRITE_REG(hw, E1000_VFRE, reg | (1 << vf));
++
++ adapter->vf_data[vf].flags = IGB_VF_FLAG_CTS;
++
++ /* reply to reset with ack and vf mac address */
++ msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
++ memcpy(addr, vf_mac, 6);
++ e1000_write_mbx(hw, msgbuf, 3, vf);
++}
++
++static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
++{
++ unsigned char *addr = (char *)&msg[1];
++ int err = -1;
++
++ if (is_valid_ether_addr(addr))
++ err = igb_set_vf_mac(adapter, vf, addr);
++
++ return err;
++}
++
++static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
++{
++ struct e1000_hw *hw = &adapter->hw;
++ struct vf_data_storage *vf_data = &adapter->vf_data[vf];
++ u32 msg = E1000_VT_MSGTYPE_NACK;
++
++ /* if device isn't clear to send it shouldn't be reading either */
++ if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
++ time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
++ e1000_write_mbx(hw, &msg, 1, vf);
++ vf_data->last_nack = jiffies;
++ }
++}
++
++static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
++{
++ u32 msgbuf[E1000_VFMAILBOX_SIZE];
++ struct e1000_hw *hw = &adapter->hw;
++ struct vf_data_storage *vf_data = &adapter->vf_data[vf];
++ s32 retval;
++
++ retval = e1000_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);
++
++ if (retval)
++ printk(KERN_ERR "Error receiving message from VF\n");
++
++ /* this is a message we already processed, do nothing */
++ if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
++ return;
++
++ /*
++ * until the vf completes a virtual function reset it should not be
++ * allowed to start any configuration.
++ */
++
++ if (msgbuf[0] == E1000_VF_RESET) {
++ igb_vf_reset_msg(adapter, vf);
++ return;
++ }
++
++ if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
++ msgbuf[0] = E1000_VT_MSGTYPE_NACK;
++ if (time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
++ e1000_write_mbx(hw, msgbuf, 1, vf);
++ vf_data->last_nack = jiffies;
++ }
++ return;
++ }
++
++ switch ((msgbuf[0] & 0xFFFF)) {
++ case E1000_VF_SET_MAC_ADDR:
++#ifndef IGB_DISABLE_VF_MAC_SET
++ retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
++#else
++ retval = -EINVAL;
++#endif
++ break;
++ case E1000_VF_SET_PROMISC:
++ retval = igb_set_vf_promisc(adapter, msgbuf, vf);
++ break;
++ case E1000_VF_SET_MULTICAST:
++ retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
++ break;
++ case E1000_VF_SET_LPE:
++ retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
++ break;
++ case E1000_VF_SET_VLAN:
++ retval = igb_set_vf_vlan(adapter, msgbuf, vf);
++ break;
++ default:
++ printk(KERN_ERR "Unhandled Msg %8.8x\n", msgbuf[0]);
++ retval = -E1000_ERR_MBX;
++ break;
++ }
++
++ /* notify the VF of the results of what it sent us */
++ if (retval)
++ msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
++ else
++ msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
++
++ msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
++
++ e1000_write_mbx(hw, msgbuf, 1, vf);
++}
++
++static void igb_msg_task(struct igb_adapter *adapter)
++{
++ struct e1000_hw *hw = &adapter->hw;
++ u32 vf;
++
++ for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
++ /* process any reset requests */
++ if (!e1000_check_for_rst(hw, vf))
++ igb_vf_reset_event(adapter, vf);
++
++ /* process any messages pending */
++ if (!e1000_check_for_msg(hw, vf))
++ igb_rcv_msg_from_vf(adapter, vf);
++
++ /* process any acks */
++ if (!e1000_check_for_ack(hw, vf))
++ igb_rcv_ack_from_vf(adapter, vf);
++ }
++}
++
++/**
++ * igb_set_uta - Set unicast filter table address
++ * @adapter: board private structure
++ *
++ * The unicast table address is a register array of 32-bit registers.
++ * The table is meant to be used in a way similar to how the MTA is used,
++ * however due to certain limitations in the hardware it is necessary to
++ * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
++ * enable bit to allow vlan tag stripping when promiscuous mode is enabled
++ **/
++static void igb_set_uta(struct igb_adapter *adapter)
++{
++ struct e1000_hw *hw = &adapter->hw;
++ int i;
++
++ /* The UTA table only exists on 82576 hardware and newer */
++ if (hw->mac.type < e1000_82576)
++ return;
++
++ /* we only need to do this if VMDq is enabled */
++ if (!adapter->VMDQ_queues)
++ return;
++
++ for (i = 0; i < hw->mac.uta_reg_count; i++)
++ E1000_WRITE_REG_ARRAY(hw, E1000_UTA, i, ~0);
++}
++
++/**
++ * igb_intr_msi - Interrupt Handler
++ * @irq: interrupt number
++ * @data: pointer to a network interface device structure
++ **/
++static irqreturn_t igb_intr_msi(int irq, void *data)
++{
++ struct igb_adapter *adapter = data;
++ struct igb_q_vector *q_vector = adapter->q_vector[0];
++ struct e1000_hw *hw = &adapter->hw;
++ /* read ICR disables interrupts using IAM */
++ u32 icr = E1000_READ_REG(hw, E1000_ICR);
++
++ igb_write_itr(q_vector);
++
++ if (icr & E1000_ICR_DOUTSYNC) {
++ /* HW is reporting DMA is out of sync */
++ adapter->stats.doosync++;
++ }
++
++ if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
++ hw->mac.get_link_status = 1;
++ if (!test_bit(__IGB_DOWN, &adapter->state))
++ mod_timer(&adapter->watchdog_timer, jiffies + 1);
++ }
++
++ napi_schedule(&q_vector->napi);
++
++ return IRQ_HANDLED;
++}
++
++/**
++ * igb_intr - Legacy Interrupt Handler
++ * @irq: interrupt number
++ * @data: pointer to a network interface device structure
++ **/
++static irqreturn_t igb_intr(int irq, void *data)
++{
++ struct igb_adapter *adapter = data;
++ struct igb_q_vector *q_vector = adapter->q_vector[0];
++ struct e1000_hw *hw = &adapter->hw;
++ /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
++ * need for the IMC write */
++ u32 icr = E1000_READ_REG(hw, E1000_ICR);
++ if (!icr)
++ return IRQ_NONE; /* Not our interrupt */
++
++ igb_write_itr(q_vector);
++
++ /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
++ * not set, then the adapter didn't send an interrupt */
++ if (!(icr & E1000_ICR_INT_ASSERTED))
++ return IRQ_NONE;
++
++ if (icr & E1000_ICR_DOUTSYNC) {
++ /* HW is reporting DMA is out of sync */
++ adapter->stats.doosync++;
++ }
++
++ if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
++ hw->mac.get_link_status = 1;
++ /* guard against interrupt when we're going down */
++ if (!test_bit(__IGB_DOWN, &adapter->state))
++ mod_timer(&adapter->watchdog_timer, jiffies + 1);
++ }
++
++ napi_schedule(&q_vector->napi);
++
++ return IRQ_HANDLED;
++}
++
++static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector)
++{
++ struct igb_adapter *adapter = q_vector->adapter;
++ struct e1000_hw *hw = &adapter->hw;
++
++ if (adapter->itr_setting & 3) {
++ if (!adapter->msix_entries)
++ igb_set_itr(adapter);
++ else
++ igb_update_ring_itr(q_vector);
++ }
++
++ if (!test_bit(__IGB_DOWN, &adapter->state)) {
++ if (adapter->msix_entries)
++ E1000_WRITE_REG(hw, E1000_EIMS, q_vector->eims_value);
++ else
++ igb_irq_enable(adapter);
++ }
++}
++
++/**
++ * igb_poll - NAPI Rx polling callback
++ * @napi: napi polling structure
++ * @budget: count of how many packets we should handle
++ **/
++static int igb_poll(struct napi_struct *napi, int budget)
++{
++ struct igb_q_vector *q_vector = container_of(napi, struct igb_q_vector, napi);
++ int tx_clean_complete = 1, work_done = 0;
++
++#ifdef IGB_DCA
++ if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
++ igb_update_dca(q_vector);
++#endif
++ if (q_vector->tx_ring)
++ tx_clean_complete = igb_clean_tx_irq(q_vector);
++
++ if (q_vector->rx_ring)
++ igb_clean_rx_irq_adv(q_vector, &work_done, budget);
++
++ if (!tx_clean_complete)
++ work_done = budget;
++
++#ifndef HAVE_NETDEV_NAPI_LIST
++ /* if netdev is disabled we need to stop polling */
++ if (!netif_running(q_vector->adapter->netdev))
++ work_done = 0;
++
++#endif
++ /* If not enough Rx work done, exit the polling mode */
++ if (work_done < budget) {
++ napi_complete(napi);
++ igb_ring_irq_enable(q_vector);
++ }
++
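++ /* Returning less than the full budget after napi_complete() lets the
++ * NAPI core stop polling this vector; returning the full budget keeps
++ * it on the poll list so cleanup continues without another interrupt.
++ */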
++ return work_done;
++}
++
++#ifdef SIOCSHWTSTAMP
++/**
++ * igb_systim_to_hwtstamp - convert system time value to hw timestamp
++ * @adapter: board private structure
++ * @shhwtstamps: timestamp structure to update
++ * @regval: unsigned 64bit system time value.
++ *
++ * We need to convert the system time value stored in the RX/TXSTMP registers
++ * into a hwtstamp which can be used by the upper level timestamping functions
++ */
++static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
++ struct skb_shared_hwtstamps *shhwtstamps,
++ u64 regval)
++{
++ u64 ns;
++
++ ns = timecounter_cyc2time(&adapter->clock, regval);
++ timecompare_update(&adapter->compare, ns);
++ memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
++ shhwtstamps->hwtstamp = ns_to_ktime(ns);
++ shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
++}
++
++/**
++ * igb_tx_hwtstamp - utility function which checks for TX time stamp
++ * @adapter: board private structure
++ * @skb: packet that was just sent
++ *
++ * If we were asked to do hardware stamping and such a time stamp is
++ * available, then it must have been for this skb here because we only
++ * allow only one such packet into the queue.
++ */
++static void igb_tx_hwtstamp(struct igb_adapter *adapter, struct sk_buff *skb)
++{
++ union skb_shared_tx *shtx = skb_tx(skb);
++ struct e1000_hw *hw = &adapter->hw;
++ struct skb_shared_hwtstamps shhwtstamps;
++ u64 regval;
++
++ /* if skb does not support hw timestamp or TX stamp not valid exit */
++ if (likely(!shtx->hardware) ||
++ !(E1000_READ_REG(hw, E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
++ return;
++
++ regval = E1000_READ_REG(hw, E1000_TXSTMPL);
++ regval |= (u64)E1000_READ_REG(hw, E1000_TXSTMPH) << 32;
++
++ igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
++ skb_tstamp_tx(skb, &shhwtstamps);
++}
++
++#endif
++/**
++ * igb_clean_tx_irq - Reclaim resources after transmit completes
++ * @q_vector: pointer to q_vector containing needed info
++ * returns TRUE if ring is completely cleaned
++ **/
++static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
++{
++ struct igb_adapter *adapter = q_vector->adapter;
++ struct igb_ring *tx_ring = q_vector->tx_ring;
++ struct net_device *netdev = adapter->netdev;
++ struct e1000_hw *hw = &adapter->hw;
++ struct igb_buffer *buffer_info;
++ struct sk_buff *skb;
++ union e1000_adv_tx_desc *tx_desc, *eop_desc;
++ unsigned int total_bytes = 0, total_packets = 0;
++ unsigned int i, eop, count = 0;
++ bool cleaned = false;
++
++ i = tx_ring->next_to_clean;
++ eop = tx_ring->buffer_info[i].next_to_watch;
++ eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
++
++ while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
++ (count < tx_ring->count)) {
++ for (cleaned = false; !cleaned; count++) {
++ tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
++ buffer_info = &tx_ring->buffer_info[i];
++ cleaned = (i == eop);
++ skb = buffer_info->skb;
++
++ if (skb) {
++#ifdef NETIF_F_TSO
++ unsigned int segs, bytecount;
++ /* gso_segs is currently only valid for tcp */
++ segs = skb_shinfo(skb)->gso_segs ?: 1;
++ /* multiply data chunks by size of headers */
++ bytecount = ((segs - 1) * skb_headlen(skb)) +
++ skb->len;
++ total_packets += segs;
++ total_bytes += bytecount;
++#else
++ total_packets++;
++ total_bytes += skb->len;
++#endif
++#ifdef SIOCSHWTSTAMP
++ igb_tx_hwtstamp(adapter, skb);
++#endif
++ }
++
++ igb_unmap_and_free_tx_resource(tx_ring->pdev,
++ buffer_info);
++ tx_desc->wb.status = 0;
++
++ i++;
++ if (i == tx_ring->count)
++ i = 0;
++ }
++ eop = tx_ring->buffer_info[i].next_to_watch;
++ eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
++ }
++
++ tx_ring->next_to_clean = i;
++
++ if (unlikely(count &&
++ netif_carrier_ok(netdev) &&
++ IGB_DESC_UNUSED(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
++ /* Make sure that anybody stopping the queue after this
++ * sees the new next_to_clean.
++ */
++ smp_mb();
++ if (netif_is_multiqueue(netdev)) {
++ if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
++ !(test_bit(__IGB_DOWN, &adapter->state))) {
++ netif_wake_subqueue(netdev, tx_ring->queue_index);
++ ++tx_ring->restart_queue;
++ }
++ } else {
++ if (netif_queue_stopped(netdev) &&
++ !(test_bit(__IGB_DOWN, &adapter->state))) {
++ netif_wake_queue(netdev);
++ ++tx_ring->restart_queue;
++ }
++ }
++ }
++
++ if (tx_ring->detect_tx_hung) {
++ /* Detect a transmit hang in hardware, this serializes the
++ * check with the clearing of time_stamp and movement of i */
++ tx_ring->detect_tx_hung = FALSE;
++ if (tx_ring->buffer_info[i].time_stamp &&
++ time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
++ (adapter->tx_timeout_factor * HZ))
++ && !(E1000_READ_REG(hw, E1000_STATUS) &
++ E1000_STATUS_TXOFF)) {
++
++ /* detected Tx unit hang */
++ DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
++ " Tx Queue <%d>\n"
++ " TDH <%x>\n"
++ " TDT <%x>\n"
++ " next_to_use <%x>\n"
++ " next_to_clean <%x>\n"
++ "buffer_info[next_to_clean]\n"
++ " time_stamp <%lx>\n"
++ " next_to_watch <%x>\n"
++ " jiffies <%lx>\n"
++ " desc.status <%x>\n",
++ tx_ring->queue_index,
++ readl(tx_ring->head),
++ readl(tx_ring->tail),
++ tx_ring->next_to_use,
++ tx_ring->next_to_clean,
++ tx_ring->buffer_info[eop].time_stamp,
++ eop,
++ jiffies,
++ eop_desc->wb.status);
++ if (netif_is_multiqueue(netdev))
++ netif_stop_subqueue(netdev,
++ tx_ring->queue_index);
++ else
++ netif_stop_queue(netdev);
++ }
++ }
++ tx_ring->total_bytes += total_bytes;
++ tx_ring->total_packets += total_packets;
++ tx_ring->stats.bytes += total_bytes;
++ tx_ring->stats.packets += total_packets;
++ adapter->net_stats.tx_bytes += total_bytes;
++ adapter->net_stats.tx_packets += total_packets;
++ return (count < tx_ring->count);
++}
++
++#ifdef IGB_LRO
++ /**
++ * igb_get_skb_hdr - helper function for LRO header processing
++ * @skb: pointer to sk_buff to be added to LRO packet
++ * @iphdr: pointer to ip header structure
++ * @tcph: pointer to tcp header structure
++ * @hdr_flags: pointer to header flags
++ * @priv: pointer to the receive descriptor for the current sk_buff
++ **/
++static int igb_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph,
++ u64 *hdr_flags, void *priv)
++{
++ union e1000_adv_rx_desc *rx_desc = priv;
++ u16 pkt_type = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info &
++ (E1000_RXDADV_PKTTYPE_IPV4 | E1000_RXDADV_PKTTYPE_TCP);
++
++ /* Verify that this is a valid IPv4 TCP packet */
++ if (pkt_type != (E1000_RXDADV_PKTTYPE_IPV4 |
++ E1000_RXDADV_PKTTYPE_TCP))
++ return -1;
++
++ /* Set network headers */
++ skb_reset_network_header(skb);
++ skb_set_transport_header(skb, ip_hdrlen(skb));
++ *iphdr = ip_hdr(skb);
++ *tcph = tcp_hdr(skb);
++ *hdr_flags = LRO_IPV4 | LRO_TCP;
++
++ return 0;
++
++}
++
++#endif /* IGB_LRO */
++/**
++ * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
++ * @rx_ring: ring to refill
++ * @cleaned_count: number of buffers to replace
++ **/
++int igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
++{
++ struct net_device *netdev = pci_get_drvdata(rx_ring->pdev);
++ union e1000_adv_rx_desc *rx_desc;
++ struct igb_buffer *buffer_info;
++ struct sk_buff *skb;
++ unsigned int i;
++ int bufsz, err = 0;
++
++ i = rx_ring->next_to_use;
++ buffer_info = &rx_ring->buffer_info[i];
++
++#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
++ if (rx_ring->rx_ps_hdr_size)
++ bufsz = rx_ring->rx_ps_hdr_size;
++ else
++ bufsz = rx_ring->rx_buffer_len;
++#else
++ bufsz = rx_ring->rx_buffer_len;
++#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
++
++ while (cleaned_count--) {
++ rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
++
++#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
++ if (rx_ring->rx_ps_hdr_size && !buffer_info->page_dma) {
++ if (!buffer_info->page) {
++ buffer_info->page = netdev_alloc_page(netdev);
++ if (!buffer_info->page) {
++ err = -ENOMEM;
++ goto no_buffers;
++ }
++ buffer_info->page_offset = 0;
++ } else {
++ buffer_info->page_offset ^= PAGE_SIZE / 2;
++ }
++ buffer_info->page_dma =
++ pci_map_page(rx_ring->pdev, buffer_info->page,
++ buffer_info->page_offset,
++ rx_ring->rx_buffer_len,
++ PCI_DMA_FROMDEVICE);
++ }
++#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
++
++ if (!buffer_info->skb) {
++ skb = netdev_alloc_skb(netdev, bufsz + NET_IP_ALIGN);
++ if (!skb) {
++ err = -ENOMEM;
++ goto no_buffers;
++ }
++
++ /* Make buffer alignment 2 beyond a 16 byte boundary;
++ * this will result in a 16 byte aligned IP header after
++ * the 14 byte MAC header is removed
++ */
++ skb_reserve(skb, NET_IP_ALIGN);
++
++ buffer_info->skb = skb;
++ }
++ if (!buffer_info->dma)
++ buffer_info->dma = pci_map_single(rx_ring->pdev,
++ buffer_info->skb->data,
++ bufsz,
++ PCI_DMA_FROMDEVICE);
++ /* Refresh the desc even if buffer_addrs didn't change because
++ * each write-back erases this info. */
++#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
++ if (rx_ring->rx_ps_hdr_size) {
++ rx_desc->read.pkt_addr =
++ cpu_to_le64(buffer_info->page_dma);
++ rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
++ } else {
++ rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
++ rx_desc->read.hdr_addr = 0;
++ }
++#else
++ rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
++ rx_desc->read.hdr_addr = 0;
++#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
++
++ i++;
++ if (i == rx_ring->count)
++ i = 0;
++ buffer_info = &rx_ring->buffer_info[i];
++ }
++
++no_buffers:
++ if (rx_ring->next_to_use != i) {
++ rx_ring->next_to_use = i;
++ if (i == 0)
++ i = (rx_ring->count - 1);
++ else
++ i--;
++
++ /* Force memory writes to complete before letting h/w
++ * know there are new descriptors to fetch. (Only
++ * applicable for weak-ordered memory model archs,
++ * such as IA-64). */
++ wmb();
++ writel(i, rx_ring->tail);
++ }
++
++ return err;
++}
++
++/**
++ * igb_receive_skb - helper function to handle rx indications
++ * @ring: pointer to receive ring receiving this packet
++ * @status: descriptor status field as written by hardware
++ * @rx_desc: receive descriptor containing vlan and type information.
++ * @skb: pointer to sk_buff to be indicated to stack
++ **/
++static void igb_receive_skb(struct igb_ring *ring, u8 status,
++ union e1000_adv_rx_desc *rx_desc,
++ struct sk_buff *skb)
++{
++ struct igb_q_vector *q_vector = ring->q_vector;
++ struct igb_adapter *adapter = q_vector->adapter;
++ bool vlan_extracted = (adapter->vlgrp && (status & E1000_RXD_STAT_VP));
++
++#ifdef IGB_LRO
++ if (adapter->netdev->features & NETIF_F_LRO &&
++ skb->ip_summed == CHECKSUM_UNNECESSARY) {
++ if (vlan_extracted)
++ lro_vlan_hwaccel_receive_skb(&ring->lro_mgr, skb,
++ adapter->vlgrp,
++ le16_to_cpu(rx_desc->wb.upper.vlan),
++ rx_desc);
++ else
++ lro_receive_skb(&ring->lro_mgr, skb, rx_desc);
++ ring->lro_used = TRUE;
++ } else {
++#endif
++ if (vlan_extracted)
++ vlan_gro_receive(&q_vector->napi, adapter->vlgrp,
++ le16_to_cpu(rx_desc->wb.upper.vlan),
++ skb);
++ else
++ napi_gro_receive(&q_vector->napi, skb);
++#ifdef IGB_LRO
++ }
++#endif
++}
++
++static inline void igb_rx_checksum_adv(struct igb_ring *ring,
++ u32 status_err, struct sk_buff *skb)
++{
++ struct igb_adapter *adapter = ring->q_vector->adapter;
++ skb->ip_summed = CHECKSUM_NONE;
++
++ /* Ignore Checksum bit is set or checksum is disabled through ethtool */
++ if (!ring->rx_csum || (status_err & E1000_RXD_STAT_IXSM))
++ return;
++
++ /* TCP/UDP checksum error bit is set */
++ if (status_err &
++ (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
++ /*
++ * work around errata with sctp packets where the TCPE aka
++ * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
++ * packets, (aka let the stack check the crc32c)
++ */
++ if (!((adapter->hw.mac.type >= e1000_82576) &&
++ (skb->len == 60)))
++ ring->hw_csum_err++;
++
++ /* let the stack verify checksum errors */
++ return;
++ }
++ /* It must be a TCP or UDP packet with a valid checksum */
++ if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
++
++ ring->hw_csum_good++;
++}
++
++#ifdef SIOCSHWTSTAMP
++static inline void igb_rx_hwtstamp(struct igb_adapter *adapter, u32 staterr,
++ struct sk_buff *skb)
++{
++ struct e1000_hw *hw = &adapter->hw;
++ u64 regval;
++
++ /*
++ * If this bit is set, then the RX registers contain the time stamp. No
++ * other packet will be time stamped until we read these registers, so
++ * read the registers to make them available again. Because only one
++ * packet can be time stamped at a time, we know that the register
++ * values must belong to this one here and therefore we don't need to
++ * compare any of the additional attributes stored for it.
++ *
++ * If nothing went wrong, then it should have a skb_shared_tx that we
++ * can turn into a skb_shared_hwtstamps.
++ */
++ if (likely(!(staterr & E1000_RXDADV_STAT_TS)))
++ return;
++ if (!(E1000_READ_REG(hw, E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
++ return;
++
++ regval = E1000_READ_REG(hw, E1000_RXSTMPL);
++ regval |= (u64)E1000_READ_REG(hw, E1000_RXSTMPH) << 32;
++
++ igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
++}
++#endif
++#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
++static inline u16 igb_get_hlen(struct igb_ring *rx_ring,
++ union e1000_adv_rx_desc *rx_desc)
++{
++ /* HW will not DMA in data larger than the given buffer, even if it
++ * parses the (NFS, of course) header to be larger. In that case, it
++ * fills the header buffer and spills the rest into the page.
++ */
++ u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info) &
++ E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
++ if (hlen > rx_ring->rx_ps_hdr_size)
++ hlen = rx_ring->rx_ps_hdr_size;
++ return hlen;
++}
++
++#endif
++static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
++ int *work_done, int budget)
++{
++ struct igb_adapter *adapter = q_vector->adapter;
++ struct net_device *netdev = adapter->netdev;
++ struct igb_ring *rx_ring = q_vector->rx_ring;
++ struct pci_dev *pdev = rx_ring->pdev;
++ union e1000_adv_rx_desc *rx_desc, *next_rxd;
++ struct igb_buffer *buffer_info, *next_buffer;
++ struct sk_buff *skb;
++ bool cleaned = FALSE;
++ int cleaned_count = 0;
++ unsigned int total_bytes = 0, total_packets = 0;
++ unsigned int i;
++ u32 staterr;
++ u16 length;
++
++ i = rx_ring->next_to_clean;
++ buffer_info = &rx_ring->buffer_info[i];
++ rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
++ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
++
++ while (staterr & E1000_RXD_STAT_DD) {
++ if (*work_done >= budget)
++ break;
++ (*work_done)++;
++
++ skb = buffer_info->skb;
++ prefetch(skb->data - NET_IP_ALIGN);
++ buffer_info->skb = NULL;
++
++ i++;
++ if (i == rx_ring->count)
++ i = 0;
++
++ next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
++ prefetch(next_rxd);
++ next_buffer = &rx_ring->buffer_info[i];
++
++ length = le16_to_cpu(rx_desc->wb.upper.length);
++ cleaned = TRUE;
++ cleaned_count++;
++
++#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
++ pci_unmap_single(pdev, buffer_info->dma,
++ rx_ring->rx_buffer_len,
++ PCI_DMA_FROMDEVICE);
++ buffer_info->dma = 0;
++ skb_put(skb, length);
++
++#else
++ if (!rx_ring->rx_ps_hdr_size) {
++ pci_unmap_single(pdev, buffer_info->dma,
++ rx_ring->rx_buffer_len,
++ PCI_DMA_FROMDEVICE);
++ buffer_info->dma = 0;
++ skb_put(skb, length);
++ goto send_up;
++ }
++
++ if (buffer_info->dma) {
++ u16 hlen = igb_get_hlen(rx_ring, rx_desc);
++ pci_unmap_single(pdev, buffer_info->dma,
++ rx_ring->rx_ps_hdr_size,
++ PCI_DMA_FROMDEVICE);
++ buffer_info->dma = 0;
++ skb_put(skb, hlen);
++ }
++
++ if (length) {
++ pci_unmap_page(pdev, buffer_info->page_dma,
++ rx_ring->rx_buffer_len,
++ PCI_DMA_FROMDEVICE);
++ buffer_info->page_dma = 0;
++
++ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
++ buffer_info->page,
++ buffer_info->page_offset,
++ length);
++
++ if (page_count(buffer_info->page) != 1)
++ buffer_info->page = NULL;
++ else
++ get_page(buffer_info->page);
++
++ skb->len += length;
++ skb->data_len += length;
++ skb->truesize += length;
++ }
++
++ if (!(staterr & E1000_RXD_STAT_EOP)) {
++ buffer_info->skb = next_buffer->skb;
++ buffer_info->dma = next_buffer->dma;
++ next_buffer->skb = skb;
++ next_buffer->dma = 0;
++ goto next_desc;
++ }
++send_up:
++#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
++ if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
++ dev_kfree_skb_irq(skb);
++ goto next_desc;
++ }
++
++#ifdef SIOCSHWTSTAMP
++ igb_rx_hwtstamp(adapter, staterr, skb);
++#endif
++ total_bytes += skb->len;
++ total_packets++;
++
++ igb_rx_checksum_adv(rx_ring, staterr, skb);
++
++#ifndef ETH_TYPE_TRANS_SETS_DEV
++ skb->dev = netdev;
++#endif
++ skb->protocol = eth_type_trans(skb, netdev);
++
++ igb_receive_skb(rx_ring, staterr, rx_desc, skb);
++
++ netdev->last_rx = jiffies;
++
++next_desc:
++ rx_desc->wb.upper.status_error = 0;
++
++ /* return some buffers to hardware, one at a time is too slow */
++ if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
++ if (igb_alloc_rx_buffers_adv(rx_ring, cleaned_count))
++ adapter->alloc_rx_buff_failed++;
++ cleaned_count = 0;
++ }
++
++ /* use prefetched values */
++ rx_desc = next_rxd;
++ buffer_info = next_buffer;
++ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
++ }
++
++ rx_ring->next_to_clean = i;
++ cleaned_count = IGB_DESC_UNUSED(rx_ring);
++
++#ifdef IGB_LRO
++ if (rx_ring->lro_used) {
++ lro_flush_all(&rx_ring->lro_mgr);
++ rx_ring->lro_used = FALSE;
++ }
++#endif
++
++ if (cleaned_count)
++ if (igb_alloc_rx_buffers_adv(rx_ring, cleaned_count))
++ adapter->alloc_rx_buff_failed++;
++
++ rx_ring->total_packets += total_packets;
++ rx_ring->total_bytes += total_bytes;
++ rx_ring->stats.packets += total_packets;
++ rx_ring->stats.bytes += total_bytes;
++ adapter->net_stats.rx_bytes += total_bytes;
++ adapter->net_stats.rx_packets += total_packets;
++ return cleaned;
++}
++
++#ifdef SIOCGMIIPHY
++/**
++ * igb_mii_ioctl -
++ * @netdev:
++ * @ifreq:
++ * @cmd:
++ **/
++static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
++{
++ struct igb_adapter *adapter = netdev_priv(netdev);
++ struct mii_ioctl_data *data = if_mii(ifr);
++
++ if (adapter->hw.phy.media_type != e1000_media_type_copper)
++ return -EOPNOTSUPP;
++
++ switch (cmd) {
++ case SIOCGMIIPHY:
++ data->phy_id = adapter->hw.phy.addr;
++ break;
++ case SIOCGMIIREG:
++ if (!capable(CAP_NET_ADMIN))
++ return -EPERM;
++ if (e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
++ &data->val_out))
++ return -EIO;
++ break;
++ case SIOCSMIIREG:
++ default:
++ return -EOPNOTSUPP;
++ }
++ return E1000_SUCCESS;
++}
++
++#endif
++#ifdef SIOCSHWTSTAMP
++/**
++ * igb_hwtstamp_ioctl - control hardware time stamping
++ * @netdev:
++ * @ifreq:
++ * @cmd:
++ *
++ * Outgoing time stamping can be enabled and disabled. Play nice and
++ * disable it when requested, although it shouldn't cause any overhead
++ * when no packet needs it. At most one packet in the queue may be
++ * marked for time stamping, otherwise it would be impossible to tell
++ * for sure to which packet the hardware time stamp belongs.
++ *
++ * Incoming time stamping has to be configured via the hardware
++ * filters. Not all combinations are supported, in particular event
++ * type has to be specified. Matching the kind of event packet is
++ * not supported, with the exception of "all V2 events regardless of
++ * layer 2 or 4".
++ *
++ **/
++static int igb_hwtstamp_ioctl(struct net_device *netdev,
++ struct ifreq *ifr, int cmd)
++{
++ struct igb_adapter *adapter = netdev_priv(netdev);
++ struct e1000_hw *hw = &adapter->hw;
++ struct hwtstamp_config config;
++ u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
++ u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
++ u32 tsync_rx_cfg = 0;
++ bool is_l4 = false;
++ bool is_l2 = false;
++ u32 regval;
++
++ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
++ return -EFAULT;
++
++ /* reserved for future extensions */
++ if (config.flags)
++ return -EINVAL;
++
++ switch (config.tx_type) {
++ case HWTSTAMP_TX_OFF:
++ tsync_tx_ctl = 0;
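++ /* fall through - HWTSTAMP_TX_ON leaves tsync_tx_ctl at its enabled
++ * default from above */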
++ case HWTSTAMP_TX_ON:
++ break;
++ default:
++ return -ERANGE;
++ }
++
++ switch (config.rx_filter) {
++ case HWTSTAMP_FILTER_NONE:
++ tsync_rx_ctl = 0;
++ break;
++ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
++ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
++ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
++ case HWTSTAMP_FILTER_ALL:
++ /*
++ * register TSYNCRXCFG must be set, therefore it is not
++ * possible to time stamp both Sync and Delay_Req messages
++ * => fall back to time stamping all packets
++ */
++ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
++ config.rx_filter = HWTSTAMP_FILTER_ALL;
++ break;
++ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
++ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
++ tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
++ is_l4 = true;
++ break;
++ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
++ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
++ tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
++ is_l4 = true;
++ break;
++ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
++ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
++ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
++ tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
++ is_l2 = true;
++ is_l4 = true;
++ config.rx_filter = HWTSTAMP_FILTER_SOME;
++ break;
++ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
++ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
++ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
++ tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
++ is_l2 = true;
++ is_l4 = true;
++ config.rx_filter = HWTSTAMP_FILTER_SOME;
++ break;
++ case HWTSTAMP_FILTER_PTP_V2_EVENT:
++ case HWTSTAMP_FILTER_PTP_V2_SYNC:
++ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
++ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
++ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
++ is_l2 = true;
++ break;
++ default:
++ return -ERANGE;
++ }
++
++ if (hw->mac.type == e1000_82575) {
++ if (tsync_rx_ctl | tsync_tx_ctl)
++ return -EINVAL;
++ return 0;
++ }
++
++ /* enable/disable TX */
++ regval = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
++ regval &= ~E1000_TSYNCTXCTL_ENABLED;
++ regval |= tsync_tx_ctl;
++ E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, regval);
++
++ /* enable/disable RX */
++ regval = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
++ regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
++ regval |= tsync_rx_ctl;
++ E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, regval);
++
++ /* define which PTP packets are time stamped */
++ E1000_WRITE_REG(hw, E1000_TSYNCRXCFG, tsync_rx_cfg);
++
++ /* define ethertype filter for timestamped packets */
++ if (is_l2)
++ E1000_WRITE_REG(hw, E1000_ETQF(3),
++ (E1000_ETQF_FILTER_ENABLE | /* enable filter */
++ E1000_ETQF_1588 | /* enable timestamping */
++ ETH_P_1588)); /* 1588 eth protocol type */
++ else
++ E1000_WRITE_REG(hw, E1000_ETQF(3), 0);
++
++#define PTP_PORT 319
++ /* L4 Queue Filter[3]: filter by destination port and protocol */
++ if (is_l4) {
++ u32 ftqf = (IPPROTO_UDP /* UDP */
++ | E1000_FTQF_VF_BP /* VF not compared */
++ | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
++ | E1000_FTQF_MASK); /* mask all inputs */
++ ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */
++
++ E1000_WRITE_REG(hw, E1000_IMIR(3), htons(PTP_PORT));
++ E1000_WRITE_REG(hw, E1000_IMIREXT(3),
++ (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
++ if (hw->mac.type == e1000_82576) {
++ /* enable source port check */
++ E1000_WRITE_REG(hw, E1000_SPQF(3), htons(PTP_PORT));
++ ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
++ }
++ E1000_WRITE_REG(hw, E1000_FTQF(3), ftqf);
++ } else {
++ E1000_WRITE_REG(hw, E1000_FTQF(3), E1000_FTQF_MASK);
++ }
++ E1000_WRITE_FLUSH(hw);
++
++ adapter->hwtstamp_config = config;
++
++ /* clear TX/RX time stamp registers, just to be sure */
++ regval = E1000_READ_REG(hw, E1000_TXSTMPH);
++ regval = E1000_READ_REG(hw, E1000_RXSTMPH);
++
++ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
++ -EFAULT : 0;
++}
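++
++/*
++ * Illustrative userspace sketch (not part of the driver): hardware time
++ * stamping is requested through the SIOCSHWTSTAMP ioctl with a
++ * struct hwtstamp_config, e.g.
++ *
++ *	struct hwtstamp_config cfg = {
++ *		.tx_type   = HWTSTAMP_TX_ON,
++ *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC,
++ *	};
++ *	struct ifreq ifr;
++ *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
++ *	ifr.ifr_data = (void *)&cfg;
++ *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
++ *
++ * The handler above copies the config in, programs TSYNCTXCTL/TSYNCRXCTL
++ * and the ETQF/FTQF filters, and copies the possibly adjusted config back
++ * to userspace.
++ */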
++
++#endif
++/**
++ * igb_ioctl -
++ * @netdev:
++ * @ifreq:
++ * @cmd:
++ **/
++static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
++{
++ switch (cmd) {
++#ifdef SIOCGMIIPHY
++ case SIOCGMIIPHY:
++ case SIOCGMIIREG:
++ case SIOCSMIIREG:
++ return igb_mii_ioctl(netdev, ifr, cmd);
++#endif
++#ifdef SIOCSHWTSTAMP
++ case SIOCSHWTSTAMP:
++ return igb_hwtstamp_ioctl(netdev, ifr, cmd);
++#endif
++#ifdef ETHTOOL_OPS_COMPAT
++ case SIOCETHTOOL:
++ return ethtool_ioctl(ifr);
++#endif
++ default:
++ return -EOPNOTSUPP;
++ }
++}
++
++s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
++{
++ struct igb_adapter *adapter = hw->back;
++ u16 cap_offset;
++
++ cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
++ if (!cap_offset)
++ return -E1000_ERR_CONFIG;
++
++ pci_read_config_word(adapter->pdev, cap_offset + reg, value);
++
++ return E1000_SUCCESS;
++}
++
++s32 e1000_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
++{
++ struct igb_adapter *adapter = hw->back;
++ u16 cap_offset;
++
++ cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
++ if (!cap_offset)
++ return -E1000_ERR_CONFIG;
++
++ pci_write_config_word(adapter->pdev, cap_offset + reg, *value);
++
++ return E1000_SUCCESS;
++}
++
++static void igb_vlan_rx_register(struct net_device *netdev,
++ struct vlan_group *grp)
++{
++ struct igb_adapter *adapter = netdev_priv(netdev);
++ struct e1000_hw *hw = &adapter->hw;
++ u32 ctrl, rctl;
++
++ igb_irq_disable(adapter);
++ adapter->vlgrp = grp;
++
++ if (grp) {
++ /* enable VLAN tag insert/strip */
++ ctrl = E1000_READ_REG(hw, E1000_CTRL);
++ ctrl |= E1000_CTRL_VME;
++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
++
++ /* Disable CFI check */
++ rctl = E1000_READ_REG(hw, E1000_RCTL);
++ rctl &= ~E1000_RCTL_CFIEN;
++ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
++ } else {
++ /* disable VLAN tag insert/strip */
++ ctrl = E1000_READ_REG(hw, E1000_CTRL);
++ ctrl &= ~E1000_CTRL_VME;
++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
++ }
++
++ igb_set_rlpml(adapter);
++
++ if (!test_bit(__IGB_DOWN, &adapter->state))
++ igb_irq_enable(adapter);
++}
++
++static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
++{
++ struct igb_adapter *adapter = netdev_priv(netdev);
++ struct e1000_hw *hw = &adapter->hw;
++ int pf_id = adapter->vfs_allocated_count;
++#ifndef HAVE_NETDEV_VLAN_FEATURES
++ struct net_device *v_netdev;
++#endif
++
++ /* attempt to add filter to vlvf array */
++ igb_vlvf_set(adapter, vid, TRUE, pf_id);
++
++ /* add the filter since PF can receive vlans w/o entry in vlvf */
++ igb_vfta_set(hw, vid, TRUE);
++#ifndef HAVE_NETDEV_VLAN_FEATURES
++ /* Copy feature flags from netdev to the vlan netdev for this vid.
++ * This allows things like TSO to bubble down to our vlan device.
++ */
++ v_netdev = vlan_group_get_device(adapter->vlgrp, vid);
++ v_netdev->features |= adapter->netdev->features;
++ vlan_group_set_device(adapter->vlgrp, vid, v_netdev);
++#endif
++}
++
++static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
++{
++ struct igb_adapter *adapter = netdev_priv(netdev);
++ struct e1000_hw *hw = &adapter->hw;
++ int pf_id = adapter->vfs_allocated_count;
++ s32 err;
++
++ igb_irq_disable(adapter);
++ vlan_group_set_device(adapter->vlgrp, vid, NULL);
++
++ if (!test_bit(__IGB_DOWN, &adapter->state))
++ igb_irq_enable(adapter);
++
++ /* remove vlan from VLVF table array */
++ err = igb_vlvf_set(adapter, vid, FALSE, pf_id);
++
++ /* if vid was not present in VLVF just remove it from table */
++ if (err)
++ igb_vfta_set(hw, vid, FALSE);
++}
++
++static void igb_restore_vlan(struct igb_adapter *adapter)
++{
++ igb_vlan_rx_register(adapter->netdev, adapter->vlgrp);
++
++ if (adapter->vlgrp) {
++ u16 vid;
++ for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
++ if (!vlan_group_get_device(adapter->vlgrp, vid))
++ continue;
++ igb_vlan_rx_add_vid(adapter->netdev, vid);
++ }
++ }
++}
++
++int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
++{
++ struct e1000_mac_info *mac = &adapter->hw.mac;
++
++ mac->autoneg = 0;
++
++ switch (spddplx) {
++ case SPEED_10 + DUPLEX_HALF:
++ mac->forced_speed_duplex = ADVERTISE_10_HALF;
++ break;
++ case SPEED_10 + DUPLEX_FULL:
++ mac->forced_speed_duplex = ADVERTISE_10_FULL;
++ break;
++ case SPEED_100 + DUPLEX_HALF:
++ mac->forced_speed_duplex = ADVERTISE_100_HALF;
++ break;
++ case SPEED_100 + DUPLEX_FULL:
++ mac->forced_speed_duplex = ADVERTISE_100_FULL;
++ break;
++ case SPEED_1000 + DUPLEX_FULL:
++ mac->autoneg = 1;
++ adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
++ break;
++ case SPEED_1000 + DUPLEX_HALF: /* not supported */
++ default:
++ DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
++ return -EINVAL;
++ }
++ return 0;
++}
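++
++/*
++ * Example (illustrative): a caller such as the ethtool set_settings path
++ * can force 100 Mbps full duplex with
++ *
++ *	igb_set_spd_dplx(adapter, SPEED_100 + DUPLEX_FULL);
++ *
++ * which clears mac->autoneg and sets forced_speed_duplex to
++ * ADVERTISE_100_FULL; 1000 Mbps is only supported with autonegotiation.
++ */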
++
++#ifdef USE_REBOOT_NOTIFIER
++/* only want to do this for 2.4 kernels? */
++static int igb_notify_reboot(struct notifier_block *nb, unsigned long event,
++ void *p)
++{
++ struct pci_dev *pdev = NULL;
++
++ switch (event) {
++ case SYS_DOWN:
++ case SYS_HALT:
++ case SYS_POWER_OFF:
++ while ((pdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
++ if (pci_dev_driver(pdev) == &igb_driver)
++ igb_suspend(pdev, PMSG_SUSPEND);
++ }
++ }
++ return NOTIFY_DONE;
++}
++
++#endif
++static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
++{
++ struct net_device *netdev = pci_get_drvdata(pdev);
++ struct igb_adapter *adapter = netdev_priv(netdev);
++ struct e1000_hw *hw = &adapter->hw;
++ u32 ctrl, rctl, status;
++ u32 wufc = adapter->wol;
++#ifdef CONFIG_PM
++ int retval = 0;
++#endif
++
++ netif_device_detach(netdev);
++
++ if (netif_running(netdev))
++ igb_close(netdev);
++
++ igb_clear_interrupt_scheme(adapter);
++
++#ifdef CONFIG_PM
++ retval = pci_save_state(pdev);
++ if (retval)
++ return retval;
++#endif
++
++ status = E1000_READ_REG(hw, E1000_STATUS);
++ if (status & E1000_STATUS_LU)
++ wufc &= ~E1000_WUFC_LNKC;
++
++ if (wufc) {
++ igb_setup_rctl(adapter);
++ igb_set_rx_mode(netdev);
++
++ /* turn on all-multi mode if wake on multicast is enabled */
++ if (wufc & E1000_WUFC_MC) {
++ rctl = E1000_READ_REG(hw, E1000_RCTL);
++ rctl |= E1000_RCTL_MPE;
++ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
++ }
++
++ ctrl = E1000_READ_REG(hw, E1000_CTRL);
++ /* phy power management enable */
++ #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
++ ctrl |= E1000_CTRL_ADVD3WUC;
++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
++
++ /* Allow time for pending master requests to run */
++ e1000_disable_pcie_master(hw);
++
++ E1000_WRITE_REG(hw, E1000_WUC, E1000_WUC_PME_EN);
++ E1000_WRITE_REG(hw, E1000_WUFC, wufc);
++ } else {
++ E1000_WRITE_REG(hw, E1000_WUC, 0);
++ E1000_WRITE_REG(hw, E1000_WUFC, 0);
++ }
++
++ *enable_wake = wufc || adapter->en_mng_pt;
++ if (!*enable_wake)
++ e1000_shutdown_fiber_serdes_link(hw);
++
++ /* Release control of h/w to f/w. If f/w is AMT enabled, this
++ * would have already happened in close and is redundant. */
++ igb_release_hw_control(adapter);
++
++ pci_disable_device(pdev);
++
++ return 0;
++}
++
++#ifdef CONFIG_PM
++static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
++{
++ int retval;
++ bool wake;
++
++ retval = __igb_shutdown(pdev, &wake);
++ if (retval)
++ return retval;
++
++ if (wake) {
++ pci_prepare_to_sleep(pdev);
++ } else {
++ pci_wake_from_d3(pdev, false);
++ pci_set_power_state(pdev, PCI_D3hot);
++ }
++
++ return 0;
++}
++
++static int igb_resume(struct pci_dev *pdev)
++{
++ struct net_device *netdev = pci_get_drvdata(pdev);
++ struct igb_adapter *adapter = netdev_priv(netdev);
++ struct e1000_hw *hw = &adapter->hw;
++ u32 err;
++
++ pci_set_power_state(pdev, PCI_D0);
++ pci_restore_state(pdev);
++ err = pci_enable_device_mem(pdev);
++ if (err) {
++ dev_err(&pdev->dev, "igb: Cannot enable PCI device "
++ "from suspend\n");
++ return err;
++ }
++ pci_set_master(pdev);
++
++ pci_enable_wake(pdev, PCI_D3hot, 0);
++ pci_enable_wake(pdev, PCI_D3cold, 0);
++
++ if (igb_init_interrupt_scheme(adapter)) {
++ DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
++ return -ENOMEM;
++ }
++
++ /* e1000_power_up_phy(adapter); */
++
++ igb_reset(adapter);
++
++ /* let the f/w know that the h/w is now under the control of the
++ * driver. */
++ igb_get_hw_control(adapter);
++
++ E1000_WRITE_REG(hw, E1000_WUS, ~0);
++
++ if (netif_running(netdev)) {
++ err = igb_open(netdev);
++ if (err)
++ return err;
++ }
++
++ netif_device_attach(netdev);
++
++ return 0;
++}
++#endif
++
++#ifndef USE_REBOOT_NOTIFIER
++static void igb_shutdown(struct pci_dev *pdev)
++{
++ bool wake;
++
++ __igb_shutdown(pdev, &wake);
++
++ if (system_state == SYSTEM_POWER_OFF) {
++ pci_wake_from_d3(pdev, wake);
++ pci_set_power_state(pdev, PCI_D3hot);
++ }
++}
++
++#endif
++#ifdef CONFIG_NET_POLL_CONTROLLER
++/*
++ * Polling 'interrupt' - used by things like netconsole to send skbs
++ * without having to re-enable interrupts. It's not called while
++ * the interrupt routine is executing.
++ */
++static void igb_netpoll(struct net_device *netdev)
++{
++ struct igb_adapter *adapter = netdev_priv(netdev);
++ struct e1000_hw *hw = &adapter->hw;
++ int i;
++
++ if (!adapter->msix_entries) {
++ struct igb_q_vector *q_vector = adapter->q_vector[0];
++ igb_irq_disable(adapter);
++ napi_schedule(&q_vector->napi);
++ return;
++ }
++
++ for (i = 0; i < adapter->num_q_vectors; i++) {
++ struct igb_q_vector *q_vector = adapter->q_vector[i];
++ E1000_WRITE_REG(hw, E1000_EIMC, q_vector->eims_value);
++ napi_schedule(&q_vector->napi);
++ }
++}
++#endif /* CONFIG_NET_POLL_CONTROLLER */
++
++#ifdef HAVE_PCI_ERS
++/**
++ * igb_io_error_detected - called when PCI error is detected
++ * @pdev: Pointer to PCI device
++ * @state: The current pci connection state
++ *
++ * This function is called after a PCI bus error affecting
++ * this device has been detected.
++ */
++static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
++ pci_channel_state_t state)
++{
++ struct net_device *netdev = pci_get_drvdata(pdev);
++ struct igb_adapter *adapter = netdev_priv(netdev);
++
++ netif_device_detach(netdev);
++
++ if (state == pci_channel_io_perm_failure)
++ return PCI_ERS_RESULT_DISCONNECT;
++
++ if (netif_running(netdev))
++ igb_down(adapter);
++ pci_disable_device(pdev);
++
++ /* Request a slot reset. */
++ return PCI_ERS_RESULT_NEED_RESET;
++}
++
++/**
++ * igb_io_slot_reset - called after the pci bus has been reset.
++ * @pdev: Pointer to PCI device
++ *
++ * Restart the card from scratch, as if from a cold-boot. Implementation
++ * resembles the first-half of the igb_resume routine.
++ */
++static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
++{
++ struct net_device *netdev = pci_get_drvdata(pdev);
++ struct igb_adapter *adapter = netdev_priv(netdev);
++ struct e1000_hw *hw = &adapter->hw;
++ pci_ers_result_t result;
++
++ if (pci_enable_device_mem(pdev)) {
++ dev_err(&pdev->dev,
++ "Cannot re-enable PCI device after reset.\n");
++ result = PCI_ERS_RESULT_DISCONNECT;
++ } else {
++ pci_set_master(pdev);
++ pci_restore_state(pdev);
++
++ pci_enable_wake(pdev, PCI_D3hot, 0);
++ pci_enable_wake(pdev, PCI_D3cold, 0);
++
++ igb_reset(adapter);
++ E1000_WRITE_REG(hw, E1000_WUS, ~0);
++ result = PCI_ERS_RESULT_RECOVERED;
++ }
++
++ pci_cleanup_aer_uncorrect_error_status(pdev);
++
++ return result;
++}
++
++/**
++ * igb_io_resume - called when traffic can start flowing again.
++ * @pdev: Pointer to PCI device
++ *
++ * This callback is called when the error recovery driver tells us that
++ * its OK to resume normal operation. Implementation resembles the
++ * second-half of the igb_resume routine.
++ */
++static void igb_io_resume(struct pci_dev *pdev)
++{
++ struct net_device *netdev = pci_get_drvdata(pdev);
++ struct igb_adapter *adapter = netdev_priv(netdev);
++
++ if (netif_running(netdev)) {
++ if (igb_up(adapter)) {
++ dev_err(&pdev->dev, "igb_up failed after reset\n");
++ return;
++ }
++ }
++
++ netif_device_attach(netdev);
++
++ /* let the f/w know that the h/w is now under the control of the
++ * driver. */
++ igb_get_hw_control(adapter);
++}
++
++#endif /* HAVE_PCI_ERS */
++static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
++ u8 qsel)
++{
++ u32 rar_low, rar_high;
++ struct e1000_hw *hw = &adapter->hw;
++
++ /* HW expects these in little endian so we reverse the byte order
++ * from network order (big endian) to little endian
++ */
++ rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
++ ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
++ rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
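++ /* e.g. 00:11:22:33:44:55 packs as rar_low = 0x33221100, rar_high = 0x5544 */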
++
++ /* Indicate to hardware the Address is Valid. */
++ rar_high |= E1000_RAH_AV;
++
++ if (hw->mac.type == e1000_82575)
++ rar_high |= E1000_RAH_POOL_1 * qsel;
++ else
++ rar_high |= E1000_RAH_POOL_1 << qsel;
++
++ E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
++ E1000_WRITE_FLUSH(hw);
++ E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
++ E1000_WRITE_FLUSH(hw);
++}
++
++int igb_set_vf_mac(struct igb_adapter *adapter,
++ int vf, unsigned char *mac_addr)
++{
++ struct e1000_hw *hw = &adapter->hw;
++ /* VF MAC addresses start at the end of the receive addresses and move
++ * towards the first; as a result a collision should not be possible */
++ int rar_entry = hw->mac.rar_entry_count - (vf + 1);
++
++ memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, 6);
++
++ igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);
++
++ return 0;
++}
++
++static void igb_vmm_control(struct igb_adapter *adapter)
++{
++ struct e1000_hw *hw = &adapter->hw;
++
++ /* replication is not supported for 82575 */
++ if (hw->mac.type == e1000_82575)
++ return;
++
++ if (adapter->vfs_allocated_count || adapter->VMDQ_queues) {
++ e1000_vmdq_set_loopback_pf(hw, true);
++ e1000_vmdq_set_replication_pf(hw, true);
++ } else {
++ e1000_vmdq_set_loopback_pf(hw, false);
++ e1000_vmdq_set_replication_pf(hw, false);
++ }
++}
++
++static void igb_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
++{
++ unsigned char my_mac_addr[6];
++ unsigned char oui[OUI_LEN] = {0x02, 0xAA, 0x00};
++ struct net_device *netdev = pci_get_drvdata(pdev);
++ struct igb_adapter *adapter = netdev_priv(netdev);
++ unsigned int vfn = (event_mask & 7);
++
++ bool enable = ((event_mask & 0x10000000U) != 0);
++
++ if (enable) {
++ random_ether_addr(my_mac_addr);
++ memcpy(my_mac_addr, oui, OUI_LEN);
++ printk(KERN_INFO "IOV1: VF %d is enabled\n", vfn);
++ printk(KERN_INFO "Assigned MAC: "
++ "%2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
++ my_mac_addr[0], my_mac_addr[1], my_mac_addr[2],
++ my_mac_addr[3], my_mac_addr[4], my_mac_addr[5]);
++ igb_set_vf_mac(adapter, vfn, my_mac_addr);
++ } else {
++ printk(KERN_INFO "IOV1: VF %d is disabled\n", vfn);
++ }
++}
++
++/* igb_main.c */
+Index: linux-2.6.22/drivers/net/igb/igb_param.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.22/drivers/net/igb/igb_param.c 2009-12-18 12:39:22.000000000 -0500
+@@ -0,0 +1,599 @@
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2009 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ You should have received a copy of the GNU General Public License along with
++ this program; if not, write to the Free Software Foundation, Inc.,
++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
++
++
++#include <linux/netdevice.h>
++
++#include "igb.h"
++
++/* This is the only thing that needs to be changed to adjust the
++ * maximum number of ports that the driver can manage.
++ */
++
++#define IGB_MAX_NIC 32
++
++#define OPTION_UNSET -1
++#define OPTION_DISABLED 0
++#define OPTION_ENABLED 1
++
++/* All parameters are treated the same, as an integer array of values.
++ * This macro just reduces the need to repeat the same declaration code
++ * over and over (plus this helps to avoid typo bugs).
++ */
++
++#define IGB_PARAM_INIT { [0 ... IGB_MAX_NIC] = OPTION_UNSET }
++#ifndef module_param_array
++/* Module Parameters are always initialized to -1, so that the driver
++ * can tell the difference between no user specified value or the
++ * user asking for the default value.
++ * The true default values are loaded in when igb_check_options is called.
++ *
++ * This is a GCC extension to ANSI C.
++ * See the item "Labeled Elements in Initializers" in the section
++ * "Extensions to the C Language Family" of the GCC documentation.
++ */
++
++#define IGB_PARAM(X, desc) \
++ static const int __devinitdata X[IGB_MAX_NIC+1] = IGB_PARAM_INIT; \
++ MODULE_PARM(X, "1-" __MODULE_STRING(IGB_MAX_NIC) "i"); \
++ MODULE_PARM_DESC(X, desc);
++#else
++#define IGB_PARAM(X, desc) \
++ static int __devinitdata X[IGB_MAX_NIC+1] = IGB_PARAM_INIT; \
++ static unsigned int num_##X; \
++ module_param_array_named(X, X, int, &num_##X, 0); \
++ MODULE_PARM_DESC(X, desc);
++#endif
++
++/* Interrupt Throttle Rate (interrupts/sec)
++ *
++ * Valid Range: 120-100000 (0=off, 1=dynamic, 3=dynamic conservative)
++ */
++IGB_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
++#define DEFAULT_ITR 3
++#define MAX_ITR 100000
++#define MIN_ITR 120
++/* IntMode (Interrupt Mode)
++ *
++ * Valid Range: 0 - 2
++ *
++ * Default Value: 2 (MSI-X)
++ */
++IGB_PARAM(IntMode, "Interrupt Mode");
++#define MAX_INTMODE IGB_INT_MODE_MSIX
++#define MIN_INTMODE IGB_INT_MODE_LEGACY
++
++/* LLIPort (Low Latency Interrupt TCP Port)
++ *
++ * Valid Range: 0 - 65535
++ *
++ * Default Value: 0 (disabled)
++ */
++IGB_PARAM(LLIPort, "Low Latency Interrupt TCP Port");
++
++#define DEFAULT_LLIPORT 0
++#define MAX_LLIPORT 0xFFFF
++#define MIN_LLIPORT 0
++
++/* LLIPush (Low Latency Interrupt on TCP Push flag)
++ *
++ * Valid Range: 0, 1
++ *
++ * Default Value: 0 (disabled)
++ */
++IGB_PARAM(LLIPush, "Low Latency Interrupt on TCP Push flag");
++
++#define DEFAULT_LLIPUSH 0
++#define MAX_LLIPUSH 1
++#define MIN_LLIPUSH 0
++
++/* LLISize (Low Latency Interrupt on Packet Size)
++ *
++ * Valid Range: 0 - 1500
++ *
++ * Default Value: 0 (disabled)
++ */
++IGB_PARAM(LLISize, "Low Latency Interrupt on Packet Size");
++
++#define DEFAULT_LLISIZE 0
++#define MAX_LLISIZE 1500
++#define MIN_LLISIZE 0
++
++#ifdef IGB_LRO
++/* LROAggr (Large Receive Offload)
++ *
++ * Valid Range: 2 - 44
++ *
++ * Default Value: 32
++ */
++IGB_PARAM(LROAggr, "LRO - Maximum packets to aggregate");
++
++#define DEFAULT_LRO_AGGR 32
++#define MAX_LRO_AGGR 44
++#define MIN_LRO_AGGR 2
++#endif
++
++/* RSS (Enable RSS multiqueue receive)
++ *
++ * Valid Range: 0 - 8
++ *
++ * Default Value: 1
++ */
++IGB_PARAM(RSS, "RSS - multiqueue receive count");
++
++#define DEFAULT_RSS 1
++#define MAX_RSS ((adapter->hw.mac.type == e1000_82575) ? 4 : 8)
++#define MIN_RSS 0
++
++/* VMDQ (Enable VMDq multiqueue receive)
++ *
++ * Valid Range: 0 - 8
++ *
++ * Default Value: 0
++ */
++IGB_PARAM(VMDQ, "VMDQ - VMDq multiqueue receive");
++
++#define DEFAULT_VMDQ 0
++#define MAX_VMDQ MAX_RSS
++#define MIN_VMDQ 0
++
++#ifdef CONFIG_PCI_IOV
++/* max_vfs (Enable SR-IOV VF devices)
++ *
++ * Valid Range: 0 - 7
++ *
++ * Default Value: 0
++ */
++IGB_PARAM(max_vfs, "max_vfs - SR-IOV VF devices");
++
++#define DEFAULT_SRIOV 0
++#define MAX_SRIOV 7
++#define MIN_SRIOV 0
++
++#endif /* CONFIG_PCI_IOV */
++
++/* QueuePairs (Enable TX/RX queue pairs for interrupt handling)
++ *
++ * Valid Range: 0 - 1
++ *
++ * Default Value: 1
++ */
++IGB_PARAM(QueuePairs, "QueuePairs - TX/RX queue pairs for interrupt handling");
++
++#define DEFAULT_QUEUE_PAIRS 1
++#define MAX_QUEUE_PAIRS 1
++#define MIN_QUEUE_PAIRS 0
++
++struct igb_option {
++ enum { enable_option, range_option, list_option } type;
++ const char *name;
++ const char *err;
++ int def;
++ union {
++ struct { /* range_option info */
++ int min;
++ int max;
++ } r;
++ struct { /* list_option info */
++ int nr;
++ struct igb_opt_list { int i; char *str; } *p;
++ } l;
++ } arg;
++};
++
++static int __devinit igb_validate_option(unsigned int *value,
++ struct igb_option *opt,
++ struct igb_adapter *adapter)
++{
++ if (*value == OPTION_UNSET) {
++ *value = opt->def;
++ return 0;
++ }
++
++ switch (opt->type) {
++ case enable_option:
++ switch (*value) {
++ case OPTION_ENABLED:
++ DPRINTK(PROBE, INFO, "%s Enabled\n", opt->name);
++ return 0;
++ case OPTION_DISABLED:
++ DPRINTK(PROBE, INFO, "%s Disabled\n", opt->name);
++ return 0;
++ }
++ break;
++ case range_option:
++ if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
++ DPRINTK(PROBE, INFO,
++ "%s set to %d\n", opt->name, *value);
++ return 0;
++ }
++ break;
++ case list_option: {
++ int i;
++ struct igb_opt_list *ent;
++
++ for (i = 0; i < opt->arg.l.nr; i++) {
++ ent = &opt->arg.l.p[i];
++ if (*value == ent->i) {
++ if (ent->str[0] != '\0')
++ DPRINTK(PROBE, INFO, "%s\n", ent->str);
++ return 0;
++ }
++ }
++ }
++ break;
++ default:
++ BUG();
++ }
++
++ DPRINTK(PROBE, INFO, "Invalid %s value specified (%d) %s\n",
++ opt->name, *value, opt->err);
++ *value = opt->def;
++ return -1;
++}
++
++/**
++ * igb_check_options - Range Checking for Command Line Parameters
++ * @adapter: board private structure
++ *
++ * This routine checks all command line parameters for valid user
++ * input. If an invalid value is given, or if no user specified
++ * value exists, a default value is used. The final value is stored
++ * in a variable in the adapter structure.
++ **/
++
++void __devinit igb_check_options(struct igb_adapter *adapter)
++{
++ int bd = adapter->bd_number;
++
++ if (bd >= IGB_MAX_NIC) {
++ DPRINTK(PROBE, NOTICE,
++ "Warning: no configuration for board #%d\n", bd);
++ DPRINTK(PROBE, NOTICE, "Using defaults for all values\n");
++#ifndef module_param_array
++ bd = IGB_MAX_NIC;
++#endif
++ }
++
++ { /* Interrupt Throttling Rate */
++ struct igb_option opt = {
++ .type = range_option,
++ .name = "Interrupt Throttling Rate (ints/sec)",
++ .err = "using default of " __MODULE_STRING(DEFAULT_ITR),
++ .def = DEFAULT_ITR,
++ .arg = { .r = { .min = MIN_ITR,
++ .max = MAX_ITR } }
++ };
++
++#ifdef module_param_array
++ if (num_InterruptThrottleRate > bd) {
++#endif
++ adapter->itr = InterruptThrottleRate[bd];
++ switch (adapter->itr) {
++ case 0:
++ DPRINTK(PROBE, INFO, "%s turned off\n",
++ opt.name);
++ break;
++ case 1:
++ DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
++ opt.name);
++ adapter->itr_setting = adapter->itr;
++ adapter->itr = IGB_START_ITR;
++ break;
++ case 3:
++ DPRINTK(PROBE, INFO,
++ "%s set to dynamic conservative mode\n",
++ opt.name);
++ adapter->itr_setting = adapter->itr;
++ adapter->itr = IGB_START_ITR;
++ break;
++ default:
++ igb_validate_option(&adapter->itr, &opt,
++ adapter);
++ /* Save the setting, because the dynamic bits
++ * change itr. In case of invalid user value,
++ * default to conservative mode, else need to
++ * clear the lower two bits because they are
++ * used as control */
++ if (adapter->itr == 3) {
++ adapter->itr_setting = adapter->itr;
++ adapter->itr = IGB_START_ITR;
++ } else {
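++ /* convert interrupts/sec to an interrupt interval in 256 ns units */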
++ adapter->itr = 1000000000 / (adapter->itr * 256);
++ adapter->itr_setting = adapter->itr & ~3;
++ }
++ break;
++ }
++#ifdef module_param_array
++ } else {
++ adapter->itr_setting = opt.def;
++ adapter->itr = 8000;
++ }
++#endif
++ }
++ { /* Interrupt Mode */
++ struct igb_option opt = {
++ .type = range_option,
++ .name = "Interrupt Mode",
++ .err = "defaulting to 2 (MSI-X)",
++ .def = IGB_INT_MODE_MSIX,
++ .arg = { .r = { .min = MIN_INTMODE,
++ .max = MAX_INTMODE } }
++ };
++
++#ifdef module_param_array
++ if (num_IntMode > bd) {
++#endif
++ unsigned int int_mode = IntMode[bd];
++ igb_validate_option(&int_mode, &opt, adapter);
++ adapter->int_mode = int_mode;
++#ifdef module_param_array
++ } else {
++ adapter->int_mode = opt.def;
++ }
++#endif
++ }
++ { /* Low Latency Interrupt TCP Port */
++ struct igb_option opt = {
++ .type = range_option,
++ .name = "Low Latency Interrupt TCP Port",
++ .err = "using default of " __MODULE_STRING(DEFAULT_LLIPORT),
++ .def = DEFAULT_LLIPORT,
++ .arg = { .r = { .min = MIN_LLIPORT,
++ .max = MAX_LLIPORT } }
++ };
++
++#ifdef module_param_array
++ if (num_LLIPort > bd) {
++#endif
++ adapter->lli_port = LLIPort[bd];
++ if (adapter->lli_port) {
++ igb_validate_option(&adapter->lli_port, &opt,
++ adapter);
++ } else {
++ DPRINTK(PROBE, INFO, "%s turned off\n",
++ opt.name);
++ }
++#ifdef module_param_array
++ } else {
++ adapter->lli_port = opt.def;
++ }
++#endif
++ }
++ { /* Low Latency Interrupt on Packet Size */
++ struct igb_option opt = {
++ .type = range_option,
++ .name = "Low Latency Interrupt on Packet Size",
++ .err = "using default of " __MODULE_STRING(DEFAULT_LLISIZE),
++ .def = DEFAULT_LLISIZE,
++ .arg = { .r = { .min = MIN_LLISIZE,
++ .max = MAX_LLISIZE } }
++ };
++
++#ifdef module_param_array
++ if (num_LLISize > bd) {
++#endif
++ adapter->lli_size = LLISize[bd];
++ if (adapter->lli_size) {
++ igb_validate_option(&adapter->lli_size, &opt,
++ adapter);
++ } else {
++ DPRINTK(PROBE, INFO, "%s turned off\n",
++ opt.name);
++ }
++#ifdef module_param_array
++ } else {
++ adapter->lli_size = opt.def;
++ }
++#endif
++ }
++ { /* Low Latency Interrupt on TCP Push flag */
++ struct igb_option opt = {
++ .type = enable_option,
++ .name = "Low Latency Interrupt on TCP Push flag",
++ .err = "defaulting to Disabled",
++ .def = OPTION_DISABLED
++ };
++
++#ifdef module_param_array
++ if (num_LLIPush > bd) {
++#endif
++ unsigned int lli_push = LLIPush[bd];
++ igb_validate_option(&lli_push, &opt, adapter);
++ adapter->flags |= lli_push ? IGB_FLAG_LLI_PUSH : 0;
++#ifdef module_param_array
++ } else {
++ adapter->flags |= opt.def ? IGB_FLAG_LLI_PUSH : 0;
++ }
++#endif
++ }
++#ifdef IGB_LRO
++ { /* Large Receive Offload - Maximum packets to aggregate */
++ struct igb_option opt = {
++ .type = range_option,
++ .name = "LRO - Maximum packets to aggregate",
++ .err = "using default of " __MODULE_STRING(DEFAULT_LRO_AGGR),
++ .def = DEFAULT_LRO_AGGR,
++ .arg = { .r = { .min = MIN_LRO_AGGR,
++ .max = MAX_LRO_AGGR } }
++ };
++
++#ifdef module_param_array
++ if (num_LROAggr > bd) {
++#endif
++ adapter->lro_max_aggr = LROAggr[bd];
++ igb_validate_option(&adapter->lro_max_aggr, &opt, adapter);
++
++#ifdef module_param_array
++ } else {
++ adapter->lro_max_aggr = opt.def;
++ }
++#endif
++ }
++#endif /* IGB_LRO */
++#ifdef CONFIG_PCI_IOV
++ { /* SRIOV - Enable SR-IOV VF devices */
++ struct igb_option opt = {
++ .type = range_option,
++ .name = "max_vfs - SR-IOV VF devices",
++ .err = "using default of " __MODULE_STRING(DEFAULT_SRIOV),
++ .def = DEFAULT_SRIOV,
++ .arg = { .r = { .min = MIN_SRIOV,
++ .max = MAX_SRIOV } }
++ };
++
++#ifdef module_param_array
++ if (num_max_vfs > bd) {
++#endif
++ adapter->vfs_allocated_count = max_vfs[bd];
++ igb_validate_option(&adapter->vfs_allocated_count, &opt, adapter);
++
++#ifdef module_param_array
++ } else {
++ adapter->vfs_allocated_count = opt.def;
++ }
++#endif
++ if (adapter->hw.mac.type != e1000_82576 && adapter->vfs_allocated_count) {
++ adapter->vfs_allocated_count = 0;
++ DPRINTK(PROBE, INFO, "SR-IOV option max_vfs only supported on 82576.\n");
++ }
++ }
++#endif /* CONFIG_PCI_IOV */
++ { /* VMDQ - Enable VMDq multiqueue receive */
++ struct igb_option opt = {
++ .type = range_option,
++ .name = "VMDQ - VMDq multiqueue receive count",
++ .err = "using default of " __MODULE_STRING(DEFAULT_VMDQ),
++ .def = DEFAULT_VMDQ,
++ .arg = { .r = { .min = MIN_VMDQ,
++ .max = (MAX_VMDQ - adapter->vfs_allocated_count) } }
++ };
++#ifdef module_param_array
++ if (num_VMDQ > bd) {
++#endif
++ adapter->VMDQ_queues = VMDQ[bd];
++ if (adapter->vfs_allocated_count && !adapter->VMDQ_queues) {
++ DPRINTK(PROBE, INFO, "Enabling SR-IOV requires VMDq be set to at least 1\n");
++ adapter->VMDQ_queues = 1;
++ }
++ igb_validate_option(&adapter->VMDQ_queues, &opt, adapter);
++
++#ifdef module_param_array
++ } else {
++ if (!adapter->vfs_allocated_count)
++ adapter->VMDQ_queues = opt.def;
++ else
++ adapter->VMDQ_queues = 1;
++ }
++#endif
++ }
++ { /* RSS - Enable RSS multiqueue receives */
++ struct igb_option opt = {
++ .type = range_option,
++ .name = "RSS - RSS multiqueue receive count",
++ .err = "using default of " __MODULE_STRING(DEFAULT_RSS),
++ .def = DEFAULT_RSS,
++ .arg = { .r = { .min = MIN_RSS,
++ .max = MAX_RSS } }
++ };
++
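++ /* with VMDq pools in use, fewer RSS queues are available
++ * per pool, so lower the allowed maximum accordingly */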
++ if (adapter->VMDQ_queues) {
++ switch (adapter->hw.mac.type) {
++ case e1000_82576:
++ opt.arg.r.max = 2;
++ break;
++ case e1000_82575:
++ if (adapter->VMDQ_queues == 2)
++ opt.arg.r.max = 3;
++ if (adapter->VMDQ_queues <= 2)
++ break;
++ default:
++ opt.arg.r.max = 1;
++ break;
++ }
++ }
++
++#ifdef module_param_array
++ if (num_RSS > bd) {
++#endif
++ adapter->RSS_queues = RSS[bd];
++ switch (adapter->RSS_queues) {
++ case 1:
++ break;
++ default:
++ igb_validate_option(&adapter->RSS_queues, &opt, adapter);
++ if (adapter->RSS_queues)
++ break;
++ case 0:
++ adapter->RSS_queues = min_t(u32, opt.arg.r.max, num_online_cpus());
++ break;
++ }
++#ifdef module_param_array
++ } else {
++ adapter->RSS_queues = opt.def;
++ }
++#endif
++ }
++ { /* QueuePairs - Enable TX/RX queue pairs for interrupt handling */
++ struct igb_option opt = {
++ .type = enable_option,
++ .name = "QueuePairs - TX/RX queue pairs for interrupt handling",
++ .err = "defaulting to Enabled",
++ .def = OPTION_ENABLED
++ };
++
++#ifdef module_param_array
++ if (num_QueuePairs > bd) {
++#endif
++ unsigned int qp = QueuePairs[bd];
++ /*
++ * we must enable queue pairs if the number of queues
++ * exceeds the number of available interrupts. We are
++ * limited to 10, or 3 per unallocated vf.
++ */
++ if ((adapter->RSS_queues > 4) ||
++ (adapter->VMDQ_queues > 4) ||
++ ((adapter->RSS_queues > 1) &&
++ ((adapter->VMDQ_queues > 3) ||
++ (adapter->vfs_allocated_count > 6)))) {
++ if (qp == OPTION_DISABLED) {
++ qp = OPTION_ENABLED;
++ DPRINTK(PROBE, INFO,
++ "Number of queues exceeds available interrupts, %s\n",opt.err);
++ }
++ }
++ igb_validate_option(&qp, &opt, adapter);
++ adapter->flags |= qp ? IGB_FLAG_QUEUE_PAIRS : 0;
++
++#ifdef module_param_array
++ } else {
++ adapter->flags |= opt.def ? IGB_FLAG_QUEUE_PAIRS : 0;
++ }
++#endif
++ }
++}
++
+Index: linux-2.6.22/drivers/net/igb/igb_regtest.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.22/drivers/net/igb/igb_regtest.h 2009-12-18 12:39:22.000000000 -0500
+@@ -0,0 +1,135 @@
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2009 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ You should have received a copy of the GNU General Public License along with
++ this program; if not, write to the Free Software Foundation, Inc.,
++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
++
++/* ethtool register test data */
++struct igb_reg_test {
++ u16 reg;
++ u16 reg_offset;
++ u16 array_len;
++ u16 test_type;
++ u32 mask;
++ u32 write;
++};
++
++/* In the hardware, registers are laid out either singly, in arrays
++ * spaced 0x100 bytes apart, or in contiguous tables. We assume
++ * most tests take place on arrays or single registers (handled
++ * as a single-element array) and special-case the tables.
++ * Table tests are always pattern tests.
++ *
++ * We also make provision for some required setup steps by specifying
++ * registers to be written without any read-back testing.
++ */
++
++#define PATTERN_TEST 1
++#define SET_READ_TEST 2
++#define WRITE_NO_TEST 3
++#define TABLE32_TEST 4
++#define TABLE64_TEST_LO 5
++#define TABLE64_TEST_HI 6
++
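++/* Each table entry below reads as: { register, spacing between array
++ * elements, number of elements, test type, mask of bits compared on
++ * read-back, value (or writable-bit mask) used for the write }.
++ */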
++/* 82576 reg test */
++static struct igb_reg_test reg_test_82576[] = {
++ { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
++ { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
++ { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
++ { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
++ { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
++ { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
++ { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
++ { E1000_RDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
++ { E1000_RDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
++ { E1000_RDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
++ /* Enable all queues before testing. */
++ { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
++ { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
++ /* RDH is read-only for 82576, only test RDT. */
++ { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
++ { E1000_RDT(4), 0x40, 12, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
++ { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 },
++ { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, 0 },
++ { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
++ { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
++ { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
++ { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
++ { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
++ { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
++ { E1000_TDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
++ { E1000_TDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
++ { E1000_TDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
++ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
++ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
++ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
++ { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
++ { E1000_RA, 0, 16, TABLE64_TEST_LO,
++ 0xFFFFFFFF, 0xFFFFFFFF },
++ { E1000_RA, 0, 16, TABLE64_TEST_HI,
++ 0x83FFFFFF, 0xFFFFFFFF },
++ { E1000_RA2, 0, 8, TABLE64_TEST_LO,
++ 0xFFFFFFFF, 0xFFFFFFFF },
++ { E1000_RA2, 0, 8, TABLE64_TEST_HI,
++ 0x83FFFFFF, 0xFFFFFFFF },
++ { E1000_MTA, 0, 128, TABLE32_TEST,
++ 0xFFFFFFFF, 0xFFFFFFFF },
++ { 0, 0, 0, 0 }
++};
++
++/* 82575 register test */
++static struct igb_reg_test reg_test_82575[] = {
++ { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
++ { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
++ { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
++ { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
++ { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
++ { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
++ { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
++ /* Enable all four RX queues before testing. */
++ { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
++ /* RDH is read-only for 82575, only test RDT. */
++ { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
++ { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 },
++ { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
++ { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
++ { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
++ { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
++ { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
++ { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
++ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
++ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0x003FFFFB },
++ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0xFFFFFFFF },
++ { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
++ { E1000_TXCW, 0x100, 1, PATTERN_TEST, 0xC000FFFF, 0x0000FFFF },
++ { E1000_RA, 0, 16, TABLE64_TEST_LO,
++ 0xFFFFFFFF, 0xFFFFFFFF },
++ { E1000_RA, 0, 16, TABLE64_TEST_HI,
++ 0x800FFFFF, 0xFFFFFFFF },
++ { E1000_MTA, 0, 128, TABLE32_TEST,
++ 0xFFFFFFFF, 0xFFFFFFFF },
++ { 0, 0, 0, 0 }
++};
++
++
+Index: linux-2.6.22/drivers/net/igb/kcompat.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.22/drivers/net/igb/kcompat.c 2009-12-18 12:39:22.000000000 -0500
+@@ -0,0 +1,552 @@
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2009 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ You should have received a copy of the GNU General Public License along with
++ this program; if not, write to the Free Software Foundation, Inc.,
++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
++
++#include "igb.h"
++#include "kcompat.h"
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,21) )
++struct sk_buff *
++_kc_skb_pad(struct sk_buff *skb, int pad)
++{
++ struct sk_buff *nskb;
++
++ /* If the skbuff is non-linear, tailroom is always zero. */
++ if(skb_tailroom(skb) >= pad)
++ {
++ memset(skb->data+skb->len, 0, pad);
++ return skb;
++ }
++
++ nskb = skb_copy_expand(skb, skb_headroom(skb), skb_tailroom(skb) + pad, GFP_ATOMIC);
++ kfree_skb(skb);
++ if(nskb)
++ memset(nskb->data+nskb->len, 0, pad);
++ return nskb;
++}
++#endif /* < 2.4.21 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) )
++
++/**************************************/
++/* PCI DMA MAPPING */
++
++#if defined(CONFIG_HIGHMEM)
++
++#ifndef PCI_DRAM_OFFSET
++#define PCI_DRAM_OFFSET 0
++#endif
++
++u64
++_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset,
++ size_t size, int direction)
++{
++ return (((u64) (page - mem_map) << PAGE_SHIFT) + offset +
++ PCI_DRAM_OFFSET);
++}
++
++#else /* CONFIG_HIGHMEM */
++
++u64
++_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset,
++ size_t size, int direction)
++{
++ return pci_map_single(dev, (void *)page_address(page) + offset, size,
++ direction);
++}
++
++#endif /* CONFIG_HIGHMEM */
++
++void
++_kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size,
++ int direction)
++{
++ return pci_unmap_single(dev, dma_addr, size, direction);
++}
++
++#endif /* 2.4.13 => 2.4.3 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) )
++
++/**************************************/
++/* PCI DRIVER API */
++
++int
++_kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask)
++{
++ if (!pci_dma_supported(dev, mask))
++ return -EIO;
++ dev->dma_mask = mask;
++ return 0;
++}
++
++int
++_kc_pci_request_regions(struct pci_dev *dev, char *res_name)
++{
++ int i;
++
++ for (i = 0; i < 6; i++) {
++ if (pci_resource_len(dev, i) == 0)
++ continue;
++
++ if (pci_resource_flags(dev, i) & IORESOURCE_IO) {
++ if (!request_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) {
++ pci_release_regions(dev);
++ return -EBUSY;
++ }
++ } else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
++ if (!request_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) {
++ pci_release_regions(dev);
++ return -EBUSY;
++ }
++ }
++ }
++ return 0;
++}
++
++void
++_kc_pci_release_regions(struct pci_dev *dev)
++{
++ int i;
++
++ for (i = 0; i < 6; i++) {
++ if (pci_resource_len(dev, i) == 0)
++ continue;
++
++ if (pci_resource_flags(dev, i) & IORESOURCE_IO)
++ release_region(pci_resource_start(dev, i), pci_resource_len(dev, i));
++
++ else if (pci_resource_flags(dev, i) & IORESOURCE_MEM)
++ release_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i));
++ }
++}
++
++/**************************************/
++/* NETWORK DRIVER API */
++
++struct net_device *
++_kc_alloc_etherdev(int sizeof_priv)
++{
++ struct net_device *dev;
++ int alloc_size;
++
++ alloc_size = sizeof(*dev) + sizeof_priv + IFNAMSIZ + 31;
++ dev = kmalloc(alloc_size, GFP_KERNEL);
++ if (!dev)
++ return NULL;
++ memset(dev, 0, alloc_size);
++
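++ /* private data starts just past struct net_device,
++ * rounded up to a 32-byte boundary */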
++ if (sizeof_priv)
++ dev->priv = (void *) (((unsigned long)(dev + 1) + 31) & ~31);
++ dev->name[0] = '\0';
++ ether_setup(dev);
++
++ return dev;
++}
++
++int
++_kc_is_valid_ether_addr(u8 *addr)
++{
++ const char zaddr[6] = { 0, };
++
++ return !(addr[0] & 1) && memcmp(addr, zaddr, 6);
++}
++
++#endif /* 2.4.3 => 2.4.0 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) )
++
++int
++_kc_pci_set_power_state(struct pci_dev *dev, int state)
++{
++ return 0;
++}
++
++int
++_kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable)
++{
++ return 0;
++}
++
++#endif /* 2.4.6 => 2.4.3 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
++void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page,
++ int off, int size)
++{
++ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
++ frag->page = page;
++ frag->page_offset = off;
++ frag->size = size;
++ skb_shinfo(skb)->nr_frags = i + 1;
++}
++
++/*
++ * Original Copyright:
++ * find_next_bit.c: fallback find next bit implementation
++ *
++ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
++ * Written by David Howells (dhowells@redhat.com)
++ */
++
++/**
++ * find_next_bit - find the next set bit in a memory region
++ * @addr: The address to base the search on
++ * @offset: The bitnumber to start searching at
++ * @size: The maximum size to search
++ */
++unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
++ unsigned long offset)
++{
++ const unsigned long *p = addr + BITOP_WORD(offset);
++ unsigned long result = offset & ~(BITS_PER_LONG-1);
++ unsigned long tmp;
++
++ if (offset >= size)
++ return size;
++ size -= result;
++ offset %= BITS_PER_LONG;
++ if (offset) {
++ tmp = *(p++);
++ tmp &= (~0UL << offset);
++ if (size < BITS_PER_LONG)
++ goto found_first;
++ if (tmp)
++ goto found_middle;
++ size -= BITS_PER_LONG;
++ result += BITS_PER_LONG;
++ }
++ while (size & ~(BITS_PER_LONG-1)) {
++ if ((tmp = *(p++)))
++ goto found_middle;
++ result += BITS_PER_LONG;
++ size -= BITS_PER_LONG;
++ }
++ if (!size)
++ return result;
++ tmp = *p;
++
++found_first:
++ tmp &= (~0UL >> (BITS_PER_LONG - size));
++ if (tmp == 0UL) /* Are any bits set? */
++ return result + size; /* Nope. */
++found_middle:
++ return result + ffs(tmp);
++}
++
++#endif /* 2.6.0 => 2.4.6 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) )
++void *_kc_kzalloc(size_t size, int flags)
++{
++ void *ret = kmalloc(size, flags);
++ if (ret)
++ memset(ret, 0, size);
++ return ret;
++}
++#endif /* <= 2.6.13 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) )
++struct sk_buff *_kc_netdev_alloc_skb(struct net_device *dev,
++ unsigned int length)
++{
++ /* 16 == NET_SKB_PAD */
++ struct sk_buff *skb;
++ skb = alloc_skb(length + 16, GFP_ATOMIC);
++ if (likely(skb != NULL)) {
++ skb_reserve(skb, 16);
++ skb->dev = dev;
++ }
++ return skb;
++}
++#endif /* <= 2.6.17 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) )
++int _kc_pci_save_state(struct pci_dev *pdev)
++{
++ struct net_device *netdev = pci_get_drvdata(pdev);
++ struct adapter_struct *adapter = netdev_priv(netdev);
++ int size = PCI_CONFIG_SPACE_LEN, i;
++ u16 pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP);
++ u16 pcie_link_status;
++
++ if (pcie_cap_offset) {
++ if (!pci_read_config_word(pdev,
++ pcie_cap_offset + PCIE_LINK_STATUS,
++ &pcie_link_status))
++ size = PCIE_CONFIG_SPACE_LEN;
++ }
++ pci_config_space_ich8lan();
++#ifdef HAVE_PCI_ERS
++ if (adapter->config_space == NULL)
++#else
++ WARN_ON(adapter->config_space != NULL);
++#endif
++ adapter->config_space = kmalloc(size, GFP_KERNEL);
++ if (!adapter->config_space) {
++ printk(KERN_ERR "Out of memory in pci_save_state\n");
++ return -ENOMEM;
++ }
++ for (i = 0; i < (size / 4); i++)
++ pci_read_config_dword(pdev, i * 4, &adapter->config_space[i]);
++ return 0;
++}
++
++void _kc_pci_restore_state(struct pci_dev * pdev)
++{
++ struct net_device *netdev = pci_get_drvdata(pdev);
++ struct adapter_struct *adapter = netdev_priv(netdev);
++ int size = PCI_CONFIG_SPACE_LEN, i;
++ u16 pcie_cap_offset;
++ u16 pcie_link_status;
++
++ if (adapter->config_space != NULL) {
++ pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP);
++ if (pcie_cap_offset &&
++ !pci_read_config_word(pdev,
++ pcie_cap_offset + PCIE_LINK_STATUS,
++ &pcie_link_status))
++ size = PCIE_CONFIG_SPACE_LEN;
++
++ pci_config_space_ich8lan();
++ for (i = 0; i < (size / 4); i++)
++ pci_write_config_dword(pdev, i * 4, adapter->config_space[i]);
++#ifndef HAVE_PCI_ERS
++ kfree(adapter->config_space);
++ adapter->config_space = NULL;
++#endif
++ }
++}
++
++#ifdef HAVE_PCI_ERS
++void _kc_free_netdev(struct net_device *netdev)
++{
++ struct adapter_struct *adapter = netdev_priv(netdev);
++
++ if (adapter->config_space != NULL)
++ kfree(adapter->config_space);
++#ifdef CONFIG_SYSFS
++ if (netdev->reg_state == NETREG_UNINITIALIZED) {
++ kfree((char *)netdev - netdev->padded);
++ } else {
++ BUG_ON(netdev->reg_state != NETREG_UNREGISTERED);
++ netdev->reg_state = NETREG_RELEASED;
++ class_device_put(&netdev->class_dev);
++ }
++#else
++ kfree((char *)netdev - netdev->padded);
++#endif
++}
++#endif
++#endif /* <= 2.6.18 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
++#ifdef NAPI
++struct net_device *napi_to_poll_dev(struct napi_struct *napi)
++{
++ struct adapter_q_vector *q_vector = container_of(napi,
++ struct adapter_q_vector,
++ napi);
++ return &q_vector->poll_dev;
++}
++
++int __kc_adapter_clean(struct net_device *netdev, int *budget)
++{
++ int work_done;
++ int work_to_do = min(*budget, netdev->quota);
++ /* kcompat.h netif_napi_add puts napi struct in "fake netdev->priv" */
++ struct napi_struct *napi = netdev->priv;
++ work_done = napi->poll(napi, work_to_do);
++ *budget -= work_done;
++ netdev->quota -= work_done;
++ return (work_done >= work_to_do) ? 1 : 0;
++}
++#endif /* NAPI */
++#endif /* <= 2.6.24 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) )
++#ifdef HAVE_TX_MQ
++void _kc_netif_tx_stop_all_queues(struct net_device *netdev)
++{
++ struct adapter_struct *adapter = netdev_priv(netdev);
++ int i;
++
++ netif_stop_queue(netdev);
++ if (netif_is_multiqueue(netdev))
++ for (i = 0; i < adapter->num_tx_queues; i++)
++ netif_stop_subqueue(netdev, i);
++}
++void _kc_netif_tx_wake_all_queues(struct net_device *netdev)
++{
++ struct adapter_struct *adapter = netdev_priv(netdev);
++ int i;
++
++ netif_wake_queue(netdev);
++ if (netif_is_multiqueue(netdev))
++ for (i = 0; i < adapter->num_tx_queues; i++)
++ netif_wake_subqueue(netdev, i);
++}
++void _kc_netif_tx_start_all_queues(struct net_device *netdev)
++{
++ struct adapter_struct *adapter = netdev_priv(netdev);
++ int i;
++
++ netif_start_queue(netdev);
++ if (netif_is_multiqueue(netdev))
++ for (i = 0; i < adapter->num_tx_queues; i++)
++ netif_start_subqueue(netdev, i);
++}
++#endif /* HAVE_TX_MQ */
++#endif /* < 2.6.27 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) )
++
++int
++_kc_pci_prepare_to_sleep(struct pci_dev *dev)
++{
++ pci_power_t target_state;
++ int error;
++
++ target_state = pci_choose_state(dev, PMSG_SUSPEND);
++
++ pci_enable_wake(dev, target_state, true);
++
++ error = pci_set_power_state(dev, target_state);
++
++ if (error)
++ pci_enable_wake(dev, target_state, false);
++
++ return error;
++}
++
++int
++_kc_pci_wake_from_d3(struct pci_dev *dev, bool enable)
++{
++ int err;
++
++ err = pci_enable_wake(dev, PCI_D3cold, enable);
++ if (err)
++ goto out;
++
++ err = pci_enable_wake(dev, PCI_D3hot, enable);
++
++out:
++ return err;
++}
++#endif /* < 2.6.28 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) )
++void _kc_pci_disable_link_state(struct pci_dev *pdev, int state)
++{
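++ /* ASPM link states are controlled from the upstream bridge, so clear
++ * the requested state bits in the parent's PCIe Link Control register */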
++ struct pci_dev *parent = pdev->bus->self;
++ u16 link_state;
++ int pos;
++
++ if (!parent)
++ return;
++
++ pos = pci_find_capability(parent, PCI_CAP_ID_EXP);
++ if (pos) {
++ pci_read_config_word(parent, pos + PCI_EXP_LNKCTL, &link_state);
++ link_state &= ~state;
++ pci_write_config_word(parent, pos + PCI_EXP_LNKCTL, link_state);
++ }
++}
++#endif /* < 2.6.29 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) )
++#ifdef HAVE_NETDEV_SELECT_QUEUE
++#include <net/ip.h>
++static u32 _kc_simple_tx_hashrnd;
++static u32 _kc_simple_tx_hashrnd_initialized;
++
++u16 _kc_skb_tx_hash(struct net_device *dev, struct sk_buff *skb)
++{
++ u32 addr1, addr2, ports;
++ u32 hash, ihl;
++ u8 ip_proto = 0;
++
++ if (unlikely(!_kc_simple_tx_hashrnd_initialized)) {
++ get_random_bytes(&_kc_simple_tx_hashrnd, 4);
++ _kc_simple_tx_hashrnd_initialized = 1;
++ }
++
++ switch (skb->protocol) {
++ case htons(ETH_P_IP):
++ if (!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)))
++ ip_proto = ip_hdr(skb)->protocol;
++ addr1 = ip_hdr(skb)->saddr;
++ addr2 = ip_hdr(skb)->daddr;
++ ihl = ip_hdr(skb)->ihl;
++ break;
++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
++ case htons(ETH_P_IPV6):
++ ip_proto = ipv6_hdr(skb)->nexthdr;
++ addr1 = ipv6_hdr(skb)->saddr.s6_addr32[3];
++ addr2 = ipv6_hdr(skb)->daddr.s6_addr32[3];
++ ihl = (40 >> 2);
++ break;
++#endif
++ default:
++ return 0;
++ }
++
++
++ switch (ip_proto) {
++ case IPPROTO_TCP:
++ case IPPROTO_UDP:
++ case IPPROTO_DCCP:
++ case IPPROTO_ESP:
++ case IPPROTO_AH:
++ case IPPROTO_SCTP:
++ case IPPROTO_UDPLITE:
++ ports = *((u32 *) (skb_network_header(skb) + (ihl * 4)));
++ break;
++
++ default:
++ ports = 0;
++ break;
++ }
++
++ hash = jhash_3words(addr1, addr2, ports, _kc_simple_tx_hashrnd);
++
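++ /* scale the 32-bit hash onto [0, real_num_tx_queues)
++ * with a multiply and shift instead of a modulo */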
++ return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
++}
++#endif /* HAVE_NETDEV_SELECT_QUEUE */
++#endif /* < 2.6.30 */
+Index: linux-2.6.22/drivers/net/igb/kcompat.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.22/drivers/net/igb/kcompat.h 2009-12-18 12:39:22.000000000 -0500
+@@ -0,0 +1,1793 @@
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2009 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ You should have received a copy of the GNU General Public License along with
++ this program; if not, write to the Free Software Foundation, Inc.,
++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
++
++#ifndef _KCOMPAT_H_
++#define _KCOMPAT_H_
++
++#include <linux/version.h>
++#include <linux/init.h>
++#include <linux/types.h>
++#include <linux/errno.h>
++#include <linux/module.h>
++#include <linux/pci.h>
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/skbuff.h>
++#include <linux/ioport.h>
++#include <linux/slab.h>
++#include <linux/list.h>
++#include <linux/delay.h>
++#include <linux/sched.h>
++#include <linux/in.h>
++#include <linux/ip.h>
++#include <linux/udp.h>
++#include <linux/mii.h>
++#include <asm/io.h>
++
++/* NAPI enable/disable flags here */
++#define NAPI
++
++#define adapter_struct igb_adapter
++#define adapter_q_vector igb_q_vector
++
++/* and finally set defines so that the code sees the changes */
++#ifdef NAPI
++#else
++#endif /* NAPI */
++
++/* packet split disable/enable */
++#ifdef DISABLE_PACKET_SPLIT
++#undef CONFIG_E1000_DISABLE_PACKET_SPLIT
++#define CONFIG_E1000_DISABLE_PACKET_SPLIT
++#undef CONFIG_IGB_DISABLE_PACKET_SPLIT
++#define CONFIG_IGB_DISABLE_PACKET_SPLIT
++#endif
++
++/* MSI compatibility code for all kernels and drivers */
++#ifdef DISABLE_PCI_MSI
++#undef CONFIG_PCI_MSI
++#endif
++#ifndef CONFIG_PCI_MSI
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) )
++struct msix_entry {
++ u16 vector; /* kernel uses to write allocated vector */
++ u16 entry; /* driver uses to specify entry, OS writes */
++};
++#endif
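++/* without CONFIG_PCI_MSI, stub the MSI/MSI-X calls so the driver falls back
++ * to legacy interrupts */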
++#define pci_enable_msi(a) -ENOTSUPP
++#define pci_disable_msi(a) do {} while (0)
++#define pci_enable_msix(a, b, c) -ENOTSUPP
++#define pci_disable_msix(a) do {} while (0)
++#define msi_remove_pci_irq_vectors(a) do {} while (0)
++#endif /* CONFIG_PCI_MSI */
++#ifdef DISABLE_PM
++#undef CONFIG_PM
++#endif
++
++#ifdef DISABLE_NET_POLL_CONTROLLER
++#undef CONFIG_NET_POLL_CONTROLLER
++#endif
++
++#ifndef PMSG_SUSPEND
++#define PMSG_SUSPEND 3
++#endif
++
++/* generic boolean compatibility */
++#undef TRUE
++#undef FALSE
++#define TRUE true
++#define FALSE false
++#ifdef GCC_VERSION
++#if ( GCC_VERSION < 3000 )
++#define _Bool char
++#endif
++#else
++#define _Bool char
++#endif
++#ifndef bool
++#define bool _Bool
++#define true 1
++#define false 0
++#endif
++
++
++#ifndef module_param
++#define module_param(v,t,p) MODULE_PARM(v, "i");
++#endif
++
++#ifndef DMA_64BIT_MASK
++#define DMA_64BIT_MASK 0xffffffffffffffffULL
++#endif
++
++#ifndef DMA_32BIT_MASK
++#define DMA_32BIT_MASK 0x00000000ffffffffULL
++#endif
++
++#ifndef PCI_CAP_ID_EXP
++#define PCI_CAP_ID_EXP 0x10
++#endif
++
++#ifndef PCIE_LINK_STATE_L0S
++#define PCIE_LINK_STATE_L0S 1
++#endif
++
++#ifndef mmiowb
++#ifdef CONFIG_IA64
++#define mmiowb() asm volatile ("mf.a" ::: "memory")
++#else
++#define mmiowb()
++#endif
++#endif
++
++#ifndef SET_NETDEV_DEV
++#define SET_NETDEV_DEV(net, pdev)
++#endif
++
++#ifndef HAVE_FREE_NETDEV
++#define free_netdev(x) kfree(x)
++#endif
++
++#ifdef HAVE_POLL_CONTROLLER
++#define CONFIG_NET_POLL_CONTROLLER
++#endif
++
++#ifndef NETDEV_TX_OK
++#define NETDEV_TX_OK 0
++#endif
++
++#ifndef NETDEV_TX_BUSY
++#define NETDEV_TX_BUSY 1
++#endif
++
++#ifndef NETDEV_TX_LOCKED
++#define NETDEV_TX_LOCKED -1
++#endif
++
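++/* VMDQ_P maps a VMDq pool index past the pools reserved for VFs when SR-IOV
++ * is active */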
++#ifdef CONFIG_PCI_IOV
++#define VMDQ_P(p) ((p) + adapter->num_vfs)
++#else
++#define VMDQ_P(p) (p)
++#endif
++
++#ifndef SKB_DATAREF_SHIFT
++/* if we do not have the infrastructure to detect whether skb_header is
++ * cloned, just return false in all cases */
++#define skb_header_cloned(x) 0
++#endif
++
++#ifndef NETIF_F_GSO
++#define gso_size tso_size
++#define gso_segs tso_segs
++#endif
++
++#ifndef NETIF_F_GRO
++#define vlan_gro_receive(_napi, _vlgrp, _vlan, _skb) \
++ vlan_hwaccel_receive_skb(_skb, _vlgrp, _vlan)
++#define napi_gro_receive(_napi, _skb) netif_receive_skb(_skb)
++#endif
++
++#ifndef NETIF_F_SCTP_CSUM
++#define NETIF_F_SCTP_CSUM 0
++#endif
++
++#ifndef IPPROTO_SCTP
++#define IPPROTO_SCTP 132
++#endif
++
++#ifndef CHECKSUM_PARTIAL
++#define CHECKSUM_PARTIAL CHECKSUM_HW
++#define CHECKSUM_COMPLETE CHECKSUM_HW
++#endif
++
++#ifndef __read_mostly
++#define __read_mostly
++#endif
++
++#ifndef HAVE_NETIF_MSG
++#define HAVE_NETIF_MSG 1
++enum {
++ NETIF_MSG_DRV = 0x0001,
++ NETIF_MSG_PROBE = 0x0002,
++ NETIF_MSG_LINK = 0x0004,
++ NETIF_MSG_TIMER = 0x0008,
++ NETIF_MSG_IFDOWN = 0x0010,
++ NETIF_MSG_IFUP = 0x0020,
++ NETIF_MSG_RX_ERR = 0x0040,
++ NETIF_MSG_TX_ERR = 0x0080,
++ NETIF_MSG_TX_QUEUED = 0x0100,
++ NETIF_MSG_INTR = 0x0200,
++ NETIF_MSG_TX_DONE = 0x0400,
++ NETIF_MSG_RX_STATUS = 0x0800,
++ NETIF_MSG_PKTDATA = 0x1000,
++ NETIF_MSG_HW = 0x2000,
++ NETIF_MSG_WOL = 0x4000,
++};
++
++#else
++#define NETIF_MSG_HW 0x2000
++#define NETIF_MSG_WOL 0x4000
++#endif /* HAVE_NETIF_MSG */
++
++#ifndef MII_RESV1
++#define MII_RESV1 0x17 /* Reserved... */
++#endif
++
++#ifndef unlikely
++#define unlikely(_x) _x
++#define likely(_x) _x
++#endif
++
++#ifndef WARN_ON
++#define WARN_ON(x)
++#endif
++
++#ifndef PCI_DEVICE
++#define PCI_DEVICE(vend,dev) \
++ .vendor = (vend), .device = (dev), \
++ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
++#endif
++
++#ifndef num_online_cpus
++#define num_online_cpus() smp_num_cpus
++#endif
++
++
++#ifndef _LINUX_RANDOM_H
++#include <linux/random.h>
++#endif
++
++#ifndef DECLARE_BITMAP
++#ifndef BITS_TO_LONGS
++#define BITS_TO_LONGS(bits) (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG)
++#endif
++#define DECLARE_BITMAP(name,bits) long name[BITS_TO_LONGS(bits)]
++#endif
++
++#ifndef VLAN_HLEN
++#define VLAN_HLEN 4
++#endif
++
++#ifndef VLAN_ETH_HLEN
++#define VLAN_ETH_HLEN 18
++#endif
++
++#ifndef VLAN_ETH_FRAME_LEN
++#define VLAN_ETH_FRAME_LEN 1518
++#endif
++
++#ifndef DCA_GET_TAG_TWO_ARGS
++#define dca3_get_tag(a,b) dca_get_tag(b)
++#endif
++
++/*****************************************************************************/
++/* Installations with an ethtool version lacking eeprom, adapter id, or
++ * statistics support */
++
++#ifndef ETH_GSTRING_LEN
++#define ETH_GSTRING_LEN 32
++#endif
++
++#ifndef ETHTOOL_GSTATS
++#define ETHTOOL_GSTATS 0x1d
++#undef ethtool_drvinfo
++#define ethtool_drvinfo k_ethtool_drvinfo
++struct k_ethtool_drvinfo {
++ u32 cmd;
++ char driver[32];
++ char version[32];
++ char fw_version[32];
++ char bus_info[32];
++ char reserved1[32];
++ char reserved2[16];
++ u32 n_stats;
++ u32 testinfo_len;
++ u32 eedump_len;
++ u32 regdump_len;
++};
++
++struct ethtool_stats {
++ u32 cmd;
++ u32 n_stats;
++ u64 data[0];
++};
++#endif /* ETHTOOL_GSTATS */
++
++#ifndef ETHTOOL_PHYS_ID
++#define ETHTOOL_PHYS_ID 0x1c
++#endif /* ETHTOOL_PHYS_ID */
++
++#ifndef ETHTOOL_GSTRINGS
++#define ETHTOOL_GSTRINGS 0x1b
++enum ethtool_stringset {
++ ETH_SS_TEST = 0,
++ ETH_SS_STATS,
++};
++struct ethtool_gstrings {
++ u32 cmd; /* ETHTOOL_GSTRINGS */
++ u32 string_set; /* string set id, e.g. ETH_SS_TEST, etc. */
++ u32 len; /* number of strings in the string set */
++ u8 data[0];
++};
++#endif /* ETHTOOL_GSTRINGS */
++
++#ifndef ETHTOOL_TEST
++#define ETHTOOL_TEST 0x1a
++enum ethtool_test_flags {
++ ETH_TEST_FL_OFFLINE = (1 << 0),
++ ETH_TEST_FL_FAILED = (1 << 1),
++};
++struct ethtool_test {
++ u32 cmd;
++ u32 flags;
++ u32 reserved;
++ u32 len;
++ u64 data[0];
++};
++#endif /* ETHTOOL_TEST */
++
++#ifndef ETHTOOL_GEEPROM
++#define ETHTOOL_GEEPROM 0xb
++#undef ETHTOOL_GREGS
++struct ethtool_eeprom {
++ u32 cmd;
++ u32 magic;
++ u32 offset;
++ u32 len;
++ u8 data[0];
++};
++
++struct ethtool_value {
++ u32 cmd;
++ u32 data;
++};
++#endif /* ETHTOOL_GEEPROM */
++
++#ifndef ETHTOOL_GLINK
++#define ETHTOOL_GLINK 0xa
++#endif /* ETHTOOL_GLINK */
++
++#ifndef ETHTOOL_GREGS
++#define ETHTOOL_GREGS 0x00000004 /* Get NIC registers */
++#define ethtool_regs _kc_ethtool_regs
++/* for passing big chunks of data */
++struct _kc_ethtool_regs {
++ u32 cmd;
++ u32 version; /* driver-specific, indicates different chips/revs */
++ u32 len; /* bytes */
++ u8 data[0];
++};
++#endif /* ETHTOOL_GREGS */
++
++#ifndef ETHTOOL_GMSGLVL
++#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */
++#endif
++#ifndef ETHTOOL_SMSGLVL
++#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level, priv. */
++#endif
++#ifndef ETHTOOL_NWAY_RST
++#define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation, priv */
++#endif
++#ifndef ETHTOOL_GLINK
++#define ETHTOOL_GLINK 0x0000000a /* Get link status */
++#endif
++#ifndef ETHTOOL_GEEPROM
++#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */
++#endif
++#ifndef ETHTOOL_SEEPROM
++#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data */
++#endif
++#ifndef ETHTOOL_GCOALESCE
++#define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */
++/* for configuring coalescing parameters of chip */
++#define ethtool_coalesce _kc_ethtool_coalesce
++struct _kc_ethtool_coalesce {
++ u32 cmd; /* ETHTOOL_{G,S}COALESCE */
++
++ /* How many usecs to delay an RX interrupt after
++ * a packet arrives. If 0, only rx_max_coalesced_frames
++ * is used.
++ */
++ u32 rx_coalesce_usecs;
++
++ /* How many packets to delay an RX interrupt after
++ * a packet arrives. If 0, only rx_coalesce_usecs is
++ * used. It is illegal to set both usecs and max frames
++ * to zero as this would cause RX interrupts to never be
++ * generated.
++ */
++ u32 rx_max_coalesced_frames;
++
++ /* Same as above two parameters, except that these values
++ * apply while an IRQ is being serviced by the host. Not
++ * all cards support this feature and the values are ignored
++ * in that case.
++ */
++ u32 rx_coalesce_usecs_irq;
++ u32 rx_max_coalesced_frames_irq;
++
++ /* How many usecs to delay a TX interrupt after
++ * a packet is sent. If 0, only tx_max_coalesced_frames
++ * is used.
++ */
++ u32 tx_coalesce_usecs;
++
++ /* How many packets to delay a TX interrupt after
++ * a packet is sent. If 0, only tx_coalesce_usecs is
++ * used. It is illegal to set both usecs and max frames
++ * to zero as this would cause TX interrupts to never be
++ * generated.
++ */
++ u32 tx_max_coalesced_frames;
++
++ /* Same as above two parameters, except that these values
++ * apply while an IRQ is being serviced by the host. Not
++ * all cards support this feature and the values are ignored
++ * in that case.
++ */
++ u32 tx_coalesce_usecs_irq;
++ u32 tx_max_coalesced_frames_irq;
++
++ /* How many usecs to delay in-memory statistics
++ * block updates. Some drivers do not have an in-memory
++ * statistic block, and in such cases this value is ignored.
++ * This value must not be zero.
++ */
++ u32 stats_block_coalesce_usecs;
++
++ /* Adaptive RX/TX coalescing is an algorithm implemented by
++ * some drivers to improve latency under low packet rates and
++ * improve throughput under high packet rates. Some drivers
++ * only implement one of RX or TX adaptive coalescing. Anything
++ * not implemented by the driver causes these values to be
++ * silently ignored.
++ */
++ u32 use_adaptive_rx_coalesce;
++ u32 use_adaptive_tx_coalesce;
++
++ /* When the packet rate (measured in packets per second)
++ * is below pkt_rate_low, the {rx,tx}_*_low parameters are
++ * used.
++ */
++ u32 pkt_rate_low;
++ u32 rx_coalesce_usecs_low;
++ u32 rx_max_coalesced_frames_low;
++ u32 tx_coalesce_usecs_low;
++ u32 tx_max_coalesced_frames_low;
++
++ /* When the packet rate is below pkt_rate_high but above
++ * pkt_rate_low (both measured in packets per second) the
++ * normal {rx,tx}_* coalescing parameters are used.
++ */
++
++ /* When the packet rate (measured in packets per second)
++ * is above pkt_rate_high, the {rx,tx}_*_high parameters are
++ * used.
++ */
++ u32 pkt_rate_high;
++ u32 rx_coalesce_usecs_high;
++ u32 rx_max_coalesced_frames_high;
++ u32 tx_coalesce_usecs_high;
++ u32 tx_max_coalesced_frames_high;
++
++ /* How often to do adaptive coalescing packet rate sampling,
++ * measured in seconds. Must not be zero.
++ */
++ u32 rate_sample_interval;
++};
++#endif /* ETHTOOL_GCOALESCE */
++
++#ifndef ETHTOOL_SCOALESCE
++#define ETHTOOL_SCOALESCE 0x0000000f /* Set coalesce config. */
++#endif
++#ifndef ETHTOOL_GRINGPARAM
++#define ETHTOOL_GRINGPARAM 0x00000010 /* Get ring parameters */
++/* for configuring RX/TX ring parameters */
++#define ethtool_ringparam _kc_ethtool_ringparam
++struct _kc_ethtool_ringparam {
++ u32 cmd; /* ETHTOOL_{G,S}RINGPARAM */
++
++ /* Read only attributes. These indicate the maximum number
++ * of pending RX/TX ring entries the driver will allow the
++ * user to set.
++ */
++ u32 rx_max_pending;
++ u32 rx_mini_max_pending;
++ u32 rx_jumbo_max_pending;
++ u32 tx_max_pending;
++
++ /* Values changeable by the user. The valid values are
++ * in the range 1 to the "*_max_pending" counterpart above.
++ */
++ u32 rx_pending;
++ u32 rx_mini_pending;
++ u32 rx_jumbo_pending;
++ u32 tx_pending;
++};
++#endif /* ETHTOOL_GRINGPARAM */
++
++#ifndef ETHTOOL_SRINGPARAM
++#define ETHTOOL_SRINGPARAM 0x00000011 /* Set ring parameters, priv. */
++#endif
++#ifndef ETHTOOL_GPAUSEPARAM
++#define ETHTOOL_GPAUSEPARAM 0x00000012 /* Get pause parameters */
++/* for configuring link flow control parameters */
++#define ethtool_pauseparam _kc_ethtool_pauseparam
++struct _kc_ethtool_pauseparam {
++ u32 cmd; /* ETHTOOL_{G,S}PAUSEPARAM */
++
++ /* If the link is being auto-negotiated (via ethtool_cmd.autoneg
++ * being true) the user may set 'autoneg' here non-zero to have the
++ * pause parameters be auto-negotiated too. In such a case, the
++ * {rx,tx}_pause values below determine what capabilities are
++ * advertised.
++ *
++ * If 'autoneg' is zero or the link is not being auto-negotiated,
++ * then {rx,tx}_pause force the driver to use/not-use pause
++ * flow control.
++ */
++ u32 autoneg;
++ u32 rx_pause;
++ u32 tx_pause;
++};
++#endif /* ETHTOOL_GPAUSEPARAM */
++
++#ifndef ETHTOOL_SPAUSEPARAM
++#define ETHTOOL_SPAUSEPARAM 0x00000013 /* Set pause parameters. */
++#endif
++#ifndef ETHTOOL_GRXCSUM
++#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */
++#endif
++#ifndef ETHTOOL_SRXCSUM
++#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */
++#endif
++#ifndef ETHTOOL_GTXCSUM
++#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */
++#endif
++#ifndef ETHTOOL_STXCSUM
++#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */
++#endif
++#ifndef ETHTOOL_GSG
++#define ETHTOOL_GSG 0x00000018 /* Get scatter-gather enable
++ * (ethtool_value) */
++#endif
++#ifndef ETHTOOL_SSG
++#define ETHTOOL_SSG 0x00000019 /* Set scatter-gather enable
++ * (ethtool_value). */
++#endif
++#ifndef ETHTOOL_TEST
++#define ETHTOOL_TEST 0x0000001a /* execute NIC self-test, priv. */
++#endif
++#ifndef ETHTOOL_GSTRINGS
++#define ETHTOOL_GSTRINGS 0x0000001b /* get specified string set */
++#endif
++#ifndef ETHTOOL_PHYS_ID
++#define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */
++#endif
++#ifndef ETHTOOL_GSTATS
++#define ETHTOOL_GSTATS 0x0000001d /* get NIC-specific statistics */
++#endif
++#ifndef ETHTOOL_GTSO
++#define ETHTOOL_GTSO 0x0000001e /* Get TSO enable (ethtool_value) */
++#endif
++#ifndef ETHTOOL_STSO
++#define ETHTOOL_STSO 0x0000001f /* Set TSO enable (ethtool_value) */
++#endif
++
++#ifndef ETHTOOL_BUSINFO_LEN
++#define ETHTOOL_BUSINFO_LEN 32
++#endif
++
++/*****************************************************************************/
++/* 2.4.3 => 2.4.0 */
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) )
++
++/**************************************/
++/* PCI DRIVER API */
++
++#ifndef pci_set_dma_mask
++#define pci_set_dma_mask _kc_pci_set_dma_mask
++extern int _kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask);
++#endif
++
++#ifndef pci_request_regions
++#define pci_request_regions _kc_pci_request_regions
++extern int _kc_pci_request_regions(struct pci_dev *pdev, char *res_name);
++#endif
++
++#ifndef pci_release_regions
++#define pci_release_regions _kc_pci_release_regions
++extern void _kc_pci_release_regions(struct pci_dev *pdev);
++#endif
++
++/**************************************/
++/* NETWORK DRIVER API */
++
++#ifndef alloc_etherdev
++#define alloc_etherdev _kc_alloc_etherdev
++extern struct net_device * _kc_alloc_etherdev(int sizeof_priv);
++#endif
++
++#ifndef is_valid_ether_addr
++#define is_valid_ether_addr _kc_is_valid_ether_addr
++extern int _kc_is_valid_ether_addr(u8 *addr);
++#endif
++
++/**************************************/
++/* MISCELLANEOUS */
++
++#ifndef INIT_TQUEUE
++#define INIT_TQUEUE(_tq, _routine, _data) \
++ do { \
++ INIT_LIST_HEAD(&(_tq)->list); \
++ (_tq)->sync = 0; \
++ (_tq)->routine = _routine; \
++ (_tq)->data = _data; \
++ } while (0)
++#endif
++
++#endif /* 2.4.3 => 2.4.0 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,5) )
++/* Generic MII registers. */
++#define MII_BMCR 0x00 /* Basic mode control register */
++#define MII_BMSR 0x01 /* Basic mode status register */
++#define MII_PHYSID1 0x02 /* PHYS ID 1 */
++#define MII_PHYSID2 0x03 /* PHYS ID 2 */
++#define MII_ADVERTISE 0x04 /* Advertisement control reg */
++#define MII_LPA 0x05 /* Link partner ability reg */
++#define MII_EXPANSION 0x06 /* Expansion register */
++/* Basic mode control register. */
++#define BMCR_FULLDPLX 0x0100 /* Full duplex */
++#define BMCR_ANENABLE 0x1000 /* Enable auto negotiation */
++/* Basic mode status register. */
++#define BMSR_ERCAP 0x0001 /* Ext-reg capability */
++#define BMSR_ANEGCAPABLE 0x0008 /* Able to do auto-negotiation */
++#define BMSR_10HALF 0x0800 /* Can do 10mbps, half-duplex */
++#define BMSR_10FULL 0x1000 /* Can do 10mbps, full-duplex */
++#define BMSR_100HALF 0x2000 /* Can do 100mbps, half-duplex */
++#define BMSR_100FULL 0x4000 /* Can do 100mbps, full-duplex */
++/* Advertisement control register. */
++#define ADVERTISE_CSMA 0x0001 /* Only selector supported */
++#define ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */
++#define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */
++#define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */
++#define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */
++#define ADVERTISE_ALL (ADVERTISE_10HALF | ADVERTISE_10FULL | \
++ ADVERTISE_100HALF | ADVERTISE_100FULL)
++/* Expansion register for auto-negotiation. */
++#define EXPANSION_ENABLENPAGE 0x0004 /* This enables npage words */
++#endif
++
++/*****************************************************************************/
++/* 2.4.6 => 2.4.3 */
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) )
++
++#ifndef pci_set_power_state
++#define pci_set_power_state _kc_pci_set_power_state
++extern int _kc_pci_set_power_state(struct pci_dev *dev, int state);
++#endif
++
++#ifndef pci_enable_wake
++#define pci_enable_wake _kc_pci_enable_wake
++extern int _kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable);
++#endif
++
++#ifndef pci_disable_device
++#define pci_disable_device _kc_pci_disable_device
++extern void _kc_pci_disable_device(struct pci_dev *pdev);
++#endif
++
++/* PCI PM entry point syntax changed, so don't support suspend/resume */
++#undef CONFIG_PM
++
++#endif /* 2.4.6 => 2.4.3 */
++
++#ifndef HAVE_PCI_SET_MWI
++#define pci_set_mwi(X) pci_write_config_word(X, \
++ PCI_COMMAND, adapter->hw.bus.pci_cmd_word | \
++ PCI_COMMAND_INVALIDATE);
++#define pci_clear_mwi(X) pci_write_config_word(X, \
++ PCI_COMMAND, adapter->hw.bus.pci_cmd_word & \
++ ~PCI_COMMAND_INVALIDATE);
++#endif
++
++/*****************************************************************************/
++/* 2.4.10 => 2.4.9 */
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,10) )
++
++/**************************************/
++/* MODULE API */
++
++#ifndef MODULE_LICENSE
++ #define MODULE_LICENSE(X)
++#endif
++
++/**************************************/
++/* OTHER */
++
++#undef min
++#define min(x,y) ({ \
++ const typeof(x) _x = (x); \
++ const typeof(y) _y = (y); \
++ (void) (&_x == &_y); \
++ _x < _y ? _x : _y; })
++
++#undef max
++#define max(x,y) ({ \
++ const typeof(x) _x = (x); \
++ const typeof(y) _y = (y); \
++ (void) (&_x == &_y); \
++ _x > _y ? _x : _y; })
++
++#define min_t(type,x,y) ({ \
++ type _x = (x); \
++ type _y = (y); \
++ _x < _y ? _x : _y; })
++
++#define max_t(type,x,y) ({ \
++ type _x = (x); \
++ type _y = (y); \
++ _x > _y ? _x : _y; })
++
++#ifndef list_for_each_safe
++#define list_for_each_safe(pos, n, head) \
++ for (pos = (head)->next, n = pos->next; pos != (head); \
++ pos = n, n = pos->next)
++#endif
++
++#endif /* 2.4.10 => 2.4.9 */
++
++
++/*****************************************************************************/
++/* 2.4.13 => 2.4.10 */
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) )
++
++/**************************************/
++/* PCI DMA MAPPING */
++
++#ifndef virt_to_page
++ #define virt_to_page(v) (mem_map + (virt_to_phys(v) >> PAGE_SHIFT))
++#endif
++
++#ifndef pci_map_page
++#define pci_map_page _kc_pci_map_page
++extern u64 _kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, size_t size, int direction);
++#endif
++
++#ifndef pci_unmap_page
++#define pci_unmap_page _kc_pci_unmap_page
++extern void _kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, int direction);
++#endif
++
++/* pci_set_dma_mask takes dma_addr_t, which is only 32-bits prior to 2.4.13 */
++
++#undef DMA_32BIT_MASK
++#define DMA_32BIT_MASK 0xffffffff
++#undef DMA_64BIT_MASK
++#define DMA_64BIT_MASK 0xffffffff
++
++/**************************************/
++/* OTHER */
++
++#ifndef cpu_relax
++#define cpu_relax() rep_nop()
++#endif
++
++struct vlan_ethhdr {
++ unsigned char h_dest[ETH_ALEN];
++ unsigned char h_source[ETH_ALEN];
++ unsigned short h_vlan_proto;
++ unsigned short h_vlan_TCI;
++ unsigned short h_vlan_encapsulated_proto;
++};
++#endif /* 2.4.13 => 2.4.10 */
++
++/*****************************************************************************/
++/* 2.4.17 => 2.4.13 */
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,17) )
++
++#ifndef __devexit_p
++ #define __devexit_p(x) &(x)
++#endif
++
++#endif /* 2.4.17 => 2.4.13 */
++
++/*****************************************************************************/
++/* 2.4.20 => 2.4.19 */
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,20) )
++
++/* we won't support NAPI on less than 2.4.20 */
++#ifdef NAPI
++#undef NAPI
++#endif
++
++#endif /* 2.4.20 => 2.4.19 */
++
++/*****************************************************************************/
++/* < 2.4.21 */
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,21) )
++#define skb_pad(x,y) _kc_skb_pad(x, y)
++struct sk_buff * _kc_skb_pad(struct sk_buff *skb, int pad);
++#endif /* < 2.4.21 */
++
++/*****************************************************************************/
++/* 2.4.22 => 2.4.17 */
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) )
++#define pci_name(x) ((x)->slot_name)
++#endif
++
++/*****************************************************************************/
++/*****************************************************************************/
++/* 2.4.23 => 2.4.22 */
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23) )
++/*****************************************************************************/
++#ifdef NAPI
++#ifndef netif_poll_disable
++#define netif_poll_disable(x) _kc_netif_poll_disable(x)
++static inline void _kc_netif_poll_disable(struct net_device *netdev)
++{
++ while (test_and_set_bit(__LINK_STATE_RX_SCHED, &netdev->state)) {
++ /* No hurry */
++ current->state = TASK_INTERRUPTIBLE;
++ schedule_timeout(1);
++ }
++}
++#endif
++#ifndef netif_poll_enable
++#define netif_poll_enable(x) _kc_netif_poll_enable(x)
++static inline void _kc_netif_poll_enable(struct net_device *netdev)
++{
++ clear_bit(__LINK_STATE_RX_SCHED, &netdev->state);
++}
++#endif
++#endif /* NAPI */
++#ifndef netif_tx_disable
++#define netif_tx_disable(x) _kc_netif_tx_disable(x)
++static inline void _kc_netif_tx_disable(struct net_device *dev)
++{
++ spin_lock_bh(&dev->xmit_lock);
++ netif_stop_queue(dev);
++ spin_unlock_bh(&dev->xmit_lock);
++}
++#endif
++#endif /* 2.4.23 => 2.4.22 */
++
++/*****************************************************************************/
++/* 2.6.4 => 2.6.0 */
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,25) || \
++ ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \
++ LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) )
++#define ETHTOOL_OPS_COMPAT
++#endif /* 2.6.4 => 2.6.0 */
++
++/*****************************************************************************/
++/* 2.5.71 => 2.4.x */
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,71) )
++#define sk_protocol protocol
++#define pci_get_device pci_find_device
++#endif /* 2.5.71 => 2.4.x */
++
++/*****************************************************************************/
++/* < 2.4.27 or 2.6.0 <= 2.6.5 */
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) || \
++ ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \
++ LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) )
++
++#ifndef netif_msg_init
++#define netif_msg_init _kc_netif_msg_init
++static inline u32 _kc_netif_msg_init(int debug_value, int default_msg_enable_bits)
++{
++ /* use default */
++ if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
++ return default_msg_enable_bits;
++ if (debug_value == 0) /* no output */
++ return 0;
++ /* set low N bits */
++ return (1 << debug_value) - 1;
++}
++#endif
++
++#endif /* < 2.4.27 or 2.6.0 <= 2.6.5 */
++/*****************************************************************************/
++#if (( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) || \
++ (( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) && \
++ ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) )))
++#define netdev_priv(x) x->priv
++#endif
++
++/*****************************************************************************/
++/* <= 2.5.0 */
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) )
++#undef pci_register_driver
++#define pci_register_driver pci_module_init
++
++#define dev_err(__unused_dev, format, arg...) \
++ printk(KERN_ERR "%s: " format, pci_name(pdev) , ## arg)
++#define dev_warn(__unused_dev, format, arg...) \
++ printk(KERN_WARNING "%s: " format, pci_name(pdev) , ## arg)
++
++/* hlist_* code - double linked lists */
++struct hlist_head {
++ struct hlist_node *first;
++};
++
++struct hlist_node {
++ struct hlist_node *next, **pprev;
++};
++
++static inline void __hlist_del(struct hlist_node *n)
++{
++ struct hlist_node *next = n->next;
++ struct hlist_node **pprev = n->pprev;
++ *pprev = next;
++ if (next)
++ next->pprev = pprev;
++}
++
++static inline void hlist_del(struct hlist_node *n)
++{
++ __hlist_del(n);
++ n->next = NULL;
++ n->pprev = NULL;
++}
++
++static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
++{
++ struct hlist_node *first = h->first;
++ n->next = first;
++ if (first)
++ first->pprev = &n->next;
++ h->first = n;
++ n->pprev = &h->first;
++}
++
++static inline int hlist_empty(const struct hlist_head *h)
++{
++ return !h->first;
++}
++#define HLIST_HEAD_INIT { .first = NULL }
++#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
++#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
++static inline void INIT_HLIST_NODE(struct hlist_node *h)
++{
++ h->next = NULL;
++ h->pprev = NULL;
++}
++#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
++
++#define hlist_for_each_entry(tpos, pos, head, member) \
++ for (pos = (head)->first; \
++ pos && ({ prefetch(pos->next); 1;}) && \
++ ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
++ pos = pos->next)
++
++#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \
++ for (pos = (head)->first; \
++ pos && ({ n = pos->next; 1; }) && \
++ ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
++ pos = n)
++
++/* we ignore GFP here */
++#define dma_alloc_coherent(dv, sz, dma, gfp) \
++ pci_alloc_consistent(pdev, (sz), (dma))
++#define dma_free_coherent(dv, sz, addr, dma_addr) \
++ pci_free_consistent(pdev, (sz), (addr), (dma_addr))
++
++#ifndef might_sleep
++#define might_sleep()
++#endif
++
++#endif /* <= 2.5.0 */
++
++/*****************************************************************************/
++/* 2.5.28 => 2.4.23 */
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) )
++
++static inline void _kc_synchronize_irq(void)
++{
++ synchronize_irq();
++}
++#undef synchronize_irq
++#define synchronize_irq(X) _kc_synchronize_irq()
++
++#include <linux/tqueue.h>
++#define work_struct tq_struct
++#undef INIT_WORK
++#define INIT_WORK(a,b) INIT_TQUEUE(a,(void (*)(void *))b,a)
++#undef container_of
++#define container_of list_entry
++#define schedule_work schedule_task
++#define flush_scheduled_work flush_scheduled_tasks
++#define cancel_work_sync(x) flush_scheduled_work()
++
++#endif /* 2.5.28 => 2.4.23 */
++
++/*****************************************************************************/
++/* 2.6.0 => 2.5.28 */
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
++#define MODULE_INFO(version, _version)
++#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
++#define CONFIG_E1000_DISABLE_PACKET_SPLIT 1
++#endif
++#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
++#define CONFIG_IGB_DISABLE_PACKET_SPLIT 1
++#endif
++
++#define pci_set_consistent_dma_mask(dev,mask) 1
++
++#undef dev_put
++#define dev_put(dev) __dev_put(dev)
++
++#ifndef skb_fill_page_desc
++#define skb_fill_page_desc _kc_skb_fill_page_desc
++extern void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size);
++#endif
++
++#undef ALIGN
++#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1))
++
++#ifndef page_count
++#define page_count(p) atomic_read(&(p)->count)
++#endif
++
++/* find_first_bit and find_next_bit are not defined for most
++ * 2.4 kernels (except for the redhat 2.4.21 kernels)
++ */
++#include <linux/bitops.h>
++#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
++#undef find_next_bit
++#define find_next_bit _kc_find_next_bit
++extern unsigned long _kc_find_next_bit(const unsigned long *addr,
++ unsigned long size,
++ unsigned long offset);
++#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
++
++#endif /* 2.6.0 => 2.5.28 */
++
++/*****************************************************************************/
++/* 2.6.4 => 2.6.0 */
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
++#define MODULE_VERSION(_version) MODULE_INFO(version, _version)
++#endif /* 2.6.4 => 2.6.0 */
++
++/*****************************************************************************/
++/* 2.6.5 => 2.6.0 */
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) )
++#define pci_dma_sync_single_for_cpu pci_dma_sync_single
++#define pci_dma_sync_single_for_device pci_dma_sync_single_for_cpu
++#endif /* 2.6.5 => 2.6.0 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,6) )
++/* taken from 2.6 include/linux/bitmap.h */
++#undef bitmap_zero
++#define bitmap_zero _kc_bitmap_zero
++static inline void _kc_bitmap_zero(unsigned long *dst, int nbits)
++{
++ if (nbits <= BITS_PER_LONG)
++ *dst = 0UL;
++ else {
++ int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
++ memset(dst, 0, len);
++ }
++}
++#define random_ether_addr _kc_random_ether_addr
++static inline void _kc_random_ether_addr(u8 *addr)
++{
++ get_random_bytes(addr, ETH_ALEN);
++ addr[0] &= 0xfe; /* clear multicast */
++ addr[0] |= 0x02; /* set local assignment */
++}
++#endif /* < 2.6.6 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) )
++#undef if_mii
++#define if_mii _kc_if_mii
++static inline struct mii_ioctl_data *_kc_if_mii(struct ifreq *rq)
++{
++ return (struct mii_ioctl_data *) &rq->ifr_ifru;
++}
++#endif /* < 2.6.7 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) )
++#ifndef PCI_EXP_DEVCTL
++#define PCI_EXP_DEVCTL 8
++#endif
++#ifndef PCI_EXP_DEVCTL_CERE
++#define PCI_EXP_DEVCTL_CERE 0x0001
++#endif
++#define msleep(x) do { set_current_state(TASK_UNINTERRUPTIBLE); \
++ schedule_timeout((x * HZ)/1000 + 2); \
++ } while (0)
++
++#endif /* < 2.6.8 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9))
++#include <net/dsfield.h>
++#define __iomem
++
++#ifndef kcalloc
++#define kcalloc(n, size, flags) _kc_kzalloc(((n) * (size)), flags)
++extern void *_kc_kzalloc(size_t size, int flags);
++#endif
++#define MSEC_PER_SEC 1000L
++static inline unsigned int _kc_jiffies_to_msecs(const unsigned long j)
++{
++#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
++ return (MSEC_PER_SEC / HZ) * j;
++#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
++ return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
++#else
++ return (j * MSEC_PER_SEC) / HZ;
++#endif
++}
++static inline unsigned long _kc_msecs_to_jiffies(const unsigned int m)
++{
++ if (m > _kc_jiffies_to_msecs(MAX_JIFFY_OFFSET))
++ return MAX_JIFFY_OFFSET;
++#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
++ return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
++#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
++ return m * (HZ / MSEC_PER_SEC);
++#else
++ return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC;
++#endif
++}
++
++#define msleep_interruptible _kc_msleep_interruptible
++static inline unsigned long _kc_msleep_interruptible(unsigned int msecs)
++{
++ unsigned long timeout = _kc_msecs_to_jiffies(msecs) + 1;
++
++ while (timeout && !signal_pending(current)) {
++ __set_current_state(TASK_INTERRUPTIBLE);
++ timeout = schedule_timeout(timeout);
++ }
++ return _kc_jiffies_to_msecs(timeout);
++}
++
++/* Basic mode control register. */
++#define BMCR_SPEED1000 0x0040 /* MSB of Speed (1000) */
++
++#ifndef __le16
++#define __le16 u16
++#endif
++#ifndef __le32
++#define __le32 u32
++#endif
++#ifndef __le64
++#define __le64 u64
++#endif
++#ifndef __be16
++#define __be16 u16
++#endif
++
++#ifdef pci_dma_mapping_error
++#undef pci_dma_mapping_error
++#endif
++#define pci_dma_mapping_error _kc_pci_dma_mapping_error
++static inline int _kc_pci_dma_mapping_error(struct pci_dev *pdev,
++ dma_addr_t dma_addr)
++{
++ return dma_addr == 0;
++}
++#endif /* < 2.6.9 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) )
++#ifdef module_param_array_named
++#undef module_param_array_named
++#define module_param_array_named(name, array, type, nump, perm) \
++ static struct kparam_array __param_arr_##name \
++ = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type, \
++ sizeof(array[0]), array }; \
++ module_param_call(name, param_array_set, param_array_get, \
++ &__param_arr_##name, perm)
++#endif /* module_param_array_named */
++#endif /* < 2.6.10 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) )
++#define PCI_D0 0
++#define PCI_D1 1
++#define PCI_D2 2
++#define PCI_D3hot 3
++#define PCI_D3cold 4
++typedef int pci_power_t;
++#define pci_choose_state(pdev,state) state
++#define PMSG_SUSPEND 3
++#define PCI_EXP_LNKCTL 16
++
++#undef NETIF_F_LLTX
++
++#ifndef ARCH_HAS_PREFETCH
++#define prefetch(X)
++#endif
++
++#ifndef NET_IP_ALIGN
++#define NET_IP_ALIGN 2
++#endif
++
++#define KC_USEC_PER_SEC 1000000L
++#define usecs_to_jiffies _kc_usecs_to_jiffies
++static inline unsigned int _kc_jiffies_to_usecs(const unsigned long j)
++{
++#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ)
++ return (KC_USEC_PER_SEC / HZ) * j;
++#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC)
++ return (j + (HZ / KC_USEC_PER_SEC) - 1)/(HZ / KC_USEC_PER_SEC);
++#else
++ return (j * KC_USEC_PER_SEC) / HZ;
++#endif
++}
++static inline unsigned long _kc_usecs_to_jiffies(const unsigned int m)
++{
++ if (m > _kc_jiffies_to_usecs(MAX_JIFFY_OFFSET))
++ return MAX_JIFFY_OFFSET;
++#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ)
++ return (m + (KC_USEC_PER_SEC / HZ) - 1) / (KC_USEC_PER_SEC / HZ);
++#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC)
++ return m * (HZ / KC_USEC_PER_SEC);
++#else
++ return (m * HZ + KC_USEC_PER_SEC - 1) / KC_USEC_PER_SEC;
++#endif
++}
++#endif /* < 2.6.11 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) )
++#include <linux/reboot.h>
++#define USE_REBOOT_NOTIFIER
++
++/* Generic MII registers. */
++#define MII_CTRL1000 0x09 /* 1000BASE-T control */
++#define MII_STAT1000 0x0a /* 1000BASE-T status */
++/* Advertisement control register. */
++#define ADVERTISE_PAUSE_CAP 0x0400 /* Try for pause */
++#define ADVERTISE_PAUSE_ASYM 0x0800 /* Try for asymmetric pause */
++/* 1000BASE-T Control register */
++#define ADVERTISE_1000FULL 0x0200 /* Advertise 1000BASE-T full duplex */
++#endif /* < 2.6.12 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) )
++#define pm_message_t u32
++#ifndef kzalloc
++#define kzalloc _kc_kzalloc
++extern void *_kc_kzalloc(size_t size, int flags);
++#endif
++
++/* Generic MII registers. */
++#define MII_ESTATUS 0x0f /* Extended Status */
++/* Basic mode status register. */
++#define BMSR_ESTATEN 0x0100 /* Extended Status in R15 */
++/* Extended status register. */
++#define ESTATUS_1000_TFULL 0x2000 /* Can do 1000BT Full */
++#define ESTATUS_1000_THALF 0x1000 /* Can do 1000BT Half */
++#endif /* < 2.6.14 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15) )
++#ifndef device_can_wakeup
++#define device_can_wakeup(dev) (1)
++#endif
++#ifndef device_set_wakeup_enable
++#define device_set_wakeup_enable(dev, val) do{}while(0)
++#endif
++#ifndef device_init_wakeup
++#define device_init_wakeup(dev,val) do {} while (0)
++#endif
++#endif /* < 2.6.15 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) )
++#undef DEFINE_MUTEX
++#define DEFINE_MUTEX(x) DECLARE_MUTEX(x)
++#define mutex_lock(x) down_interruptible(x)
++#define mutex_unlock(x) up(x)
++
++#undef HAVE_PCI_ERS
++#else /* 2.6.16 and above */
++#undef HAVE_PCI_ERS
++#define HAVE_PCI_ERS
++#endif /* < 2.6.16 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) )
++
++#ifndef IRQ_HANDLED
++#define irqreturn_t void
++#define IRQ_HANDLED
++#define IRQ_NONE
++#endif
++
++#ifndef IRQF_PROBE_SHARED
++#ifdef SA_PROBEIRQ
++#define IRQF_PROBE_SHARED SA_PROBEIRQ
++#else
++#define IRQF_PROBE_SHARED 0
++#endif
++#endif
++
++#ifndef IRQF_SHARED
++#define IRQF_SHARED SA_SHIRQ
++#endif
++
++#ifndef ARRAY_SIZE
++#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
++#endif
++
++#ifndef netdev_alloc_skb
++#define netdev_alloc_skb _kc_netdev_alloc_skb
++extern struct sk_buff *_kc_netdev_alloc_skb(struct net_device *dev,
++ unsigned int length);
++#endif
++
++#ifndef skb_is_gso
++#ifdef NETIF_F_TSO
++#define skb_is_gso _kc_skb_is_gso
++static inline int _kc_skb_is_gso(const struct sk_buff *skb)
++{
++ return skb_shinfo(skb)->gso_size;
++}
++#else
++#define skb_is_gso(a) 0
++#endif
++#endif
++
++#endif /* < 2.6.18 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) )
++
++#ifndef DIV_ROUND_UP
++#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
++#endif
++#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) )
++#ifndef RHEL_RELEASE_CODE
++#define RHEL_RELEASE_CODE 0
++#endif
++#ifndef RHEL_RELEASE_VERSION
++#define RHEL_RELEASE_VERSION(a,b) 0
++#endif
++#ifndef AX_RELEASE_CODE
++#define AX_RELEASE_CODE 0
++#endif
++#ifndef AX_RELEASE_VERSION
++#define AX_RELEASE_VERSION(a,b) 0
++#endif
++#if (!(( RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,4) ) && ( RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0) ) || ( RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,0) ) || (AX_RELEASE_CODE > AX_RELEASE_VERSION(3,0))))
++typedef irqreturn_t (*irq_handler_t)(int, void*, struct pt_regs *);
++#endif
++#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))
++#undef CONFIG_INET_LRO
++#undef CONFIG_INET_LRO_MODULE
++#ifdef IXGBE_FCOE
++#undef CONFIG_FCOE
++#undef CONFIG_FCOE_MODULE
++#endif /* IXGBE_FCOE */
++#endif
++typedef irqreturn_t (*new_handler_t)(int, void*);
++static inline irqreturn_t _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id)
++#else /* 2.4.x */
++typedef void (*irq_handler_t)(int, void*, struct pt_regs *);
++typedef void (*new_handler_t)(int, void*);
++static inline int _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id)
++#endif /* >= 2.5.x */
++{
++ irq_handler_t new_handler = (irq_handler_t) handler;
++ return request_irq(irq, new_handler, flags, devname, dev_id);
++}
++
++#undef request_irq
++#define request_irq(irq, handler, flags, devname, dev_id) _kc_request_irq((irq), (handler), (flags), (devname), (dev_id))
++
++#define irq_handler_t new_handler_t
++/* pci_restore_state and pci_save_state handles MSI/PCIE from 2.6.19 */
++#define PCIE_CONFIG_SPACE_LEN 256
++#define PCI_CONFIG_SPACE_LEN 64
++#define PCIE_LINK_STATUS 0x12
++#define pci_config_space_ich8lan() do {} while(0)
++#undef pci_save_state
++extern int _kc_pci_save_state(struct pci_dev *);
++#define pci_save_state(pdev) _kc_pci_save_state(pdev)
++#undef pci_restore_state
++extern void _kc_pci_restore_state(struct pci_dev *);
++#define pci_restore_state(pdev) _kc_pci_restore_state(pdev)
++#ifdef HAVE_PCI_ERS
++#undef free_netdev
++extern void _kc_free_netdev(struct net_device *);
++#define free_netdev(netdev) _kc_free_netdev(netdev)
++#endif
++static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev)
++{
++ return 0;
++}
++#define pci_disable_pcie_error_reporting(dev) do {} while (0)
++#define pci_cleanup_aer_uncorrect_error_status(dev) do {} while (0)
++#else /* 2.6.19 */
++#include <linux/aer.h>
++#endif /* < 2.6.19 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) )
++#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,28) )
++#undef INIT_WORK
++#define INIT_WORK(_work, _func) \
++do { \
++ INIT_LIST_HEAD(&(_work)->entry); \
++ (_work)->pending = 0; \
++ (_work)->func = (void (*)(void *))_func; \
++ (_work)->data = _work; \
++ init_timer(&(_work)->timer); \
++} while (0)
++#endif
++
++#ifndef PCI_VDEVICE
++#define PCI_VDEVICE(ven, dev) \
++ PCI_VENDOR_ID_##ven, (dev), \
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0
++#endif
++
++#ifndef round_jiffies
++#define round_jiffies(x) x
++#endif
++
++#define csum_offset csum
++
++#endif /* < 2.6.20 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) )
++#define to_net_dev(class) container_of(class, struct net_device, class_dev)
++#define NETDEV_CLASS_DEV
++#define vlan_group_get_device(vg, id) (vg->vlan_devices[id])
++#define vlan_group_set_device(vg, id, dev) if (vg) vg->vlan_devices[id] = dev;
++#define pci_channel_offline(pdev) (pdev->error_state && \
++ pdev->error_state != pci_channel_io_normal)
++#define pci_request_selected_regions(pdev, bars, name) \
++ pci_request_regions(pdev, name)
++#define pci_release_selected_regions(pdev, bars) pci_release_regions(pdev);
++#endif /* < 2.6.21 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) )
++#define tcp_hdr(skb) (skb->h.th)
++#define tcp_hdrlen(skb) (skb->h.th->doff << 2)
++#define skb_transport_offset(skb) (skb->h.raw - skb->data)
++#define skb_transport_header(skb) (skb->h.raw)
++#define ipv6_hdr(skb) (skb->nh.ipv6h)
++#define ip_hdr(skb) (skb->nh.iph)
++#define skb_network_offset(skb) (skb->nh.raw - skb->data)
++#define skb_network_header(skb) (skb->nh.raw)
++#define skb_tail_pointer(skb) skb->tail
++#define skb_copy_to_linear_data_offset(skb, offset, from, len) \
++ memcpy(skb->data + offset, from, len)
++#define skb_network_header_len(skb) (skb->h.raw - skb->nh.raw)
++#define pci_register_driver pci_module_init
++#define skb_mac_header(skb) skb->mac.raw
++
++#ifdef NETIF_F_MULTI_QUEUE
++#ifndef alloc_etherdev_mq
++#define alloc_etherdev_mq(_a, _b) alloc_etherdev(_a)
++#endif
++#endif /* NETIF_F_MULTI_QUEUE */
++
++#ifndef ETH_FCS_LEN
++#define ETH_FCS_LEN 4
++#endif
++#define cancel_work_sync(x) flush_scheduled_work()
++#ifndef udp_hdr
++#define udp_hdr _udp_hdr
++static inline struct udphdr *_udp_hdr(const struct sk_buff *skb)
++{
++ return (struct udphdr *)skb_transport_header(skb);
++}
++#endif
++#else /* 2.6.22 */
++#define ETH_TYPE_TRANS_SETS_DEV
++#endif /* < 2.6.22 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22) )
++#undef ETHTOOL_GPERMADDR
++#undef SET_MODULE_OWNER
++#define SET_MODULE_OWNER(dev) do { } while (0)
++#endif /* > 2.6.22 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) )
++#define netif_subqueue_stopped(_a, _b) 0
++#endif /* < 2.6.23 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
++/* if GRO is supported then the napi struct must already exist */
++#ifndef NETIF_F_GRO
++/* NAPI API changes in 2.6.24 break everything */
++struct napi_struct {
++ /* used to look up the real NAPI polling routine */
++ int (*poll)(struct napi_struct *, int);
++ struct net_device *dev;
++ int weight;
++};
++#endif
++
++#ifdef NAPI
++extern int __kc_adapter_clean(struct net_device *, int *);
++extern struct net_device *napi_to_poll_dev(struct napi_struct *napi);
++#define napi_enable(napi) do { \
++ struct napi_struct *_napi = (napi); \
++ /* abuse if_port as a counter */ \
++ if (!_napi->dev->if_port) { \
++ netif_poll_enable(_napi->dev); \
++ } \
++ ++_napi->dev->if_port; \
++ netif_poll_enable(napi_to_poll_dev(_napi)); \
++ } while (0)
++#define napi_disable(napi) do { \
++ struct napi_struct *_napi = (napi); \
++ netif_poll_disable(napi_to_poll_dev(_napi)); \
++ --_napi->dev->if_port; \
++ if (!_napi->dev->if_port) \
++ netif_poll_disable(_napi->dev); \
++ } while (0)
++#define netif_napi_add(_netdev, _napi, _poll, _weight) \
++ do { \
++ struct napi_struct *__napi = (_napi); \
++ struct net_device *poll_dev = napi_to_poll_dev(__napi); \
++ poll_dev->poll = &(__kc_adapter_clean); \
++ poll_dev->priv = (_napi); \
++ poll_dev->weight = (_weight); \
++ set_bit(__LINK_STATE_RX_SCHED, &poll_dev->state); \
++ set_bit(__LINK_STATE_START, &poll_dev->state);\
++ dev_hold(poll_dev); \
++ _netdev->poll = &(__kc_adapter_clean); \
++ _netdev->weight = (_weight); \
++ __napi->poll = &(_poll); \
++ __napi->weight = (_weight); \
++ __napi->dev = (_netdev); \
++ set_bit(__LINK_STATE_RX_SCHED, &(_netdev)->state); \
++ } while (0)
++#define netif_napi_del(_napi) \
++ do { \
++ struct net_device *poll_dev = napi_to_poll_dev(_napi); \
++ WARN_ON(!test_bit(__LINK_STATE_RX_SCHED, &poll_dev->state)); \
++ dev_put(poll_dev); \
++ memset(poll_dev, 0, sizeof(struct net_device));\
++ } while (0)
++#define napi_schedule_prep(_napi) \
++ (netif_running((_napi)->dev) && netif_rx_schedule_prep(napi_to_poll_dev(_napi)))
++#define napi_schedule(_napi) netif_rx_schedule(napi_to_poll_dev(_napi))
++#define __napi_schedule(_napi) __netif_rx_schedule(napi_to_poll_dev(_napi))
++#ifndef NETIF_F_GRO
++#define napi_complete(_napi) netif_rx_complete(napi_to_poll_dev(_napi))
++#else
++#define napi_complete(_napi) \
++ do { \
++ napi_gro_flush(_napi); \
++ netif_rx_complete(napi_to_poll_dev(_napi)); \
++ } while (0)
++#endif /* NETIF_F_GRO */
++#else /* NAPI */
++#define netif_napi_add(_netdev, _napi, _poll, _weight) \
++ do { \
++ struct napi_struct *__napi = _napi; \
++ _netdev->poll = &(_poll); \
++ _netdev->weight = (_weight); \
++ __napi->poll = &(_poll); \
++ __napi->weight = (_weight); \
++ __napi->dev = (_netdev); \
++ } while (0)
++#define netif_napi_del(_a) do {} while (0)
++#endif /* NAPI */
++
++#undef dev_get_by_name
++#define dev_get_by_name(_a, _b) dev_get_by_name(_b)
++#define __netif_subqueue_stopped(_a, _b) netif_subqueue_stopped(_a, _b)
++#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
++#else /* < 2.6.24 */
++#define HAVE_ETHTOOL_GET_SSET_COUNT
++#define HAVE_NETDEV_NAPI_LIST
++#endif /* < 2.6.24 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,24) )
++#include <linux/pm_qos_params.h>
++#endif /* > 2.6.24 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) )
++#define PM_QOS_CPU_DMA_LATENCY 1
++
++#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) )
++#include <linux/latency.h>
++#define PM_QOS_DEFAULT_VALUE INFINITE_LATENCY
++#define pm_qos_add_requirement(pm_qos_class, name, value) \
++ set_acceptable_latency(name, value)
++#define pm_qos_remove_requirement(pm_qos_class, name) \
++ remove_acceptable_latency(name)
++#define pm_qos_update_requirement(pm_qos_class, name, value) \
++ modify_acceptable_latency(name, value)
++#else
++#define PM_QOS_DEFAULT_VALUE -1
++#define pm_qos_add_requirement(pm_qos_class, name, value)
++#define pm_qos_remove_requirement(pm_qos_class, name)
++#define pm_qos_update_requirement(pm_qos_class, name, value) { \
++ if (value != PM_QOS_DEFAULT_VALUE) { \
++ printk(KERN_WARNING "%s: unable to set PM QoS requirement\n", \
++ pci_name(adapter->pdev)); \
++ } \
++}
++#endif /* > 2.6.18 */
++
++#define pci_enable_device_mem(pdev) pci_enable_device(pdev)
++
++#endif /* < 2.6.25 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) )
++#else /* < 2.6.26 */
++#include <linux/pci-aspm.h>
++#define HAVE_NETDEV_VLAN_FEATURES
++#endif /* < 2.6.26 */
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) )
++#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15) )
++#if (((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) && defined(CONFIG_PM)) || ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)) && defined(CONFIG_PM_SLEEP)))
++#undef device_set_wakeup_enable
++#define device_set_wakeup_enable(dev, val) \
++ do { \
++ u16 pmc = 0; \
++ int pm = pci_find_capability(adapter->pdev, PCI_CAP_ID_PM); \
++ if (pm) { \
++ pci_read_config_word(adapter->pdev, pm + PCI_PM_PMC, \
++ &pmc); \
++ } \
++ (dev)->power.can_wakeup = !!(pmc >> 11); \
++ (dev)->power.should_wakeup = (val && (pmc >> 11)); \
++ } while (0)
++#endif /* 2.6.15-2.6.22 and CONFIG_PM or 2.6.23-2.6.25 and CONFIG_PM_SLEEP */
++#endif /* 2.6.15 through 2.6.27 */
++#ifndef netif_napi_del
++#define netif_napi_del(_a) do {} while (0)
++#ifdef NAPI
++#ifdef CONFIG_NETPOLL
++#undef netif_napi_del
++#define netif_napi_del(_a) list_del(&(_a)->dev_list);
++#endif
++#endif
++#endif /* netif_napi_del */
++#ifndef pci_dma_mapping_error
++#define pci_dma_mapping_error(pdev, dma_addr) pci_dma_mapping_error(dma_addr)
++#endif
++
++#ifdef CONFIG_NETDEVICES_MULTIQUEUE
++#define HAVE_TX_MQ
++#endif
++
++#ifdef HAVE_TX_MQ
++extern void _kc_netif_tx_stop_all_queues(struct net_device *);
++extern void _kc_netif_tx_wake_all_queues(struct net_device *);
++extern void _kc_netif_tx_start_all_queues(struct net_device *);
++#define netif_tx_stop_all_queues(a) _kc_netif_tx_stop_all_queues(a)
++#define netif_tx_wake_all_queues(a) _kc_netif_tx_wake_all_queues(a)
++#define netif_tx_start_all_queues(a) _kc_netif_tx_start_all_queues(a)
++#undef netif_stop_subqueue
++#define netif_stop_subqueue(_ndev,_qi) do { \
++ if (netif_is_multiqueue((_ndev))) \
++ netif_stop_subqueue((_ndev), (_qi)); \
++ else \
++ netif_stop_queue((_ndev)); \
++ } while (0)
++#undef netif_start_subqueue
++#define netif_start_subqueue(_ndev,_qi) do { \
++ if (netif_is_multiqueue((_ndev))) \
++ netif_start_subqueue((_ndev), (_qi)); \
++ else \
++ netif_start_queue((_ndev)); \
++ } while (0)
++#else /* HAVE_TX_MQ */
++#define netif_tx_stop_all_queues(a) netif_stop_queue(a)
++#define netif_tx_wake_all_queues(a) netif_wake_queue(a)
++#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) )
++#define netif_tx_start_all_queues(a) netif_start_queue(a)
++#else
++#define netif_tx_start_all_queues(a) do {} while (0)
++#endif
++#define netif_stop_subqueue(_ndev,_qi) netif_stop_queue((_ndev))
++#define netif_start_subqueue(_ndev,_qi) netif_start_queue((_ndev))
++#endif /* HAVE_TX_MQ */
++#ifndef NETIF_F_MULTI_QUEUE
++#define NETIF_F_MULTI_QUEUE 0
++#define netif_is_multiqueue(a) 0
++#define netif_wake_subqueue(a, b)
++#endif /* NETIF_F_MULTI_QUEUE */
++#else /* < 2.6.27 */
++#define HAVE_TX_MQ
++#define HAVE_NETDEV_SELECT_QUEUE
++#endif /* < 2.6.27 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) )
++#define pci_ioremap_bar(pdev, bar) ioremap(pci_resource_start(pdev, bar), \
++ pci_resource_len(pdev, bar))
++#define pci_wake_from_d3 _kc_pci_wake_from_d3
++#define pci_prepare_to_sleep _kc_pci_prepare_to_sleep
++extern int _kc_pci_wake_from_d3(struct pci_dev *dev, bool enable);
++extern int _kc_pci_prepare_to_sleep(struct pci_dev *dev);
++#define netdev_alloc_page(a) alloc_page(GFP_ATOMIC)
++#endif /* < 2.6.28 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) )
++#define pci_request_selected_regions_exclusive(pdev, bars, name) \
++ pci_request_selected_regions(pdev, bars, name)
++extern void _kc_pci_disable_link_state(struct pci_dev *dev, int state);
++#define pci_disable_link_state(p, s) _kc_pci_disable_link_state(p, s)
++#else /* < 2.6.29 */
++#ifdef CONFIG_DCB
++#define HAVE_PFC_MODE_ENABLE
++#endif /* CONFIG_DCB */
++#endif /* < 2.6.29 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) )
++#ifdef IXGBE_FCOE
++#undef CONFIG_FCOE
++#undef CONFIG_FCOE_MODULE
++#endif /* IXGBE_FCOE */
++extern u16 _kc_skb_tx_hash(struct net_device *dev, struct sk_buff *skb);
++#define skb_tx_hash(n, s) _kc_skb_tx_hash(n, s)
++#define skb_record_rx_queue(a, b) do {} while (0)
++#else
++#define HAVE_ASPM_QUIRKS
++#endif /* < 2.6.30 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31) )
++#define ETH_P_1588 0x88F7
++#else
++#ifndef HAVE_NETDEV_STORAGE_ADDRESS
++#define HAVE_NETDEV_STORAGE_ADDRESS
++#endif
++#ifndef HAVE_NETDEV_HW_ADDR
++#define HAVE_NETDEV_HW_ADDR
++#endif
++#endif /* < 2.6.31 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32) )
++#undef netdev_tx_t
++#define netdev_tx_t int
++#endif /* < 2.6.32 */
++#endif /* _KCOMPAT_H_ */
+Index: linux-2.6.22/drivers/net/igb/kcompat_ethtool.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.22/drivers/net/igb/kcompat_ethtool.c 2009-12-18 12:39:22.000000000 -0500
+@@ -0,0 +1,1168 @@
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2009 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ You should have received a copy of the GNU General Public License along with
++ this program; if not, write to the Free Software Foundation, Inc.,
++ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
++
++/*
++ * net/core/ethtool.c - Ethtool ioctl handler
++ * Copyright (c) 2003 Matthew Wilcox <matthew@wil.cx>
++ *
++ * This file is where we call all the ethtool_ops commands to get
++ * the information ethtool needs. We fall back to calling do_ioctl()
++ * for drivers which haven't been converted to ethtool_ops yet.
++ *
++ * It's GPL, stupid.
++ *
++ * Modification by sfeldma@pobox.com to work as backward compat
++ * solution for pre-ethtool_ops kernels.
++ * - copied struct ethtool_ops from ethtool.h
++ * - defined SET_ETHTOOL_OPS
++ * - put in some #ifndef NETIF_F_xxx wrappers
++ * - changes refs to dev->ethtool_ops to ethtool_ops
++ * - changed dev_ethtool to ethtool_ioctl
++ * - remove EXPORT_SYMBOL()s
++ * - added _kc_ prefix in built-in ethtool_op_xxx ops.
++ */
++
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/errno.h>
++#include <linux/mii.h>
++#include <linux/ethtool.h>
++#include <linux/netdevice.h>
++#include <asm/uaccess.h>
++
++#include "kcompat.h"
++
++#undef SUPPORTED_10000baseT_Full
++#define SUPPORTED_10000baseT_Full (1 << 12)
++#undef ADVERTISED_10000baseT_Full
++#define ADVERTISED_10000baseT_Full (1 << 12)
++#undef SPEED_10000
++#define SPEED_10000 10000
++
++#undef ethtool_ops
++#define ethtool_ops _kc_ethtool_ops
++
++struct _kc_ethtool_ops {
++ int (*get_settings)(struct net_device *, struct ethtool_cmd *);
++ int (*set_settings)(struct net_device *, struct ethtool_cmd *);
++ void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *);
++ int (*get_regs_len)(struct net_device *);
++ void (*get_regs)(struct net_device *, struct ethtool_regs *, void *);
++ void (*get_wol)(struct net_device *, struct ethtool_wolinfo *);
++ int (*set_wol)(struct net_device *, struct ethtool_wolinfo *);
++ u32 (*get_msglevel)(struct net_device *);
++ void (*set_msglevel)(struct net_device *, u32);
++ int (*nway_reset)(struct net_device *);
++ u32 (*get_link)(struct net_device *);
++ int (*get_eeprom_len)(struct net_device *);
++ int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *);
++ int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *);
++ int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *);
++ int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *);
++ void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *);
++ int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *);
++ void (*get_pauseparam)(struct net_device *,
++ struct ethtool_pauseparam*);
++ int (*set_pauseparam)(struct net_device *,
++ struct ethtool_pauseparam*);
++ u32 (*get_rx_csum)(struct net_device *);
++ int (*set_rx_csum)(struct net_device *, u32);
++ u32 (*get_tx_csum)(struct net_device *);
++ int (*set_tx_csum)(struct net_device *, u32);
++ u32 (*get_sg)(struct net_device *);
++ int (*set_sg)(struct net_device *, u32);
++ u32 (*get_tso)(struct net_device *);
++ int (*set_tso)(struct net_device *, u32);
++ int (*self_test_count)(struct net_device *);
++ void (*self_test)(struct net_device *, struct ethtool_test *, u64 *);
++ void (*get_strings)(struct net_device *, u32 stringset, u8 *);
++ int (*phys_id)(struct net_device *, u32);
++ int (*get_stats_count)(struct net_device *);
++ void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *,
++ u64 *);
++} *ethtool_ops = NULL;
++
++#undef SET_ETHTOOL_OPS
++#define SET_ETHTOOL_OPS(netdev, ops) (ethtool_ops = (ops))
++
++/*
++ * Some useful ethtool_ops methods that are device independent. If we find that
++ * all drivers want to do the same thing here, we can turn these into dev_()
++ * function calls.
++ */
++
++#undef ethtool_op_get_link
++#define ethtool_op_get_link _kc_ethtool_op_get_link
++u32 _kc_ethtool_op_get_link(struct net_device *dev)
++{
++ return netif_carrier_ok(dev) ? 1 : 0;
++}
++
++#undef ethtool_op_get_tx_csum
++#define ethtool_op_get_tx_csum _kc_ethtool_op_get_tx_csum
++u32 _kc_ethtool_op_get_tx_csum(struct net_device *dev)
++{
++#ifdef NETIF_F_IP_CSUM
++ return (dev->features & NETIF_F_IP_CSUM) != 0;
++#else
++ return 0;
++#endif
++}
++
++#undef ethtool_op_set_tx_csum
++#define ethtool_op_set_tx_csum _kc_ethtool_op_set_tx_csum
++int _kc_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
++{
++#ifdef NETIF_F_IP_CSUM
++ if (data)
++#ifdef NETIF_F_IPV6_CSUM
++ dev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
++ else
++ dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
++#else
++ dev->features |= NETIF_F_IP_CSUM;
++ else
++ dev->features &= ~NETIF_F_IP_CSUM;
++#endif
++#endif
++
++ return 0;
++}
++
++#undef ethtool_op_get_sg
++#define ethtool_op_get_sg _kc_ethtool_op_get_sg
++u32 _kc_ethtool_op_get_sg(struct net_device *dev)
++{
++#ifdef NETIF_F_SG
++ return (dev->features & NETIF_F_SG) != 0;
++#else
++ return 0;
++#endif
++}
++
++#undef ethtool_op_set_sg
++#define ethtool_op_set_sg _kc_ethtool_op_set_sg
++int _kc_ethtool_op_set_sg(struct net_device *dev, u32 data)
++{
++#ifdef NETIF_F_SG
++ if (data)
++ dev->features |= NETIF_F_SG;
++ else
++ dev->features &= ~NETIF_F_SG;
++#endif
++
++ return 0;
++}
++
++#undef ethtool_op_get_tso
++#define ethtool_op_get_tso _kc_ethtool_op_get_tso
++u32 _kc_ethtool_op_get_tso(struct net_device *dev)
++{
++#ifdef NETIF_F_TSO
++ return (dev->features & NETIF_F_TSO) != 0;
++#else
++ return 0;
++#endif
++}
++
++#undef ethtool_op_set_tso
++#define ethtool_op_set_tso _kc_ethtool_op_set_tso
++int _kc_ethtool_op_set_tso(struct net_device *dev, u32 data)
++{
++#ifdef NETIF_F_TSO
++ if (data)
++ dev->features |= NETIF_F_TSO;
++ else
++ dev->features &= ~NETIF_F_TSO;
++#endif
++
++ return 0;
++}
++
++/* Handlers for each ethtool command */
++
++static int ethtool_get_settings(struct net_device *dev, void *useraddr)
++{
++ struct ethtool_cmd cmd = { ETHTOOL_GSET };
++ int err;
++
++ if (!ethtool_ops->get_settings)
++ return -EOPNOTSUPP;
++
++ err = ethtool_ops->get_settings(dev, &cmd);
++ if (err < 0)
++ return err;
++
++ if (copy_to_user(useraddr, &cmd, sizeof(cmd)))
++ return -EFAULT;
++ return 0;
++}
++
++static int ethtool_set_settings(struct net_device *dev, void *useraddr)
++{
++ struct ethtool_cmd cmd;
++
++ if (!ethtool_ops->set_settings)
++ return -EOPNOTSUPP;
++
++ if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
++ return -EFAULT;
++
++ return ethtool_ops->set_settings(dev, &cmd);
++}
++
++static int ethtool_get_drvinfo(struct net_device *dev, void *useraddr)
++{
++ struct ethtool_drvinfo info;
++ struct ethtool_ops *ops = ethtool_ops;
++
++ if (!ops->get_drvinfo)
++ return -EOPNOTSUPP;
++
++ memset(&info, 0, sizeof(info));
++ info.cmd = ETHTOOL_GDRVINFO;
++ ops->get_drvinfo(dev, &info);
++
++ if (ops->self_test_count)
++ info.testinfo_len = ops->self_test_count(dev);
++ if (ops->get_stats_count)
++ info.n_stats = ops->get_stats_count(dev);
++ if (ops->get_regs_len)
++ info.regdump_len = ops->get_regs_len(dev);
++ if (ops->get_eeprom_len)
++ info.eedump_len = ops->get_eeprom_len(dev);
++
++ if (copy_to_user(useraddr, &info, sizeof(info)))
++ return -EFAULT;
++ return 0;
++}
++
++static int ethtool_get_regs(struct net_device *dev, char *useraddr)
++{
++ struct ethtool_regs regs;
++ struct ethtool_ops *ops = ethtool_ops;
++ void *regbuf;
++ int reglen, ret;
++
++ if (!ops->get_regs || !ops->get_regs_len)
++ return -EOPNOTSUPP;
++
++ if (copy_from_user(&regs, useraddr, sizeof(regs)))
++ return -EFAULT;
++
++ reglen = ops->get_regs_len(dev);
++ if (regs.len > reglen)
++ regs.len = reglen;
++
++ regbuf = kmalloc(reglen, GFP_USER);
++ if (!regbuf)
++ return -ENOMEM;
++
++ ops->get_regs(dev, &regs, regbuf);
++
++ ret = -EFAULT;
++ if (copy_to_user(useraddr, &regs, sizeof(regs)))
++ goto out;
++ useraddr += offsetof(struct ethtool_regs, data);
++ if (copy_to_user(useraddr, regbuf, reglen))
++ goto out;
++ ret = 0;
++
++out:
++ kfree(regbuf);
++ return ret;
++}
++
++static int ethtool_get_wol(struct net_device *dev, char *useraddr)
++{
++ struct ethtool_wolinfo wol = { ETHTOOL_GWOL };
++
++ if (!ethtool_ops->get_wol)
++ return -EOPNOTSUPP;
++
++ ethtool_ops->get_wol(dev, &wol);
++
++ if (copy_to_user(useraddr, &wol, sizeof(wol)))
++ return -EFAULT;
++ return 0;
++}
++
++static int ethtool_set_wol(struct net_device *dev, char *useraddr)
++{
++ struct ethtool_wolinfo wol;
++
++ if (!ethtool_ops->set_wol)
++ return -EOPNOTSUPP;
++
++ if (copy_from_user(&wol, useraddr, sizeof(wol)))
++ return -EFAULT;
++
++ return ethtool_ops->set_wol(dev, &wol);
++}
++
++static int ethtool_get_msglevel(struct net_device *dev, char *useraddr)
++{
++ struct ethtool_value edata = { ETHTOOL_GMSGLVL };
++
++ if (!ethtool_ops->get_msglevel)
++ return -EOPNOTSUPP;
++
++ edata.data = ethtool_ops->get_msglevel(dev);
++
++ if (copy_to_user(useraddr, &edata, sizeof(edata)))
++ return -EFAULT;
++ return 0;
++}
++
++static int ethtool_set_msglevel(struct net_device *dev, char *useraddr)
++{
++ struct ethtool_value edata;
++
++ if (!ethtool_ops->set_msglevel)
++ return -EOPNOTSUPP;
++
++ if (copy_from_user(&edata, useraddr, sizeof(edata)))
++ return -EFAULT;
++
++ ethtool_ops->set_msglevel(dev, edata.data);
++ return 0;
++}
++
++static int ethtool_nway_reset(struct net_device *dev)
++{
++ if (!ethtool_ops->nway_reset)
++ return -EOPNOTSUPP;
++
++ return ethtool_ops->nway_reset(dev);
++}
++
++static int ethtool_get_link(struct net_device *dev, void *useraddr)
++{
++ struct ethtool_value edata = { ETHTOOL_GLINK };
++
++ if (!ethtool_ops->get_link)
++ return -EOPNOTSUPP;
++
++ edata.data = ethtool_ops->get_link(dev);
++
++ if (copy_to_user(useraddr, &edata, sizeof(edata)))
++ return -EFAULT;
++ return 0;
++}
++
++static int ethtool_get_eeprom(struct net_device *dev, void *useraddr)
++{
++ struct ethtool_eeprom eeprom;
++ struct ethtool_ops *ops = ethtool_ops;
++ u8 *data;
++ int ret;
++
++ if (!ops->get_eeprom || !ops->get_eeprom_len)
++ return -EOPNOTSUPP;
++
++ if (copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
++ return -EFAULT;
++
++ /* Check for wrap and zero */
++ if (eeprom.offset + eeprom.len <= eeprom.offset)
++ return -EINVAL;
++
++ /* Check for exceeding total eeprom len */
++ if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev))
++ return -EINVAL;
++
++ data = kmalloc(eeprom.len, GFP_USER);
++ if (!data)
++ return -ENOMEM;
++
++ ret = -EFAULT;
++ if (copy_from_user(data, useraddr + sizeof(eeprom), eeprom.len))
++ goto out;
++
++ ret = ops->get_eeprom(dev, &eeprom, data);
++ if (ret)
++ goto out;
++
++ ret = -EFAULT;
++ if (copy_to_user(useraddr, &eeprom, sizeof(eeprom)))
++ goto out;
++ if (copy_to_user(useraddr + sizeof(eeprom), data, eeprom.len))
++ goto out;
++ ret = 0;
++
++out:
++ kfree(data);
++ return ret;
++}
++
++static int ethtool_set_eeprom(struct net_device *dev, void *useraddr)
++{
++ struct ethtool_eeprom eeprom;
++ struct ethtool_ops *ops = ethtool_ops;
++ u8 *data;
++ int ret;
++
++ if (!ops->set_eeprom || !ops->get_eeprom_len)
++ return -EOPNOTSUPP;
++
++ if (copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
++ return -EFAULT;
++
++ /* Check for wrap and zero */
++ if (eeprom.offset + eeprom.len <= eeprom.offset)
++ return -EINVAL;
++
++ /* Check for exceeding total eeprom len */
++ if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev))
++ return -EINVAL;
++
++ data = kmalloc(eeprom.len, GFP_USER);
++ if (!data)
++ return -ENOMEM;
++
++ ret = -EFAULT;
++ if (copy_from_user(data, useraddr + sizeof(eeprom), eeprom.len))
++ goto out;
++
++ ret = ops->set_eeprom(dev, &eeprom, data);
++ if (ret)
++ goto out;
++
++ if (copy_to_user(useraddr + sizeof(eeprom), data, eeprom.len))
++ ret = -EFAULT;
++
++out:
++ kfree(data);
++ return ret;
++}
++
++static int ethtool_get_coalesce(struct net_device *dev, void *useraddr)
++{
++ struct ethtool_coalesce coalesce = { ETHTOOL_GCOALESCE };
++
++ if (!ethtool_ops->get_coalesce)
++ return -EOPNOTSUPP;
++
++ ethtool_ops->get_coalesce(dev, &coalesce);
++
++ if (copy_to_user(useraddr, &coalesce, sizeof(coalesce)))
++ return -EFAULT;
++ return 0;
++}
++
++static int ethtool_set_coalesce(struct net_device *dev, void *useraddr)
++{
++ struct ethtool_coalesce coalesce;
++
++ if (!ethtool_ops->get_coalesce)
++ return -EOPNOTSUPP;
++
++ if (copy_from_user(&coalesce, useraddr, sizeof(coalesce)))
++ return -EFAULT;
++
++ return ethtool_ops->set_coalesce(dev, &coalesce);
++}
++
++static int ethtool_get_ringparam(struct net_device *dev, void *useraddr)
++{
++ struct ethtool_ringparam ringparam = { ETHTOOL_GRINGPARAM };
++
++ if (!ethtool_ops->get_ringparam)
++ return -EOPNOTSUPP;
++
++ ethtool_ops->get_ringparam(dev, &ringparam);
++
++ if (copy_to_user(useraddr, &ringparam, sizeof(ringparam)))
++ return -EFAULT;
++ return 0;
++}
++
++static int ethtool_set_ringparam(struct net_device *dev, void *useraddr)
++{
++ struct ethtool_ringparam ringparam;
++
++ if (!ethtool_ops->get_ringparam)
++ return -EOPNOTSUPP;
++
++ if (copy_from_user(&ringparam, useraddr, sizeof(ringparam)))
++ return -EFAULT;
++
++ return ethtool_ops->set_ringparam(dev, &ringparam);
++}
++
++static int ethtool_get_pauseparam(struct net_device *dev, void *useraddr)
++{
++ struct ethtool_pauseparam pauseparam = { ETHTOOL_GPAUSEPARAM };
++
++ if (!ethtool_ops->get_pauseparam)
++ return -EOPNOTSUPP;
++
++ ethtool_ops->get_pauseparam(dev, &pauseparam);
++
++ if (copy_to_user(useraddr, &pauseparam, sizeof(pauseparam)))
++ return -EFAULT;
++ return 0;
++}
++
++static int ethtool_set_pauseparam(struct net_device *dev, void *useraddr)
++{
++ struct ethtool_pauseparam pauseparam;
++
++ if (!ethtool_ops->get_pauseparam)
++ return -EOPNOTSUPP;
++
++ if (copy_from_user(&pauseparam, useraddr, sizeof(pauseparam)))
++ return -EFAULT;
++
++ return ethtool_ops->set_pauseparam(dev, &pauseparam);
++}
++
++static int ethtool_get_rx_csum(struct net_device *dev, char *useraddr)
++{
++ struct ethtool_value edata = { ETHTOOL_GRXCSUM };
++
++ if (!ethtool_ops->get_rx_csum)
++ return -EOPNOTSUPP;
++
++ edata.data = ethtool_ops->get_rx_csum(dev);
++
++ if (copy_to_user(useraddr, &edata, sizeof(edata)))
++ return -EFAULT;
++ return 0;
++}
++
++static int ethtool_set_rx_csum(struct net_device *dev, char *useraddr)
++{
++ struct ethtool_value edata;
++
++ if (!ethtool_ops->set_rx_csum)
++ return -EOPNOTSUPP;
++
++ if (copy_from_user(&edata, useraddr, sizeof(edata)))
++ return -EFAULT;
++
++ ethtool_ops->set_rx_csum(dev, edata.data);
++ return 0;
++}
++
++static int ethtool_get_tx_csum(struct net_device *dev, char *useraddr)
++{
++ struct ethtool_value edata = { ETHTOOL_GTXCSUM };
++
++ if (!ethtool_ops->get_tx_csum)
++ return -EOPNOTSUPP;
++
++ edata.data = ethtool_ops->get_tx_csum(dev);
++
++ if (copy_to_user(useraddr, &edata, sizeof(edata)))
++ return -EFAULT;
++ return 0;
++}
++
++static int ethtool_set_tx_csum(struct net_device *dev, char *useraddr)
++{
++ struct ethtool_value edata;
++
++ if (!ethtool_ops->set_tx_csum)
++ return -EOPNOTSUPP;
++
++ if (copy_from_user(&edata, useraddr, sizeof(edata)))
++ return -EFAULT;
++
++ return ethtool_ops->set_tx_csum(dev, edata.data);
++}
++
++static int ethtool_get_sg(struct net_device *dev, char *useraddr)
++{
++ struct ethtool_value edata = { ETHTOOL_GSG };
++
++ if (!ethtool_ops->get_sg)
++ return -EOPNOTSUPP;
++
++ edata.data = ethtool_ops->get_sg(dev);
++
++ if (copy_to_user(useraddr, &edata, sizeof(edata)))
++ return -EFAULT;
++ return 0;
++}
++
++static int ethtool_set_sg(struct net_device *dev, char *useraddr)
++{
++ struct ethtool_value edata;
++
++ if (!ethtool_ops->set_sg)
++ return -EOPNOTSUPP;
++
++ if (copy_from_user(&edata, useraddr, sizeof(edata)))
++ return -EFAULT;
++
++ return ethtool_ops->set_sg(dev, edata.data);
++}
++
++static int ethtool_get_tso(struct net_device *dev, char *useraddr)
++{
++ struct ethtool_value edata = { ETHTOOL_GTSO };
++
++ if (!ethtool_ops->get_tso)
++ return -EOPNOTSUPP;
++
++ edata.data = ethtool_ops->get_tso(dev);
++
++ if (copy_to_user(useraddr, &edata, sizeof(edata)))
++ return -EFAULT;
++ return 0;
++}
++
++static int ethtool_set_tso(struct net_device *dev, char *useraddr)
++{
++ struct ethtool_value edata;
++
++ if (!ethtool_ops->set_tso)
++ return -EOPNOTSUPP;
++
++ if (copy_from_user(&edata, useraddr, sizeof(edata)))
++ return -EFAULT;
++
++ return ethtool_ops->set_tso(dev, edata.data);
++}
++
++static int ethtool_self_test(struct net_device *dev, char *useraddr)
++{
++ struct ethtool_test test;
++ struct ethtool_ops *ops = ethtool_ops;
++ u64 *data;
++ int ret;
++
++ if (!ops->self_test || !ops->self_test_count)
++ return -EOPNOTSUPP;
++
++ if (copy_from_user(&test, useraddr, sizeof(test)))
++ return -EFAULT;
++
++ test.len = ops->self_test_count(dev);
++ data = kmalloc(test.len * sizeof(u64), GFP_USER);
++ if (!data)
++ return -ENOMEM;
++
++ ops->self_test(dev, &test, data);
++
++ ret = -EFAULT;
++ if (copy_to_user(useraddr, &test, sizeof(test)))
++ goto out;
++ useraddr += sizeof(test);
++ if (copy_to_user(useraddr, data, test.len * sizeof(u64)))
++ goto out;
++ ret = 0;
++
++out:
++ kfree(data);
++ return ret;
++}
++
++static int ethtool_get_strings(struct net_device *dev, void *useraddr)
++{
++ struct ethtool_gstrings gstrings;
++ struct ethtool_ops *ops = ethtool_ops;
++ u8 *data;
++ int ret;
++
++ if (!ops->get_strings)
++ return -EOPNOTSUPP;
++
++ if (copy_from_user(&gstrings, useraddr, sizeof(gstrings)))
++ return -EFAULT;
++
++ switch (gstrings.string_set) {
++ case ETH_SS_TEST:
++ if (!ops->self_test_count)
++ return -EOPNOTSUPP;
++ gstrings.len = ops->self_test_count(dev);
++ break;
++ case ETH_SS_STATS:
++ if (!ops->get_stats_count)
++ return -EOPNOTSUPP;
++ gstrings.len = ops->get_stats_count(dev);
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
++ if (!data)
++ return -ENOMEM;
++
++ ops->get_strings(dev, gstrings.string_set, data);
++
++ ret = -EFAULT;
++ if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
++ goto out;
++ useraddr += sizeof(gstrings);
++ if (copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN))
++ goto out;
++ ret = 0;
++
++out:
++ kfree(data);
++ return ret;
++}
++
++static int ethtool_phys_id(struct net_device *dev, void *useraddr)
++{
++ struct ethtool_value id;
++
++ if (!ethtool_ops->phys_id)
++ return -EOPNOTSUPP;
++
++ if (copy_from_user(&id, useraddr, sizeof(id)))
++ return -EFAULT;
++
++ return ethtool_ops->phys_id(dev, id.data);
++}
++
++static int ethtool_get_stats(struct net_device *dev, void *useraddr)
++{
++ struct ethtool_stats stats;
++ struct ethtool_ops *ops = ethtool_ops;
++ u64 *data;
++ int ret;
++
++ if (!ops->get_ethtool_stats || !ops->get_stats_count)
++ return -EOPNOTSUPP;
++
++ if (copy_from_user(&stats, useraddr, sizeof(stats)))
++ return -EFAULT;
++
++ stats.n_stats = ops->get_stats_count(dev);
++ data = kmalloc(stats.n_stats * sizeof(u64), GFP_USER);
++ if (!data)
++ return -ENOMEM;
++
++ ops->get_ethtool_stats(dev, &stats, data);
++
++ ret = -EFAULT;
++ if (copy_to_user(useraddr, &stats, sizeof(stats)))
++ goto out;
++ useraddr += sizeof(stats);
++ if (copy_to_user(useraddr, data, stats.n_stats * sizeof(u64)))
++ goto out;
++ ret = 0;
++
++out:
++ kfree(data);
++ return ret;
++}
++
++/* The main entry point in this file. Called from net/core/dev.c */
++
++#define ETHTOOL_OPS_COMPAT
++int ethtool_ioctl(struct ifreq *ifr)
++{
++ struct net_device *dev = __dev_get_by_name(ifr->ifr_name);
++ void *useraddr = (void *) ifr->ifr_data;
++ u32 ethcmd;
++
++ /*
++ * XXX: This can be pushed down into the ethtool_* handlers that
++ * need it. Keep existing behavior for the moment.
++ */
++ if (!capable(CAP_NET_ADMIN))
++ return -EPERM;
++
++ if (!dev || !netif_device_present(dev))
++ return -ENODEV;
++
++ if (copy_from_user(&ethcmd, useraddr, sizeof (ethcmd)))
++ return -EFAULT;
++
++ switch (ethcmd) {
++ case ETHTOOL_GSET:
++ return ethtool_get_settings(dev, useraddr);
++ case ETHTOOL_SSET:
++ return ethtool_set_settings(dev, useraddr);
++ case ETHTOOL_GDRVINFO:
++ return ethtool_get_drvinfo(dev, useraddr);
++ case ETHTOOL_GREGS:
++ return ethtool_get_regs(dev, useraddr);
++ case ETHTOOL_GWOL:
++ return ethtool_get_wol(dev, useraddr);
++ case ETHTOOL_SWOL:
++ return ethtool_set_wol(dev, useraddr);
++ case ETHTOOL_GMSGLVL:
++ return ethtool_get_msglevel(dev, useraddr);
++ case ETHTOOL_SMSGLVL:
++ return ethtool_set_msglevel(dev, useraddr);
++ case ETHTOOL_NWAY_RST:
++ return ethtool_nway_reset(dev);
++ case ETHTOOL_GLINK:
++ return ethtool_get_link(dev, useraddr);
++ case ETHTOOL_GEEPROM:
++ return ethtool_get_eeprom(dev, useraddr);
++ case ETHTOOL_SEEPROM:
++ return ethtool_set_eeprom(dev, useraddr);
++ case ETHTOOL_GCOALESCE:
++ return ethtool_get_coalesce(dev, useraddr);
++ case ETHTOOL_SCOALESCE:
++ return ethtool_set_coalesce(dev, useraddr);
++ case ETHTOOL_GRINGPARAM:
++ return ethtool_get_ringparam(dev, useraddr);
++ case ETHTOOL_SRINGPARAM:
++ return ethtool_set_ringparam(dev, useraddr);
++ case ETHTOOL_GPAUSEPARAM:
++ return ethtool_get_pauseparam(dev, useraddr);
++ case ETHTOOL_SPAUSEPARAM:
++ return ethtool_set_pauseparam(dev, useraddr);
++ case ETHTOOL_GRXCSUM:
++ return ethtool_get_rx_csum(dev, useraddr);
++ case ETHTOOL_SRXCSUM:
++ return ethtool_set_rx_csum(dev, useraddr);
++ case ETHTOOL_GTXCSUM:
++ return ethtool_get_tx_csum(dev, useraddr);
++ case ETHTOOL_STXCSUM:
++ return ethtool_set_tx_csum(dev, useraddr);
++ case ETHTOOL_GSG:
++ return ethtool_get_sg(dev, useraddr);
++ case ETHTOOL_SSG:
++ return ethtool_set_sg(dev, useraddr);
++ case ETHTOOL_GTSO:
++ return ethtool_get_tso(dev, useraddr);
++ case ETHTOOL_STSO:
++ return ethtool_set_tso(dev, useraddr);
++ case ETHTOOL_TEST:
++ return ethtool_self_test(dev, useraddr);
++ case ETHTOOL_GSTRINGS:
++ return ethtool_get_strings(dev, useraddr);
++ case ETHTOOL_PHYS_ID:
++ return ethtool_phys_id(dev, useraddr);
++ case ETHTOOL_GSTATS:
++ return ethtool_get_stats(dev, useraddr);
++ default:
++ return -EOPNOTSUPP;
++ }
++
++ return -EOPNOTSUPP;
++}
++
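++/*
++ * Local copy of the kernel's generic MII helpers (struct mii_if_info and
++ * the mii_* functions) for kernels that do not provide them. The #defines
++ * below remap the standard names onto these _kc_ versions.
++ */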
++#define mii_if_info _kc_mii_if_info
++struct _kc_mii_if_info {
++ int phy_id;
++ int advertising;
++ int phy_id_mask;
++ int reg_num_mask;
++
++ unsigned int full_duplex : 1; /* is full duplex? */
++ unsigned int force_media : 1; /* is autoneg. disabled? */
++
++ struct net_device *dev;
++ int (*mdio_read) (struct net_device *dev, int phy_id, int location);
++ void (*mdio_write) (struct net_device *dev, int phy_id, int location, int val);
++};
++
++struct ethtool_cmd;
++struct mii_ioctl_data;
++
++#undef mii_link_ok
++#define mii_link_ok _kc_mii_link_ok
++#undef mii_nway_restart
++#define mii_nway_restart _kc_mii_nway_restart
++#undef mii_ethtool_gset
++#define mii_ethtool_gset _kc_mii_ethtool_gset
++#undef mii_ethtool_sset
++#define mii_ethtool_sset _kc_mii_ethtool_sset
++#undef mii_check_link
++#define mii_check_link _kc_mii_check_link
++#undef generic_mii_ioctl
++#define generic_mii_ioctl _kc_generic_mii_ioctl
++extern int _kc_mii_link_ok (struct mii_if_info *mii);
++extern int _kc_mii_nway_restart (struct mii_if_info *mii);
++extern int _kc_mii_ethtool_gset(struct mii_if_info *mii,
++ struct ethtool_cmd *ecmd);
++extern int _kc_mii_ethtool_sset(struct mii_if_info *mii,
++ struct ethtool_cmd *ecmd);
++extern void _kc_mii_check_link (struct mii_if_info *mii);
++extern int _kc_generic_mii_ioctl(struct mii_if_info *mii_if,
++ struct mii_ioctl_data *mii_data, int cmd,
++ unsigned int *duplex_changed);
++
++
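++/*
++ * Small wrapper structures pairing a pci_dev/net_device with extra state
++ * (driver data, carrier status) that the compat code tracks on its own,
++ * presumably for kernels lacking the corresponding in-kernel fields.
++ */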
++struct _kc_pci_dev_ext {
++ struct pci_dev *dev;
++ void *pci_drvdata;
++ struct pci_driver *driver;
++};
++
++struct _kc_net_dev_ext {
++ struct net_device *dev;
++ unsigned int carrier;
++};
++
++
++/**************************************/
++/* mii support */
++
++int _kc_mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
++{
++ struct net_device *dev = mii->dev;
++ u32 advert, bmcr, lpa, nego;
++
++ ecmd->supported =
++ (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
++ SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
++ SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII);
++
++ /* only supports twisted-pair */
++ ecmd->port = PORT_MII;
++
++ /* only supports internal transceiver */
++ ecmd->transceiver = XCVR_INTERNAL;
++
++ /* this isn't fully supported at higher layers */
++ ecmd->phy_address = mii->phy_id;
++
++ ecmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
++ advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE);
++ if (advert & ADVERTISE_10HALF)
++ ecmd->advertising |= ADVERTISED_10baseT_Half;
++ if (advert & ADVERTISE_10FULL)
++ ecmd->advertising |= ADVERTISED_10baseT_Full;
++ if (advert & ADVERTISE_100HALF)
++ ecmd->advertising |= ADVERTISED_100baseT_Half;
++ if (advert & ADVERTISE_100FULL)
++ ecmd->advertising |= ADVERTISED_100baseT_Full;
++
++ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
++ lpa = mii->mdio_read(dev, mii->phy_id, MII_LPA);
++ if (bmcr & BMCR_ANENABLE) {
++ ecmd->advertising |= ADVERTISED_Autoneg;
++ ecmd->autoneg = AUTONEG_ENABLE;
++
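++ /*
++ * mii_nway_result() resolves the best mode common to what we
++ * advertise and what the link partner reports; that result
++ * determines the negotiated speed and duplex below.
++ */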
++ nego = mii_nway_result(advert & lpa);
++ if (nego == LPA_100FULL || nego == LPA_100HALF)
++ ecmd->speed = SPEED_100;
++ else
++ ecmd->speed = SPEED_10;
++ if (nego == LPA_100FULL || nego == LPA_10FULL) {
++ ecmd->duplex = DUPLEX_FULL;
++ mii->full_duplex = 1;
++ } else {
++ ecmd->duplex = DUPLEX_HALF;
++ mii->full_duplex = 0;
++ }
++ } else {
++ ecmd->autoneg = AUTONEG_DISABLE;
++
++ ecmd->speed = (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
++ ecmd->duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
++ }
++
++ /* ignore maxtxpkt, maxrxpkt for now */
++
++ return 0;
++}
++
++int _kc_mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
++{
++ struct net_device *dev = mii->dev;
++
++ if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
++ return -EINVAL;
++ if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
++ return -EINVAL;
++ if (ecmd->port != PORT_MII)
++ return -EINVAL;
++ if (ecmd->transceiver != XCVR_INTERNAL)
++ return -EINVAL;
++ if (ecmd->phy_address != mii->phy_id)
++ return -EINVAL;
++ if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE)
++ return -EINVAL;
++
++ /* ignore supported, maxtxpkt, maxrxpkt */
++
++ if (ecmd->autoneg == AUTONEG_ENABLE) {
++ u32 bmcr, advert, tmp;
++
++ if ((ecmd->advertising & (ADVERTISED_10baseT_Half |
++ ADVERTISED_10baseT_Full |
++ ADVERTISED_100baseT_Half |
++ ADVERTISED_100baseT_Full)) == 0)
++ return -EINVAL;
++
++ /* advertise only what has been requested */
++ advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE);
++ tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
++ if (ecmd->advertising & ADVERTISED_10baseT_Half)
++ tmp |= ADVERTISE_10HALF;
++ if (ecmd->advertising & ADVERTISED_10baseT_Full)
++ tmp |= ADVERTISE_10FULL;
++ if (ecmd->advertising & ADVERTISED_100baseT_Half)
++ tmp |= ADVERTISE_100HALF;
++ if (ecmd->advertising & ADVERTISED_100baseT_Full)
++ tmp |= ADVERTISE_100FULL;
++ if (advert != tmp) {
++ mii->mdio_write(dev, mii->phy_id, MII_ADVERTISE, tmp);
++ mii->advertising = tmp;
++ }
++
++ /* turn on autonegotiation, and force a renegotiate */
++ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
++ bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
++ mii->mdio_write(dev, mii->phy_id, MII_BMCR, bmcr);
++
++ mii->force_media = 0;
++ } else {
++ u32 bmcr, tmp;
++
++ /* turn off autonegotiation, set speed and duplex */
++ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
++ tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX);
++ if (ecmd->speed == SPEED_100)
++ tmp |= BMCR_SPEED100;
++ if (ecmd->duplex == DUPLEX_FULL) {
++ tmp |= BMCR_FULLDPLX;
++ mii->full_duplex = 1;
++ } else
++ mii->full_duplex = 0;
++ if (bmcr != tmp)
++ mii->mdio_write(dev, mii->phy_id, MII_BMCR, tmp);
++
++ mii->force_media = 1;
++ }
++ return 0;
++}
++
++int _kc_mii_link_ok (struct mii_if_info *mii)
++{
++ /* first, a dummy read, needed to latch some MII phys */
++ mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR);
++ if (mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR) & BMSR_LSTATUS)
++ return 1;
++ return 0;
++}
++
++int _kc_mii_nway_restart (struct mii_if_info *mii)
++{
++ int bmcr;
++ int r = -EINVAL;
++
++ /* if autoneg is off, it's an error */
++ bmcr = mii->mdio_read(mii->dev, mii->phy_id, MII_BMCR);
++
++ if (bmcr & BMCR_ANENABLE) {
++ bmcr |= BMCR_ANRESTART;
++ mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR, bmcr);
++ r = 0;
++ }
++
++ return r;
++}
++
++void _kc_mii_check_link (struct mii_if_info *mii)
++{
++ int cur_link = mii_link_ok(mii);
++ int prev_link = netif_carrier_ok(mii->dev);
++
++ if (cur_link && !prev_link)
++ netif_carrier_on(mii->dev);
++ else if (prev_link && !cur_link)
++ netif_carrier_off(mii->dev);
++}
++
++int _kc_generic_mii_ioctl(struct mii_if_info *mii_if,
++ struct mii_ioctl_data *mii_data, int cmd,
++ unsigned int *duplex_chg_out)
++{
++ int rc = 0;
++ unsigned int duplex_changed = 0;
++
++ if (duplex_chg_out)
++ *duplex_chg_out = 0;
++
++ mii_data->phy_id &= mii_if->phy_id_mask;
++ mii_data->reg_num &= mii_if->reg_num_mask;
++
++ switch(cmd) {
++ case SIOCDEVPRIVATE: /* binary compat, remove in 2.5 */
++ case SIOCGMIIPHY:
++ mii_data->phy_id = mii_if->phy_id;
++ /* fall through */
++
++ case SIOCDEVPRIVATE + 1:/* binary compat, remove in 2.5 */
++ case SIOCGMIIREG:
++ mii_data->val_out =
++ mii_if->mdio_read(mii_if->dev, mii_data->phy_id,
++ mii_data->reg_num);
++ break;
++
++ case SIOCDEVPRIVATE + 2:/* binary compat, remove in 2.5 */
++ case SIOCSMIIREG: {
++ u16 val = mii_data->val_in;
++
++ if (!capable(CAP_NET_ADMIN))
++ return -EPERM;
++
++ if (mii_data->phy_id == mii_if->phy_id) {
++ switch(mii_data->reg_num) {
++ case MII_BMCR: {
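++ /*
++ * Mirror BMCR writes into the mii_if_info state: a reset or
++ * autoneg-enable clears forced mode, otherwise the forced
++ * duplex is tracked so a change can be reported to the caller
++ * via duplex_chg_out.
++ */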
++ unsigned int new_duplex = 0;
++ if (val & (BMCR_RESET|BMCR_ANENABLE))
++ mii_if->force_media = 0;
++ else
++ mii_if->force_media = 1;
++ if (mii_if->force_media &&
++ (val & BMCR_FULLDPLX))
++ new_duplex = 1;
++ if (mii_if->full_duplex != new_duplex) {
++ duplex_changed = 1;
++ mii_if->full_duplex = new_duplex;
++ }
++ break;
++ }
++ case MII_ADVERTISE:
++ mii_if->advertising = val;
++ break;
++ default:
++ /* do nothing */
++ break;
++ }
++ }
++
++ mii_if->mdio_write(mii_if->dev, mii_data->phy_id,
++ mii_data->reg_num, val);
++ break;
++ }
++
++ default:
++ rc = -EOPNOTSUPP;
++ break;
++ }
++
++ if ((rc == 0) && (duplex_chg_out) && (duplex_changed))
++ *duplex_chg_out = 1;
++
++ return rc;
++}
++