1 Index: linux-2.6.22/drivers/net/Kconfig
2 ===================================================================
3 --- linux-2.6.22.orig/drivers/net/Kconfig 2009-12-18 12:37:55.000000000 -0500
4 +++ linux-2.6.22/drivers/net/Kconfig 2009-12-18 12:39:22.000000000 -0500
6 <file:Documentation/networking/net-modules.txt>. The module
10 + tristate "Intel(R) 82575 Gigabit Ethernet support"
13 + This driver supports Intel(R) 82575 gigabit ethernet adapters.
14 + For more information on how to identify your adapter, go to the
15 + Adapter & Driver ID Guide at:
17 + <http://support.intel.com/support/network/adapter/pro100/21397.htm>
19 + For general information and support, go to the Intel support
22 + <http://support.intel.com>
24 + More specific information on configuring the driver is in
25 + <file:Documentation/networking/igb.txt>.
27 + To compile this driver as a module, choose M here and read
28 + <file:Documentation/networking/net-modules.txt>. The module
31 source "drivers/net/ixp2000/Kconfig"
34 Index: linux-2.6.22/drivers/net/Makefile
35 ===================================================================
36 --- linux-2.6.22.orig/drivers/net/Makefile 2009-12-18 12:38:07.000000000 -0500
37 +++ linux-2.6.22/drivers/net/Makefile 2009-12-18 12:39:22.000000000 -0500
39 obj-$(CONFIG_BONDING) += bonding/
40 obj-$(CONFIG_ATL1) += atl1/
41 obj-$(CONFIG_GIANFAR) += gianfar_driver.o
42 +obj-$(CONFIG_IGB) += igb/
44 gianfar_driver-objs := gianfar.o \
46 Index: linux-2.6.22/drivers/net/igb/Makefile
47 ===================================================================
48 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
49 +++ linux-2.6.22/drivers/net/igb/Makefile 2009-12-18 12:39:22.000000000 -0500
51 +################################################################################
53 +# Intel 82575 PCI-Express Ethernet Linux driver
54 +# Copyright(c) 1999 - 2009 Intel Corporation.
56 +# This program is free software; you can redistribute it and/or modify it
57 +# under the terms and conditions of the GNU General Public License,
58 +# version 2, as published by the Free Software Foundation.
60 +# This program is distributed in the hope it will be useful, but WITHOUT
61 +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
62 +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
65 +# You should have received a copy of the GNU General Public License along with
66 +# this program; if not, write to the Free Software Foundation, Inc.,
67 +# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
69 +# The full GNU General Public License is included in this distribution in
70 +# the file called "COPYING".
72 +# Contact Information:
73 +# Linux NICS <linux.nics@intel.com>
74 +# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
75 +# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
77 +################################################################################
80 +# Makefile for the Intel(R) 82575 PCI-Express ethernet driver
83 +obj-$(CONFIG_IGB) += igb.o
85 +igb-objs := igb_main.o igb_ethtool.o igb_param.o kcompat.o e1000_api.o e1000_manage.o e1000_82575.o \
86 + e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o
88 Index: linux-2.6.22/drivers/net/igb/e1000_82575.c
89 ===================================================================
90 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
91 +++ linux-2.6.22/drivers/net/igb/e1000_82575.c 2009-12-18 12:39:22.000000000 -0500
93 +/*******************************************************************************
95 + Intel(R) Gigabit Ethernet Linux driver
96 + Copyright(c) 2007-2009 Intel Corporation.
98 + This program is free software; you can redistribute it and/or modify it
99 + under the terms and conditions of the GNU General Public License,
100 + version 2, as published by the Free Software Foundation.
102 + This program is distributed in the hope it will be useful, but WITHOUT
103 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
104 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
107 + You should have received a copy of the GNU General Public License along with
108 + this program; if not, write to the Free Software Foundation, Inc.,
109 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
111 + The full GNU General Public License is included in this distribution in
112 + the file called "COPYING".
114 + Contact Information:
115 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
116 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
118 +*******************************************************************************/
121 + * 82575EB Gigabit Network Connection
122 + * 82575EB Gigabit Backplane Connection
123 + * 82575GB Gigabit Network Connection
124 + * 82576 Gigabit Network Connection
125 + * 82576 Quad Port Gigabit Mezzanine Adapter
128 +#include "e1000_api.h"
130 +static s32 e1000_init_phy_params_82575(struct e1000_hw *hw);
131 +static s32 e1000_init_nvm_params_82575(struct e1000_hw *hw);
132 +static s32 e1000_init_mac_params_82575(struct e1000_hw *hw);
133 +static s32 e1000_acquire_phy_82575(struct e1000_hw *hw);
134 +static void e1000_release_phy_82575(struct e1000_hw *hw);
135 +static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw);
136 +static void e1000_release_nvm_82575(struct e1000_hw *hw);
137 +static s32 e1000_check_for_link_82575(struct e1000_hw *hw);
138 +static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw);
139 +static s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
141 +static s32 e1000_init_hw_82575(struct e1000_hw *hw);
142 +static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw);
143 +static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
145 +static s32 e1000_reset_hw_82575(struct e1000_hw *hw);
146 +static s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw,
148 +static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw);
149 +static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw);
150 +static s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data);
151 +static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw,
152 + u32 offset, u16 data);
153 +static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw);
154 +static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
155 +static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
156 + u16 *speed, u16 *duplex);
157 +static s32 e1000_get_phy_id_82575(struct e1000_hw *hw);
158 +static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
159 +static bool e1000_sgmii_active_82575(struct e1000_hw *hw);
160 +static s32 e1000_reset_init_script_82575(struct e1000_hw *hw);
161 +static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw);
162 +static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw);
163 +static void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw);
164 +static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw);
167 + * e1000_init_phy_params_82575 - Init PHY func ptrs.
168 + * @hw: pointer to the HW structure
170 +static s32 e1000_init_phy_params_82575(struct e1000_hw *hw)
172 + struct e1000_phy_info *phy = &hw->phy;
173 + s32 ret_val = E1000_SUCCESS;
175 + DEBUGFUNC("e1000_init_phy_params_82575");
177 + if (hw->phy.media_type != e1000_media_type_copper) {
178 + phy->type = e1000_phy_none;
182 + phy->ops.power_up = e1000_power_up_phy_copper;
183 + phy->ops.power_down = e1000_power_down_phy_copper_82575;
185 + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
186 + phy->reset_delay_us = 100;
188 + phy->ops.acquire = e1000_acquire_phy_82575;
189 + phy->ops.check_reset_block = e1000_check_reset_block_generic;
190 + phy->ops.commit = e1000_phy_sw_reset_generic;
191 + phy->ops.get_cfg_done = e1000_get_cfg_done_82575;
192 + phy->ops.release = e1000_release_phy_82575;
194 + if (e1000_sgmii_active_82575(hw)) {
195 + phy->ops.reset = e1000_phy_hw_reset_sgmii_82575;
196 + phy->ops.read_reg = e1000_read_phy_reg_sgmii_82575;
197 + phy->ops.write_reg = e1000_write_phy_reg_sgmii_82575;
199 + phy->ops.reset = e1000_phy_hw_reset_generic;
200 + phy->ops.read_reg = e1000_read_phy_reg_igp;
201 + phy->ops.write_reg = e1000_write_phy_reg_igp;
204 + /* Set phy->phy_addr and phy->id. */
205 + ret_val = e1000_get_phy_id_82575(hw);
207 + /* Verify phy id and set remaining function pointers */
209 + case M88E1111_I_PHY_ID:
210 + phy->type = e1000_phy_m88;
211 + phy->ops.check_polarity = e1000_check_polarity_m88;
212 + phy->ops.get_info = e1000_get_phy_info_m88;
213 + phy->ops.get_cable_length = e1000_get_cable_length_m88;
214 + phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
216 + case IGP03E1000_E_PHY_ID:
217 + case IGP04E1000_E_PHY_ID:
218 + phy->type = e1000_phy_igp_3;
219 + phy->ops.check_polarity = e1000_check_polarity_igp;
220 + phy->ops.get_info = e1000_get_phy_info_igp;
221 + phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
222 + phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
223 + phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82575;
224 + phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_generic;
227 + ret_val = -E1000_ERR_PHY;
236 + * e1000_init_nvm_params_82575 - Init NVM func ptrs.
237 + * @hw: pointer to the HW structure
239 +static s32 e1000_init_nvm_params_82575(struct e1000_hw *hw)
241 + struct e1000_nvm_info *nvm = &hw->nvm;
242 + u32 eecd = E1000_READ_REG(hw, E1000_EECD);
245 + DEBUGFUNC("e1000_init_nvm_params_82575");
247 + nvm->opcode_bits = 8;
248 + nvm->delay_usec = 1;
249 + switch (nvm->override) {
250 + case e1000_nvm_override_spi_large:
251 + nvm->page_size = 32;
252 + nvm->address_bits = 16;
254 + case e1000_nvm_override_spi_small:
255 + nvm->page_size = 8;
256 + nvm->address_bits = 8;
259 + nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
260 + nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
264 + nvm->type = e1000_nvm_eeprom_spi;
266 + size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
267 + E1000_EECD_SIZE_EX_SHIFT);
270 + * Added to a constant, "size" becomes the left-shift value
271 + * for setting word_size.
273 + size += NVM_WORD_SIZE_BASE_SHIFT;
275 + /* EEPROM access above 16k is unsupported */
278 + nvm->word_size = 1 << size;
280 + /* Function Pointers */
281 + nvm->ops.acquire = e1000_acquire_nvm_82575;
282 + nvm->ops.read = e1000_read_nvm_eerd;
283 + nvm->ops.release = e1000_release_nvm_82575;
284 + nvm->ops.update = e1000_update_nvm_checksum_generic;
285 + nvm->ops.valid_led_default = e1000_valid_led_default_82575;
286 + nvm->ops.validate = e1000_validate_nvm_checksum_generic;
287 + nvm->ops.write = e1000_write_nvm_spi;
289 + return E1000_SUCCESS;
293 + * e1000_init_mac_params_82575 - Init MAC func ptrs.
294 + * @hw: pointer to the HW structure
296 +static s32 e1000_init_mac_params_82575(struct e1000_hw *hw)
298 + struct e1000_mac_info *mac = &hw->mac;
299 + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
302 + DEBUGFUNC("e1000_init_mac_params_82575");
304 + /* Set media type */
306 + * The 82575 uses bits 22:23 for link mode. The mode can be changed
307 + * based on the EEPROM. We cannot rely upon device ID. There
308 + * is no distinguishable difference between fiber and internal
309 + * SerDes mode on the 82575. There can be an external PHY attached
310 + * on the SGMII interface. For this, we'll set sgmii_active to true.
312 + hw->phy.media_type = e1000_media_type_copper;
313 + dev_spec->sgmii_active = false;
315 + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
316 + switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
317 + case E1000_CTRL_EXT_LINK_MODE_SGMII:
318 + dev_spec->sgmii_active = true;
319 + ctrl_ext |= E1000_CTRL_I2C_ENA;
321 + case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
322 + hw->phy.media_type = e1000_media_type_internal_serdes;
323 + ctrl_ext |= E1000_CTRL_I2C_ENA;
326 + ctrl_ext &= ~E1000_CTRL_I2C_ENA;
330 + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
332 + /* Set mta register count */
333 + mac->mta_reg_count = 128;
334 + /* Set uta register count */
335 + mac->uta_reg_count = (hw->mac.type == e1000_82575) ? 0 : 128;
336 + /* Set rar entry count */
337 + mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
338 + if (mac->type == e1000_82576)
339 + mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
340 + /* Set if part includes ASF firmware */
341 + mac->asf_firmware_present = true;
342 + /* Set if manageability features are enabled. */
343 + mac->arc_subsystem_valid =
344 + (E1000_READ_REG(hw, E1000_FWSM) & E1000_FWSM_MODE_MASK)
347 + /* Function pointers */
349 + /* bus type/speed/width */
350 + mac->ops.get_bus_info = e1000_get_bus_info_pcie_generic;
352 + mac->ops.reset_hw = e1000_reset_hw_82575;
353 + /* hw initialization */
354 + mac->ops.init_hw = e1000_init_hw_82575;
356 + mac->ops.setup_link = e1000_setup_link_generic;
357 + /* physical interface link setup */
358 + mac->ops.setup_physical_interface =
359 + (hw->phy.media_type == e1000_media_type_copper)
360 + ? e1000_setup_copper_link_82575
361 + : e1000_setup_serdes_link_82575;
362 + /* physical interface shutdown */
363 + mac->ops.shutdown_serdes = e1000_shutdown_serdes_link_82575;
364 + /* check for link */
365 + mac->ops.check_for_link = e1000_check_for_link_82575;
366 + /* receive address register setting */
367 + mac->ops.rar_set = e1000_rar_set_generic;
368 + /* read mac address */
369 + mac->ops.read_mac_addr = e1000_read_mac_addr_82575;
370 + /* multicast address update */
371 + mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
373 + mac->ops.write_vfta = e1000_write_vfta_generic;
374 + /* clearing VFTA */
375 + mac->ops.clear_vfta = e1000_clear_vfta_generic;
377 + mac->ops.mta_set = e1000_mta_set_generic;
379 + mac->ops.id_led_init = e1000_id_led_init_generic;
381 + mac->ops.blink_led = e1000_blink_led_generic;
383 + mac->ops.setup_led = e1000_setup_led_generic;
385 + mac->ops.cleanup_led = e1000_cleanup_led_generic;
386 + /* turn on/off LED */
387 + mac->ops.led_on = e1000_led_on_generic;
388 + mac->ops.led_off = e1000_led_off_generic;
389 + /* clear hardware counters */
390 + mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82575;
392 + mac->ops.get_link_up_info = e1000_get_link_up_info_82575;
394 + /* set lan id for port to determine which phy lock to use */
395 + hw->mac.ops.set_lan_id(hw);
397 + return E1000_SUCCESS;
401 + * e1000_init_function_pointers_82575 - Init func ptrs.
402 + * @hw: pointer to the HW structure
404 + * Called to initialize all function pointers and parameters.
406 +void e1000_init_function_pointers_82575(struct e1000_hw *hw)
408 + DEBUGFUNC("e1000_init_function_pointers_82575");
410 + hw->mac.ops.init_params = e1000_init_mac_params_82575;
411 + hw->nvm.ops.init_params = e1000_init_nvm_params_82575;
412 + hw->phy.ops.init_params = e1000_init_phy_params_82575;
413 + hw->mbx.ops.init_params = e1000_init_mbx_params_pf;
417 + * e1000_acquire_phy_82575 - Acquire rights to access PHY
418 + * @hw: pointer to the HW structure
420 + * Acquire access rights to the correct PHY.
422 +static s32 e1000_acquire_phy_82575(struct e1000_hw *hw)
424 + u16 mask = E1000_SWFW_PHY0_SM;
426 + DEBUGFUNC("e1000_acquire_phy_82575");
428 + if (hw->bus.func == E1000_FUNC_1)
429 + mask = E1000_SWFW_PHY1_SM;
431 + return e1000_acquire_swfw_sync_82575(hw, mask);
435 + * e1000_release_phy_82575 - Release rights to access PHY
436 + * @hw: pointer to the HW structure
438 + * A wrapper to release access rights to the correct PHY.
440 +static void e1000_release_phy_82575(struct e1000_hw *hw)
442 + u16 mask = E1000_SWFW_PHY0_SM;
444 + DEBUGFUNC("e1000_release_phy_82575");
446 + if (hw->bus.func == E1000_FUNC_1)
447 + mask = E1000_SWFW_PHY1_SM;
449 + e1000_release_swfw_sync_82575(hw, mask);
453 + * e1000_read_phy_reg_sgmii_82575 - Read PHY register using sgmii
454 + * @hw: pointer to the HW structure
455 + * @offset: register offset to be read
456 + * @data: pointer to the read data
458 + * Reads the PHY register at offset using the serial gigabit media independent
459 + * interface and stores the retrieved information in data.
461 +static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
464 + s32 ret_val = -E1000_ERR_PARAM;
466 + DEBUGFUNC("e1000_read_phy_reg_sgmii_82575");
468 + if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
469 + DEBUGOUT1("PHY Address %u is out of range\n", offset);
473 + ret_val = hw->phy.ops.acquire(hw);
477 + ret_val = e1000_read_phy_reg_i2c(hw, offset, data);
479 + hw->phy.ops.release(hw);
486 + * e1000_write_phy_reg_sgmii_82575 - Write PHY register using sgmii
487 + * @hw: pointer to the HW structure
488 + * @offset: register offset to write to
489 + * @data: data to write at register offset
491 + * Writes the data to PHY register at the offset using the serial gigabit
492 + * media independent interface.
494 +static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
497 + s32 ret_val = -E1000_ERR_PARAM;
499 + DEBUGFUNC("e1000_write_phy_reg_sgmii_82575");
501 + if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
502 + DEBUGOUT1("PHY Address %d is out of range\n", offset);
506 + ret_val = hw->phy.ops.acquire(hw);
510 + ret_val = e1000_write_phy_reg_i2c(hw, offset, data);
512 + hw->phy.ops.release(hw);
519 + * e1000_get_phy_id_82575 - Retrieve PHY addr and id
520 + * @hw: pointer to the HW structure
522 + * Retrieves the PHY address and ID for both PHY's which do and do not use
525 +static s32 e1000_get_phy_id_82575(struct e1000_hw *hw)
527 + struct e1000_phy_info *phy = &hw->phy;
528 + s32 ret_val = E1000_SUCCESS;
532 + DEBUGFUNC("e1000_get_phy_id_82575");
535 + * For SGMII PHYs, we try the list of possible addresses until
536 + * we find one that works. For non-SGMII PHYs
537 + * (e.g. integrated copper PHYs), an address of 1 should
538 + * work. The result of this function should mean phy->phy_addr
539 + * and phy->id are set correctly.
541 + if (!e1000_sgmii_active_82575(hw)) {
543 + ret_val = e1000_get_phy_id(hw);
547 + /* Power on sgmii phy if it is disabled */
548 + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
549 + E1000_WRITE_REG(hw, E1000_CTRL_EXT,
550 + ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA);
551 + E1000_WRITE_FLUSH(hw);
555 + * The address field in the I2CCMD register is 3 bits and 0 is invalid.
556 + * Therefore, we need to test 1-7
558 + for (phy->addr = 1; phy->addr < 8; phy->addr++) {
559 + ret_val = e1000_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id);
560 + if (ret_val == E1000_SUCCESS) {
561 + DEBUGOUT2("Vendor ID 0x%08X read at address %u\n",
565 + * At the time of this writing, The M88 part is
566 + * the only supported SGMII PHY product.
568 + if (phy_id == M88_VENDOR)
571 + DEBUGOUT1("PHY address %u was unreadable\n",
576 + /* A valid PHY type couldn't be found. */
577 + if (phy->addr == 8) {
579 + ret_val = -E1000_ERR_PHY;
581 + ret_val = e1000_get_phy_id(hw);
584 + /* restore previous sfp cage power state */
585 + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
592 + * e1000_phy_hw_reset_sgmii_82575 - Performs a PHY reset
593 + * @hw: pointer to the HW structure
595 + * Resets the PHY using the serial gigabit media independent interface.
597 +static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
599 + s32 ret_val = E1000_SUCCESS;
601 + DEBUGFUNC("e1000_phy_hw_reset_sgmii_82575");
604 + * This isn't a true "hard" reset, but is the only reset
605 + * available to us at this time.
608 + DEBUGOUT("Soft resetting SGMII attached PHY...\n");
610 + if (!(hw->phy.ops.write_reg))
 614 +	 * SFP documentation requires the following to configure the SFP module
 615 +	 * to work on SGMII.  No further documentation is given.
617 + ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084);
621 + ret_val = hw->phy.ops.commit(hw);
628 + * e1000_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state
629 + * @hw: pointer to the HW structure
630 + * @active: true to enable LPLU, false to disable
632 + * Sets the LPLU D0 state according to the active flag. When
633 + * activating LPLU this function also disables smart speed
634 + * and vice versa. LPLU will not be activated unless the
635 + * device autonegotiation advertisement meets standards of
636 + * either 10 or 10/100 or 10/100/1000 at all duplexes.
637 + * This is a function pointer entry point only called by
638 + * PHY setup routines.
640 +static s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
642 + struct e1000_phy_info *phy = &hw->phy;
643 + s32 ret_val = E1000_SUCCESS;
646 + DEBUGFUNC("e1000_set_d0_lplu_state_82575");
648 + if (!(hw->phy.ops.read_reg))
651 + ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
656 + data |= IGP02E1000_PM_D0_LPLU;
657 + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
662 + /* When LPLU is enabled, we should disable SmartSpeed */
663 + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
665 + data &= ~IGP01E1000_PSCFR_SMART_SPEED;
666 + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
671 + data &= ~IGP02E1000_PM_D0_LPLU;
672 + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
675 + * LPLU and SmartSpeed are mutually exclusive. LPLU is used
676 + * during Dx states where the power conservation is most
677 + * important. During driver activity we should enable
678 + * SmartSpeed, so performance is maintained.
680 + if (phy->smart_speed == e1000_smart_speed_on) {
681 + ret_val = phy->ops.read_reg(hw,
682 + IGP01E1000_PHY_PORT_CONFIG,
687 + data |= IGP01E1000_PSCFR_SMART_SPEED;
688 + ret_val = phy->ops.write_reg(hw,
689 + IGP01E1000_PHY_PORT_CONFIG,
693 + } else if (phy->smart_speed == e1000_smart_speed_off) {
694 + ret_val = phy->ops.read_reg(hw,
695 + IGP01E1000_PHY_PORT_CONFIG,
700 + data &= ~IGP01E1000_PSCFR_SMART_SPEED;
701 + ret_val = phy->ops.write_reg(hw,
702 + IGP01E1000_PHY_PORT_CONFIG,
714 + * e1000_acquire_nvm_82575 - Request for access to EEPROM
715 + * @hw: pointer to the HW structure
717 + * Acquire the necessary semaphores for exclusive access to the EEPROM.
718 + * Set the EEPROM access request bit and wait for EEPROM access grant bit.
719 + * Return successful if access grant bit set, else clear the request for
720 + * EEPROM access and return -E1000_ERR_NVM (-1).
722 +static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw)
726 + DEBUGFUNC("e1000_acquire_nvm_82575");
728 + ret_val = e1000_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
732 + ret_val = e1000_acquire_nvm_generic(hw);
735 + e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
742 + * e1000_release_nvm_82575 - Release exclusive access to EEPROM
743 + * @hw: pointer to the HW structure
745 + * Stop any current commands to the EEPROM and clear the EEPROM request bit,
746 + * then release the semaphores acquired.
748 +static void e1000_release_nvm_82575(struct e1000_hw *hw)
750 + DEBUGFUNC("e1000_release_nvm_82575");
752 + e1000_release_nvm_generic(hw);
753 + e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
757 + * e1000_acquire_swfw_sync_82575 - Acquire SW/FW semaphore
758 + * @hw: pointer to the HW structure
759 + * @mask: specifies which semaphore to acquire
761 + * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
762 + * will also specify which port we're acquiring the lock for.
764 +static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
768 + u32 fwmask = mask << 16;
769 + s32 ret_val = E1000_SUCCESS;
770 + s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
772 + DEBUGFUNC("e1000_acquire_swfw_sync_82575");
774 + while (i < timeout) {
775 + if (e1000_get_hw_semaphore_generic(hw)) {
776 + ret_val = -E1000_ERR_SWFW_SYNC;
780 + swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
781 + if (!(swfw_sync & (fwmask | swmask)))
785 + * Firmware currently using resource (fwmask)
786 + * or other software thread using resource (swmask)
788 + e1000_put_hw_semaphore_generic(hw);
793 + if (i == timeout) {
794 + DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
795 + ret_val = -E1000_ERR_SWFW_SYNC;
799 + swfw_sync |= swmask;
800 + E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
802 + e1000_put_hw_semaphore_generic(hw);
809 + * e1000_release_swfw_sync_82575 - Release SW/FW semaphore
810 + * @hw: pointer to the HW structure
811 + * @mask: specifies which semaphore to acquire
813 + * Release the SW/FW semaphore used to access the PHY or NVM. The mask
814 + * will also specify which port we're releasing the lock for.
816 +static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
820 + DEBUGFUNC("e1000_release_swfw_sync_82575");
822 + while (e1000_get_hw_semaphore_generic(hw) != E1000_SUCCESS);
825 + swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
826 + swfw_sync &= ~mask;
827 + E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
829 + e1000_put_hw_semaphore_generic(hw);
833 + * e1000_get_cfg_done_82575 - Read config done bit
834 + * @hw: pointer to the HW structure
836 + * Read the management control register for the config done bit for
837 + * completion status. NOTE: silicon which is EEPROM-less will fail trying
838 + * to read the config done bit, so an error is *ONLY* logged and returns
839 + * E1000_SUCCESS. If we were to return with error, EEPROM-less silicon
840 + * would not be able to be reset or change link.
842 +static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw)
844 + s32 timeout = PHY_CFG_TIMEOUT;
845 + s32 ret_val = E1000_SUCCESS;
846 + u32 mask = E1000_NVM_CFG_DONE_PORT_0;
848 + DEBUGFUNC("e1000_get_cfg_done_82575");
850 + if (hw->bus.func == E1000_FUNC_1)
851 + mask = E1000_NVM_CFG_DONE_PORT_1;
853 + if (E1000_READ_REG(hw, E1000_EEMNGCTL) & mask)
859 + DEBUGOUT("MNG configuration cycle has not completed.\n");
861 + /* If EEPROM is not marked present, init the PHY manually */
862 + if (((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0) &&
863 + (hw->phy.type == e1000_phy_igp_3))
864 + e1000_phy_init_script_igp3(hw);
870 + * e1000_get_link_up_info_82575 - Get link speed/duplex info
871 + * @hw: pointer to the HW structure
872 + * @speed: stores the current speed
873 + * @duplex: stores the current duplex
875 + * This is a wrapper function, if using the serial gigabit media independent
876 + * interface, use PCS to retrieve the link speed and duplex information.
877 + * Otherwise, use the generic function to get the link speed and duplex info.
879 +static s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
884 + DEBUGFUNC("e1000_get_link_up_info_82575");
886 + if (hw->phy.media_type != e1000_media_type_copper)
887 + ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, speed,
890 + ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed,
897 + * e1000_check_for_link_82575 - Check for link
898 + * @hw: pointer to the HW structure
900 + * If sgmii is enabled, then use the pcs register to determine link, otherwise
901 + * use the generic interface for determining link.
903 +static s32 e1000_check_for_link_82575(struct e1000_hw *hw)
908 + DEBUGFUNC("e1000_check_for_link_82575");
910 + if (hw->phy.media_type != e1000_media_type_copper) {
911 + ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, &speed,
914 + * Use this flag to determine if link needs to be checked or
915 + * not. If we have link clear the flag so that we do not
916 + * continue to check for link.
918 + hw->mac.get_link_status = !hw->mac.serdes_has_link;
920 + ret_val = e1000_check_for_copper_link_generic(hw);
927 + * e1000_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
928 + * @hw: pointer to the HW structure
929 + * @speed: stores the current speed
930 + * @duplex: stores the current duplex
932 + * Using the physical coding sub-layer (PCS), retrieve the current speed and
933 + * duplex, then store the values in the pointers provided.
935 +static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
936 + u16 *speed, u16 *duplex)
938 + struct e1000_mac_info *mac = &hw->mac;
941 + DEBUGFUNC("e1000_get_pcs_speed_and_duplex_82575");
943 + /* Set up defaults for the return values of this function */
944 + mac->serdes_has_link = false;
949 + * Read the PCS Status register for link state. For non-copper mode,
950 + * the status register is not accurate. The PCS status register is
953 + pcs = E1000_READ_REG(hw, E1000_PCS_LSTAT);
956 + * The link up bit determines when link is up on autoneg. The sync ok
957 + * gets set once both sides sync up and agree upon link. Stable link
958 + * can be determined by checking for both link up and link sync ok
960 + if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) {
961 + mac->serdes_has_link = true;
963 + /* Detect and store PCS speed */
964 + if (pcs & E1000_PCS_LSTS_SPEED_1000) {
965 + *speed = SPEED_1000;
966 + } else if (pcs & E1000_PCS_LSTS_SPEED_100) {
967 + *speed = SPEED_100;
972 + /* Detect and store PCS duplex */
973 + if (pcs & E1000_PCS_LSTS_DUPLEX_FULL) {
974 + *duplex = FULL_DUPLEX;
976 + *duplex = HALF_DUPLEX;
980 + return E1000_SUCCESS;
984 + * e1000_shutdown_serdes_link_82575 - Remove link during power down
985 + * @hw: pointer to the HW structure
987 + * In the case of serdes shut down sfp and PCS on driver unload
988 + * when management pass thru is not enabled.
990 +void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw)
993 + u16 eeprom_data = 0;
995 + if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
996 + !e1000_sgmii_active_82575(hw))
999 + if (hw->bus.func == E1000_FUNC_0)
1000 + hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
1001 + else if (hw->bus.func == E1000_FUNC_1)
1002 + hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
1005 + * If APM is not enabled in the EEPROM and management interface is
1006 + * not enabled, then power down.
1008 + if (!(eeprom_data & E1000_NVM_APME_82575) &&
1009 + !e1000_enable_mng_pass_thru(hw)) {
1010 + /* Disable PCS to turn off link */
1011 + reg = E1000_READ_REG(hw, E1000_PCS_CFG0);
1012 + reg &= ~E1000_PCS_CFG_PCS_EN;
1013 + E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg);
1015 + /* shutdown the laser */
1016 + reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1017 + reg |= E1000_CTRL_EXT_SDP3_DATA;
1018 + E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
1020 + /* flush the write to verify completion */
1021 + E1000_WRITE_FLUSH(hw);
1029 + * e1000_reset_hw_82575 - Reset hardware
1030 + * @hw: pointer to the HW structure
1032 + * This resets the hardware into a known state.
1034 +static s32 e1000_reset_hw_82575(struct e1000_hw *hw)
1039 + DEBUGFUNC("e1000_reset_hw_82575");
1042 + * Prevent the PCI-E bus from sticking if there is no TLP connection
1043 + * on the last TLP read/write transaction when MAC is reset.
1045 + ret_val = e1000_disable_pcie_master_generic(hw);
1047 + DEBUGOUT("PCI-E Master disable polling has failed.\n");
1050 + /* set the completion timeout for interface */
1051 + ret_val = e1000_set_pcie_completion_timeout(hw);
1053 + DEBUGOUT("PCI-E Set completion timeout has failed.\n");
1056 + DEBUGOUT("Masking off all interrupts\n");
1057 + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
1059 + E1000_WRITE_REG(hw, E1000_RCTL, 0);
1060 + E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
1061 + E1000_WRITE_FLUSH(hw);
1065 + ctrl = E1000_READ_REG(hw, E1000_CTRL);
1067 + DEBUGOUT("Issuing a global reset to MAC\n");
1068 + E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
1070 + ret_val = e1000_get_auto_rd_done_generic(hw);
1073 + * When auto config read does not complete, do not
1074 + * return with an error. This can happen in situations
1075 + * where there is no eeprom and prevents getting link.
1077 + DEBUGOUT("Auto Read Done did not complete\n");
1080 + /* If EEPROM is not present, run manual init scripts */
1081 + if ((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0)
1082 + e1000_reset_init_script_82575(hw);
1084 + /* Clear any pending interrupt events. */
1085 + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
1086 + icr = E1000_READ_REG(hw, E1000_ICR);
1088 + /* Install any alternate MAC address into RAR0 */
1089 + ret_val = e1000_check_alt_mac_addr_generic(hw);
1095 + * e1000_init_hw_82575 - Initialize hardware
1096 + * @hw: pointer to the HW structure
1098 + * This inits the hardware readying it for operation.
1100 +static s32 e1000_init_hw_82575(struct e1000_hw *hw)
1102 + struct e1000_mac_info *mac = &hw->mac;
1104 + u16 i, rar_count = mac->rar_entry_count;
1106 + DEBUGFUNC("e1000_init_hw_82575");
1108 + /* Initialize identification LED */
1109 + ret_val = mac->ops.id_led_init(hw);
1111 + DEBUGOUT("Error initializing identification LED\n");
1112 + /* This is not fatal and we should not stop init due to this */
1115 + /* Disabling VLAN filtering */
1116 + DEBUGOUT("Initializing the IEEE VLAN\n");
1117 + mac->ops.clear_vfta(hw);
1119 + /* Setup the receive address */
1120 + e1000_init_rx_addrs_generic(hw, rar_count);
1122 + /* Zero out the Multicast HASH table */
1123 + DEBUGOUT("Zeroing the MTA\n");
1124 + for (i = 0; i < mac->mta_reg_count; i++)
1125 + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
1127 + /* Zero out the Unicast HASH table */
1128 + DEBUGOUT("Zeroing the UTA\n");
1129 + for (i = 0; i < mac->uta_reg_count; i++)
1130 + E1000_WRITE_REG_ARRAY(hw, E1000_UTA, i, 0);
1132 + /* Setup link and flow control */
1133 + ret_val = mac->ops.setup_link(hw);
1136 + * Clear all of the statistics registers (clear on read). It is
1137 + * important that we do this after we have tried to establish link
1138 + * because the symbol error count will increment wildly if there
1141 + e1000_clear_hw_cntrs_82575(hw);
1147 + * e1000_setup_copper_link_82575 - Configure copper link settings
1148 + * @hw: pointer to the HW structure
1150 + * Configures the link for auto-neg or forced speed and duplex. Then we check
1151 + * for link, once link is established calls to configure collision distance
1152 + * and flow control are called.
1154 +static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw)
1159 + DEBUGFUNC("e1000_setup_copper_link_82575");
1161 + ctrl = E1000_READ_REG(hw, E1000_CTRL);
1162 + ctrl |= E1000_CTRL_SLU;
1163 + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
1164 + E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
1166 + ret_val = e1000_setup_serdes_link_82575(hw);
1170 + if (e1000_sgmii_active_82575(hw) && !hw->phy.reset_disable) {
1171 + ret_val = hw->phy.ops.reset(hw);
1173 + DEBUGOUT("Error resetting the PHY.\n");
1177 + switch (hw->phy.type) {
1178 + case e1000_phy_m88:
1179 + ret_val = e1000_copper_link_setup_m88(hw);
1181 + case e1000_phy_igp_3:
1182 + ret_val = e1000_copper_link_setup_igp(hw);
1185 + ret_val = -E1000_ERR_PHY;
1192 + ret_val = e1000_setup_copper_link_generic(hw);
1198 + * e1000_setup_serdes_link_82575 - Setup link for serdes
1199 + * @hw: pointer to the HW structure
1201 + * Configure the physical coding sub-layer (PCS) link. The PCS link is
1202 + * used on copper connections where the serialized gigabit media independent
1203 + * interface (sgmii), or serdes fiber is being used. Configures the link
1204 + * for auto-negotiation or forces speed/duplex.
1206 +static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw)
1208 + u32 ctrl_reg, reg;
1210 + DEBUGFUNC("e1000_setup_serdes_link_82575");
1212 + if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
1213 + !e1000_sgmii_active_82575(hw))
1214 + return E1000_SUCCESS;
1217 + * On the 82575, SerDes loopback mode persists until it is
1218 + * explicitly turned off or a power cycle is performed. A read to
1219 + * the register does not indicate its status. Therefore, we ensure
1220 + * loopback mode is disabled during initialization.
1222 + E1000_WRITE_REG(hw, E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
1224 + /* power on the sfp cage if present */
1225 + reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1226 + reg &= ~E1000_CTRL_EXT_SDP3_DATA;
1227 + E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
1229 + ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
1230 + ctrl_reg |= E1000_CTRL_SLU;
1232 + if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) {
1233 + /* set both sw defined pins */
1234 + ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1;
1236 + /* Set switch control to serdes energy detect */
1237 + reg = E1000_READ_REG(hw, E1000_CONNSW);
1238 + reg |= E1000_CONNSW_ENRGSRC;
1239 + E1000_WRITE_REG(hw, E1000_CONNSW, reg);
1242 + reg = E1000_READ_REG(hw, E1000_PCS_LCTL);
1244 + if (e1000_sgmii_active_82575(hw)) {
1245 + /* allow time for SFP cage to power up phy */
1248 + /* AN time out should be disabled for SGMII mode */
1249 + reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT);
1251 + ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
1252 + E1000_CTRL_FD | E1000_CTRL_FRCDPX;
1255 + E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
1258 + * New SerDes mode allows for forcing speed or autonegotiating speed
1259 + * at 1gb. Autoneg should be default set by most drivers. This is the
1260 + * mode that will be compatible with older link partners and switches.
1261 + * However, both are supported by the hardware and some drivers/tools.
1264 + reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
1265 + E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);
1268 + * We force flow control to prevent the CTRL register values from being
1269 + * overwritten by the autonegotiated flow control values
1271 + reg |= E1000_PCS_LCTL_FORCE_FCTRL;
1274 + * we always set sgmii to autoneg since it is the phy that will be
1275 + * forcing the link and the serdes is just a go-between
1277 + if (hw->mac.autoneg || e1000_sgmii_active_82575(hw)) {
1278 + /* Set PCS register for autoneg */
1279 + reg |= E1000_PCS_LCTL_FSV_1000 | /* Force 1000 */
1280 + E1000_PCS_LCTL_FDV_FULL | /* SerDes Full dplx */
1281 + E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */
1282 + E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */
1283 + DEBUGOUT1("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
1285 + /* Check for duplex first */
1286 + if (hw->mac.forced_speed_duplex & E1000_ALL_FULL_DUPLEX)
1287 + reg |= E1000_PCS_LCTL_FDV_FULL;
1289 + /* No need to check for 1000/full since the spec states that
1290 + * it requires autoneg to be enabled */
1291 + /* Now set speed */
1292 + if (hw->mac.forced_speed_duplex & E1000_ALL_100_SPEED)
1293 + reg |= E1000_PCS_LCTL_FSV_100;
1295 + /* Force speed and force link */
1296 + reg |= E1000_PCS_LCTL_FSD |
1297 + E1000_PCS_LCTL_FORCE_LINK |
1298 + E1000_PCS_LCTL_FLV_LINK_UP;
1300 + DEBUGOUT1("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
1303 + E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg);
1305 + if (!e1000_sgmii_active_82575(hw))
1306 + e1000_force_mac_fc_generic(hw);
1308 + return E1000_SUCCESS;
1312 + * e1000_valid_led_default_82575 - Verify a valid default LED config
1313 + * @hw: pointer to the HW structure
1314 + * @data: pointer to the NVM (EEPROM)
1316 + * Read the EEPROM for the current default LED configuration. If the
1317 + * LED configuration is not valid, set to a valid LED configuration.
1319 +static s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data)
1323 + DEBUGFUNC("e1000_valid_led_default_82575");
1325 + ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
1327 + DEBUGOUT("NVM Read Error\n");
1331 + if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
1332 + switch(hw->phy.media_type) {
1333 + case e1000_media_type_internal_serdes:
1334 + *data = ID_LED_DEFAULT_82575_SERDES;
1336 + case e1000_media_type_copper:
1338 + *data = ID_LED_DEFAULT;
1347 + * e1000_sgmii_active_82575 - Return sgmii state
1348 + * @hw: pointer to the HW structure
1350 + * 82575 silicon has a serialized gigabit media independent interface (sgmii)
1351 + * which can be enabled for use in the embedded applications. Simply
1352 + * return the current state of the sgmii interface.
1354 +static bool e1000_sgmii_active_82575(struct e1000_hw *hw)
1356 + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
1357 + return dev_spec->sgmii_active;
1361 + * e1000_reset_init_script_82575 - Inits HW defaults after reset
1362 + * @hw: pointer to the HW structure
1364 + * Inits recommended HW defaults after a reset when there is no EEPROM
1365 + * detected. This is only for the 82575.
1367 +static s32 e1000_reset_init_script_82575(struct e1000_hw* hw)
1369 + DEBUGFUNC("e1000_reset_init_script_82575");
1371 + if (hw->mac.type == e1000_82575) {
1372 + DEBUGOUT("Running reset init script for 82575\n");
1373 + /* SerDes configuration via SERDESCTRL */
1374 + e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x00, 0x0C);
1375 + e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x01, 0x78);
1376 + e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x1B, 0x23);
1377 + e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x23, 0x15);
1379 + /* CCM configuration via CCMCTL register */
1380 + e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x14, 0x00);
1381 + e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x10, 0x00);
1383 + /* PCIe lanes configuration */
1384 + e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x00, 0xEC);
1385 + e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x61, 0xDF);
1386 + e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x34, 0x05);
1387 + e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x2F, 0x81);
1389 + /* PCIe PLL Configuration */
1390 + e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x02, 0x47);
1391 + e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x14, 0x00);
1392 + e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x10, 0x00);
1395 + return E1000_SUCCESS;
1399 + * e1000_read_mac_addr_82575 - Read device MAC address
1400 + * @hw: pointer to the HW structure
1402 +static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw)
1404 + s32 ret_val = E1000_SUCCESS;
1406 + DEBUGFUNC("e1000_read_mac_addr_82575");
1409 + * If there's an alternate MAC address place it in RAR0
1410 + * so that it will override the Si installed default perm
1413 + ret_val = e1000_check_alt_mac_addr_generic(hw);
1417 + ret_val = e1000_read_mac_addr_generic(hw);
1424 + * e1000_power_down_phy_copper_82575 - Remove link during PHY power down
1425 + * @hw: pointer to the HW structure
1427 + * In the case of a PHY power down to save power, or to turn off link during a
1428 + * driver unload, or wake on lan is not enabled, remove the link.
1430 +static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw)
1432 + struct e1000_phy_info *phy = &hw->phy;
1433 + struct e1000_mac_info *mac = &hw->mac;
1435 + if (!(phy->ops.check_reset_block))
1438 + /* If the management interface is not enabled, then power down */
1439 + if (!(mac->ops.check_mng_mode(hw) || phy->ops.check_reset_block(hw)))
1440 + e1000_power_down_phy_copper(hw);
1446 + * e1000_clear_hw_cntrs_82575 - Clear device specific hardware counters
1447 + * @hw: pointer to the HW structure
1449 + * Clears the hardware counters by reading the counter registers.
1451 +static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw)
1453 + DEBUGFUNC("e1000_clear_hw_cntrs_82575");
1455 + e1000_clear_hw_cntrs_base_generic(hw);
1457 + E1000_READ_REG(hw, E1000_PRC64);
1458 + E1000_READ_REG(hw, E1000_PRC127);
1459 + E1000_READ_REG(hw, E1000_PRC255);
1460 + E1000_READ_REG(hw, E1000_PRC511);
1461 + E1000_READ_REG(hw, E1000_PRC1023);
1462 + E1000_READ_REG(hw, E1000_PRC1522);
1463 + E1000_READ_REG(hw, E1000_PTC64);
1464 + E1000_READ_REG(hw, E1000_PTC127);
1465 + E1000_READ_REG(hw, E1000_PTC255);
1466 + E1000_READ_REG(hw, E1000_PTC511);
1467 + E1000_READ_REG(hw, E1000_PTC1023);
1468 + E1000_READ_REG(hw, E1000_PTC1522);
1470 + E1000_READ_REG(hw, E1000_ALGNERRC);
1471 + E1000_READ_REG(hw, E1000_RXERRC);
1472 + E1000_READ_REG(hw, E1000_TNCRS);
1473 + E1000_READ_REG(hw, E1000_CEXTERR);
1474 + E1000_READ_REG(hw, E1000_TSCTC);
1475 + E1000_READ_REG(hw, E1000_TSCTFC);
1477 + E1000_READ_REG(hw, E1000_MGTPRC);
1478 + E1000_READ_REG(hw, E1000_MGTPDC);
1479 + E1000_READ_REG(hw, E1000_MGTPTC);
1481 + E1000_READ_REG(hw, E1000_IAC);
1482 + E1000_READ_REG(hw, E1000_ICRXOC);
1484 + E1000_READ_REG(hw, E1000_ICRXPTC);
1485 + E1000_READ_REG(hw, E1000_ICRXATC);
1486 + E1000_READ_REG(hw, E1000_ICTXPTC);
1487 + E1000_READ_REG(hw, E1000_ICTXATC);
1488 + E1000_READ_REG(hw, E1000_ICTXQEC);
1489 + E1000_READ_REG(hw, E1000_ICTXQMTC);
1490 + E1000_READ_REG(hw, E1000_ICRXDMTC);
1492 + E1000_READ_REG(hw, E1000_CBTMPC);
1493 + E1000_READ_REG(hw, E1000_HTDPMC);
1494 + E1000_READ_REG(hw, E1000_CBRMPC);
1495 + E1000_READ_REG(hw, E1000_RPTHC);
1496 + E1000_READ_REG(hw, E1000_HGPTC);
1497 + E1000_READ_REG(hw, E1000_HTCBDPC);
1498 + E1000_READ_REG(hw, E1000_HGORCL);
1499 + E1000_READ_REG(hw, E1000_HGORCH);
1500 + E1000_READ_REG(hw, E1000_HGOTCL);
1501 + E1000_READ_REG(hw, E1000_HGOTCH);
1502 + E1000_READ_REG(hw, E1000_LENERRS);
1504 + /* This register should not be read in copper configurations */
1505 + if ((hw->phy.media_type == e1000_media_type_internal_serdes) ||
1506 + e1000_sgmii_active_82575(hw))
1507 + E1000_READ_REG(hw, E1000_SCVPC);
1511 + * e1000_rx_fifo_flush_82575 - Clean rx fifo after RX enable
1512 + * @hw: pointer to the HW structure
1514 + * After rx enable if manageability is enabled then there is likely some
1515 + * bad data at the start of the fifo and possibly in the DMA fifo. This
1516 + * function clears the fifos and flushes any packets that came in as rx was
1519 +void e1000_rx_fifo_flush_82575(struct e1000_hw *hw)
1521 + u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
1524 + DEBUGFUNC("e1000_rx_fifo_workaround_82575");
1525 + if (hw->mac.type != e1000_82575 ||
1526 + !(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_RCV_TCO_EN))
1529 + /* Disable all RX queues */
1530 + for (i = 0; i < 4; i++) {
1531 + rxdctl[i] = E1000_READ_REG(hw, E1000_RXDCTL(i));
1532 + E1000_WRITE_REG(hw, E1000_RXDCTL(i),
1533 + rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
1535 + /* Poll all queues to verify they have shut down */
1536 + for (ms_wait = 0; ms_wait < 10; ms_wait++) {
1539 + for (i = 0; i < 4; i++)
1540 + rx_enabled |= E1000_READ_REG(hw, E1000_RXDCTL(i));
1541 + if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE))
1545 + if (ms_wait == 10)
1546 + DEBUGOUT("Queue disable timed out after 10ms\n");
1548 + /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
1549 + * incoming packets are rejected. Set enable and wait 2ms so that
1550 + * any packet that was coming in as RCTL.EN was set is flushed
1552 + rfctl = E1000_READ_REG(hw, E1000_RFCTL);
1553 + E1000_WRITE_REG(hw, E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);
1555 + rlpml = E1000_READ_REG(hw, E1000_RLPML);
1556 + E1000_WRITE_REG(hw, E1000_RLPML, 0);
1558 + rctl = E1000_READ_REG(hw, E1000_RCTL);
1559 + temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP);
1560 + temp_rctl |= E1000_RCTL_LPE;
1562 + E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl);
1563 + E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl | E1000_RCTL_EN);
1564 + E1000_WRITE_FLUSH(hw);
1567 + /* Enable RX queues that were previously enabled and restore our
1570 + for (i = 0; i < 4; i++)
1571 + E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl[i]);
1572 + E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1573 + E1000_WRITE_FLUSH(hw);
1575 + E1000_WRITE_REG(hw, E1000_RLPML, rlpml);
1576 + E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
1578 + /* Flush receive errors generated by workaround */
1579 + E1000_READ_REG(hw, E1000_ROC);
1580 + E1000_READ_REG(hw, E1000_RNBC);
1581 + E1000_READ_REG(hw, E1000_MPC);
1585 + * e1000_set_pcie_completion_timeout - set pci-e completion timeout
1586 + * @hw: pointer to the HW structure
1588 + * The defaults for 82575 and 82576 should be in the range of 50us to 50ms,
1589 + * however the hardware default for these parts is 500us to 1ms which is less
1590 + * than the 10ms recommended by the pci-e spec. To address this we need to
1591 + * increase the value to either 10ms to 200ms for capability version 1 config,
1592 + * or 16ms to 55ms for version 2.
1594 +static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw)
1596 + u32 gcr = E1000_READ_REG(hw, E1000_GCR);
1597 + s32 ret_val = E1000_SUCCESS;
1600 + /* only take action if timeout value is defaulted to 0 */
1601 + if (gcr & E1000_GCR_CMPL_TMOUT_MASK)
1605 + * if capabilities version is type 1 we can write the
1606 + * timeout of 10ms to 200ms through the GCR register
1608 + if (!(gcr & E1000_GCR_CAP_VER2)) {
1609 + gcr |= E1000_GCR_CMPL_TMOUT_10ms;
1614 + * for version 2 capabilities we need to write the config space
1615 + * directly in order to set the completion timeout value for
1618 + ret_val = e1000_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
1623 + pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;
1625 + ret_val = e1000_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
1628 + /* disable completion timeout resend */
1629 + gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;
1631 + E1000_WRITE_REG(hw, E1000_GCR, gcr);
1636 + * e1000_vmdq_set_loopback_pf - enable or disable vmdq loopback
1637 + * @hw: pointer to the hardware struct
1638 + * @enable: state to enter, either enabled or disabled
1640 + * enables/disables L2 switch loopback functionality.
1642 +void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
1644 + u32 dtxswc = E1000_READ_REG(hw, E1000_DTXSWC);
1647 + dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
1649 + dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
1651 + E1000_WRITE_REG(hw, E1000_DTXSWC, dtxswc);
1655 + * e1000_vmdq_set_replication_pf - enable or disable vmdq replication
1656 + * @hw: pointer to the hardware struct
1657 + * @enable: state to enter, either enabled or disabled
1659 + * enables/disables replication of packets across multiple pools.
1661 +void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
1663 + u32 vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
1666 + vt_ctl |= E1000_VT_CTL_VM_REPL_EN;
1668 + vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN;
1670 + E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
1673 Index: linux-2.6.22/drivers/net/igb/e1000_82575.h
1674 ===================================================================
1675 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
1676 +++ linux-2.6.22/drivers/net/igb/e1000_82575.h 2009-12-18 12:39:22.000000000 -0500
1678 +/*******************************************************************************
1680 + Intel(R) Gigabit Ethernet Linux driver
1681 + Copyright(c) 2007-2009 Intel Corporation.
1683 + This program is free software; you can redistribute it and/or modify it
1684 + under the terms and conditions of the GNU General Public License,
1685 + version 2, as published by the Free Software Foundation.
1687 + This program is distributed in the hope it will be useful, but WITHOUT
1688 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
1689 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
1692 + You should have received a copy of the GNU General Public License along with
1693 + this program; if not, write to the Free Software Foundation, Inc.,
1694 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
1696 + The full GNU General Public License is included in this distribution in
1697 + the file called "COPYING".
1699 + Contact Information:
1700 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
1701 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
1703 +*******************************************************************************/
1705 +#ifndef _E1000_82575_H_
1706 +#define _E1000_82575_H_
1708 +#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \
1709 + (ID_LED_DEF1_DEF2 << 8) | \
1710 + (ID_LED_DEF1_DEF2 << 4) | \
1711 + (ID_LED_OFF1_ON2))
1713 + * Receive Address Register Count
1714 + * Number of high/low register pairs in the RAR. The RAR (Receive Address
1715 + * Registers) holds the directed and multicast addresses that we monitor.
1716 + * These entries are also used for MAC-based filtering.
1719 + * For 82576, there are an additional set of RARs that begin at an offset
1720 + * separate from the first set of RARs.
1722 +#define E1000_RAR_ENTRIES_82575 16
1723 +#define E1000_RAR_ENTRIES_82576 24
1725 +struct e1000_adv_data_desc {
1726 + __le64 buffer_addr; /* Address of the descriptor's data buffer */
1730 + u32 datalen :16; /* Data buffer length */
1732 + u32 dtyp :4; /* Descriptor type */
1733 + u32 dcmd :8; /* Descriptor command */
1739 + u32 status :4; /* Descriptor status */
1741 + u32 popts :6; /* Packet Options */
1742 + u32 paylen :18; /* Payload length */
1747 +#define E1000_TXD_DTYP_ADV_C 0x2 /* Advanced Context Descriptor */
1748 +#define E1000_TXD_DTYP_ADV_D 0x3 /* Advanced Data Descriptor */
1749 +#define E1000_ADV_TXD_CMD_DEXT 0x20 /* Descriptor extension (0 = legacy) */
1750 +#define E1000_ADV_TUCMD_IPV4 0x2 /* IP Packet Type: 1=IPv4 */
1751 +#define E1000_ADV_TUCMD_IPV6 0x0 /* IP Packet Type: 0=IPv6 */
1752 +#define E1000_ADV_TUCMD_L4T_UDP 0x0 /* L4 Packet TYPE of UDP */
1753 +#define E1000_ADV_TUCMD_L4T_TCP 0x4 /* L4 Packet TYPE of TCP */
1754 +#define E1000_ADV_TUCMD_MKRREQ 0x10 /* Indicates markers are required */
1755 +#define E1000_ADV_DCMD_EOP 0x1 /* End of Packet */
1756 +#define E1000_ADV_DCMD_IFCS 0x2 /* Insert FCS (Ethernet CRC) */
1757 +#define E1000_ADV_DCMD_RS 0x8 /* Report Status */
1758 +#define E1000_ADV_DCMD_VLE 0x40 /* Add VLAN tag */
1759 +#define E1000_ADV_DCMD_TSE 0x80 /* TCP Seg enable */
1760 +/* Extended Device Control */
1761 +#define E1000_CTRL_EXT_NSICR 0x00000001 /* Disable Intr Clear all on read */
1763 +struct e1000_adv_context_desc {
1788 +/* SRRCTL bit definitions */
1789 +#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */
1790 +#define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00
1791 +#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */
1792 +#define E1000_SRRCTL_DESCTYPE_LEGACY 0x00000000
1793 +#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
1794 +#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000
1795 +#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
1796 +#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION 0x06000000
1797 +#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
1798 +#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000
1799 +#define E1000_SRRCTL_DROP_EN 0x80000000
1801 +#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F
1802 +#define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00
1804 +#define E1000_TX_HEAD_WB_ENABLE 0x1
1805 +#define E1000_TX_SEQNUM_WB_ENABLE 0x2
1807 +#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002
1808 +#define E1000_MRQC_ENABLE_VMDQ 0x00000003
1809 +#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q 0x00000005
1810 +#define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
1811 +#define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
1812 +#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000
1814 +#define E1000_VMRCTL_MIRROR_PORT_SHIFT 8
1815 +#define E1000_VMRCTL_MIRROR_DSTPORT_MASK (7 << E1000_VMRCTL_MIRROR_PORT_SHIFT)
1816 +#define E1000_VMRCTL_POOL_MIRROR_ENABLE (1 << 0)
1817 +#define E1000_VMRCTL_UPLINK_MIRROR_ENABLE (1 << 1)
1818 +#define E1000_VMRCTL_DOWNLINK_MIRROR_ENABLE (1 << 2)
1820 +#define E1000_EICR_TX_QUEUE ( \
1821 + E1000_EICR_TX_QUEUE0 | \
1822 + E1000_EICR_TX_QUEUE1 | \
1823 + E1000_EICR_TX_QUEUE2 | \
1824 + E1000_EICR_TX_QUEUE3)
1826 +#define E1000_EICR_RX_QUEUE ( \
1827 + E1000_EICR_RX_QUEUE0 | \
1828 + E1000_EICR_RX_QUEUE1 | \
1829 + E1000_EICR_RX_QUEUE2 | \
1830 + E1000_EICR_RX_QUEUE3)
1832 +#define E1000_EIMS_RX_QUEUE E1000_EICR_RX_QUEUE
1833 +#define E1000_EIMS_TX_QUEUE E1000_EICR_TX_QUEUE
1835 +#define EIMS_ENABLE_MASK ( \
1836 + E1000_EIMS_RX_QUEUE | \
1837 + E1000_EIMS_TX_QUEUE | \
1838 + E1000_EIMS_TCP_TIMER | \
1841 +/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
1842 +#define E1000_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */
1843 +#define E1000_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */
1844 +#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */
1845 +#define E1000_IMIREXT_CTRL_URG 0x00002000 /* Check URG bit in header */
1846 +#define E1000_IMIREXT_CTRL_ACK 0x00004000 /* Check ACK bit in header */
1847 +#define E1000_IMIREXT_CTRL_PSH 0x00008000 /* Check PSH bit in header */
1848 +#define E1000_IMIREXT_CTRL_RST 0x00010000 /* Check RST bit in header */
1849 +#define E1000_IMIREXT_CTRL_SYN 0x00020000 /* Check SYN bit in header */
1850 +#define E1000_IMIREXT_CTRL_FIN 0x00040000 /* Check FIN bit in header */
1851 +#define E1000_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */
1853 +/* Receive Descriptor - Advanced */
1854 +union e1000_adv_rx_desc {
1856 + __le64 pkt_addr; /* Packet buffer address */
1857 + __le64 hdr_addr; /* Header buffer address */
1864 + __le16 pkt_info; /*RSS type, Pkt type*/
1865 + __le16 hdr_info; /* Split Header,
1866 + * header buffer len*/
1870 + __le32 rss; /* RSS Hash */
1872 + __le16 ip_id; /* IP id */
1873 + __le16 csum; /* Packet Checksum */
1878 + __le32 status_error; /* ext status/error */
1879 + __le16 length; /* Packet length */
1880 + __le16 vlan; /* VLAN tag */
1882 + } wb; /* writeback */
1885 +#define E1000_RXDADV_RSSTYPE_MASK 0x0000000F
1886 +#define E1000_RXDADV_RSSTYPE_SHIFT 12
1887 +#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0
1888 +#define E1000_RXDADV_HDRBUFLEN_SHIFT 5
1889 +#define E1000_RXDADV_SPLITHEADER_EN 0x00001000
1890 +#define E1000_RXDADV_SPH 0x8000
1891 +#define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */
1892 +#define E1000_RXDADV_ERR_HBO 0x00800000
1894 +/* RSS Hash results */
1895 +#define E1000_RXDADV_RSSTYPE_NONE 0x00000000
1896 +#define E1000_RXDADV_RSSTYPE_IPV4_TCP 0x00000001
1897 +#define E1000_RXDADV_RSSTYPE_IPV4 0x00000002
1898 +#define E1000_RXDADV_RSSTYPE_IPV6_TCP 0x00000003
1899 +#define E1000_RXDADV_RSSTYPE_IPV6_EX 0x00000004
1900 +#define E1000_RXDADV_RSSTYPE_IPV6 0x00000005
1901 +#define E1000_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006
1902 +#define E1000_RXDADV_RSSTYPE_IPV4_UDP 0x00000007
1903 +#define E1000_RXDADV_RSSTYPE_IPV6_UDP 0x00000008
1904 +#define E1000_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009
1906 +/* RSS Packet Types as indicated in the receive descriptor */
1907 +#define E1000_RXDADV_PKTTYPE_NONE 0x00000000
1908 +#define E1000_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPV4 hdr present */
1909 +#define E1000_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPV4 hdr + extensions */
1910 +#define E1000_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPV6 hdr present */
1911 +#define E1000_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPV6 hdr + extensions */
1912 +#define E1000_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */
1913 +#define E1000_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */
1914 +#define E1000_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */
1915 +#define E1000_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */
1917 +#define E1000_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */
1918 +#define E1000_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */
1919 +#define E1000_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */
1920 +#define E1000_RXDADV_PKTTYPE_ETQF 0x00008000 /* PKTTYPE is ETQF index */
1921 +#define E1000_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */
1922 +#define E1000_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */
1924 +/* LinkSec results */
1925 +/* Security Processing bit Indication */
1926 +#define E1000_RXDADV_LNKSEC_STATUS_SECP 0x00020000
1927 +#define E1000_RXDADV_LNKSEC_ERROR_BIT_MASK 0x18000000
1928 +#define E1000_RXDADV_LNKSEC_ERROR_NO_SA_MATCH 0x08000000
1929 +#define E1000_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000
1930 +#define E1000_RXDADV_LNKSEC_ERROR_BAD_SIG 0x18000000
1932 +#define E1000_RXDADV_IPSEC_STATUS_SECP 0x00020000
1933 +#define E1000_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000
1934 +#define E1000_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000
1935 +#define E1000_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000
1936 +#define E1000_RXDADV_IPSEC_ERROR_AUTHENTICATION_FAILED 0x18000000
1938 +/* Transmit Descriptor - Advanced */
1939 +union e1000_adv_tx_desc {
1941 + __le64 buffer_addr; /* Address of descriptor's data buf */
1942 + __le32 cmd_type_len;
1943 + __le32 olinfo_status;
1946 + __le64 rsvd; /* Reserved */
1947 + __le32 nxtseq_seed;
1952 +/* Adv Transmit Descriptor Config Masks */
1953 +#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */
1954 +#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
1955 +#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */
1956 +#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
1957 +#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */
1958 +#define E1000_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */
1959 +#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */
1960 +#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */
1961 +#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
1962 +#define E1000_ADVTXD_MAC_LINKSEC 0x00040000 /* Apply LinkSec on packet */
1963 +#define E1000_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp packet */
1964 +#define E1000_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED present in WB */
1965 +#define E1000_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */
1966 +#define E1000_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */
1967 +#define E1000_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */
1968 +#define E1000_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */
1969 +#define E1000_ADVTXD_POPTS_ISCO_FULL 0x00001800 /* 1st&Last TSO-full iSCSI PDU*/
1970 +#define E1000_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */
1971 +#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
1973 +/* Context descriptors */
1974 +struct e1000_adv_tx_context_desc {
1975 + __le32 vlan_macip_lens;
1976 + __le32 seqnum_seed;
1977 + __le32 type_tucmd_mlhl;
1978 + __le32 mss_l4len_idx;
1981 +#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
1982 +#define E1000_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */
1983 +#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
1984 +#define E1000_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */
1985 +#define E1000_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */
1986 +#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
1987 +#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */
1988 +#define E1000_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */
1989 +/* IPSec Encrypt Enable for ESP */
1990 +#define E1000_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000
1991 +#define E1000_ADVTXD_TUCMD_MKRREQ 0x00002000 /* Req requires Markers and CRC */
1992 +#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
1993 +#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
1994 +/* Adv ctxt IPSec SA IDX mask */
1995 +#define E1000_ADVTXD_IPSEC_SA_INDEX_MASK 0x000000FF
1996 +/* Adv ctxt IPSec ESP len mask */
1997 +#define E1000_ADVTXD_IPSEC_ESP_LEN_MASK 0x000000FF
1999 +/* Additional Transmit Descriptor Control definitions */
2000 +#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue */
2001 +#define E1000_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. write-back flushing */
2002 +/* Tx Queue Arbitration Priority 0=low, 1=high */
2003 +#define E1000_TXDCTL_PRIORITY 0x08000000
2005 +/* Additional Receive Descriptor Control definitions */
2006 +#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */
2007 +#define E1000_RXDCTL_SWFLSH 0x04000000 /* Rx Desc. write-back flushing */
2009 +/* Direct Cache Access (DCA) definitions */
2010 +#define E1000_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */
2011 +#define E1000_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */
2013 +#define E1000_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */
2014 +#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */
2016 +#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
2017 +#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
2018 +#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
2019 +#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
2021 +#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
2022 +#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
2023 +#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
2025 +#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */
2026 +#define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */
2027 +#define E1000_DCA_TXCTRL_CPUID_SHIFT_82576 24 /* Tx CPUID */
2028 +#define E1000_DCA_RXCTRL_CPUID_SHIFT_82576 24 /* Rx CPUID */
2030 +/* Additional interrupt register bit definitions */
2031 +#define E1000_ICR_LSECPNS 0x00000020 /* PN threshold - server */
2032 +#define E1000_IMS_LSECPNS E1000_ICR_LSECPNS /* PN threshold - server */
2033 +#define E1000_ICS_LSECPNS E1000_ICR_LSECPNS /* PN threshold - server */
2035 +/* ETQF register bit definitions */
2036 +#define E1000_ETQF_FILTER_ENABLE (1 << 26)
2037 +#define E1000_ETQF_IMM_INT (1 << 29)
2038 +#define E1000_ETQF_1588 (1 << 30)
2039 +#define E1000_ETQF_QUEUE_ENABLE (1 << 31)
2041 + * ETQF filter list: one static filter per filter consumer. This is
2042 + * to avoid filter collisions later. Add new filters
2045 + * Current filters:
2046 + * EAPOL 802.1x (0x888e): Filter 0
2048 +#define E1000_ETQF_FILTER_EAPOL 0
2050 +#define E1000_FTQF_VF_BP 0x00008000
2051 +#define E1000_FTQF_1588_TIME_STAMP 0x08000000
2052 +#define E1000_FTQF_MASK 0xF0000000
2053 +#define E1000_FTQF_MASK_PROTO_BP 0x10000000
2054 +#define E1000_FTQF_MASK_SOURCE_ADDR_BP 0x20000000
2055 +#define E1000_FTQF_MASK_DEST_ADDR_BP 0x40000000
2056 +#define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000
2058 +#define E1000_NVM_APME_82575 0x0400
2059 +#define MAX_NUM_VFS 8
2061 +#define E1000_DTXSWC_MAC_SPOOF_MASK 0x000000FF /* Per VF MAC spoof control */
2062 +#define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof control */
2063 +#define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */
2064 +#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8
2065 +#define E1000_DTXSWC_LLE_SHIFT 16
2066 +#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */
2068 +/* Easy defines for setting default pool, would normally be left a zero */
2069 +#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7
2070 +#define E1000_VT_CTL_DEFAULT_POOL_MASK (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT)
2072 +/* Other useful VMD_CTL register defines */
2073 +#define E1000_VT_CTL_IGNORE_MAC (1 << 28)
2074 +#define E1000_VT_CTL_DISABLE_DEF_POOL (1 << 29)
2075 +#define E1000_VT_CTL_VM_REPL_EN (1 << 30)
2077 +/* Per VM Offload register setup */
2078 +#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */
2079 +#define E1000_VMOLR_LPE 0x00010000 /* Accept Long packet */
2080 +#define E1000_VMOLR_RSSE 0x00020000 /* Enable RSS */
2081 +#define E1000_VMOLR_AUPE 0x01000000 /* Accept untagged packets */
2082 +#define E1000_VMOLR_ROMPE 0x02000000 /* Accept overflow multicast */
2083 +#define E1000_VMOLR_ROPE 0x04000000 /* Accept overflow unicast */
2084 +#define E1000_VMOLR_BAM 0x08000000 /* Accept Broadcast packets */
2085 +#define E1000_VMOLR_MPME 0x10000000 /* Multicast promiscuous mode */
2086 +#define E1000_VMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */
2087 +#define E1000_VMOLR_STRCRC 0x80000000 /* CRC stripping enable */
2089 +#define E1000_VLVF_ARRAY_SIZE 32
2090 +#define E1000_VLVF_VLANID_MASK 0x00000FFF
2091 +#define E1000_VLVF_POOLSEL_SHIFT 12
2092 +#define E1000_VLVF_POOLSEL_MASK (0xFF << E1000_VLVF_POOLSEL_SHIFT)
2093 +#define E1000_VLVF_LVLAN 0x00100000
2094 +#define E1000_VLVF_VLANID_ENABLE 0x80000000
2096 +#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
2098 +#define E1000_IOVCTL 0x05BBC
2099 +#define E1000_IOVCTL_REUSE_VFQ 0x00000001
2101 +#define E1000_RPLOLR_STRVLAN 0x40000000
2102 +#define E1000_RPLOLR_STRCRC 0x80000000
2104 +#define E1000_DTXCTL_8023LL 0x0004
2105 +#define E1000_DTXCTL_VLAN_ADDED 0x0008
2106 +#define E1000_DTXCTL_OOS_ENABLE 0x0010
2107 +#define E1000_DTXCTL_MDP_EN 0x0020
2108 +#define E1000_DTXCTL_SPOOF_INT 0x0040
2110 +#define ALL_QUEUES 0xFFFF
2112 +/* RX packet buffer size defines */
2113 +#define E1000_RXPBS_SIZE_MASK_82576 0x0000007F
2114 +void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable);
2115 +void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable);
2116 +#endif /* _E1000_82575_H_ */
2117 Index: linux-2.6.22/drivers/net/igb/e1000_api.c
2118 ===================================================================
2119 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
2120 +++ linux-2.6.22/drivers/net/igb/e1000_api.c 2009-12-18 12:39:22.000000000 -0500
2122 +/*******************************************************************************
2124 + Intel(R) Gigabit Ethernet Linux driver
2125 + Copyright(c) 2007-2009 Intel Corporation.
2127 + This program is free software; you can redistribute it and/or modify it
2128 + under the terms and conditions of the GNU General Public License,
2129 + version 2, as published by the Free Software Foundation.
2131 + This program is distributed in the hope it will be useful, but WITHOUT
2132 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
2133 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
2136 + You should have received a copy of the GNU General Public License along with
2137 + this program; if not, write to the Free Software Foundation, Inc.,
2138 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
2140 + The full GNU General Public License is included in this distribution in
2141 + the file called "COPYING".
2143 + Contact Information:
2144 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
2145 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
2147 +*******************************************************************************/
2149 +#include "e1000_api.h"
2152 + * e1000_init_mac_params - Initialize MAC function pointers
2153 + * @hw: pointer to the HW structure
2155 + * This function initializes the function pointers for the MAC
2156 + * set of functions. Called by drivers or by e1000_setup_init_funcs.
2158 +s32 e1000_init_mac_params(struct e1000_hw *hw)
2160 + s32 ret_val = E1000_SUCCESS;
2162 + if (hw->mac.ops.init_params) {
2163 + ret_val = hw->mac.ops.init_params(hw);
2165 + DEBUGOUT("MAC Initialization Error\n");
2169 + DEBUGOUT("mac.init_mac_params was NULL\n");
2170 + ret_val = -E1000_ERR_CONFIG;
2178 + * e1000_init_nvm_params - Initialize NVM function pointers
2179 + * @hw: pointer to the HW structure
2181 + * This function initializes the function pointers for the NVM
2182 + * set of functions. Called by drivers or by e1000_setup_init_funcs.
2184 +s32 e1000_init_nvm_params(struct e1000_hw *hw)
2186 + s32 ret_val = E1000_SUCCESS;
2188 + if (hw->nvm.ops.init_params) {
2189 + ret_val = hw->nvm.ops.init_params(hw);
2191 + DEBUGOUT("NVM Initialization Error\n");
2195 + DEBUGOUT("nvm.init_nvm_params was NULL\n");
2196 + ret_val = -E1000_ERR_CONFIG;
2204 + * e1000_init_phy_params - Initialize PHY function pointers
2205 + * @hw: pointer to the HW structure
2207 + * This function initializes the function pointers for the PHY
2208 + * set of functions. Called by drivers or by e1000_setup_init_funcs.
2210 +s32 e1000_init_phy_params(struct e1000_hw *hw)
2212 + s32 ret_val = E1000_SUCCESS;
2214 + if (hw->phy.ops.init_params) {
2215 + ret_val = hw->phy.ops.init_params(hw);
2217 + DEBUGOUT("PHY Initialization Error\n");
2221 + DEBUGOUT("phy.init_phy_params was NULL\n");
2222 + ret_val = -E1000_ERR_CONFIG;
2230 + * e1000_init_mbx_params - Initialize mailbox function pointers
2231 + * @hw: pointer to the HW structure
2233 + * This function initializes the function pointers for the PHY
2234 + * set of functions. Called by drivers or by e1000_setup_init_funcs.
2236 +s32 e1000_init_mbx_params(struct e1000_hw *hw)
2238 + s32 ret_val = E1000_SUCCESS;
2240 + if (hw->mbx.ops.init_params) {
2241 + ret_val = hw->mbx.ops.init_params(hw);
2243 + DEBUGOUT("Mailbox Initialization Error\n");
2247 + DEBUGOUT("mbx.init_mbx_params was NULL\n");
2248 + ret_val = -E1000_ERR_CONFIG;
2256 + * e1000_set_mac_type - Sets MAC type
2257 + * @hw: pointer to the HW structure
2259 + * This function sets the mac type of the adapter based on the
2260 + * device ID stored in the hw structure.
2261 + * MUST BE FIRST FUNCTION CALLED (explicitly or through
2262 + * e1000_setup_init_funcs()).
2264 +s32 e1000_set_mac_type(struct e1000_hw *hw)
2266 + struct e1000_mac_info *mac = &hw->mac;
2267 + s32 ret_val = E1000_SUCCESS;
2269 + DEBUGFUNC("e1000_set_mac_type");
2271 + switch (hw->device_id) {
2272 + case E1000_DEV_ID_82575EB_COPPER:
2273 + case E1000_DEV_ID_82575EB_FIBER_SERDES:
2274 + case E1000_DEV_ID_82575GB_QUAD_COPPER:
2275 + mac->type = e1000_82575;
2277 + case E1000_DEV_ID_82576:
2278 + case E1000_DEV_ID_82576_FIBER:
2279 + case E1000_DEV_ID_82576_SERDES:
2280 + case E1000_DEV_ID_82576_QUAD_COPPER:
2281 + case E1000_DEV_ID_82576_NS:
2282 + case E1000_DEV_ID_82576_NS_SERDES:
2283 + case E1000_DEV_ID_82576_SERDES_QUAD:
2284 + mac->type = e1000_82576;
2287 + /* Should never have loaded on this device */
2288 + ret_val = -E1000_ERR_MAC_INIT;
2296 + * e1000_setup_init_funcs - Initializes function pointers
2297 + * @hw: pointer to the HW structure
2298 + * @init_device: true will initialize the rest of the function pointers
2299 + * getting the device ready for use. false will only set
2300 + * MAC type and the function pointers for the other init
2301 + * functions. Passing false will not generate any hardware
2302 + * reads or writes.
2304 + * This function must be called by a driver in order to use the rest
2305 + * of the 'shared' code files. Called by drivers only.
2307 +s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device)
2311 + /* Can't do much good without knowing the MAC type. */
2312 + ret_val = e1000_set_mac_type(hw);
2314 + DEBUGOUT("ERROR: MAC type could not be set properly.\n");
2318 + if (!hw->hw_addr) {
2319 + DEBUGOUT("ERROR: Registers not mapped\n");
2320 + ret_val = -E1000_ERR_CONFIG;
2325 + * Init function pointers to generic implementations. We do this first
2326 + * allowing a driver module to override it afterward.
2328 + e1000_init_mac_ops_generic(hw);
2329 + e1000_init_nvm_ops_generic(hw);
2330 + e1000_init_mbx_ops_generic(hw);
2333 + * Set up the init function pointers. These are functions within the
2334 + * adapter family file that sets up function pointers for the rest of
2335 + * the functions in that family.
2337 + switch (hw->mac.type) {
2340 + e1000_init_function_pointers_82575(hw);
2343 + DEBUGOUT("Hardware not supported\n");
2344 + ret_val = -E1000_ERR_CONFIG;
2349 + * Initialize the rest of the function pointers. These require some
2350 + * register reads/writes in some cases.
2352 + if (!(ret_val) && init_device) {
2353 + ret_val = e1000_init_mac_params(hw);
2357 + ret_val = e1000_init_nvm_params(hw);
2361 + ret_val = e1000_init_phy_params(hw);
2365 + ret_val = e1000_init_mbx_params(hw);
2375 + * e1000_get_bus_info - Obtain bus information for adapter
2376 + * @hw: pointer to the HW structure
2378 + * This will obtain information about the HW bus for which the
2379 + * adapter is attached and stores it in the hw structure. This is a
2380 + * function pointer entry point called by drivers.
2382 +s32 e1000_get_bus_info(struct e1000_hw *hw)
2384 + if (hw->mac.ops.get_bus_info)
2385 + return hw->mac.ops.get_bus_info(hw);
2387 + return E1000_SUCCESS;
2391 + * e1000_clear_vfta - Clear VLAN filter table
2392 + * @hw: pointer to the HW structure
2394 + * This clears the VLAN filter table on the adapter. This is a function
2395 + * pointer entry point called by drivers.
2397 +void e1000_clear_vfta(struct e1000_hw *hw)
2399 + if (hw->mac.ops.clear_vfta)
2400 + hw->mac.ops.clear_vfta(hw);
2404 + * e1000_write_vfta - Write value to VLAN filter table
2405 + * @hw: pointer to the HW structure
2406 + * @offset: the 32-bit offset in which to write the value to.
2407 + * @value: the 32-bit value to write at location offset.
2409 + * This writes a 32-bit value to a 32-bit offset in the VLAN filter
2410 + * table. This is a function pointer entry point called by drivers.
2412 +void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
2414 + if (hw->mac.ops.write_vfta)
2415 + hw->mac.ops.write_vfta(hw, offset, value);
2419 + * e1000_update_mc_addr_list - Update Multicast addresses
2420 + * @hw: pointer to the HW structure
2421 + * @mc_addr_list: array of multicast addresses to program
2422 + * @mc_addr_count: number of multicast addresses to program
2424 + * Updates the Multicast Table Array.
2425 + * The caller must have a packed mc_addr_list of multicast addresses.
2427 +void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
2428 + u32 mc_addr_count)
2430 + if (hw->mac.ops.update_mc_addr_list)
2431 + hw->mac.ops.update_mc_addr_list(hw, mc_addr_list,
2436 + * e1000_force_mac_fc - Force MAC flow control
2437 + * @hw: pointer to the HW structure
2439 + * Force the MAC's flow control settings. Currently no func pointer exists
2440 + * and all implementations are handled in the generic version of this
2443 +s32 e1000_force_mac_fc(struct e1000_hw *hw)
2445 + return e1000_force_mac_fc_generic(hw);
2449 + * e1000_check_for_link - Check/Store link connection
2450 + * @hw: pointer to the HW structure
2452 + * This checks the link condition of the adapter and stores the
2453 + * results in the hw->mac structure. This is a function pointer entry
2454 + * point called by drivers.
2456 +s32 e1000_check_for_link(struct e1000_hw *hw)
2458 + if (hw->mac.ops.check_for_link)
2459 + return hw->mac.ops.check_for_link(hw);
2461 + return -E1000_ERR_CONFIG;
2465 + * e1000_check_mng_mode - Check management mode
2466 + * @hw: pointer to the HW structure
2468 + * This checks if the adapter has manageability enabled.
2469 + * This is a function pointer entry point called by drivers.
2471 +bool e1000_check_mng_mode(struct e1000_hw *hw)
2473 + if (hw->mac.ops.check_mng_mode)
2474 + return hw->mac.ops.check_mng_mode(hw);
2480 + * e1000_mng_write_dhcp_info - Writes DHCP info to host interface
2481 + * @hw: pointer to the HW structure
2482 + * @buffer: pointer to the host interface
2483 + * @length: size of the buffer
2485 + * Writes the DHCP information to the host interface.
2487 +s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length)
2489 + return e1000_mng_write_dhcp_info_generic(hw, buffer, length);
2493 + * e1000_reset_hw - Reset hardware
2494 + * @hw: pointer to the HW structure
2496 + * This resets the hardware into a known state. This is a function pointer
2497 + * entry point called by drivers.
2499 +s32 e1000_reset_hw(struct e1000_hw *hw)
2501 + if (hw->mac.ops.reset_hw)
2502 + return hw->mac.ops.reset_hw(hw);
2504 + return -E1000_ERR_CONFIG;
2508 + * e1000_init_hw - Initialize hardware
2509 + * @hw: pointer to the HW structure
2511 + * This inits the hardware readying it for operation. This is a function
2512 + * pointer entry point called by drivers.
2514 +s32 e1000_init_hw(struct e1000_hw *hw)
2516 + if (hw->mac.ops.init_hw)
2517 + return hw->mac.ops.init_hw(hw);
2519 + return -E1000_ERR_CONFIG;
2523 + * e1000_setup_link - Configures link and flow control
2524 + * @hw: pointer to the HW structure
2526 + * This configures link and flow control settings for the adapter. This
2527 + * is a function pointer entry point called by drivers. While modules can
2528 + * also call this, they probably call their own version of this function.
2530 +s32 e1000_setup_link(struct e1000_hw *hw)
2532 + if (hw->mac.ops.setup_link)
2533 + return hw->mac.ops.setup_link(hw);
2535 + return -E1000_ERR_CONFIG;
2539 + * e1000_get_speed_and_duplex - Returns current speed and duplex
2540 + * @hw: pointer to the HW structure
2541 + * @speed: pointer to a 16-bit value to store the speed
2542 + * @duplex: pointer to a 16-bit value to store the duplex.
2544 + * This returns the speed and duplex of the adapter in the two 'out'
2545 + * variables passed in. This is a function pointer entry point called
2548 +s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
2550 + if (hw->mac.ops.get_link_up_info)
2551 + return hw->mac.ops.get_link_up_info(hw, speed, duplex);
2553 + return -E1000_ERR_CONFIG;
2557 + * e1000_setup_led - Configures SW controllable LED
2558 + * @hw: pointer to the HW structure
2560 + * This prepares the SW controllable LED for use and saves the current state
2561 + * of the LED so it can be later restored. This is a function pointer entry
2562 + * point called by drivers.
2564 +s32 e1000_setup_led(struct e1000_hw *hw)
2566 + if (hw->mac.ops.setup_led)
2567 + return hw->mac.ops.setup_led(hw);
2569 + return E1000_SUCCESS;
2573 + * e1000_cleanup_led - Restores SW controllable LED
2574 + * @hw: pointer to the HW structure
2576 + * This restores the SW controllable LED to the value saved off by
2577 + * e1000_setup_led. This is a function pointer entry point called by drivers.
2579 +s32 e1000_cleanup_led(struct e1000_hw *hw)
2581 + if (hw->mac.ops.cleanup_led)
2582 + return hw->mac.ops.cleanup_led(hw);
2584 + return E1000_SUCCESS;
2588 + * e1000_blink_led - Blink SW controllable LED
2589 + * @hw: pointer to the HW structure
2591 + * This starts the adapter LED blinking. Request the LED to be setup first
2592 + * and cleaned up after. This is a function pointer entry point called by
2595 +s32 e1000_blink_led(struct e1000_hw *hw)
2597 + if (hw->mac.ops.blink_led)
2598 + return hw->mac.ops.blink_led(hw);
2600 + return E1000_SUCCESS;
2604 + * e1000_id_led_init - store LED configurations in SW
2605 + * @hw: pointer to the HW structure
2607 + * Initializes the LED config in SW. This is a function pointer entry point
2608 + * called by drivers.
2610 +s32 e1000_id_led_init(struct e1000_hw *hw)
2612 + if (hw->mac.ops.id_led_init)
2613 + return hw->mac.ops.id_led_init(hw);
2615 + return E1000_SUCCESS;
2619 + * e1000_led_on - Turn on SW controllable LED
2620 + * @hw: pointer to the HW structure
2622 + * Turns the SW defined LED on. This is a function pointer entry point
2623 + * called by drivers.
2625 +s32 e1000_led_on(struct e1000_hw *hw)
2627 + if (hw->mac.ops.led_on)
2628 + return hw->mac.ops.led_on(hw);
2630 + return E1000_SUCCESS;
2634 + * e1000_led_off - Turn off SW controllable LED
2635 + * @hw: pointer to the HW structure
2637 + * Turns the SW defined LED off. This is a function pointer entry point
2638 + * called by drivers.
2640 +s32 e1000_led_off(struct e1000_hw *hw)
2642 + if (hw->mac.ops.led_off)
2643 + return hw->mac.ops.led_off(hw);
2645 + return E1000_SUCCESS;
2649 + * e1000_reset_adaptive - Reset adaptive IFS
2650 + * @hw: pointer to the HW structure
2652 + * Resets the adaptive IFS. Currently no func pointer exists and all
2653 + * implementations are handled in the generic version of this function.
2655 +void e1000_reset_adaptive(struct e1000_hw *hw)
2657 + e1000_reset_adaptive_generic(hw);
2661 + * e1000_update_adaptive - Update adaptive IFS
2662 + * @hw: pointer to the HW structure
2664 + * Updates adapter IFS. Currently no func pointer exists and all
2665 + * implementations are handled in the generic version of this function.
2667 +void e1000_update_adaptive(struct e1000_hw *hw)
2669 + e1000_update_adaptive_generic(hw);
2673 + * e1000_disable_pcie_master - Disable PCI-Express master access
2674 + * @hw: pointer to the HW structure
2676 + * Disables PCI-Express master access and verifies there are no pending
2677 + * requests. Currently no func pointer exists and all implementations are
2678 + * handled in the generic version of this function.
2680 +s32 e1000_disable_pcie_master(struct e1000_hw *hw)
2682 + return e1000_disable_pcie_master_generic(hw);
2686 + * e1000_config_collision_dist - Configure collision distance
2687 + * @hw: pointer to the HW structure
2689 + * Configures the collision distance to the default value and is used
2690 + * during link setup.
2692 +void e1000_config_collision_dist(struct e1000_hw *hw)
2694 + if (hw->mac.ops.config_collision_dist)
2695 + hw->mac.ops.config_collision_dist(hw);
2699 + * e1000_rar_set - Sets a receive address register
2700 + * @hw: pointer to the HW structure
2701 + * @addr: address to set the RAR to
2702 + * @index: the RAR to set
2704 + * Sets a Receive Address Register (RAR) to the specified address.
2706 +void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
2708 + if (hw->mac.ops.rar_set)
2709 + hw->mac.ops.rar_set(hw, addr, index);
2713 + * e1000_validate_mdi_setting - Ensures valid MDI/MDIX SW state
2714 + * @hw: pointer to the HW structure
2716 + * Ensures that the MDI/MDIX SW state is valid.
2718 +s32 e1000_validate_mdi_setting(struct e1000_hw *hw)
2720 + if (hw->mac.ops.validate_mdi_setting)
2721 + return hw->mac.ops.validate_mdi_setting(hw);
2723 + return E1000_SUCCESS;
2727 + * e1000_mta_set - Sets multicast table bit
2728 + * @hw: pointer to the HW structure
2729 + * @hash_value: Multicast hash value.
2731 + * This sets the bit in the multicast table corresponding to the
2732 + * hash value. This is a function pointer entry point called by drivers.
2734 +void e1000_mta_set(struct e1000_hw *hw, u32 hash_value)
2736 + if (hw->mac.ops.mta_set)
2737 + hw->mac.ops.mta_set(hw, hash_value);
2741 + * e1000_hash_mc_addr - Determines address location in multicast table
2742 + * @hw: pointer to the HW structure
2743 + * @mc_addr: Multicast address to hash.
2745 + * This hashes an address to determine its location in the multicast
2746 + * table. Currently no func pointer exists and all implementations
2747 + * are handled in the generic version of this function.
2749 +u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
2751 + return e1000_hash_mc_addr_generic(hw, mc_addr);
2755 + * e1000_enable_tx_pkt_filtering - Enable packet filtering on TX
2756 + * @hw: pointer to the HW structure
2758 + * Enables packet filtering on transmit packets if manageability is enabled
2759 + * and host interface is enabled.
2760 + * Currently no func pointer exists and all implementations are handled in the
2761 + * generic version of this function.
2763 +bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw)
2765 + return e1000_enable_tx_pkt_filtering_generic(hw);
2769 + * e1000_mng_host_if_write - Writes to the manageability host interface
2770 + * @hw: pointer to the HW structure
2771 + * @buffer: pointer to the host interface buffer
2772 + * @length: size of the buffer
2773 + * @offset: location in the buffer to write to
2774 + * @sum: sum of the data (not checksum)
2776 + * This function writes the buffer content at the offset given on the host if.
2777 + * It also does alignment considerations to do the writes in most efficient
2778 + * way. Also fills up the sum of the buffer in *buffer parameter.
2780 +s32 e1000_mng_host_if_write(struct e1000_hw * hw, u8 *buffer, u16 length,
2781 + u16 offset, u8 *sum)
2783 + if (hw->mac.ops.mng_host_if_write)
2784 + return hw->mac.ops.mng_host_if_write(hw, buffer, length,
2787 + return E1000_NOT_IMPLEMENTED;
2791 + * e1000_mng_write_cmd_header - Writes manageability command header
2792 + * @hw: pointer to the HW structure
2793 + * @hdr: pointer to the host interface command header
2795 + * Writes the command header after does the checksum calculation.
2797 +s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
2798 + struct e1000_host_mng_command_header *hdr)
2800 + if (hw->mac.ops.mng_write_cmd_header)
2801 + return hw->mac.ops.mng_write_cmd_header(hw, hdr);
2803 + return E1000_NOT_IMPLEMENTED;
2807 + * e1000_mng_enable_host_if - Checks host interface is enabled
2808 + * @hw: pointer to the HW structure
2810 + * Returns E1000_success upon success, else E1000_ERR_HOST_INTERFACE_COMMAND
2812 + * This function checks whether the HOST IF is enabled for command operation
2813 + * and also checks whether the previous command is completed. It busy waits
2814 + * in case of previous command is not completed.
2816 +s32 e1000_mng_enable_host_if(struct e1000_hw * hw)
2818 + if (hw->mac.ops.mng_enable_host_if)
2819 + return hw->mac.ops.mng_enable_host_if(hw);
2821 + return E1000_NOT_IMPLEMENTED;
2825 + * e1000_wait_autoneg - Waits for autonegotiation completion
2826 + * @hw: pointer to the HW structure
2828 + * Waits for autoneg to complete. Currently no func pointer exists and all
2829 + * implementations are handled in the generic version of this function.
2831 +s32 e1000_wait_autoneg(struct e1000_hw *hw)
2833 + if (hw->mac.ops.wait_autoneg)
2834 + return hw->mac.ops.wait_autoneg(hw);
2836 + return E1000_SUCCESS;
2840 + * e1000_check_reset_block - Verifies PHY can be reset
2841 + * @hw: pointer to the HW structure
2843 + * Checks if the PHY is in a state that can be reset or if manageability
2844 + * has it tied up. This is a function pointer entry point called by drivers.
2846 +s32 e1000_check_reset_block(struct e1000_hw *hw)
2848 + if (hw->phy.ops.check_reset_block)
2849 + return hw->phy.ops.check_reset_block(hw);
2851 + return E1000_SUCCESS;
2855 + * e1000_read_phy_reg - Reads PHY register
2856 + * @hw: pointer to the HW structure
2857 + * @offset: the register to read
2858 + * @data: the buffer to store the 16-bit read.
2860 + * Reads the PHY register and returns the value in data.
2861 + * This is a function pointer entry point called by drivers.
2863 +s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data)
2865 + if (hw->phy.ops.read_reg)
2866 + return hw->phy.ops.read_reg(hw, offset, data);
2868 + return E1000_SUCCESS;
2872 + * e1000_write_phy_reg - Writes PHY register
2873 + * @hw: pointer to the HW structure
2874 + * @offset: the register to write
2875 + * @data: the value to write.
2877 + * Writes the PHY register at offset with the value in data.
2878 + * This is a function pointer entry point called by drivers.
2880 +s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data)
2882 + if (hw->phy.ops.write_reg)
2883 + return hw->phy.ops.write_reg(hw, offset, data);
2885 + return E1000_SUCCESS;
2889 + * e1000_release_phy - Generic release PHY
2890 + * @hw: pointer to the HW structure
2892 + * Return if silicon family does not require a semaphore when accessing the
2895 +void e1000_release_phy(struct e1000_hw *hw)
2897 + if (hw->phy.ops.release)
2898 + hw->phy.ops.release(hw);
2902 + * e1000_acquire_phy - Generic acquire PHY
2903 + * @hw: pointer to the HW structure
2905 + * Return success if silicon family does not require a semaphore when
2906 + * accessing the PHY.
2908 +s32 e1000_acquire_phy(struct e1000_hw *hw)
2910 + if (hw->phy.ops.acquire)
2911 + return hw->phy.ops.acquire(hw);
2913 + return E1000_SUCCESS;
2917 + * e1000_read_kmrn_reg - Reads register using Kumeran interface
2918 + * @hw: pointer to the HW structure
2919 + * @offset: the register to read
2920 + * @data: the location to store the 16-bit value read.
2922 + * Reads a register out of the Kumeran interface. Currently no func pointer
2923 + * exists and all implementations are handled in the generic version of
2926 +s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data)
2928 + return e1000_read_kmrn_reg_generic(hw, offset, data);
2932 + * e1000_write_kmrn_reg - Writes register using Kumeran interface
2933 + * @hw: pointer to the HW structure
2934 + * @offset: the register to write
2935 + * @data: the value to write.
2937 + * Writes a register to the Kumeran interface. Currently no func pointer
2938 + * exists and all implementations are handled in the generic version of
2941 +s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data)
2943 + return e1000_write_kmrn_reg_generic(hw, offset, data);
2947 + * e1000_get_cable_length - Retrieves cable length estimation
2948 + * @hw: pointer to the HW structure
2950 + * This function estimates the cable length and stores them in
2951 + * hw->phy.min_length and hw->phy.max_length. This is a function pointer
2952 + * entry point called by drivers.
2954 +s32 e1000_get_cable_length(struct e1000_hw *hw)
2956 + if (hw->phy.ops.get_cable_length)
2957 + return hw->phy.ops.get_cable_length(hw);
2959 + return E1000_SUCCESS;
2963 + * e1000_get_phy_info - Retrieves PHY information from registers
2964 + * @hw: pointer to the HW structure
2966 + * This function gets some information from various PHY registers and
2967 + * populates hw->phy values with it. This is a function pointer entry
2968 + * point called by drivers.
2970 +s32 e1000_get_phy_info(struct e1000_hw *hw)
2972 + if (hw->phy.ops.get_info)
2973 + return hw->phy.ops.get_info(hw);
2975 + return E1000_SUCCESS;
2979 + * e1000_phy_hw_reset - Hard PHY reset
2980 + * @hw: pointer to the HW structure
2982 + * Performs a hard PHY reset. This is a function pointer entry point called
2985 +s32 e1000_phy_hw_reset(struct e1000_hw *hw)
2987 + if (hw->phy.ops.reset)
2988 + return hw->phy.ops.reset(hw);
2990 + return E1000_SUCCESS;
2994 + * e1000_phy_commit - Soft PHY reset
2995 + * @hw: pointer to the HW structure
2997 + * Performs a soft PHY reset on those that apply. This is a function pointer
2998 + * entry point called by drivers.
3000 +s32 e1000_phy_commit(struct e1000_hw *hw)
3002 + if (hw->phy.ops.commit)
3003 + return hw->phy.ops.commit(hw);
3005 + return E1000_SUCCESS;
3009 + * e1000_set_d0_lplu_state - Sets low power link up state for D0
3010 + * @hw: pointer to the HW structure
3011 + * @active: boolean used to enable/disable lplu
3013 + * Success returns 0, Failure returns 1
3015 + * The low power link up (lplu) state is set to the power management level D0
3016 + * and SmartSpeed is disabled when active is true, else clear lplu for D0
3017 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU
3018 + * is used during Dx states where the power conservation is most important.
3019 + * During driver activity, SmartSpeed should be enabled so performance is
3020 + * maintained. This is a function pointer entry point called by drivers.
3022 +s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
3024 + if (hw->phy.ops.set_d0_lplu_state)
3025 + return hw->phy.ops.set_d0_lplu_state(hw, active);
3027 + return E1000_SUCCESS;
3031 + * e1000_set_d3_lplu_state - Sets low power link up state for D3
3032 + * @hw: pointer to the HW structure
3033 + * @active: boolean used to enable/disable lplu
3035 + * Success returns 0, Failure returns 1
3037 + * The low power link up (lplu) state is set to the power management level D3
3038 + * and SmartSpeed is disabled when active is true, else clear lplu for D3
3039 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU
3040 + * is used during Dx states where the power conservation is most important.
3041 + * During driver activity, SmartSpeed should be enabled so performance is
3042 + * maintained. This is a function pointer entry point called by drivers.
3044 +s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
3046 + if (hw->phy.ops.set_d3_lplu_state)
3047 + return hw->phy.ops.set_d3_lplu_state(hw, active);
3049 + return E1000_SUCCESS;
3053 + * e1000_read_mac_addr - Reads MAC address
3054 + * @hw: pointer to the HW structure
3056 + * Reads the MAC address out of the adapter and stores it in the HW structure.
3057 + * Currently no func pointer exists and all implementations are handled in the
3058 + * generic version of this function.
3060 +s32 e1000_read_mac_addr(struct e1000_hw *hw)
3062 + if (hw->mac.ops.read_mac_addr)
3063 + return hw->mac.ops.read_mac_addr(hw);
3065 + return e1000_read_mac_addr_generic(hw);
3069 + * e1000_read_pba_num - Read device part number
3070 + * @hw: pointer to the HW structure
3071 + * @pba_num: pointer to device part number
3073 + * Reads the product board assembly (PBA) number from the EEPROM and stores
3074 + * the value in pba_num.
3075 + * Currently no func pointer exists and all implementations are handled in the
3076 + * generic version of this function.
3078 +s32 e1000_read_pba_num(struct e1000_hw *hw, u32 *pba_num)
3080 + return e1000_read_pba_num_generic(hw, pba_num);
3084 + * e1000_validate_nvm_checksum - Verifies NVM (EEPROM) checksum
3085 + * @hw: pointer to the HW structure
3087 + * Validates the NVM checksum is correct. This is a function pointer entry
3088 + * point called by drivers.
3090 +s32 e1000_validate_nvm_checksum(struct e1000_hw *hw)
3092 + if (hw->nvm.ops.validate)
3093 + return hw->nvm.ops.validate(hw);
3095 + return -E1000_ERR_CONFIG;
3099 + * e1000_update_nvm_checksum - Updates NVM (EEPROM) checksum
3100 + * @hw: pointer to the HW structure
3102 + * Updates the NVM checksum via the registered update function pointer.
3103 + * This is a function pointer entry point called by drivers.
3105 +s32 e1000_update_nvm_checksum(struct e1000_hw *hw)
3107 + if (hw->nvm.ops.update)
3108 + return hw->nvm.ops.update(hw);
3110 + return -E1000_ERR_CONFIG;
3114 + * e1000_reload_nvm - Reloads EEPROM
3115 + * @hw: pointer to the HW structure
3117 + * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
3118 + * extended control register.
3120 +void e1000_reload_nvm(struct e1000_hw *hw)
3122 + if (hw->nvm.ops.reload)
3123 + hw->nvm.ops.reload(hw);
3127 + * e1000_read_nvm - Reads NVM (EEPROM)
3128 + * @hw: pointer to the HW structure
3129 + * @offset: the word offset to read
3130 + * @words: number of 16-bit words to read
3131 + * @data: pointer to the properly sized buffer for the data.
3133 + * Reads 16-bit chunks of data from the NVM (EEPROM). This is a function
3134 + * pointer entry point called by drivers.
3136 +s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
3138 + if (hw->nvm.ops.read)
3139 + return hw->nvm.ops.read(hw, offset, words, data);
3141 + return -E1000_ERR_CONFIG;
3145 + * e1000_write_nvm - Writes to NVM (EEPROM)
3146 + * @hw: pointer to the HW structure
3147 + * @offset: the word offset to write
3148 + * @words: number of 16-bit words to write
3149 + * @data: pointer to the properly sized buffer for the data.
3151 + * Writes 16-bit chunks of data to the NVM (EEPROM). This is a function
3152 + * pointer entry point called by drivers.
3154 +s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
3156 + if (hw->nvm.ops.write)
3157 + return hw->nvm.ops.write(hw, offset, words, data);
3159 + return E1000_SUCCESS;
3163 + * e1000_write_8bit_ctrl_reg - Writes 8bit Control register
3164 + * @hw: pointer to the HW structure
3165 + * @reg: 32bit register offset
3166 + * @offset: the register to write
3167 + * @data: the value to write.
3169 + * Writes the PHY register at offset with the value in data.
3170 + * This is a function pointer entry point called by drivers.
3172 +s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, u32 offset,
3175 + return e1000_write_8bit_ctrl_reg_generic(hw, reg, offset, data);
3179 + * e1000_power_up_phy - Restores link in case of PHY power down
3180 + * @hw: pointer to the HW structure
3182 + * The phy may be powered down to save power, to turn off link when the
3183 + * driver is unloaded, or wake on lan is not enabled (among others).
3185 +void e1000_power_up_phy(struct e1000_hw *hw)
3187 + if (hw->phy.ops.power_up)
3188 + hw->phy.ops.power_up(hw);
3190 + e1000_setup_link(hw);
3194 + * e1000_power_down_phy - Power down PHY
3195 + * @hw: pointer to the HW structure
3197 + * The phy may be powered down to save power, to turn off link when the
3198 + * driver is unloaded, or wake on lan is not enabled (among others).
3200 +void e1000_power_down_phy(struct e1000_hw *hw)
3202 + if (hw->phy.ops.power_down)
3203 + hw->phy.ops.power_down(hw);
3207 + * e1000_shutdown_fiber_serdes_link - Remove link during power down
3208 + * @hw: pointer to the HW structure
3210 + * Shutdown the optics and PCS on driver unload.
3212 +void e1000_shutdown_fiber_serdes_link(struct e1000_hw *hw)
3214 + if (hw->mac.ops.shutdown_serdes)
3215 + hw->mac.ops.shutdown_serdes(hw);
3218 Index: linux-2.6.22/drivers/net/igb/e1000_api.h
3219 ===================================================================
3220 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
3221 +++ linux-2.6.22/drivers/net/igb/e1000_api.h 2009-12-18 12:39:22.000000000 -0500
3223 +/*******************************************************************************
3225 + Intel(R) Gigabit Ethernet Linux driver
3226 + Copyright(c) 2007-2009 Intel Corporation.
3228 + This program is free software; you can redistribute it and/or modify it
3229 + under the terms and conditions of the GNU General Public License,
3230 + version 2, as published by the Free Software Foundation.
3232 + This program is distributed in the hope it will be useful, but WITHOUT
3233 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
3234 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
3237 + You should have received a copy of the GNU General Public License along with
3238 + this program; if not, write to the Free Software Foundation, Inc.,
3239 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
3241 + The full GNU General Public License is included in this distribution in
3242 + the file called "COPYING".
3244 + Contact Information:
3245 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
3246 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
3248 +*******************************************************************************/
3250 +#ifndef _E1000_API_H_
3251 +#define _E1000_API_H_
3253 +#include "e1000_hw.h"
3255 +extern void e1000_init_function_pointers_82575(struct e1000_hw *hw);
3256 +extern void e1000_rx_fifo_flush_82575(struct e1000_hw *hw);
3257 +extern void e1000_init_function_pointers_vf(struct e1000_hw *hw);
3258 +extern void e1000_shutdown_fiber_serdes_link(struct e1000_hw *hw);
3260 +s32 e1000_set_mac_type(struct e1000_hw *hw);
3261 +s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device);
3262 +s32 e1000_init_mac_params(struct e1000_hw *hw);
3263 +s32 e1000_init_nvm_params(struct e1000_hw *hw);
3264 +s32 e1000_init_phy_params(struct e1000_hw *hw);
3265 +s32 e1000_init_mbx_params(struct e1000_hw *hw);
3266 +s32 e1000_get_bus_info(struct e1000_hw *hw);
3267 +void e1000_clear_vfta(struct e1000_hw *hw);
3268 +void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value);
3269 +s32 e1000_force_mac_fc(struct e1000_hw *hw);
3270 +s32 e1000_check_for_link(struct e1000_hw *hw);
3271 +s32 e1000_reset_hw(struct e1000_hw *hw);
3272 +s32 e1000_init_hw(struct e1000_hw *hw);
3273 +s32 e1000_setup_link(struct e1000_hw *hw);
3274 +s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed,
3276 +s32 e1000_disable_pcie_master(struct e1000_hw *hw);
3277 +void e1000_config_collision_dist(struct e1000_hw *hw);
3278 +void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
3279 +void e1000_mta_set(struct e1000_hw *hw, u32 hash_value);
3280 +u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr);
3281 +void e1000_update_mc_addr_list(struct e1000_hw *hw,
3282 + u8 *mc_addr_list, u32 mc_addr_count);
3283 +s32 e1000_setup_led(struct e1000_hw *hw);
3284 +s32 e1000_cleanup_led(struct e1000_hw *hw);
3285 +s32 e1000_check_reset_block(struct e1000_hw *hw);
3286 +s32 e1000_blink_led(struct e1000_hw *hw);
3287 +s32 e1000_led_on(struct e1000_hw *hw);
3288 +s32 e1000_led_off(struct e1000_hw *hw);
3289 +s32 e1000_id_led_init(struct e1000_hw *hw);
3290 +void e1000_reset_adaptive(struct e1000_hw *hw);
3291 +void e1000_update_adaptive(struct e1000_hw *hw);
3292 +s32 e1000_get_cable_length(struct e1000_hw *hw);
3293 +s32 e1000_validate_mdi_setting(struct e1000_hw *hw);
3294 +s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data);
3295 +s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data);
3296 +s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
3297 + u32 offset, u8 data);
3298 +s32 e1000_get_phy_info(struct e1000_hw *hw);
3299 +void e1000_release_phy(struct e1000_hw *hw);
3300 +s32 e1000_acquire_phy(struct e1000_hw *hw);
3301 +s32 e1000_phy_hw_reset(struct e1000_hw *hw);
3302 +s32 e1000_phy_commit(struct e1000_hw *hw);
3303 +void e1000_power_up_phy(struct e1000_hw *hw);
3304 +void e1000_power_down_phy(struct e1000_hw *hw);
3305 +s32 e1000_read_mac_addr(struct e1000_hw *hw);
3306 +s32 e1000_read_pba_num(struct e1000_hw *hw, u32 *part_num);
3307 +void e1000_reload_nvm(struct e1000_hw *hw);
3308 +s32 e1000_update_nvm_checksum(struct e1000_hw *hw);
3309 +s32 e1000_validate_nvm_checksum(struct e1000_hw *hw);
3310 +s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
3311 +s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
3312 +s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
3313 +s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words,
3315 +s32 e1000_wait_autoneg(struct e1000_hw *hw);
3316 +s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active);
3317 +s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active);
3318 +bool e1000_check_mng_mode(struct e1000_hw *hw);
3319 +bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw);
3320 +s32 e1000_mng_enable_host_if(struct e1000_hw *hw);
3321 +s32 e1000_mng_host_if_write(struct e1000_hw *hw,
3322 + u8 *buffer, u16 length, u16 offset, u8 *sum);
3323 +s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
3324 + struct e1000_host_mng_command_header *hdr);
3325 +s32 e1000_mng_write_dhcp_info(struct e1000_hw * hw,
3326 + u8 *buffer, u16 length);
3329 + * TBI_ACCEPT macro definition:
3331 + * This macro requires:
3332 + * adapter = a pointer to struct e1000_hw
3333 + * status = the 8 bit status field of the Rx descriptor with EOP set
3334 + * error = the 8 bit error field of the Rx descriptor with EOP set
3335 + * length = the sum of all the length fields of the Rx descriptors that
3336 + * make up the current frame
3337 + * last_byte = the last byte of the frame DMAed by the hardware
3338 + * max_frame_length = the maximum frame length we want to accept.
3339 + * min_frame_length = the minimum frame length we want to accept.
3341 + * This macro is a conditional that should be used in the interrupt
3342 + * handler's Rx processing routine when RxErrors have been detected.
3346 + * if (TBI_ACCEPT) {
3347 + * accept_frame = true;
3348 + * e1000_tbi_adjust_stats(adapter, MacAddress);
3351 + * accept_frame = false;
3356 +/* The carrier extension symbol, as received by the NIC. */
3357 +#define CARRIER_EXTENSION 0x0F
3359 +#define TBI_ACCEPT(a, status, errors, length, last_byte, min_frame_size, max_frame_size) \
3360 + (e1000_tbi_sbp_enabled_82543(a) && \
3361 + (((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \
3362 + ((last_byte) == CARRIER_EXTENSION) && \
3363 + (((status) & E1000_RXD_STAT_VP) ? \
3364 + (((length) > (min_frame_size - VLAN_TAG_SIZE)) && \
3365 + ((length) <= (max_frame_size + 1))) : \
3366 + (((length) > min_frame_size) && \
3367 + ((length) <= (max_frame_size + VLAN_TAG_SIZE + 1)))))
3370 Index: linux-2.6.22/drivers/net/igb/e1000_defines.h
3371 ===================================================================
3372 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
3373 +++ linux-2.6.22/drivers/net/igb/e1000_defines.h 2009-12-18 12:39:22.000000000 -0500
3375 +/*******************************************************************************
3377 + Intel(R) Gigabit Ethernet Linux driver
3378 + Copyright(c) 2007-2009 Intel Corporation.
3380 + This program is free software; you can redistribute it and/or modify it
3381 + under the terms and conditions of the GNU General Public License,
3382 + version 2, as published by the Free Software Foundation.
3384 + This program is distributed in the hope it will be useful, but WITHOUT
3385 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
3386 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
3389 + You should have received a copy of the GNU General Public License along with
3390 + this program; if not, write to the Free Software Foundation, Inc.,
3391 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
3393 + The full GNU General Public License is included in this distribution in
3394 + the file called "COPYING".
3396 + Contact Information:
3397 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
3398 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
3400 +*******************************************************************************/
3402 +#ifndef _E1000_DEFINES_H_
3403 +#define _E1000_DEFINES_H_
3405 +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
3406 +#define REQ_TX_DESCRIPTOR_MULTIPLE 8
3407 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8
3409 +/* Definitions for power management and wakeup registers */
3410 +/* Wake Up Control */
3411 +#define E1000_WUC_APME 0x00000001 /* APM Enable */
3412 +#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */
3413 +#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */
3414 +#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */
3415 +#define E1000_WUC_LSCWE 0x00000010 /* Link Status wake up enable */
3416 +#define E1000_WUC_LSCWO 0x00000020 /* Link Status wake up override */
3417 +#define E1000_WUC_SPM 0x80000000 /* Enable SPM */
3418 +#define E1000_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */
3420 +/* Wake Up Filter Control */
3421 +#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
3422 +#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
3423 +#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
3424 +#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
3425 +#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
3426 +#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
3427 +#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
3428 +#define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */
3429 +#define E1000_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */
3430 +#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
3431 +#define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */
3432 +#define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */
3433 +#define E1000_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */
3434 +#define E1000_WUFC_FLX4 0x00100000 /* Flexible Filter 4 Enable */
3435 +#define E1000_WUFC_FLX5 0x00200000 /* Flexible Filter 5 Enable */
3436 +#define E1000_WUFC_ALL_FILTERS 0x000F00FF /* Mask for all wakeup filters */
3437 +#define E1000_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */
3438 +#define E1000_WUFC_FLX_FILTERS 0x000F0000 /*Mask for the 4 flexible filters */
3440 + * For 82576 to utilize Extended filter masks in addition to
3441 + * existing (filter) masks
3443 +#define E1000_WUFC_EXT_FLX_FILTERS 0x00300000 /* Ext. FLX filter mask */
3445 +/* Wake Up Status */
3446 +#define E1000_WUS_LNKC E1000_WUFC_LNKC
3447 +#define E1000_WUS_MAG E1000_WUFC_MAG
3448 +#define E1000_WUS_EX E1000_WUFC_EX
3449 +#define E1000_WUS_MC E1000_WUFC_MC
3450 +#define E1000_WUS_BC E1000_WUFC_BC
3451 +#define E1000_WUS_ARP E1000_WUFC_ARP
3452 +#define E1000_WUS_IPV4 E1000_WUFC_IPV4
3453 +#define E1000_WUS_IPV6 E1000_WUFC_IPV6
3454 +#define E1000_WUS_FLX0 E1000_WUFC_FLX0
3455 +#define E1000_WUS_FLX1 E1000_WUFC_FLX1
3456 +#define E1000_WUS_FLX2 E1000_WUFC_FLX2
3457 +#define E1000_WUS_FLX3 E1000_WUFC_FLX3
3458 +#define E1000_WUS_FLX_FILTERS E1000_WUFC_FLX_FILTERS
3460 +/* Wake Up Packet Length */
3461 +#define E1000_WUPL_LENGTH_MASK 0x0FFF /* Only the lower 12 bits are valid */
3463 +/* Four Flexible Filters are supported */
3464 +#define E1000_FLEXIBLE_FILTER_COUNT_MAX 4
3465 +/* Two Extended Flexible Filters are supported (82576) */
3466 +#define E1000_EXT_FLEXIBLE_FILTER_COUNT_MAX 2
3467 +#define E1000_FHFT_LENGTH_OFFSET 0xFC /* Length byte in FHFT */
3468 +#define E1000_FHFT_LENGTH_MASK 0x0FF /* Length in lower byte */
3470 +/* Each Flexible Filter is at most 128 (0x80) bytes in length */
3471 +#define E1000_FLEXIBLE_FILTER_SIZE_MAX 128
3473 +#define E1000_FFLT_SIZE E1000_FLEXIBLE_FILTER_COUNT_MAX
3474 +#define E1000_FFMT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX
3475 +#define E1000_FFVT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX
3477 +/* Extended Device Control */
3478 +#define E1000_CTRL_EXT_GPI0_EN 0x00000001 /* Maps SDP4 to GPI0 */
3479 +#define E1000_CTRL_EXT_GPI1_EN 0x00000002 /* Maps SDP5 to GPI1 */
3480 +#define E1000_CTRL_EXT_PHYINT_EN E1000_CTRL_EXT_GPI1_EN
3481 +#define E1000_CTRL_EXT_GPI2_EN 0x00000004 /* Maps SDP6 to GPI2 */
3482 +#define E1000_CTRL_EXT_GPI3_EN 0x00000008 /* Maps SDP7 to GPI3 */
3483 +/* Reserved (bits 4,5) in >= 82575 */
3484 +#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* Value of SW Definable Pin 4 */
3485 +#define E1000_CTRL_EXT_SDP5_DATA 0x00000020 /* Value of SW Definable Pin 5 */
3486 +#define E1000_CTRL_EXT_PHY_INT E1000_CTRL_EXT_SDP5_DATA
3487 +#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* Value of SW Definable Pin 6 */
3488 +#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Definable Pin 3 */
3489 +/* SDP 4/5 (bits 8,9) are reserved in >= 82575 */
3490 +#define E1000_CTRL_EXT_SDP4_DIR 0x00000100 /* Direction of SDP4 0=in 1=out */
3491 +#define E1000_CTRL_EXT_SDP5_DIR 0x00000200 /* Direction of SDP5 0=in 1=out */
3492 +#define E1000_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */
3493 +#define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* Direction of SDP3 0=in 1=out */
3494 +#define E1000_CTRL_EXT_ASDCHK 0x00001000 /* Initiate an ASD sequence */
3495 +#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */
3496 +#define E1000_CTRL_EXT_IPS 0x00004000 /* Invert Power State */
3497 +/* Physical Func Reset Done Indication */
3498 +#define E1000_CTRL_EXT_PFRSTD 0x00004000
3499 +#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */
3500 +#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
3501 +#define E1000_CTRL_EXT_DMA_DYN_CLK_EN 0x00080000 /* DMA Dynamic Clock Gating */
3502 +#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
3503 +#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
3504 +#define E1000_CTRL_EXT_LINK_MODE_TBI 0x00C00000
3505 +#define E1000_CTRL_EXT_LINK_MODE_KMRN 0x00000000
3506 +#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
3507 +#define E1000_CTRL_EXT_LINK_MODE_PCIX_SERDES 0x00800000
3508 +#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000
3509 +#define E1000_CTRL_EXT_EIAME 0x01000000
3510 +#define E1000_CTRL_EXT_IRCA 0x00000001
3511 +#define E1000_CTRL_EXT_WR_WMARK_MASK 0x03000000
3512 +#define E1000_CTRL_EXT_WR_WMARK_256 0x00000000
3513 +#define E1000_CTRL_EXT_WR_WMARK_320 0x01000000
3514 +#define E1000_CTRL_EXT_WR_WMARK_384 0x02000000
3515 +#define E1000_CTRL_EXT_WR_WMARK_448 0x03000000
3516 +#define E1000_CTRL_EXT_CANC 0x04000000 /* Int delay cancellation */
3517 +#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
3518 +/* IAME enable bit (27) was removed in >= 82575 */
3519 +#define E1000_CTRL_EXT_IAME 0x08000000 /* Int acknowledge Auto-mask */
3520 +#define E1000_CRTL_EXT_PB_PAREN 0x01000000 /* packet buffer parity error
3521 + * detection enabled */
3522 +#define E1000_CTRL_EXT_DF_PAREN 0x02000000 /* descriptor FIFO parity
3523 + * error detection enable */
3524 +#define E1000_CTRL_EXT_GHOST_PAREN 0x40000000
3525 +#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
3526 +#define E1000_I2CCMD_REG_ADDR_SHIFT 16
3527 +#define E1000_I2CCMD_REG_ADDR 0x00FF0000
3528 +#define E1000_I2CCMD_PHY_ADDR_SHIFT 24
3529 +#define E1000_I2CCMD_PHY_ADDR 0x07000000
3530 +#define E1000_I2CCMD_OPCODE_READ 0x08000000
3531 +#define E1000_I2CCMD_OPCODE_WRITE 0x00000000
3532 +#define E1000_I2CCMD_RESET 0x10000000
3533 +#define E1000_I2CCMD_READY 0x20000000
3534 +#define E1000_I2CCMD_INTERRUPT_ENA 0x40000000
3535 +#define E1000_I2CCMD_ERROR 0x80000000
3536 +#define E1000_MAX_SGMII_PHY_REG_ADDR 255
3537 +#define E1000_I2CCMD_PHY_TIMEOUT 200
3538 +#define E1000_IVAR_VALID 0x80
3539 +#define E1000_GPIE_NSICR 0x00000001
3540 +#define E1000_GPIE_MSIX_MODE 0x00000010
3541 +#define E1000_GPIE_EIAME 0x40000000
3542 +#define E1000_GPIE_PBA 0x80000000
3544 +/* Receive Descriptor bit definitions */
3545 +#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */
3546 +#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */
3547 +#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */
3548 +#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
3549 +#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
3550 +#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
3551 +#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
3552 +#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */
3553 +#define E1000_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */
3554 +#define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */
3555 +#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */
3556 +#define E1000_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */
3557 +#define E1000_RXD_STAT_ACK 0x8000 /* ACK Packet indication */
3558 +#define E1000_RXD_ERR_CE 0x01 /* CRC Error */
3559 +#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */
3560 +#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */
3561 +#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */
3562 +#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */
3563 +#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */
3564 +#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */
3565 +#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
3566 +#define E1000_RXD_SPC_PRI_MASK 0xE000 /* Priority is in upper 3 bits */
3567 +#define E1000_RXD_SPC_PRI_SHIFT 13
3568 +#define E1000_RXD_SPC_CFI_MASK 0x1000 /* CFI is bit 12 */
3569 +#define E1000_RXD_SPC_CFI_SHIFT 12
3571 +#define E1000_RXDEXT_STATERR_CE 0x01000000
3572 +#define E1000_RXDEXT_STATERR_SE 0x02000000
3573 +#define E1000_RXDEXT_STATERR_SEQ 0x04000000
3574 +#define E1000_RXDEXT_STATERR_CXE 0x10000000
3575 +#define E1000_RXDEXT_STATERR_TCPE 0x20000000
3576 +#define E1000_RXDEXT_STATERR_IPE 0x40000000
3577 +#define E1000_RXDEXT_STATERR_RXE 0x80000000
3579 +/* mask to determine if packets should be dropped due to frame errors */
3580 +#define E1000_RXD_ERR_FRAME_ERR_MASK ( \
3581 + E1000_RXD_ERR_CE | \
3582 + E1000_RXD_ERR_SE | \
3583 + E1000_RXD_ERR_SEQ | \
3584 + E1000_RXD_ERR_CXE | \
3585 + E1000_RXD_ERR_RXE)
3587 +/* Same mask, but for extended and packet split descriptors */
3588 +#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
3589 + E1000_RXDEXT_STATERR_CE | \
3590 + E1000_RXDEXT_STATERR_SE | \
3591 + E1000_RXDEXT_STATERR_SEQ | \
3592 + E1000_RXDEXT_STATERR_CXE | \
3593 + E1000_RXDEXT_STATERR_RXE)
3595 +#define E1000_MRQC_ENABLE_MASK 0x00000007
3596 +#define E1000_MRQC_ENABLE_RSS_2Q 0x00000001
3597 +#define E1000_MRQC_ENABLE_RSS_INT 0x00000004
3598 +#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000
3599 +#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
3600 +#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000
3601 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000
3602 +#define E1000_MRQC_RSS_FIELD_IPV6_EX 0x00080000
3603 +#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000
3604 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000
3606 +#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000
3607 +#define E1000_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF
3609 +/* Management Control */
3610 +#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */
3611 +#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */
3612 +#define E1000_MANC_R_ON_FORCE 0x00000004 /* Reset on Force TCO - RO */
3613 +#define E1000_MANC_RMCP_EN 0x00000100 /* Enable RCMP 026Fh Filtering */
3614 +#define E1000_MANC_0298_EN 0x00000200 /* Enable RCMP 0298h Filtering */
3615 +#define E1000_MANC_IPV4_EN 0x00000400 /* Enable IPv4 */
3616 +#define E1000_MANC_IPV6_EN 0x00000800 /* Enable IPv6 */
3617 +#define E1000_MANC_SNAP_EN 0x00001000 /* Accept LLC/SNAP */
3618 +#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */
3619 +/* Enable Neighbor Discovery Filtering */
3620 +#define E1000_MANC_NEIGHBOR_EN 0x00004000
3621 +#define E1000_MANC_ARP_RES_EN 0x00008000 /* Enable ARP response Filtering */
3622 +#define E1000_MANC_TCO_RESET 0x00010000 /* TCO Reset Occurred */
3623 +#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */
3624 +#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */
3625 +#define E1000_MANC_RCV_ALL 0x00080000 /* Receive All Enabled */
3626 +#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */
3627 +/* Enable MAC address filtering */
3628 +#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000
3629 +/* Enable MNG packets to host memory */
3630 +#define E1000_MANC_EN_MNG2HOST 0x00200000
3631 +/* Enable IP address filtering */
3632 +#define E1000_MANC_EN_IP_ADDR_FILTER 0x00400000
3633 +#define E1000_MANC_EN_XSUM_FILTER 0x00800000 /* Enable checksum filtering */
3634 +#define E1000_MANC_BR_EN 0x01000000 /* Enable broadcast filtering */
3635 +#define E1000_MANC_SMB_REQ 0x01000000 /* SMBus Request */
3636 +#define E1000_MANC_SMB_GNT 0x02000000 /* SMBus Grant */
3637 +#define E1000_MANC_SMB_CLK_IN 0x04000000 /* SMBus Clock In */
3638 +#define E1000_MANC_SMB_DATA_IN 0x08000000 /* SMBus Data In */
3639 +#define E1000_MANC_SMB_DATA_OUT 0x10000000 /* SMBus Data Out */
3640 +#define E1000_MANC_SMB_CLK_OUT 0x20000000 /* SMBus Clock Out */
3642 +#define E1000_MANC_SMB_DATA_OUT_SHIFT 28 /* SMBus Data Out Shift */
3643 +#define E1000_MANC_SMB_CLK_OUT_SHIFT 29 /* SMBus Clock Out Shift */
3645 +/* Receive Control */
3646 +#define E1000_RCTL_RST 0x00000001 /* Software reset */
3647 +#define E1000_RCTL_EN 0x00000002 /* enable */
3648 +#define E1000_RCTL_SBP 0x00000004 /* store bad packet */
3649 +#define E1000_RCTL_UPE 0x00000008 /* unicast promisc enable */
3650 +#define E1000_RCTL_MPE 0x00000010 /* multicast promisc enable */
3651 +#define E1000_RCTL_LPE 0x00000020 /* long packet enable */
3652 +#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */
3653 +#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */
3654 +#define E1000_RCTL_LBM_SLP 0x00000080 /* serial link loopback mode */
3655 +#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */
3656 +#define E1000_RCTL_DTYP_MASK 0x00000C00 /* Descriptor type mask */
3657 +#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */
3658 +#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min thresh size */
3659 +#define E1000_RCTL_RDMTS_QUAT 0x00000100 /* rx desc min thresh size */
3660 +#define E1000_RCTL_RDMTS_EIGTH 0x00000200 /* rx desc min thresh size */
3661 +#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */
3662 +#define E1000_RCTL_MO_0 0x00000000 /* multicast offset 11:0 */
3663 +#define E1000_RCTL_MO_1 0x00001000 /* multicast offset 12:1 */
3664 +#define E1000_RCTL_MO_2 0x00002000 /* multicast offset 13:2 */
3665 +#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */
3666 +#define E1000_RCTL_MDR 0x00004000 /* multicast desc ring 0 */
3667 +#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */
3668 +/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */
3669 +#define E1000_RCTL_SZ_2048 0x00000000 /* rx buffer size 2048 */
3670 +#define E1000_RCTL_SZ_1024 0x00010000 /* rx buffer size 1024 */
3671 +#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */
3672 +#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */
3673 +/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */
3674 +#define E1000_RCTL_SZ_16384 0x00010000 /* rx buffer size 16384 */
3675 +#define E1000_RCTL_SZ_8192 0x00020000 /* rx buffer size 8192 */
3676 +#define E1000_RCTL_SZ_4096 0x00030000 /* rx buffer size 4096 */
3677 +#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */
3678 +#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */
3679 +#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */
3680 +#define E1000_RCTL_DPF 0x00400000 /* discard pause frames */
3681 +#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */
3682 +#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */
3683 +#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
3684 +#define E1000_RCTL_FLXBUF_MASK 0x78000000 /* Flexible buffer size */
3685 +#define E1000_RCTL_FLXBUF_SHIFT 27 /* Flexible buffer shift */
3688 + * Use byte values for the following shift parameters
3690 + * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
3691 + * E1000_PSRCTL_BSIZE0_MASK) |
3692 + * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
3693 + * E1000_PSRCTL_BSIZE1_MASK) |
3694 + * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
3695 + * E1000_PSRCTL_BSIZE2_MASK) |
3696 + * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) &
3697 + * E1000_PSRCTL_BSIZE3_MASK))
3698 + * where value0 = [128..16256], default=256
3699 + * value1 = [1024..64512], default=4096
3700 + * value2 = [0..64512], default=4096
3701 + * value3 = [0..64512], default=0
3704 +#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F
3705 +#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00
3706 +#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000
3707 +#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000
3709 +#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */
3710 +#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */
3711 +#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */
3712 +#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */
3714 +/* SWFW_SYNC Definitions */
3715 +#define E1000_SWFW_EEP_SM 0x01
3716 +#define E1000_SWFW_PHY0_SM 0x02
3717 +#define E1000_SWFW_PHY1_SM 0x04
3718 +#define E1000_SWFW_CSR_SM 0x08
3720 +/* FACTPS Definitions */
3721 +#define E1000_FACTPS_LFS 0x40000000 /* LAN Function Select */
3722 +/* Device Control */
3723 +#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */
3724 +#define E1000_CTRL_BEM 0x00000002 /* Endian Mode.0=little,1=big */
3725 +#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */
3726 +#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master reqs */
3727 +#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */
3728 +#define E1000_CTRL_TME 0x00000010 /* Test mode. 0=normal,1=test */
3729 +#define E1000_CTRL_SLE 0x00000020 /* Serial Link on 0=dis,1=en */
3730 +#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */
3731 +#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */
3732 +#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */
3733 +#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */
3734 +#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */
3735 +#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */
3736 +#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */
3737 +#define E1000_CTRL_BEM32 0x00000400 /* Big Endian 32 mode */
3738 +#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */
3739 +#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
3740 +#define E1000_CTRL_D_UD_EN 0x00002000 /* Dock/Undock enable */
3741 +#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock
3742 + * indication in SDP[0] */
3743 +#define E1000_CTRL_FORCE_PHY_RESET 0x00008000 /* Reset both PHY ports, through
3745 +#define E1000_CTRL_EXT_LINK_EN 0x00010000 /* enable link status from external
3746 + * LINK_0 and LINK_1 pins */
3747 +#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
3748 +#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
3749 +#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */
3750 +#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */
3751 +#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */
3752 +#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
3753 +#define E1000_CTRL_SWDPIO1 0x00800000 /* SWDPIN 1 input or output */
3754 +#define E1000_CTRL_SWDPIO2 0x01000000 /* SWDPIN 2 input or output */
3755 +#define E1000_CTRL_SWDPIO3 0x02000000 /* SWDPIN 3 input or output */
3756 +#define E1000_CTRL_RST 0x04000000 /* Global reset */
3757 +#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
3758 +#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
3759 +#define E1000_CTRL_RTE 0x20000000 /* Routing tag enable */
3760 +#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */
3761 +#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */
3762 +#define E1000_CTRL_SW2FW_INT 0x02000000 /* Initiate an interrupt to ME */
3763 +#define E1000_CTRL_I2C_ENA 0x02000000 /* I2C enable */
3766 + * Bit definitions for the Management Data IO (MDIO) and Management Data
3767 + * Clock (MDC) pins in the Device Control Register.
3769 +#define E1000_CTRL_PHY_RESET_DIR E1000_CTRL_SWDPIO0
3770 +#define E1000_CTRL_PHY_RESET E1000_CTRL_SWDPIN0
3771 +#define E1000_CTRL_MDIO_DIR E1000_CTRL_SWDPIO2
3772 +#define E1000_CTRL_MDIO E1000_CTRL_SWDPIN2
3773 +#define E1000_CTRL_MDC_DIR E1000_CTRL_SWDPIO3
3774 +#define E1000_CTRL_MDC E1000_CTRL_SWDPIN3
3775 +#define E1000_CTRL_PHY_RESET_DIR4 E1000_CTRL_EXT_SDP4_DIR
3776 +#define E1000_CTRL_PHY_RESET4 E1000_CTRL_EXT_SDP4_DATA
3778 +#define E1000_CONNSW_ENRGSRC 0x4
3779 +#define E1000_PCS_CFG_PCS_EN 8
3780 +#define E1000_PCS_LCTL_FLV_LINK_UP 1
3781 +#define E1000_PCS_LCTL_FSV_10 0
3782 +#define E1000_PCS_LCTL_FSV_100 2
3783 +#define E1000_PCS_LCTL_FSV_1000 4
3784 +#define E1000_PCS_LCTL_FDV_FULL 8
3785 +#define E1000_PCS_LCTL_FSD 0x10
3786 +#define E1000_PCS_LCTL_FORCE_LINK 0x20
3787 +#define E1000_PCS_LCTL_LOW_LINK_LATCH 0x40
3788 +#define E1000_PCS_LCTL_FORCE_FCTRL 0x80
3789 +#define E1000_PCS_LCTL_AN_ENABLE 0x10000
3790 +#define E1000_PCS_LCTL_AN_RESTART 0x20000
3791 +#define E1000_PCS_LCTL_AN_TIMEOUT 0x40000
3792 +#define E1000_PCS_LCTL_AN_SGMII_BYPASS 0x80000
3793 +#define E1000_PCS_LCTL_AN_SGMII_TRIGGER 0x100000
3794 +#define E1000_PCS_LCTL_FAST_LINK_TIMER 0x1000000
3795 +#define E1000_PCS_LCTL_LINK_OK_FIX 0x2000000
3796 +#define E1000_PCS_LCTL_CRS_ON_NI 0x4000000
3797 +#define E1000_ENABLE_SERDES_LOOPBACK 0x0410
3799 +#define E1000_PCS_LSTS_LINK_OK 1
3800 +#define E1000_PCS_LSTS_SPEED_10 0
3801 +#define E1000_PCS_LSTS_SPEED_100 2
3802 +#define E1000_PCS_LSTS_SPEED_1000 4
3803 +#define E1000_PCS_LSTS_DUPLEX_FULL 8
3804 +#define E1000_PCS_LSTS_SYNK_OK 0x10
3805 +#define E1000_PCS_LSTS_AN_COMPLETE 0x10000
3806 +#define E1000_PCS_LSTS_AN_PAGE_RX 0x20000
3807 +#define E1000_PCS_LSTS_AN_TIMED_OUT 0x40000
3808 +#define E1000_PCS_LSTS_AN_REMOTE_FAULT 0x80000
3809 +#define E1000_PCS_LSTS_AN_ERROR_RWS 0x100000
3811 +/* Device Status */
3812 +#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */
3813 +#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */
3814 +#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */
3815 +#define E1000_STATUS_FUNC_SHIFT 2
3816 +#define E1000_STATUS_FUNC_0 0x00000000 /* Function 0 */
3817 +#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */
3818 +#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */
3819 +#define E1000_STATUS_TBIMODE 0x00000020 /* TBI mode */
3820 +#define E1000_STATUS_SPEED_MASK 0x000000C0
3821 +#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */
3822 +#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
3823 +#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
3824 +#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion by NVM */
3825 +#define E1000_STATUS_ASDV 0x00000300 /* Auto speed detect value */
3826 +#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */
3827 +#define E1000_STATUS_DOCK_CI 0x00000800 /* Change in Dock/Undock state.
3828 + * Clear on write '0'. */
3829 +#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Master request status */
3830 +#define E1000_STATUS_MTXCKOK 0x00000400 /* MTX clock running OK */
3831 +#define E1000_STATUS_PCI66 0x00000800 /* In 66Mhz slot */
3832 +#define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */
3833 +#define E1000_STATUS_PCIX_MODE 0x00002000 /* PCI-X mode */
3834 +#define E1000_STATUS_PCIX_SPEED 0x0000C000 /* PCI-X bus speed */
3835 +#define E1000_STATUS_BMC_SKU_0 0x00100000 /* BMC USB redirect disabled */
3836 +#define E1000_STATUS_BMC_SKU_1 0x00200000 /* BMC SRAM disabled */
3837 +#define E1000_STATUS_BMC_SKU_2 0x00400000 /* BMC SDRAM disabled */
3838 +#define E1000_STATUS_BMC_CRYPTO 0x00800000 /* BMC crypto disabled */
3839 +#define E1000_STATUS_BMC_LITE 0x01000000 /* BMC external code execution
3841 +#define E1000_STATUS_RGMII_ENABLE 0x02000000 /* RGMII disabled */
3842 +#define E1000_STATUS_FUSE_8 0x04000000
3843 +#define E1000_STATUS_FUSE_9 0x08000000
3844 +#define E1000_STATUS_SERDES0_DIS 0x10000000 /* SERDES disabled on port 0 */
3845 +#define E1000_STATUS_SERDES1_DIS 0x20000000 /* SERDES disabled on port 1 */
3847 +/* Constants used to interpret the masked PCI-X bus speed. */
3848 +#define E1000_STATUS_PCIX_SPEED_66 0x00000000 /* PCI-X bus speed 50-66 MHz */
3849 +#define E1000_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus speed 66-100 MHz */
3850 +#define E1000_STATUS_PCIX_SPEED_133 0x00008000 /* PCI-X bus speed 100-133 MHz */
3852 +#define SPEED_10 10
3853 +#define SPEED_100 100
3854 +#define SPEED_1000 1000
3855 +#define HALF_DUPLEX 1
3856 +#define FULL_DUPLEX 2
3858 +#define PHY_FORCE_TIME 20
3860 +#define ADVERTISE_10_HALF 0x0001
3861 +#define ADVERTISE_10_FULL 0x0002
3862 +#define ADVERTISE_100_HALF 0x0004
3863 +#define ADVERTISE_100_FULL 0x0008
3864 +#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */
3865 +#define ADVERTISE_1000_FULL 0x0020
3867 +/* 1000/H is not supported, nor spec-compliant. */
3868 +#define E1000_ALL_SPEED_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \
3869 + ADVERTISE_100_HALF | ADVERTISE_100_FULL | \
3870 + ADVERTISE_1000_FULL)
3871 +#define E1000_ALL_NOT_GIG (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \
3872 + ADVERTISE_100_HALF | ADVERTISE_100_FULL)
3873 +#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL)
3874 +#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL)
3875 +#define E1000_ALL_FULL_DUPLEX (ADVERTISE_10_FULL | ADVERTISE_100_FULL | \
3876 + ADVERTISE_1000_FULL)
3877 +#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF)
3879 +#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX
3882 +#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F
3883 +#define E1000_LEDCTL_LED0_MODE_SHIFT 0
3884 +#define E1000_LEDCTL_LED0_BLINK_RATE 0x00000020
3885 +#define E1000_LEDCTL_LED0_IVRT 0x00000040
3886 +#define E1000_LEDCTL_LED0_BLINK 0x00000080
3887 +#define E1000_LEDCTL_LED1_MODE_MASK 0x00000F00
3888 +#define E1000_LEDCTL_LED1_MODE_SHIFT 8
3889 +#define E1000_LEDCTL_LED1_BLINK_RATE 0x00002000
3890 +#define E1000_LEDCTL_LED1_IVRT 0x00004000
3891 +#define E1000_LEDCTL_LED1_BLINK 0x00008000
3892 +#define E1000_LEDCTL_LED2_MODE_MASK 0x000F0000
3893 +#define E1000_LEDCTL_LED2_MODE_SHIFT 16
3894 +#define E1000_LEDCTL_LED2_BLINK_RATE 0x00200000
3895 +#define E1000_LEDCTL_LED2_IVRT 0x00400000
3896 +#define E1000_LEDCTL_LED2_BLINK 0x00800000
3897 +#define E1000_LEDCTL_LED3_MODE_MASK 0x0F000000
3898 +#define E1000_LEDCTL_LED3_MODE_SHIFT 24
3899 +#define E1000_LEDCTL_LED3_BLINK_RATE 0x20000000
3900 +#define E1000_LEDCTL_LED3_IVRT 0x40000000
3901 +#define E1000_LEDCTL_LED3_BLINK 0x80000000
3903 +#define E1000_LEDCTL_MODE_LINK_10_1000 0x0
3904 +#define E1000_LEDCTL_MODE_LINK_100_1000 0x1
3905 +#define E1000_LEDCTL_MODE_LINK_UP 0x2
3906 +#define E1000_LEDCTL_MODE_ACTIVITY 0x3
3907 +#define E1000_LEDCTL_MODE_LINK_ACTIVITY 0x4
3908 +#define E1000_LEDCTL_MODE_LINK_10 0x5
3909 +#define E1000_LEDCTL_MODE_LINK_100 0x6
3910 +#define E1000_LEDCTL_MODE_LINK_1000 0x7
3911 +#define E1000_LEDCTL_MODE_PCIX_MODE 0x8
3912 +#define E1000_LEDCTL_MODE_FULL_DUPLEX 0x9
3913 +#define E1000_LEDCTL_MODE_COLLISION 0xA
3914 +#define E1000_LEDCTL_MODE_BUS_SPEED 0xB
3915 +#define E1000_LEDCTL_MODE_BUS_SIZE 0xC
3916 +#define E1000_LEDCTL_MODE_PAUSED 0xD
3917 +#define E1000_LEDCTL_MODE_LED_ON 0xE
3918 +#define E1000_LEDCTL_MODE_LED_OFF 0xF
3920 +/* Transmit Descriptor bit definitions */
3921 +#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */
3922 +#define E1000_TXD_DTYP_C 0x00000000 /* Context Descriptor */
3923 +#define E1000_TXD_POPTS_SHIFT 8 /* POPTS shift */
3924 +#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
3925 +#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
3926 +#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */
3927 +#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
3928 +#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */
3929 +#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */
3930 +#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */
3931 +#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
3932 +#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
3933 +#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */
3934 +#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */
3935 +#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */
3936 +#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */
3937 +#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */
3938 +#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */
3939 +#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */
3940 +#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */
3941 +#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */
3942 +/* Extended desc bits for Linksec and timesync */
3944 +/* Transmit Control */
3945 +#define E1000_TCTL_RST 0x00000001 /* software reset */
3946 +#define E1000_TCTL_EN 0x00000002 /* enable tx */
3947 +#define E1000_TCTL_BCE 0x00000004 /* busy check enable */
3948 +#define E1000_TCTL_PSP 0x00000008 /* pad short packets */
3949 +#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */
3950 +#define E1000_TCTL_COLD 0x003ff000 /* collision distance */
3951 +#define E1000_TCTL_SWXOFF 0x00400000 /* SW Xoff transmission */
3952 +#define E1000_TCTL_PBE 0x00800000 /* Packet Burst Enable */
3953 +#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
3954 +#define E1000_TCTL_NRTU 0x02000000 /* No Re-transmit on underrun */
3955 +#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */
3957 +/* Transmit Arbitration Count */
3958 +#define E1000_TARC0_ENABLE 0x00000400 /* Enable Tx Queue 0 */
3960 +/* SerDes Control */
3961 +#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
3963 +/* Receive Checksum Control */
3964 +#define E1000_RXCSUM_PCSS_MASK 0x000000FF /* Packet Checksum Start */
3965 +#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */
3966 +#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */
3967 +#define E1000_RXCSUM_IPV6OFL 0x00000400 /* IPv6 checksum offload */
3968 +#define E1000_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */
3969 +#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */
3970 +#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
3972 +/* Header split receive */
3973 +#define E1000_RFCTL_ISCSI_DIS 0x00000001
3974 +#define E1000_RFCTL_ISCSI_DWC_MASK 0x0000003E
3975 +#define E1000_RFCTL_ISCSI_DWC_SHIFT 1
3976 +#define E1000_RFCTL_NFSW_DIS 0x00000040
3977 +#define E1000_RFCTL_NFSR_DIS 0x00000080
3978 +#define E1000_RFCTL_NFS_VER_MASK 0x00000300
3979 +#define E1000_RFCTL_NFS_VER_SHIFT 8
3980 +#define E1000_RFCTL_IPV6_DIS 0x00000400
3981 +#define E1000_RFCTL_IPV6_XSUM_DIS 0x00000800
3982 +#define E1000_RFCTL_ACK_DIS 0x00001000
3983 +#define E1000_RFCTL_ACKD_DIS 0x00002000
3984 +#define E1000_RFCTL_IPFRSP_DIS 0x00004000
3985 +#define E1000_RFCTL_EXTEN 0x00008000
3986 +#define E1000_RFCTL_IPV6_EX_DIS 0x00010000
3987 +#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000
3988 +#define E1000_RFCTL_LEF 0x00040000
3990 +/* Collision related configuration parameters */
3991 +#define E1000_COLLISION_THRESHOLD 15
3992 +#define E1000_CT_SHIFT 4
3993 +#define E1000_COLLISION_DISTANCE 63
3994 +#define E1000_COLD_SHIFT 12
3996 +/* Default values for the transmit IPG register */
3997 +#define DEFAULT_82543_TIPG_IPGT_FIBER 9
3998 +#define DEFAULT_82543_TIPG_IPGT_COPPER 8
4000 +#define E1000_TIPG_IPGT_MASK 0x000003FF
4001 +#define E1000_TIPG_IPGR1_MASK 0x000FFC00
4002 +#define E1000_TIPG_IPGR2_MASK 0x3FF00000
4004 +#define DEFAULT_82543_TIPG_IPGR1 8
4005 +#define E1000_TIPG_IPGR1_SHIFT 10
4007 +#define DEFAULT_82543_TIPG_IPGR2 6
4008 +#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7
4009 +#define E1000_TIPG_IPGR2_SHIFT 20
4011 +/* Ethertype field values */
4012 +#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */
4014 +#define ETHERNET_FCS_SIZE 4
4015 +#define MAX_JUMBO_FRAME_SIZE 0x3F00
4017 +/* Extended Configuration Control and Size */
4018 +#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020
4019 +#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001
4020 +#define E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE 0x00000008
4021 +#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020
4022 +#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK 0x00FF0000
4023 +#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT 16
4024 +#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK 0x0FFF0000
4025 +#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT 16
4027 +#define E1000_PHY_CTRL_SPD_EN 0x00000001
4028 +#define E1000_PHY_CTRL_D0A_LPLU 0x00000002
4029 +#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004
4030 +#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008
4031 +#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040
4033 +#define E1000_KABGTXD_BGSQLBIAS 0x00050000
4035 +/* PBA constants */
4036 +#define E1000_PBA_6K 0x0006 /* 6KB */
4037 +#define E1000_PBA_8K 0x0008 /* 8KB */
4038 +#define E1000_PBA_10K 0x000A /* 10KB */
4039 +#define E1000_PBA_12K 0x000C /* 12KB */
4040 +#define E1000_PBA_14K 0x000E /* 14KB */
4041 +#define E1000_PBA_16K 0x0010 /* 16KB */
4042 +#define E1000_PBA_18K 0x0012
4043 +#define E1000_PBA_20K 0x0014
4044 +#define E1000_PBA_22K 0x0016
4045 +#define E1000_PBA_24K 0x0018
4046 +#define E1000_PBA_26K 0x001A
4047 +#define E1000_PBA_30K 0x001E
4048 +#define E1000_PBA_32K 0x0020
4049 +#define E1000_PBA_34K 0x0022
4050 +#define E1000_PBA_35K 0x0023
4051 +#define E1000_PBA_38K 0x0026
4052 +#define E1000_PBA_40K 0x0028
4053 +#define E1000_PBA_48K 0x0030 /* 48KB */
4054 +#define E1000_PBA_64K 0x0040 /* 64KB */
4056 +#define E1000_PBS_16K E1000_PBA_16K
4057 +#define E1000_PBS_24K E1000_PBA_24K
4061 +#define IFS_RATIO 4
4062 +#define IFS_STEP 10
4063 +#define MIN_NUM_XMITS 1000
4065 +/* SW Semaphore Register */
4066 +#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
4067 +#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
4068 +#define E1000_SWSM_WMNG 0x00000004 /* Wake MNG Clock */
4069 +#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */
4071 +#define E1000_SWSM2_LOCK 0x00000002 /* Secondary driver semaphore bit */
4073 +/* Interrupt Cause Read */
4074 +#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */
4075 +#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */
4076 +#define E1000_ICR_LSC 0x00000004 /* Link Status Change */
4077 +#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */
4078 +#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */
4079 +#define E1000_ICR_RXO 0x00000040 /* rx overrun */
4080 +#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */
4081 +#define E1000_ICR_VMMB 0x00000100 /* VM MB event */
4082 +#define E1000_ICR_MDAC 0x00000200 /* MDIO access complete */
4083 +#define E1000_ICR_RXCFG 0x00000400 /* Rx /c/ ordered set */
4084 +#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */
4085 +#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */
4086 +#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */
4087 +#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */
4088 +#define E1000_ICR_TXD_LOW 0x00008000
4089 +#define E1000_ICR_SRPD 0x00010000
4090 +#define E1000_ICR_ACK 0x00020000 /* Receive Ack frame */
4091 +#define E1000_ICR_MNG 0x00040000 /* Manageability event */
4092 +#define E1000_ICR_DOCK 0x00080000 /* Dock/Undock */
4093 +#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver
4094 + * should claim the interrupt */
4095 +#define E1000_ICR_RXD_FIFO_PAR0 0x00100000 /* Q0 Rx desc FIFO parity error */
4096 +#define E1000_ICR_TXD_FIFO_PAR0 0x00200000 /* Q0 Tx desc FIFO parity error */
4097 +#define E1000_ICR_HOST_ARB_PAR 0x00400000 /* host arb read buffer parity err */
4098 +#define E1000_ICR_PB_PAR 0x00800000 /* packet buffer parity error */
4099 +#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* Q1 Rx desc FIFO parity error */
4100 +#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* Q1 Tx desc FIFO parity error */
4101 +#define E1000_ICR_ALL_PARITY 0x03F00000 /* all parity error bits */
4102 +#define E1000_ICR_DSW 0x00000020 /* FW changed the status of DISSW
4103 + * bit in the FWSM */
4104 +#define E1000_ICR_PHYINT 0x00001000 /* LAN connected device generates
4106 +#define E1000_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */
4107 +#define E1000_ICR_EPRST 0x00100000 /* ME hardware reset occurs */
4110 +/* Extended Interrupt Cause Read */
4111 +#define E1000_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */
4112 +#define E1000_EICR_RX_QUEUE1 0x00000002 /* Rx Queue 1 Interrupt */
4113 +#define E1000_EICR_RX_QUEUE2 0x00000004 /* Rx Queue 2 Interrupt */
4114 +#define E1000_EICR_RX_QUEUE3 0x00000008 /* Rx Queue 3 Interrupt */
4115 +#define E1000_EICR_TX_QUEUE0 0x00000100 /* Tx Queue 0 Interrupt */
4116 +#define E1000_EICR_TX_QUEUE1 0x00000200 /* Tx Queue 1 Interrupt */
4117 +#define E1000_EICR_TX_QUEUE2 0x00000400 /* Tx Queue 2 Interrupt */
4118 +#define E1000_EICR_TX_QUEUE3 0x00000800 /* Tx Queue 3 Interrupt */
4119 +#define E1000_EICR_TCP_TIMER 0x40000000 /* TCP Timer */
4120 +#define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active */
4122 +#define E1000_TCPTIMER_KS 0x00000100 /* KickStart */
4123 +#define E1000_TCPTIMER_COUNT_ENABLE 0x00000200 /* Count Enable */
4124 +#define E1000_TCPTIMER_COUNT_FINISH 0x00000400 /* Count finish */
4125 +#define E1000_TCPTIMER_LOOP 0x00000800 /* Loop */
4128 + * This defines the bits that are set in the Interrupt Mask
4129 + * Set/Read Register. Each bit is documented below:
4130 + * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
4131 + * o RXSEQ = Receive Sequence Error
4133 +#define POLL_IMS_ENABLE_MASK ( \
4134 + E1000_IMS_RXDMT0 | \
4138 + * This defines the bits that are set in the Interrupt Mask
4139 + * Set/Read Register. Each bit is documented below:
4140 + * o RXT0 = Receiver Timer Interrupt (ring 0)
4141 + * o TXDW = Transmit Descriptor Written Back
4142 + * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
4143 + * o RXSEQ = Receive Sequence Error
4144 + * o LSC = Link Status Change
4146 +#define IMS_ENABLE_MASK ( \
4147 + E1000_IMS_RXT0 | \
4148 + E1000_IMS_TXDW | \
4149 + E1000_IMS_RXDMT0 | \
4150 + E1000_IMS_RXSEQ | \
4153 +/* Interrupt Mask Set */
4154 +#define E1000_IMS_TXDW E1000_ICR_TXDW /* Tx desc written back */
4155 +#define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */
4156 +#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */
4157 +#define E1000_IMS_VMMB E1000_ICR_VMMB /* Mail box activity */
4158 +#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
4159 +#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
4160 +#define E1000_IMS_RXO E1000_ICR_RXO /* rx overrun */
4161 +#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */
4162 +#define E1000_IMS_MDAC E1000_ICR_MDAC /* MDIO access complete */
4163 +#define E1000_IMS_RXCFG E1000_ICR_RXCFG /* Rx /c/ ordered set */
4164 +#define E1000_IMS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */
4165 +#define E1000_IMS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */
4166 +#define E1000_IMS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */
4167 +#define E1000_IMS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */
4168 +#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW
4169 +#define E1000_IMS_SRPD E1000_ICR_SRPD
4170 +#define E1000_IMS_ACK E1000_ICR_ACK /* Receive Ack frame */
4171 +#define E1000_IMS_MNG E1000_ICR_MNG /* Manageability event */
4172 +#define E1000_IMS_DOCK E1000_ICR_DOCK /* Dock/Undock */
4173 +#define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* Q0 Rx desc FIFO
4175 +#define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* Q0 Tx desc FIFO
4177 +#define E1000_IMS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer
4179 +#define E1000_IMS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity
4181 +#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* Q1 Rx desc FIFO
4183 +#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* Q1 Tx desc FIFO
4185 +#define E1000_IMS_DSW E1000_ICR_DSW
4186 +#define E1000_IMS_PHYINT E1000_ICR_PHYINT
4187 +#define E1000_IMS_DOUTSYNC E1000_ICR_DOUTSYNC /* NIC DMA out of sync */
4188 +#define E1000_IMS_EPRST E1000_ICR_EPRST
4190 +/* Extended Interrupt Mask Set */
4191 +#define E1000_EIMS_RX_QUEUE0 E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */
4192 +#define E1000_EIMS_RX_QUEUE1 E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */
4193 +#define E1000_EIMS_RX_QUEUE2 E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */
4194 +#define E1000_EIMS_RX_QUEUE3 E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */
4195 +#define E1000_EIMS_TX_QUEUE0 E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */
4196 +#define E1000_EIMS_TX_QUEUE1 E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */
4197 +#define E1000_EIMS_TX_QUEUE2 E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */
4198 +#define E1000_EIMS_TX_QUEUE3 E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */
4199 +#define E1000_EIMS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */
4200 +#define E1000_EIMS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */
4202 +/* Interrupt Cause Set */
4203 +#define E1000_ICS_TXDW E1000_ICR_TXDW /* Tx desc written back */
4204 +#define E1000_ICS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */
4205 +#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */
4206 +#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
4207 +#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
4208 +#define E1000_ICS_RXO E1000_ICR_RXO /* rx overrun */
4209 +#define E1000_ICS_RXT0 E1000_ICR_RXT0 /* rx timer intr */
4210 +#define E1000_ICS_MDAC E1000_ICR_MDAC /* MDIO access complete */
4211 +#define E1000_ICS_RXCFG E1000_ICR_RXCFG /* Rx /c/ ordered set */
4212 +#define E1000_ICS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */
4213 +#define E1000_ICS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */
4214 +#define E1000_ICS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */
4215 +#define E1000_ICS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */
4216 +#define E1000_ICS_TXD_LOW E1000_ICR_TXD_LOW
4217 +#define E1000_ICS_SRPD E1000_ICR_SRPD
4218 +#define E1000_ICS_ACK E1000_ICR_ACK /* Receive Ack frame */
4219 +#define E1000_ICS_MNG E1000_ICR_MNG /* Manageability event */
4220 +#define E1000_ICS_DOCK E1000_ICR_DOCK /* Dock/Undock */
4221 +#define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* Q0 Rx desc FIFO
4223 +#define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* Q0 Tx desc FIFO
4225 +#define E1000_ICS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer
4227 +#define E1000_ICS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity
4229 +#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* Q1 Rx desc FIFO
4231 +#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* Q1 Tx desc FIFO
4233 +#define E1000_ICS_DSW E1000_ICR_DSW
4234 +#define E1000_ICS_DOUTSYNC E1000_ICR_DOUTSYNC /* NIC DMA out of sync */
4235 +#define E1000_ICS_PHYINT E1000_ICR_PHYINT
4236 +#define E1000_ICS_EPRST E1000_ICR_EPRST
4238 +/* Extended Interrupt Cause Set */
4239 +#define E1000_EICS_RX_QUEUE0 E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */
4240 +#define E1000_EICS_RX_QUEUE1 E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */
4241 +#define E1000_EICS_RX_QUEUE2 E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */
4242 +#define E1000_EICS_RX_QUEUE3 E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */
4243 +#define E1000_EICS_TX_QUEUE0 E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */
4244 +#define E1000_EICS_TX_QUEUE1 E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */
4245 +#define E1000_EICS_TX_QUEUE2 E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */
4246 +#define E1000_EICS_TX_QUEUE3 E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */
4247 +#define E1000_EICS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */
4248 +#define E1000_EICS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */
4250 +#define E1000_EITR_ITR_INT_MASK 0x0000FFFF
4252 +/* Transmit Descriptor Control */
4253 +#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */
4254 +#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */
4255 +#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */
4256 +#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */
4257 +#define E1000_TXDCTL_LWTHRESH 0xFE000000 /* TXDCTL Low Threshold */
4258 +#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
4259 +#define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */
4260 +/* Enable the counting of descriptors still to be processed. */
4261 +#define E1000_TXDCTL_COUNT_DESC 0x00400000
4263 +/* Flow Control Constants */
4264 +#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001
4265 +#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
4266 +#define FLOW_CONTROL_TYPE 0x8808
4268 +/* 802.1q VLAN Packet Size */
4269 +#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */
4270 +#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
4272 +/* Receive Address */
4274 + * Number of high/low register pairs in the RAR. The RAR (Receive Address
4275 + * Registers) holds the directed and multicast addresses that we monitor.
4276 + * Technically, we have 16 spots. However, we reserve one of these spots
4277 + * (RAR[15]) for our directed address used by controllers with
4278 + * manageability enabled, allowing us room for 15 multicast addresses.
4280 +#define E1000_RAR_ENTRIES 15
4281 +#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */
4282 +#define E1000_RAL_MAC_ADDR_LEN 4
4283 +#define E1000_RAH_MAC_ADDR_LEN 2
4284 +#define E1000_RAH_POOL_MASK 0x03FC0000
4285 +#define E1000_RAH_POOL_1 0x00040000
4288 +#define E1000_SUCCESS 0
4289 +#define E1000_ERR_NVM 1
4290 +#define E1000_ERR_PHY 2
4291 +#define E1000_ERR_CONFIG 3
4292 +#define E1000_ERR_PARAM 4
4293 +#define E1000_ERR_MAC_INIT 5
4294 +#define E1000_ERR_PHY_TYPE 6
4295 +#define E1000_ERR_RESET 9
4296 +#define E1000_ERR_MASTER_REQUESTS_PENDING 10
4297 +#define E1000_ERR_HOST_INTERFACE_COMMAND 11
4298 +#define E1000_BLK_PHY_RESET 12
4299 +#define E1000_ERR_SWFW_SYNC 13
4300 +#define E1000_NOT_IMPLEMENTED 14
4301 +#define E1000_ERR_MBX 15
4303 +/* Loop limit on how long we wait for auto-negotiation to complete */
4304 +#define FIBER_LINK_UP_LIMIT 50
4305 +#define COPPER_LINK_UP_LIMIT 10
4306 +#define PHY_AUTO_NEG_LIMIT 45
4307 +#define PHY_FORCE_LIMIT 20
4308 +/* Number of 100 microseconds we wait for PCI Express master disable */
4309 +#define MASTER_DISABLE_TIMEOUT 800
4310 +/* Number of milliseconds we wait for PHY configuration done after MAC reset */
4311 +#define PHY_CFG_TIMEOUT 100
4312 +/* Number of 2 milliseconds we wait for acquiring MDIO ownership. */
4313 +#define MDIO_OWNERSHIP_TIMEOUT 10
4314 +/* Number of milliseconds for NVM auto read done after MAC reset. */
4315 +#define AUTO_READ_DONE_TIMEOUT 10
4318 +#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */
4319 +#define E1000_FCRTH_XFCE 0x80000000 /* External Flow Control Enable */
4320 +#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */
4321 +#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */
4323 +/* Transmit Configuration Word */
4324 +#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */
4325 +#define E1000_TXCW_HD 0x00000040 /* TXCW half duplex */
4326 +#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */
4327 +#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */
4328 +#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */
4329 +#define E1000_TXCW_RF 0x00003000 /* TXCW remote fault */
4330 +#define E1000_TXCW_NP 0x00008000 /* TXCW next page */
4331 +#define E1000_TXCW_CW 0x0000ffff /* TxConfigWord mask */
4332 +#define E1000_TXCW_TXC 0x40000000 /* Transmit Config control */
4333 +#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */
4335 +/* Receive Configuration Word */
4336 +#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */
4337 +#define E1000_RXCW_NC 0x04000000 /* Receive config no carrier */
4338 +#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */
4339 +#define E1000_RXCW_CC 0x10000000 /* Receive config change */
4340 +#define E1000_RXCW_C 0x20000000 /* Receive config */
4341 +#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */
4342 +#define E1000_RXCW_ANC 0x80000000 /* Auto-neg complete */
4344 +#define E1000_TSYNCTXCTL_VALID 0x00000001 /* tx timestamp valid */
4345 +#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable tx timestamping */
4347 +#define E1000_TSYNCRXCTL_VALID 0x00000001 /* rx timestamp valid */
4348 +#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* rx type mask */
4349 +#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00
4350 +#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02
4351 +#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04
4352 +#define E1000_TSYNCRXCTL_TYPE_ALL 0x08
4353 +#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A
4354 +#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable rx timestamping */
4356 +#define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF
4357 +#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00
4358 +#define E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01
4359 +#define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE 0x02
4360 +#define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03
4361 +#define E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04
4363 +#define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK 0x00000F00
4364 +#define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE 0x0000
4365 +#define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE 0x0100
4366 +#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE 0x0200
4367 +#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE 0x0300
4368 +#define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE 0x0800
4369 +#define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE 0x0900
4370 +#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE 0x0A00
4371 +#define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE 0x0B00
4372 +#define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE 0x0C00
4373 +#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE 0x0D00
4375 +#define E1000_TIMINCA_16NS_SHIFT 24
4377 +/* PCI Express Control */
4378 +#define E1000_GCR_RXD_NO_SNOOP 0x00000001
4379 +#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002
4380 +#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004
4381 +#define E1000_GCR_TXD_NO_SNOOP 0x00000008
4382 +#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010
4383 +#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020
4384 +#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000
4385 +#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000
4386 +#define E1000_GCR_CMPL_TMOUT_RESEND 0x00010000
4387 +#define E1000_GCR_CAP_VER2 0x00040000
4389 +#define PCIE_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \
4390 + E1000_GCR_RXDSCW_NO_SNOOP | \
4391 + E1000_GCR_RXDSCR_NO_SNOOP | \
4392 + E1000_GCR_TXD_NO_SNOOP | \
4393 + E1000_GCR_TXDSCW_NO_SNOOP | \
4394 + E1000_GCR_TXDSCR_NO_SNOOP)
4396 +/* PHY Control Register */
4397 +#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */
4398 +#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */
4399 +#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
4400 +#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
4401 +#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */
4402 +#define MII_CR_POWER_DOWN 0x0800 /* Power down */
4403 +#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
4404 +#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */
4405 +#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
4406 +#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
4407 +#define MII_CR_SPEED_1000 0x0040
4408 +#define MII_CR_SPEED_100 0x2000
4409 +#define MII_CR_SPEED_10 0x0000
4411 +/* PHY Status Register */
4412 +#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */
4413 +#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */
4414 +#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
4415 +#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */
4416 +#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */
4417 +#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
4418 +#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
4419 +#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */
4420 +#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */
4421 +#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */
4422 +#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */
4423 +#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */
4424 +#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */
4425 +#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
4426 +#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
4428 +/* Autoneg Advertisement Register */
4429 +#define NWAY_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */
4430 +#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
4431 +#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
4432 +#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
4433 +#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
4434 +#define NWAY_AR_100T4_CAPS 0x0200 /* 100T4 Capable */
4435 +#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */
4436 +#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
4437 +#define NWAY_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */
4438 +#define NWAY_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */
4440 +/* Link Partner Ability Register (Base Page) */
4441 +#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */
4442 +#define NWAY_LPAR_10T_HD_CAPS 0x0020 /* LP is 10T Half Duplex Capable */
4443 +#define NWAY_LPAR_10T_FD_CAPS 0x0040 /* LP is 10T Full Duplex Capable */
4444 +#define NWAY_LPAR_100TX_HD_CAPS 0x0080 /* LP is 100TX Half Duplex Capable */
4445 +#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP is 100TX Full Duplex Capable */
4446 +#define NWAY_LPAR_100T4_CAPS 0x0200 /* LP is 100T4 Capable */
4447 +#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */
4448 +#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */
4449 +#define NWAY_LPAR_REMOTE_FAULT 0x2000 /* LP has detected Remote Fault */
4450 +#define NWAY_LPAR_ACKNOWLEDGE 0x4000 /* LP has rx'd link code word */
4451 +#define NWAY_LPAR_NEXT_PAGE 0x8000 /* Next Page ability supported */
4453 +/* Autoneg Expansion Register */
4454 +#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */
4455 +#define NWAY_ER_PAGE_RXD 0x0002 /* Link Code Word Page received */
4456 +#define NWAY_ER_NEXT_PAGE_CAPS 0x0004 /* Local device is Next Page able */
4457 +#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP is Next Page able */
4458 +#define NWAY_ER_PAR_DETECT_FAULT 0x0010 /* Parallel detection fault */
4460 +/* 1000BASE-T Control Register */
4461 +#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */
4462 +#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
4463 +#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */
4464 +#define CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port */
4465 + /* 0=DTE device */
4466 +#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */
4467 + /* 0=Configure PHY as Slave */
4468 +#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */
4469 + /* 0=Automatic Master/Slave config */
4470 +#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */
4471 +#define CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */
4472 +#define CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */
4473 +#define CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */
4474 +#define CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */
4476 +/* 1000BASE-T Status Register */
4477 +#define SR_1000T_IDLE_ERROR_CNT 0x00FF /* Num idle errors since last read */
4478 +#define SR_1000T_ASYM_PAUSE_DIR 0x0100 /* LP asymmetric pause direction bit */
4479 +#define SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */
4480 +#define SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */
4481 +#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
4482 +#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */
4483 +#define SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local Tx is Master, 0=Slave */
4484 +#define SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */
4486 +#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5
4488 +/* PHY 1000 MII Register/Bit Definitions */
4489 +/* PHY Registers defined by IEEE */
4490 +#define PHY_CONTROL 0x00 /* Control Register */
4491 +#define PHY_STATUS 0x01 /* Status Register */
4492 +#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */
4493 +#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */
4494 +#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */
4495 +#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */
4496 +#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */
4497 +#define PHY_NEXT_PAGE_TX 0x07 /* Next Page Tx */
4498 +#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */
4499 +#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */
4500 +#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
4501 +#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */
4503 +#define PHY_CONTROL_LB 0x4000 /* PHY Loopback bit */
4506 +#define E1000_EECD_SK 0x00000001 /* NVM Clock */
4507 +#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */
4508 +#define E1000_EECD_DI 0x00000004 /* NVM Data In */
4509 +#define E1000_EECD_DO 0x00000008 /* NVM Data Out */
4510 +#define E1000_EECD_FWE_MASK 0x00000030
4511 +#define E1000_EECD_FWE_DIS 0x00000010 /* Disable FLASH writes */
4512 +#define E1000_EECD_FWE_EN 0x00000020 /* Enable FLASH writes */
4513 +#define E1000_EECD_FWE_SHIFT 4
4514 +#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */
4515 +#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */
4516 +#define E1000_EECD_PRES 0x00000100 /* NVM Present */
4517 +#define E1000_EECD_SIZE 0x00000200 /* NVM Size (0=64 word 1=256 word) */
4518 +/* NVM Addressing bits based on type 0=small, 1=large */
4519 +#define E1000_EECD_ADDR_BITS 0x00000400
4520 +#define E1000_EECD_TYPE 0x00002000 /* NVM Type (1-SPI, 0-Microwire) */
4521 +#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */
4522 +#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */
4523 +#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */
4524 +#define E1000_EECD_SIZE_EX_SHIFT 11
4525 +#define E1000_EECD_NVADDS 0x00018000 /* NVM Address Size */
4526 +#define E1000_EECD_SELSHAD 0x00020000 /* Select Shadow RAM */
4527 +#define E1000_EECD_INITSRAM 0x00040000 /* Initialize Shadow RAM */
4528 +#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */
4529 +#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */
4530 +#define E1000_EECD_SHADV 0x00200000 /* Shadow RAM Data Valid */
4531 +#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */
4532 +#define E1000_EECD_SECVAL_SHIFT 22
4533 +#define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES)
4535 +#define E1000_NVM_SWDPIN0 0x0001 /* SWDPIN 0 NVM Value */
4536 +#define E1000_NVM_LED_LOGIC 0x0020 /* Led Logic Word */
4537 +#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM read/write regs */
4538 +#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */
4539 +#define E1000_NVM_RW_REG_START 1 /* Start operation */
4540 +#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
4541 +#define E1000_NVM_POLL_WRITE 1 /* Flag for polling for write complete */
4542 +#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */
4543 +#define E1000_FLASH_UPDATES 2000
4545 +/* NVM Word Offsets */
4546 +#define NVM_COMPAT 0x0003
4547 +#define NVM_ID_LED_SETTINGS 0x0004
4548 +#define NVM_VERSION 0x0005
4549 +#define NVM_SERDES_AMPLITUDE 0x0006 /* SERDES output amplitude */
4550 +#define NVM_PHY_CLASS_WORD 0x0007
4551 +#define NVM_INIT_CONTROL1_REG 0x000A
4552 +#define NVM_INIT_CONTROL2_REG 0x000F
4553 +#define NVM_SWDEF_PINS_CTRL_PORT_1 0x0010
4554 +#define NVM_INIT_CONTROL3_PORT_B 0x0014
4555 +#define NVM_INIT_3GIO_3 0x001A
4556 +#define NVM_SWDEF_PINS_CTRL_PORT_0 0x0020
4557 +#define NVM_INIT_CONTROL3_PORT_A 0x0024
4558 +#define NVM_CFG 0x0012
4559 +#define NVM_FLASH_VERSION 0x0032
4560 +#define NVM_ALT_MAC_ADDR_PTR 0x0037
4561 +#define NVM_CHECKSUM_REG 0x003F
4563 +#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */
4564 +#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */
4566 +/* Mask bits for fields in Word 0x0f of the NVM */
4567 +#define NVM_WORD0F_PAUSE_MASK 0x3000
4568 +#define NVM_WORD0F_PAUSE 0x1000
4569 +#define NVM_WORD0F_ASM_DIR 0x2000
4570 +#define NVM_WORD0F_ANE 0x0800
4571 +#define NVM_WORD0F_SWPDIO_EXT_MASK 0x00F0
4572 +#define NVM_WORD0F_LPLU 0x0001
4574 +/* Mask bits for fields in Word 0x1a of the NVM */
4575 +#define NVM_WORD1A_ASPM_MASK 0x000C
4577 +/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
4578 +#define NVM_SUM 0xBABA
4580 +#define NVM_MAC_ADDR_OFFSET 0
4581 +#define NVM_PBA_OFFSET_0 8
4582 +#define NVM_PBA_OFFSET_1 9
4583 +#define NVM_RESERVED_WORD 0xFFFF
4584 +#define NVM_PHY_CLASS_A 0x8000
4585 +#define NVM_SERDES_AMPLITUDE_MASK 0x000F
4586 +#define NVM_SIZE_MASK 0x1C00
4587 +#define NVM_SIZE_SHIFT 10
4588 +#define NVM_WORD_SIZE_BASE_SHIFT 6
4589 +#define NVM_SWDPIO_EXT_SHIFT 4
4591 +/* NVM Commands - SPI */
4592 +#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */
4593 +#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */
4594 +#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */
4595 +#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */
4596 +#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */
4597 +#define NVM_WRDI_OPCODE_SPI 0x04 /* NVM reset Write Enable latch */
4598 +#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */
4599 +#define NVM_WRSR_OPCODE_SPI 0x01 /* NVM write Status register */
4601 +/* SPI NVM Status Register */
4602 +#define NVM_STATUS_RDY_SPI 0x01
4603 +#define NVM_STATUS_WEN_SPI 0x02
4604 +#define NVM_STATUS_BP0_SPI 0x04
4605 +#define NVM_STATUS_BP1_SPI 0x08
4606 +#define NVM_STATUS_WPEN_SPI 0x80
4608 +/* Word definitions for ID LED Settings */
4609 +#define ID_LED_RESERVED_0000 0x0000
4610 +#define ID_LED_RESERVED_FFFF 0xFFFF
4611 +#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \
4612 + (ID_LED_OFF1_OFF2 << 8) | \
4613 + (ID_LED_DEF1_DEF2 << 4) | \
4614 + (ID_LED_DEF1_DEF2))
4615 +#define ID_LED_DEF1_DEF2 0x1
4616 +#define ID_LED_DEF1_ON2 0x2
4617 +#define ID_LED_DEF1_OFF2 0x3
4618 +#define ID_LED_ON1_DEF2 0x4
4619 +#define ID_LED_ON1_ON2 0x5
4620 +#define ID_LED_ON1_OFF2 0x6
4621 +#define ID_LED_OFF1_DEF2 0x7
4622 +#define ID_LED_OFF1_ON2 0x8
4623 +#define ID_LED_OFF1_OFF2 0x9
4625 +#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF
4626 +#define IGP_ACTIVITY_LED_ENABLE 0x0300
4627 +#define IGP_LED3_MODE 0x07000000
4629 +/* PCI/PCI-X/PCI-EX Config space */
4630 +#define PCI_HEADER_TYPE_REGISTER 0x0E
4631 +#define PCIE_LINK_STATUS 0x12
4632 +#define PCIE_DEVICE_CONTROL2 0x28
4634 +#define PCI_HEADER_TYPE_MULTIFUNC 0x80
4635 +#define PCIE_LINK_WIDTH_MASK 0x3F0
4636 +#define PCIE_LINK_WIDTH_SHIFT 4
4637 +#define PCIE_DEVICE_CONTROL2_16ms 0x0005
4639 +#ifndef ETH_ADDR_LEN
4640 +#define ETH_ADDR_LEN 6
4643 +#define PHY_REVISION_MASK 0xFFFFFFF0
4644 +#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
4645 +#define MAX_PHY_MULTI_PAGE_REG 0xF
4647 +/* Bit definitions for valid PHY IDs. */
4652 +#define M88E1000_E_PHY_ID 0x01410C50
4653 +#define M88E1000_I_PHY_ID 0x01410C30
4654 +#define M88E1011_I_PHY_ID 0x01410C20
4655 +#define IGP01E1000_I_PHY_ID 0x02A80380
4656 +#define M88E1011_I_REV_4 0x04
4657 +#define M88E1111_I_PHY_ID 0x01410CC0
4658 +#define GG82563_E_PHY_ID 0x01410CA0
4659 +#define IGP03E1000_E_PHY_ID 0x02A80390
4660 +#define IFE_E_PHY_ID 0x02A80330
4661 +#define IFE_PLUS_E_PHY_ID 0x02A80320
4662 +#define IFE_C_E_PHY_ID 0x02A80310
4663 +#define IGP04E1000_E_PHY_ID 0x02A80391
4664 +#define M88_VENDOR 0x0141
4666 +/* M88E1000 Specific Registers */
4667 +#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */
4668 +#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */
4669 +#define M88E1000_INT_ENABLE 0x12 /* Interrupt Enable Register */
4670 +#define M88E1000_INT_STATUS 0x13 /* Interrupt Status Register */
4671 +#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */
4672 +#define M88E1000_RX_ERR_CNTR 0x15 /* Receive Error Counter */
4674 +#define M88E1000_PHY_EXT_CTRL 0x1A /* PHY extend control register */
4675 +#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */
4676 +#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */
4677 +#define M88E1000_PHY_VCO_REG_BIT8 0x100 /* Bits 8 & 11 are adjusted for */
4678 +#define M88E1000_PHY_VCO_REG_BIT11 0x800 /* improved BER performance */
4680 +/* M88E1000 PHY Specific Control Register */
4681 +#define M88E1000_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */
4682 +#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reverse enabled */
4683 +#define M88E1000_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */
4684 +/* 1=CLK125 low, 0=CLK125 toggling */
4685 +#define M88E1000_PSCR_CLK125_DISABLE 0x0010
4686 +#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */
4687 + /* Manual MDI configuration */
4688 +#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */
4689 +/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */
4690 +#define M88E1000_PSCR_AUTO_X_1000T 0x0040
4691 +/* Auto crossover enabled all speeds */
4692 +#define M88E1000_PSCR_AUTO_X_MODE 0x0060
4694 + * 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold)
4695 + * 0=Normal 10BASE-T Rx Threshold
4697 +#define M88E1000_PSCR_EN_10BT_EXT_DIST 0x0080
4698 +/* 1=5-bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */
4699 +#define M88E1000_PSCR_MII_5BIT_ENABLE 0x0100
4700 +#define M88E1000_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler disable */
4701 +#define M88E1000_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */
4702 +#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Tx */
4704 +/* M88E1000 PHY Specific Status Register */
4705 +#define M88E1000_PSSR_JABBER 0x0001 /* 1=Jabber */
4706 +#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */
4707 +#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */
4708 +#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */
4716 +#define M88E1000_PSSR_CABLE_LENGTH 0x0380
4717 +#define M88E1000_PSSR_LINK 0x0400 /* 1=Link up, 0=Link down */
4718 +#define M88E1000_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */
4719 +#define M88E1000_PSSR_PAGE_RCVD 0x1000 /* 1=Page received */
4720 +#define M88E1000_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */
4721 +#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */
4722 +#define M88E1000_PSSR_10MBS 0x0000 /* 00=10Mbs */
4723 +#define M88E1000_PSSR_100MBS 0x4000 /* 01=100Mbs */
4724 +#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */
4726 +#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
4728 +/* M88E1000 Extended PHY Specific Control Register */
4729 +#define M88E1000_EPSCR_FIBER_LOOPBACK 0x4000 /* 1=Fiber loopback */
4731 + * 1 = Lost lock detect enabled.
4732 + * Will assert lost lock and bring
4733 + * link down if idle not seen
4734 + * within 1ms in 1000BASE-T
4736 +#define M88E1000_EPSCR_DOWN_NO_IDLE 0x8000
4738 + * Number of times we will attempt to autonegotiate before downshifting if we
4741 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
4742 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000
4743 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_2X 0x0400
4744 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_3X 0x0800
4745 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_4X 0x0C00
4747 + * Number of times we will attempt to autonegotiate before downshifting if we
4750 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300
4751 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_DIS 0x0000
4752 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100
4753 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_2X 0x0200
4754 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_3X 0x0300
4755 +#define M88E1000_EPSCR_TX_CLK_2_5 0x0060 /* 2.5 MHz TX_CLK */
4756 +#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */
4757 +#define M88E1000_EPSCR_TX_CLK_0 0x0000 /* NO TX_CLK */
4759 +/* M88EC018 Rev 2 specific DownShift settings */
4760 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00
4761 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_1X 0x0000
4762 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_2X 0x0200
4763 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_3X 0x0400
4764 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_4X 0x0600
4765 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800
4766 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_6X 0x0A00
4767 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_7X 0x0C00
4768 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_8X 0x0E00
4773 + * 4-0: register offset
4775 +#define GG82563_PAGE_SHIFT 5
4776 +#define GG82563_REG(page, reg) \
4777 + (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))
4778 +#define GG82563_MIN_ALT_REG 30
4780 +/* GG82563 Specific Registers */
4781 +#define GG82563_PHY_SPEC_CTRL \
4782 + GG82563_REG(0, 16) /* PHY Specific Control */
4783 +#define GG82563_PHY_SPEC_STATUS \
4784 + GG82563_REG(0, 17) /* PHY Specific Status */
4785 +#define GG82563_PHY_INT_ENABLE \
4786 + GG82563_REG(0, 18) /* Interrupt Enable */
4787 +#define GG82563_PHY_SPEC_STATUS_2 \
4788 + GG82563_REG(0, 19) /* PHY Specific Status 2 */
4789 +#define GG82563_PHY_RX_ERR_CNTR \
4790 + GG82563_REG(0, 21) /* Receive Error Counter */
4791 +#define GG82563_PHY_PAGE_SELECT \
4792 + GG82563_REG(0, 22) /* Page Select */
4793 +#define GG82563_PHY_SPEC_CTRL_2 \
4794 + GG82563_REG(0, 26) /* PHY Specific Control 2 */
4795 +#define GG82563_PHY_PAGE_SELECT_ALT \
4796 + GG82563_REG(0, 29) /* Alternate Page Select */
4797 +#define GG82563_PHY_TEST_CLK_CTRL \
4798 + GG82563_REG(0, 30) /* Test Clock Control (use reg. 29 to select) */
4800 +#define GG82563_PHY_MAC_SPEC_CTRL \
4801 + GG82563_REG(2, 21) /* MAC Specific Control Register */
4802 +#define GG82563_PHY_MAC_SPEC_CTRL_2 \
4803 + GG82563_REG(2, 26) /* MAC Specific Control 2 */
4805 +#define GG82563_PHY_DSP_DISTANCE \
4806 + GG82563_REG(5, 26) /* DSP Distance */
4808 +/* Page 193 - Port Control Registers */
4809 +#define GG82563_PHY_KMRN_MODE_CTRL \
4810 + GG82563_REG(193, 16) /* Kumeran Mode Control */
4811 +#define GG82563_PHY_PORT_RESET \
4812 + GG82563_REG(193, 17) /* Port Reset */
4813 +#define GG82563_PHY_REVISION_ID \
4814 + GG82563_REG(193, 18) /* Revision ID */
4815 +#define GG82563_PHY_DEVICE_ID \
4816 + GG82563_REG(193, 19) /* Device ID */
4817 +#define GG82563_PHY_PWR_MGMT_CTRL \
4818 + GG82563_REG(193, 20) /* Power Management Control */
4819 +#define GG82563_PHY_RATE_ADAPT_CTRL \
4820 + GG82563_REG(193, 25) /* Rate Adaptation Control */
4822 +/* Page 194 - KMRN Registers */
4823 +#define GG82563_PHY_KMRN_FIFO_CTRL_STAT \
4824 + GG82563_REG(194, 16) /* FIFO's Control/Status */
4825 +#define GG82563_PHY_KMRN_CTRL \
4826 + GG82563_REG(194, 17) /* Control */
4827 +#define GG82563_PHY_INBAND_CTRL \
4828 + GG82563_REG(194, 18) /* Inband Control */
4829 +#define GG82563_PHY_KMRN_DIAGNOSTIC \
4830 + GG82563_REG(194, 19) /* Diagnostic */
4831 +#define GG82563_PHY_ACK_TIMEOUTS \
4832 + GG82563_REG(194, 20) /* Acknowledge Timeouts */
4833 +#define GG82563_PHY_ADV_ABILITY \
4834 + GG82563_REG(194, 21) /* Advertised Ability */
4835 +#define GG82563_PHY_LINK_PARTNER_ADV_ABILITY \
4836 + GG82563_REG(194, 23) /* Link Partner Advertised Ability */
4837 +#define GG82563_PHY_ADV_NEXT_PAGE \
4838 + GG82563_REG(194, 24) /* Advertised Next Page */
4839 +#define GG82563_PHY_LINK_PARTNER_ADV_NEXT_PAGE \
4840 + GG82563_REG(194, 25) /* Link Partner Advertised Next page */
4841 +#define GG82563_PHY_KMRN_MISC \
4842 + GG82563_REG(194, 26) /* Misc. */
4845 +#define E1000_MDIC_DATA_MASK 0x0000FFFF
4846 +#define E1000_MDIC_REG_MASK 0x001F0000
4847 +#define E1000_MDIC_REG_SHIFT 16
4848 +#define E1000_MDIC_PHY_MASK 0x03E00000
4849 +#define E1000_MDIC_PHY_SHIFT 21
4850 +#define E1000_MDIC_OP_WRITE 0x04000000
4851 +#define E1000_MDIC_OP_READ 0x08000000
4852 +#define E1000_MDIC_READY 0x10000000
4853 +#define E1000_MDIC_INT_EN 0x20000000
4854 +#define E1000_MDIC_ERROR 0x40000000
4856 +/* SerDes Control */
4857 +#define E1000_GEN_CTL_READY 0x80000000
4858 +#define E1000_GEN_CTL_ADDRESS_SHIFT 8
4859 +#define E1000_GEN_POLL_TIMEOUT 640
4861 +/* LinkSec register fields */
4862 +#define E1000_LSECTXCAP_SUM_MASK 0x00FF0000
4863 +#define E1000_LSECTXCAP_SUM_SHIFT 16
4864 +#define E1000_LSECRXCAP_SUM_MASK 0x00FF0000
4865 +#define E1000_LSECRXCAP_SUM_SHIFT 16
4867 +#define E1000_LSECTXCTRL_EN_MASK 0x00000003
4868 +#define E1000_LSECTXCTRL_DISABLE 0x0
4869 +#define E1000_LSECTXCTRL_AUTH 0x1
4870 +#define E1000_LSECTXCTRL_AUTH_ENCRYPT 0x2
4871 +#define E1000_LSECTXCTRL_AISCI 0x00000020
4872 +#define E1000_LSECTXCTRL_PNTHRSH_MASK 0xFFFFFF00
4873 +#define E1000_LSECTXCTRL_RSV_MASK 0x000000D8
4875 +#define E1000_LSECRXCTRL_EN_MASK 0x0000000C
4876 +#define E1000_LSECRXCTRL_EN_SHIFT 2
4877 +#define E1000_LSECRXCTRL_DISABLE 0x0
4878 +#define E1000_LSECRXCTRL_CHECK 0x1
4879 +#define E1000_LSECRXCTRL_STRICT 0x2
4880 +#define E1000_LSECRXCTRL_DROP 0x3
4881 +#define E1000_LSECRXCTRL_PLSH 0x00000040
4882 +#define E1000_LSECRXCTRL_RP 0x00000080
4883 +#define E1000_LSECRXCTRL_RSV_MASK 0xFFFFFF33
4887 +#endif /* _E1000_DEFINES_H_ */
4888 Index: linux-2.6.22/drivers/net/igb/e1000_hw.h
4889 ===================================================================
4890 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
4891 +++ linux-2.6.22/drivers/net/igb/e1000_hw.h 2009-12-18 12:39:22.000000000 -0500
4893 +/*******************************************************************************
4895 + Intel(R) Gigabit Ethernet Linux driver
4896 + Copyright(c) 2007-2009 Intel Corporation.
4898 + This program is free software; you can redistribute it and/or modify it
4899 + under the terms and conditions of the GNU General Public License,
4900 + version 2, as published by the Free Software Foundation.
4902 + This program is distributed in the hope it will be useful, but WITHOUT
4903 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
4904 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
4907 + You should have received a copy of the GNU General Public License along with
4908 + this program; if not, write to the Free Software Foundation, Inc.,
4909 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
4911 + The full GNU General Public License is included in this distribution in
4912 + the file called "COPYING".
4914 + Contact Information:
4915 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
4916 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
4918 +*******************************************************************************/
4920 +#ifndef _E1000_HW_H_
4921 +#define _E1000_HW_H_
4923 +#include "e1000_osdep.h"
4924 +#include "e1000_regs.h"
4925 +#include "e1000_defines.h"
4929 +#define E1000_DEV_ID_82576 0x10C9
4930 +#define E1000_DEV_ID_82576_FIBER 0x10E6
4931 +#define E1000_DEV_ID_82576_SERDES 0x10E7
4932 +#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8
4933 +#define E1000_DEV_ID_82576_NS 0x150A
4934 +#define E1000_DEV_ID_82576_NS_SERDES 0x1518
4935 +#define E1000_DEV_ID_82576_SERDES_QUAD 0x150D
4936 +#define E1000_DEV_ID_82575EB_COPPER 0x10A7
4937 +#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9
4938 +#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6
4939 +#define E1000_REVISION_0 0
4940 +#define E1000_REVISION_1 1
4941 +#define E1000_REVISION_2 2
4942 +#define E1000_REVISION_3 3
4943 +#define E1000_REVISION_4 4
4945 +#define E1000_FUNC_0 0
4946 +#define E1000_FUNC_1 1
4948 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0
4949 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3
4951 +enum e1000_mac_type {
4952 + e1000_undefined = 0,
4955 + e1000_num_macs /* List is 1-based, so subtract 1 for true count. */
4958 +enum e1000_media_type {
4959 + e1000_media_type_unknown = 0,
4960 + e1000_media_type_copper = 1,
4961 + e1000_media_type_fiber = 2,
4962 + e1000_media_type_internal_serdes = 3,
4963 + e1000_num_media_types
4966 +enum e1000_nvm_type {
4967 + e1000_nvm_unknown = 0,
4969 + e1000_nvm_eeprom_spi,
4970 + e1000_nvm_flash_hw,
4971 + e1000_nvm_flash_sw
4974 +enum e1000_nvm_override {
4975 + e1000_nvm_override_none = 0,
4976 + e1000_nvm_override_spi_small,
4977 + e1000_nvm_override_spi_large,
4980 +enum e1000_phy_type {
4981 + e1000_phy_unknown = 0,
4986 + e1000_phy_gg82563,
4992 +enum e1000_bus_type {
4993 + e1000_bus_type_unknown = 0,
4994 + e1000_bus_type_pci,
4995 + e1000_bus_type_pcix,
4996 + e1000_bus_type_pci_express,
4997 + e1000_bus_type_reserved
5000 +enum e1000_bus_speed {
5001 + e1000_bus_speed_unknown = 0,
5002 + e1000_bus_speed_33,
5003 + e1000_bus_speed_66,
5004 + e1000_bus_speed_100,
5005 + e1000_bus_speed_120,
5006 + e1000_bus_speed_133,
5007 + e1000_bus_speed_2500,
5008 + e1000_bus_speed_5000,
5009 + e1000_bus_speed_reserved
5012 +enum e1000_bus_width {
5013 + e1000_bus_width_unknown = 0,
5014 + e1000_bus_width_pcie_x1,
5015 + e1000_bus_width_pcie_x2,
5016 + e1000_bus_width_pcie_x4 = 4,
5017 + e1000_bus_width_pcie_x8 = 8,
5018 + e1000_bus_width_32,
5019 + e1000_bus_width_64,
5020 + e1000_bus_width_reserved
5023 +enum e1000_1000t_rx_status {
5024 + e1000_1000t_rx_status_not_ok = 0,
5025 + e1000_1000t_rx_status_ok,
5026 + e1000_1000t_rx_status_undefined = 0xFF
5029 +enum e1000_rev_polarity {
5030 + e1000_rev_polarity_normal = 0,
5031 + e1000_rev_polarity_reversed,
5032 + e1000_rev_polarity_undefined = 0xFF
5035 +enum e1000_fc_mode {
5036 + e1000_fc_none = 0,
5037 + e1000_fc_rx_pause,
5038 + e1000_fc_tx_pause,
5040 + e1000_fc_default = 0xFF
5043 +enum e1000_ms_type {
5044 + e1000_ms_hw_default = 0,
5045 + e1000_ms_force_master,
5046 + e1000_ms_force_slave,
5050 +enum e1000_smart_speed {
5051 + e1000_smart_speed_default = 0,
5052 + e1000_smart_speed_on,
5053 + e1000_smart_speed_off
5056 +enum e1000_serdes_link_state {
5057 + e1000_serdes_link_down = 0,
5058 + e1000_serdes_link_autoneg_progress,
5059 + e1000_serdes_link_autoneg_complete,
5060 + e1000_serdes_link_forced_up
5063 +/* Receive Descriptor */
5064 +struct e1000_rx_desc {
5065 + __le64 buffer_addr; /* Address of the descriptor's data buffer */
5066 + __le16 length; /* Length of data DMAed into data buffer */
5067 + __le16 csum; /* Packet checksum */
5068 + u8 status; /* Descriptor status */
5069 + u8 errors; /* Descriptor Errors */
5073 +/* Receive Descriptor - Extended */
5074 +union e1000_rx_desc_extended {
5076 + __le64 buffer_addr;
5081 + __le32 mrq; /* Multiple Rx Queues */
5083 + __le32 rss; /* RSS Hash */
5085 + __le16 ip_id; /* IP id */
5086 + __le16 csum; /* Packet Checksum */
5091 + __le32 status_error; /* ext status/error */
5093 + __le16 vlan; /* VLAN tag */
5095 + } wb; /* writeback */
5098 +#define MAX_PS_BUFFERS 4
5099 +/* Receive Descriptor - Packet Split */
5100 +union e1000_rx_desc_packet_split {
5102 + /* one buffer for protocol header(s), three data buffers */
5103 + __le64 buffer_addr[MAX_PS_BUFFERS];
5107 + __le32 mrq; /* Multiple Rx Queues */
5109 + __le32 rss; /* RSS Hash */
5111 + __le16 ip_id; /* IP id */
5112 + __le16 csum; /* Packet Checksum */
5117 + __le32 status_error; /* ext status/error */
5118 + __le16 length0; /* length of buffer 0 */
5119 + __le16 vlan; /* VLAN tag */
5122 + __le16 header_status;
5123 + __le16 length[3]; /* length of buffers 1-3 */
5126 + } wb; /* writeback */
5129 +/* Transmit Descriptor */
5130 +struct e1000_tx_desc {
5131 + __le64 buffer_addr; /* Address of the descriptor's data buffer */
5135 + __le16 length; /* Data buffer length */
5136 + u8 cso; /* Checksum offset */
5137 + u8 cmd; /* Descriptor control */
5143 + u8 status; /* Descriptor status */
5144 + u8 css; /* Checksum start */
5150 +/* Offload Context Descriptor */
5151 +struct e1000_context_desc {
5155 + u8 ipcss; /* IP checksum start */
5156 + u8 ipcso; /* IP checksum offset */
5157 + __le16 ipcse; /* IP checksum end */
5161 + __le32 tcp_config;
5163 + u8 tucss; /* TCP checksum start */
5164 + u8 tucso; /* TCP checksum offset */
5165 + __le16 tucse; /* TCP checksum end */
5168 + __le32 cmd_and_length;
5172 + u8 status; /* Descriptor status */
5173 + u8 hdr_len; /* Header length */
5174 + __le16 mss; /* Maximum segment size */
5179 +/* Offload data descriptor */
5180 +struct e1000_data_desc {
5181 + __le64 buffer_addr; /* Address of the descriptor's buffer address */
5185 + __le16 length; /* Data buffer length */
5193 + u8 status; /* Descriptor status */
5194 + u8 popts; /* Packet Options */
5200 +/* Statistics counters collected by the MAC */
5201 +struct e1000_hw_stats {
5281 +struct e1000_phy_stats {
5283 + u32 receive_errors;
5286 +struct e1000_host_mng_dhcp_cookie {
5297 +/* Host Interface "Rev 1" */
5298 +struct e1000_host_command_header {
5300 + u8 command_length;
5301 + u8 command_options;
5305 +#define E1000_HI_MAX_DATA_LENGTH 252
5306 +struct e1000_host_command_info {
5307 + struct e1000_host_command_header command_header;
5308 + u8 command_data[E1000_HI_MAX_DATA_LENGTH];
5311 +/* Host Interface "Rev 2" */
5312 +struct e1000_host_mng_command_header {
5317 + u16 command_length;
5320 +#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8
5321 +struct e1000_host_mng_command_info {
5322 + struct e1000_host_mng_command_header command_header;
5323 + u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH];
5326 +#include "e1000_mac.h"
5327 +#include "e1000_phy.h"
5328 +#include "e1000_nvm.h"
5329 +#include "e1000_manage.h"
5330 +#include "e1000_mbx.h"
5332 +struct e1000_mac_operations {
5333 + /* Function pointers for the MAC. */
5334 + s32 (*init_params)(struct e1000_hw *);
5335 + s32 (*id_led_init)(struct e1000_hw *);
5336 + s32 (*blink_led)(struct e1000_hw *);
5337 + s32 (*check_for_link)(struct e1000_hw *);
5338 + bool (*check_mng_mode)(struct e1000_hw *hw);
5339 + s32 (*cleanup_led)(struct e1000_hw *);
5340 + void (*clear_hw_cntrs)(struct e1000_hw *);
5341 + void (*clear_vfta)(struct e1000_hw *);
5342 + s32 (*get_bus_info)(struct e1000_hw *);
5343 + void (*set_lan_id)(struct e1000_hw *);
5344 + s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *);
5345 + s32 (*led_on)(struct e1000_hw *);
5346 + s32 (*led_off)(struct e1000_hw *);
5347 + void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32);
5348 + s32 (*reset_hw)(struct e1000_hw *);
5349 + s32 (*init_hw)(struct e1000_hw *);
5350 + void (*shutdown_serdes)(struct e1000_hw *);
5351 + s32 (*setup_link)(struct e1000_hw *);
5352 + s32 (*setup_physical_interface)(struct e1000_hw *);
5353 + s32 (*setup_led)(struct e1000_hw *);
5354 + void (*write_vfta)(struct e1000_hw *, u32, u32);
5355 + void (*mta_set)(struct e1000_hw *, u32);
5356 + void (*config_collision_dist)(struct e1000_hw *);
5357 + void (*rar_set)(struct e1000_hw *, u8*, u32);
5358 + s32 (*read_mac_addr)(struct e1000_hw *);
5359 + s32 (*validate_mdi_setting)(struct e1000_hw *);
5360 + s32 (*mng_host_if_write)(struct e1000_hw *, u8*, u16, u16, u8*);
5361 + s32 (*mng_write_cmd_header)(struct e1000_hw *hw,
5362 + struct e1000_host_mng_command_header*);
5363 + s32 (*mng_enable_host_if)(struct e1000_hw *);
5364 + s32 (*wait_autoneg)(struct e1000_hw *);
5367 +struct e1000_phy_operations {
5368 + s32 (*init_params)(struct e1000_hw *);
5369 + s32 (*acquire)(struct e1000_hw *);
5370 + s32 (*check_polarity)(struct e1000_hw *);
5371 + s32 (*check_reset_block)(struct e1000_hw *);
5372 + s32 (*commit)(struct e1000_hw *);
5373 + s32 (*force_speed_duplex)(struct e1000_hw *);
5374 + s32 (*get_cfg_done)(struct e1000_hw *hw);
5375 + s32 (*get_cable_length)(struct e1000_hw *);
5376 + s32 (*get_info)(struct e1000_hw *);
5377 + s32 (*read_reg)(struct e1000_hw *, u32, u16 *);
5378 + s32 (*read_reg_locked)(struct e1000_hw *, u32, u16 *);
5379 + void (*release)(struct e1000_hw *);
5380 + s32 (*reset)(struct e1000_hw *);
5381 + s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
5382 + s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
5383 + s32 (*write_reg)(struct e1000_hw *, u32, u16);
5384 + s32 (*write_reg_locked)(struct e1000_hw *, u32, u16);
5385 + void (*power_up)(struct e1000_hw *);
5386 + void (*power_down)(struct e1000_hw *);
5389 +struct e1000_nvm_operations {
5390 + s32 (*init_params)(struct e1000_hw *);
5391 + s32 (*acquire)(struct e1000_hw *);
5392 + s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
5393 + void (*release)(struct e1000_hw *);
5394 + void (*reload)(struct e1000_hw *);
5395 + s32 (*update)(struct e1000_hw *);
5396 + s32 (*valid_led_default)(struct e1000_hw *, u16 *);
5397 + s32 (*validate)(struct e1000_hw *);
5398 + s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
5401 +struct e1000_mac_info {
5402 + struct e1000_mac_operations ops;
5406 + enum e1000_mac_type type;
5408 + u32 collision_delta;
5409 + u32 ledctl_default;
5412 + u32 mc_filter_type;
5413 + u32 tx_packet_delta;
5416 + u16 current_ifs_val;
5420 + u16 ifs_step_size;
5421 + u16 mta_reg_count;
5422 + u16 uta_reg_count;
5424 + /* Maximum size of the MTA register table in all supported adapters */
5425 + #define MAX_MTA_REG 128
5426 + u32 mta_shadow[MAX_MTA_REG];
5427 + u16 rar_entry_count;
5429 + u8 forced_speed_duplex;
5431 + bool adaptive_ifs;
5432 + bool arc_subsystem_valid;
5433 + bool asf_firmware_present;
5435 + bool autoneg_failed;
5436 + bool get_link_status;
5438 + enum e1000_serdes_link_state serdes_link_state;
5439 + bool serdes_has_link;
5440 + bool tx_pkt_filtering;
5443 +struct e1000_phy_info {
5444 + struct e1000_phy_operations ops;
5445 + enum e1000_phy_type type;
5447 + enum e1000_1000t_rx_status local_rx;
5448 + enum e1000_1000t_rx_status remote_rx;
5449 + enum e1000_ms_type ms_type;
5450 + enum e1000_ms_type original_ms_type;
5451 + enum e1000_rev_polarity cable_polarity;
5452 + enum e1000_smart_speed smart_speed;
5456 + u32 reset_delay_us; /* in usec */
5459 + enum e1000_media_type media_type;
5461 + u16 autoneg_advertised;
5464 + u16 max_cable_length;
5465 + u16 min_cable_length;
5469 + bool disable_polarity_correction;
5471 + bool polarity_correction;
5472 + bool reset_disable;
5473 + bool speed_downgraded;
5474 + bool autoneg_wait_to_complete;
5477 +struct e1000_nvm_info {
5478 + struct e1000_nvm_operations ops;
5479 + enum e1000_nvm_type type;
5480 + enum e1000_nvm_override override;
5482 + u32 flash_bank_size;
5483 + u32 flash_base_addr;
5492 +struct e1000_bus_info {
5493 + enum e1000_bus_type type;
5494 + enum e1000_bus_speed speed;
5495 + enum e1000_bus_width width;
5501 +struct e1000_fc_info {
5502 + u32 high_water; /* Flow control high-water mark */
5503 + u32 low_water; /* Flow control low-water mark */
5504 + u16 pause_time; /* Flow control pause timer */
5505 + bool send_xon; /* Flow control send XON */
5506 + bool strict_ieee; /* Strict IEEE mode */
5507 + enum e1000_fc_mode current_mode; /* FC mode in effect */
5508 + enum e1000_fc_mode requested_mode; /* FC mode requested by caller */
5511 +struct e1000_mbx_operations {
5512 + s32 (*init_params)(struct e1000_hw *hw);
5513 + s32 (*read)(struct e1000_hw *, u32 *, u16, u16);
5514 + s32 (*write)(struct e1000_hw *, u32 *, u16, u16);
5515 + s32 (*read_posted)(struct e1000_hw *, u32 *, u16, u16);
5516 + s32 (*write_posted)(struct e1000_hw *, u32 *, u16, u16);
5517 + s32 (*check_for_msg)(struct e1000_hw *, u16);
5518 + s32 (*check_for_ack)(struct e1000_hw *, u16);
5519 + s32 (*check_for_rst)(struct e1000_hw *, u16);
5522 +struct e1000_mbx_stats {
5531 +struct e1000_mbx_info {
5532 + struct e1000_mbx_operations ops;
5533 + struct e1000_mbx_stats stats;
5539 +struct e1000_dev_spec_82575 {
5540 + bool sgmii_active;
5541 + bool global_device_reset;
5544 +struct e1000_dev_spec_vf {
5553 + u8 __iomem *hw_addr;
5554 + u8 __iomem *flash_address;
5555 + unsigned long io_base;
5557 + struct e1000_mac_info mac;
5558 + struct e1000_fc_info fc;
5559 + struct e1000_phy_info phy;
5560 + struct e1000_nvm_info nvm;
5561 + struct e1000_bus_info bus;
5562 + struct e1000_mbx_info mbx;
5563 + struct e1000_host_mng_dhcp_cookie mng_cookie;
5566 + struct e1000_dev_spec_82575 _82575;
5567 + struct e1000_dev_spec_vf vf;
5571 + u16 subsystem_vendor_id;
5572 + u16 subsystem_device_id;
5578 +#include "e1000_82575.h"
5580 +/* These functions must be implemented by drivers */
5581 +s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
5582 +s32 e1000_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
5585 Index: linux-2.6.22/drivers/net/igb/e1000_mac.c
5586 ===================================================================
5587 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
5588 +++ linux-2.6.22/drivers/net/igb/e1000_mac.c 2009-12-18 12:39:22.000000000 -0500
5590 +/*******************************************************************************
5592 + Intel(R) Gigabit Ethernet Linux driver
5593 + Copyright(c) 2007-2009 Intel Corporation.
5595 + This program is free software; you can redistribute it and/or modify it
5596 + under the terms and conditions of the GNU General Public License,
5597 + version 2, as published by the Free Software Foundation.
5599 + This program is distributed in the hope it will be useful, but WITHOUT
5600 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
5601 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
5604 + You should have received a copy of the GNU General Public License along with
5605 + this program; if not, write to the Free Software Foundation, Inc.,
5606 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
5608 + The full GNU General Public License is included in this distribution in
5609 + the file called "COPYING".
5611 + Contact Information:
5612 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
5613 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
5615 +*******************************************************************************/
5617 +#include "e1000_api.h"
5619 +static s32 e1000_set_default_fc_generic(struct e1000_hw *hw);
5620 +static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw);
5621 +static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw);
5622 +static s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw);
5623 +static void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
5626 + * e1000_init_mac_ops_generic - Initialize MAC function pointers
5627 + * @hw: pointer to the HW structure
5629 + * Setups up the function pointers to no-op functions
5631 +void e1000_init_mac_ops_generic(struct e1000_hw *hw)
5633 + struct e1000_mac_info *mac = &hw->mac;
5634 + DEBUGFUNC("e1000_init_mac_ops_generic");
5636 + /* General Setup */
5637 + mac->ops.set_lan_id = e1000_set_lan_id_multi_port_pcie;
5638 + mac->ops.read_mac_addr = e1000_read_mac_addr_generic;
5639 + mac->ops.config_collision_dist = e1000_config_collision_dist_generic;
5641 + mac->ops.wait_autoneg = e1000_wait_autoneg_generic;
5643 + mac->ops.mng_host_if_write = e1000_mng_host_if_write_generic;
5644 + mac->ops.mng_write_cmd_header = e1000_mng_write_cmd_header_generic;
5645 + mac->ops.mng_enable_host_if = e1000_mng_enable_host_if_generic;
5646 + /* VLAN, MC, etc. */
5647 + mac->ops.rar_set = e1000_rar_set_generic;
5648 + mac->ops.validate_mdi_setting = e1000_validate_mdi_setting_generic;
5652 + * e1000_get_bus_info_pcie_generic - Get PCIe bus information
5653 + * @hw: pointer to the HW structure
5655 + * Determines and stores the system bus information for a particular
5656 + * network interface. The following bus information is determined and stored:
5657 + * bus speed, bus width, type (PCIe), and PCIe function.
5659 +s32 e1000_get_bus_info_pcie_generic(struct e1000_hw *hw)
5661 + struct e1000_mac_info *mac = &hw->mac;
5662 + struct e1000_bus_info *bus = &hw->bus;
5665 + u16 pcie_link_status;
5667 + DEBUGFUNC("e1000_get_bus_info_pcie_generic");
5669 + bus->type = e1000_bus_type_pci_express;
5670 + bus->speed = e1000_bus_speed_2500;
5672 + ret_val = e1000_read_pcie_cap_reg(hw,
5674 + &pcie_link_status);
5676 + bus->width = e1000_bus_width_unknown;
5678 + bus->width = (enum e1000_bus_width)((pcie_link_status &
5679 + PCIE_LINK_WIDTH_MASK) >>
5680 + PCIE_LINK_WIDTH_SHIFT);
5682 + mac->ops.set_lan_id(hw);
5684 + return E1000_SUCCESS;
5688 + * e1000_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
5690 + * @hw: pointer to the HW structure
5692 + * Determines the LAN function id by reading memory-mapped registers
5693 + * and swaps the port value if requested.
5695 +static void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw)
5697 + struct e1000_bus_info *bus = &hw->bus;
5701 + * The status register reports the correct function number
5702 + * for the device regardless of function swap state.
5704 + reg = E1000_READ_REG(hw, E1000_STATUS);
5705 + bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
5709 + * e1000_set_lan_id_single_port - Set LAN id for a single port device
5710 + * @hw: pointer to the HW structure
5712 + * Sets the LAN function id to zero for a single port device.
5714 +void e1000_set_lan_id_single_port(struct e1000_hw *hw)
5716 + struct e1000_bus_info *bus = &hw->bus;
5722 + * e1000_clear_vfta_generic - Clear VLAN filter table
5723 + * @hw: pointer to the HW structure
5725 + * Clears the register array which contains the VLAN filter table by
5726 + * setting all the values to 0.
5728 +void e1000_clear_vfta_generic(struct e1000_hw *hw)
5732 + DEBUGFUNC("e1000_clear_vfta_generic");
5734 + for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
5735 + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0);
5736 + E1000_WRITE_FLUSH(hw);
5741 + * e1000_write_vfta_generic - Write value to VLAN filter table
5742 + * @hw: pointer to the HW structure
5743 + * @offset: register offset in VLAN filter table
5744 + * @value: register value written to VLAN filter table
5746 + * Writes value at the given offset in the register array which stores
5747 + * the VLAN filter table.
5749 +void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value)
5751 + DEBUGFUNC("e1000_write_vfta_generic");
5753 + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
5754 + E1000_WRITE_FLUSH(hw);
5758 + * e1000_init_rx_addrs_generic - Initialize receive addresses
5759 + * @hw: pointer to the HW structure
5760 + * @rar_count: receive address registers
5762 + * Sets up the receive address registers by setting the base receive address
5763 + * register to the device's MAC address and clearing all the other receive
5764 + * address registers to 0.
5766 +void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count)
5769 + u8 mac_addr[ETH_ADDR_LEN] = {0};
5771 + DEBUGFUNC("e1000_init_rx_addrs_generic");
5773 + /* Setup the receive address */
5774 + DEBUGOUT("Programming MAC Address into RAR[0]\n");
5776 + hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
5778 + /* Zero out the other (rar_entry_count - 1) receive addresses */
5779 + DEBUGOUT1("Clearing RAR[1-%u]\n", rar_count-1);
5780 + for (i = 1; i < rar_count; i++)
5781 + hw->mac.ops.rar_set(hw, mac_addr, i);
5785 + * e1000_check_alt_mac_addr_generic - Check for alternate MAC addr
5786 + * @hw: pointer to the HW structure
5788 + * Checks the nvm for an alternate MAC address. An alternate MAC address
5789 + * can be setup by pre-boot software and must be treated like a permanent
5790 + * address and must override the actual permanent MAC address. If an
5791 + * alternate MAC address is found it is programmed into RAR0, replacing
5792 + * the permanent address that was installed into RAR0 by the Si on reset.
5793 + * This function will return SUCCESS unless it encounters an error while
5794 + * reading the EEPROM.
5796 +s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
5799 + s32 ret_val = E1000_SUCCESS;
5800 + u16 offset, nvm_alt_mac_addr_offset, nvm_data;
5801 + u8 alt_mac_addr[ETH_ADDR_LEN];
5803 + DEBUGFUNC("e1000_check_alt_mac_addr_generic");
5805 + ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1,
5806 + &nvm_alt_mac_addr_offset);
5808 + DEBUGOUT("NVM Read Error\n");
5812 + if (nvm_alt_mac_addr_offset == 0xFFFF) {
5813 + /* There is no Alternate MAC Address */
5817 + if (hw->bus.func == E1000_FUNC_1)
5818 + nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
5819 + for (i = 0; i < ETH_ADDR_LEN; i += 2) {
5820 + offset = nvm_alt_mac_addr_offset + (i >> 1);
5821 + ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
5823 + DEBUGOUT("NVM Read Error\n");
5827 + alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
5828 + alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
5831 + /* if multicast bit is set, the alternate address will not be used */
5832 + if (alt_mac_addr[0] & 0x01) {
5833 + DEBUGOUT("Ignoring Alternate Mac Address with MC bit set\n");
5838 + * We have a valid alternate MAC address, and we want to treat it the
5839 + * same as the normal permanent MAC address stored by the HW into the
5840 + * RAR. Do this by mapping this address into RAR0.
5842 + hw->mac.ops.rar_set(hw, alt_mac_addr, 0);
5849 + * e1000_rar_set_generic - Set receive address register
5850 + * @hw: pointer to the HW structure
5851 + * @addr: pointer to the receive address
5852 + * @index: receive address array register
5854 + * Sets the receive address array register at index to the address passed
5857 +void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
5859 + u32 rar_low, rar_high;
5861 + DEBUGFUNC("e1000_rar_set_generic");
5864 + * HW expects these in little endian so we reverse the byte order
5865 + * from network order (big endian) to little endian
5867 + rar_low = ((u32) addr[0] |
5868 + ((u32) addr[1] << 8) |
5869 + ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
5871 + rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
5873 + /* If MAC address zero, no need to set the AV bit */
5874 + if (rar_low || rar_high)
5875 + rar_high |= E1000_RAH_AV;
5878 + * Some bridges will combine consecutive 32-bit writes into
5879 + * a single burst write, which will malfunction on some parts.
5880 + * The flushes avoid this.
5882 + E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
5883 + E1000_WRITE_FLUSH(hw);
5884 + E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
5885 + E1000_WRITE_FLUSH(hw);
5889 + * e1000_mta_set_generic - Set multicast filter table address
5890 + * @hw: pointer to the HW structure
5891 + * @hash_value: determines the MTA register and bit to set
5893 + * The multicast table address is a register array of 32-bit registers.
5894 + * The hash_value is used to determine what register the bit is in, the
5895 + * current value is read, the new bit is OR'd in and the new value is
5896 + * written back into the register.
5898 +void e1000_mta_set_generic(struct e1000_hw *hw, u32 hash_value)
5900 + u32 hash_bit, hash_reg, mta;
5902 + DEBUGFUNC("e1000_mta_set_generic");
5904 + * The MTA is a register array of 32-bit registers. It is
5905 + * treated like an array of (32*mta_reg_count) bits. We want to
5906 + * set bit BitArray[hash_value]. So we figure out what register
5907 + * the bit is in, read it, OR in the new bit, then write
5908 + * back the new value. The (hw->mac.mta_reg_count - 1) serves as a
5909 + * mask to bits 31:5 of the hash value which gives us the
5910 + * register we're modifying. The hash bit within that register
5911 + * is determined by the lower 5 bits of the hash value.
5913 + hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
5914 + hash_bit = hash_value & 0x1F;
5916 + mta = E1000_READ_REG_ARRAY(hw, E1000_MTA, hash_reg);
5918 + mta |= (1 << hash_bit);
5920 + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, hash_reg, mta);
5921 + E1000_WRITE_FLUSH(hw);
5925 + * e1000_update_mc_addr_list_generic - Update Multicast addresses
5926 + * @hw: pointer to the HW structure
5927 + * @mc_addr_list: array of multicast addresses to program
5928 + * @mc_addr_count: number of multicast addresses to program
5930 + * Updates entire Multicast Table Array.
5931 + * The caller must have a packed mc_addr_list of multicast addresses.
5933 +void e1000_update_mc_addr_list_generic(struct e1000_hw *hw,
5934 + u8 *mc_addr_list, u32 mc_addr_count)
5936 + u32 hash_value, hash_bit, hash_reg;
5939 + DEBUGFUNC("e1000_update_mc_addr_list_generic");
5941 + /* clear mta_shadow */
5942 + memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
5944 + /* update mta_shadow from mc_addr_list */
5945 + for (i = 0; (u32) i < mc_addr_count; i++) {
5946 + hash_value = e1000_hash_mc_addr_generic(hw, mc_addr_list);
5948 + hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
5949 + hash_bit = hash_value & 0x1F;
5951 + hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
5952 + mc_addr_list += (ETH_ADDR_LEN);
5955 + /* replace the entire MTA table */
5956 + for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
5957 + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]);
5958 + E1000_WRITE_FLUSH(hw);
5962 + * e1000_hash_mc_addr_generic - Generate a multicast hash value
5963 + * @hw: pointer to the HW structure
5964 + * @mc_addr: pointer to a multicast address
5966 + * Generates a multicast address hash value which is used to determine
5967 + * the multicast filter table array address and new table value. See
5968 + * e1000_mta_set_generic()
5970 +u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr)
5972 + u32 hash_value, hash_mask;
5975 + DEBUGFUNC("e1000_hash_mc_addr_generic");
5977 + /* Register count multiplied by bits per register */
5978 + hash_mask = (hw->mac.mta_reg_count * 32) - 1;
5981 + * For a mc_filter_type of 0, bit_shift is the number of left-shifts
5982 + * where 0xFF would still fall within the hash mask.
5984 + while (hash_mask >> bit_shift != 0xFF)
5988 + * The portion of the address that is used for the hash table
5989 + * is determined by the mc_filter_type setting.
5990 + * The algorithm is such that there is a total of 8 bits of shifting.
5991 + * The bit_shift for a mc_filter_type of 0 represents the number of
5992 + * left-shifts where the MSB of mc_addr[5] would still fall within
5993 + * the hash_mask. Case 0 does this exactly. Since there are a total
5994 + * of 8 bits of shifting, then mc_addr[4] will shift right the
5995 + * remaining number of bits. Thus 8 - bit_shift. The rest of the
5996 + * cases are a variation of this algorithm...essentially raising the
5997 + * number of bits to shift mc_addr[5] left, while still keeping the
5998 + * 8-bit shifting total.
6000 + * For example, given the following Destination MAC Address and an
6001 + * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
6002 + * we can see that the bit_shift for case 0 is 4. These are the hash
6003 + * values resulting from each mc_filter_type...
6004 + * [0] [1] [2] [3] [4] [5]
6005 + * 01 AA 00 12 34 56
6008 + * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
6009 + * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
6010 + * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163
6011 + * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
6013 + switch (hw->mac.mc_filter_type) {
6028 + hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
6029 + (((u16) mc_addr[5]) << bit_shift)));
6031 + return hash_value;
6035 + * e1000_clear_hw_cntrs_base_generic - Clear base hardware counters
6036 + * @hw: pointer to the HW structure
6038 + * Clears the base hardware counters by reading the counter registers.
6040 +void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw)
6042 + DEBUGFUNC("e1000_clear_hw_cntrs_base_generic");
6044 + E1000_READ_REG(hw, E1000_CRCERRS);
6045 + E1000_READ_REG(hw, E1000_SYMERRS);
6046 + E1000_READ_REG(hw, E1000_MPC);
6047 + E1000_READ_REG(hw, E1000_SCC);
6048 + E1000_READ_REG(hw, E1000_ECOL);
6049 + E1000_READ_REG(hw, E1000_MCC);
6050 + E1000_READ_REG(hw, E1000_LATECOL);
6051 + E1000_READ_REG(hw, E1000_COLC);
6052 + E1000_READ_REG(hw, E1000_DC);
6053 + E1000_READ_REG(hw, E1000_SEC);
6054 + E1000_READ_REG(hw, E1000_RLEC);
6055 + E1000_READ_REG(hw, E1000_XONRXC);
6056 + E1000_READ_REG(hw, E1000_XONTXC);
6057 + E1000_READ_REG(hw, E1000_XOFFRXC);
6058 + E1000_READ_REG(hw, E1000_XOFFTXC);
6059 + E1000_READ_REG(hw, E1000_FCRUC);
6060 + E1000_READ_REG(hw, E1000_GPRC);
6061 + E1000_READ_REG(hw, E1000_BPRC);
6062 + E1000_READ_REG(hw, E1000_MPRC);
6063 + E1000_READ_REG(hw, E1000_GPTC);
6064 + E1000_READ_REG(hw, E1000_GORCL);
6065 + E1000_READ_REG(hw, E1000_GORCH);
6066 + E1000_READ_REG(hw, E1000_GOTCL);
6067 + E1000_READ_REG(hw, E1000_GOTCH);
6068 + E1000_READ_REG(hw, E1000_RNBC);
6069 + E1000_READ_REG(hw, E1000_RUC);
6070 + E1000_READ_REG(hw, E1000_RFC);
6071 + E1000_READ_REG(hw, E1000_ROC);
6072 + E1000_READ_REG(hw, E1000_RJC);
6073 + E1000_READ_REG(hw, E1000_TORL);
6074 + E1000_READ_REG(hw, E1000_TORH);
6075 + E1000_READ_REG(hw, E1000_TOTL);
6076 + E1000_READ_REG(hw, E1000_TOTH);
6077 + E1000_READ_REG(hw, E1000_TPR);
6078 + E1000_READ_REG(hw, E1000_TPT);
6079 + E1000_READ_REG(hw, E1000_MPTC);
6080 + E1000_READ_REG(hw, E1000_BPTC);
6084 + * e1000_check_for_copper_link_generic - Check for link (Copper)
6085 + * @hw: pointer to the HW structure
6087 + * Checks to see of the link status of the hardware has changed. If a
6088 + * change in link status has been detected, then we read the PHY registers
6089 + * to get the current speed/duplex if link exists.
6091 +s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw)
6093 + struct e1000_mac_info *mac = &hw->mac;
6097 + DEBUGFUNC("e1000_check_for_copper_link");
6100 + * We only want to go out to the PHY registers to see if Auto-Neg
6101 + * has completed and/or if our link status has changed. The
6102 + * get_link_status flag is set upon receiving a Link Status
6103 + * Change or Rx Sequence Error interrupt.
6105 + if (!mac->get_link_status) {
6106 + ret_val = E1000_SUCCESS;
6111 + * First we want to see if the MII Status Register reports
6112 + * link. If so, then we want to get the current speed/duplex
6115 + ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
6120 + goto out; /* No link detected */
6122 + mac->get_link_status = false;
6125 + * Check if there was DownShift, must be checked
6126 + * immediately after link-up
6128 + e1000_check_downshift_generic(hw);
6131 + * If we are forcing speed/duplex, then we simply return since
6132 + * we have already determined whether we have link or not.
6134 + if (!mac->autoneg) {
6135 + ret_val = -E1000_ERR_CONFIG;
6140 + * Auto-Neg is enabled. Auto Speed Detection takes care
6141 + * of MAC speed/duplex configuration. So we only need to
6142 + * configure Collision Distance in the MAC.
6144 + e1000_config_collision_dist_generic(hw);
6147 + * Configure Flow Control now that Auto-Neg has completed.
6148 + * First, we need to restore the desired flow control
6149 + * settings because we may have had to re-autoneg with a
6150 + * different link partner.
6152 + ret_val = e1000_config_fc_after_link_up_generic(hw);
6154 + DEBUGOUT("Error configuring flow control\n");
6161 + * e1000_check_for_fiber_link_generic - Check for link (Fiber)
6162 + * @hw: pointer to the HW structure
6164 + * Checks for link up on the hardware. If link is not up and we have
6165 + * a signal, then we need to force link up.
6167 +s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw)
6169 + struct e1000_mac_info *mac = &hw->mac;
6173 + s32 ret_val = E1000_SUCCESS;
6175 + DEBUGFUNC("e1000_check_for_fiber_link_generic");
6177 + ctrl = E1000_READ_REG(hw, E1000_CTRL);
6178 + status = E1000_READ_REG(hw, E1000_STATUS);
6179 + rxcw = E1000_READ_REG(hw, E1000_RXCW);
6182 + * If we don't have link (auto-negotiation failed or link partner
6183 + * cannot auto-negotiate), the cable is plugged in (we have signal),
6184 + * and our link partner is not trying to auto-negotiate with us (we
6185 + * are receiving idles or data), we need to force link up. We also
6186 + * need to give auto-negotiation time to complete, in case the cable
6187 + * was just plugged in. The autoneg_failed flag does this.
6189 + /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
6190 + if ((ctrl & E1000_CTRL_SWDPIN1) && (!(status & E1000_STATUS_LU)) &&
6191 + (!(rxcw & E1000_RXCW_C))) {
6192 + if (mac->autoneg_failed == 0) {
6193 + mac->autoneg_failed = 1;
6196 + DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n");
6198 + /* Disable auto-negotiation in the TXCW register */
6199 + E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE));
6201 + /* Force link-up and also force full-duplex. */
6202 + ctrl = E1000_READ_REG(hw, E1000_CTRL);
6203 + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
6204 + E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
6206 + /* Configure Flow Control after forcing link up. */
6207 + ret_val = e1000_config_fc_after_link_up_generic(hw);
6209 + DEBUGOUT("Error configuring flow control\n");
6212 + } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
6214 + * If we are forcing link and we are receiving /C/ ordered
6215 + * sets, re-enable auto-negotiation in the TXCW register
6216 + * and disable forced link in the Device Control register
6217 + * in an attempt to auto-negotiate with our link partner.
6219 + DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n");
6220 + E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw);
6221 + E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU));
6223 + mac->serdes_has_link = true;
6231 + * e1000_check_for_serdes_link_generic - Check for link (Serdes)
6232 + * @hw: pointer to the HW structure
6234 + * Checks for link up on the hardware. If link is not up and we have
6235 + * a signal, then we need to force link up.
6237 +s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
6239 + struct e1000_mac_info *mac = &hw->mac;
6243 + s32 ret_val = E1000_SUCCESS;
6245 + DEBUGFUNC("e1000_check_for_serdes_link_generic");
6247 + ctrl = E1000_READ_REG(hw, E1000_CTRL);
6248 + status = E1000_READ_REG(hw, E1000_STATUS);
6249 + rxcw = E1000_READ_REG(hw, E1000_RXCW);
6252 + * If we don't have link (auto-negotiation failed or link partner
6253 + * cannot auto-negotiate), and our link partner is not trying to
6254 + * auto-negotiate with us (we are receiving idles or data),
6255 + * we need to force link up. We also need to give auto-negotiation
6256 + * time to complete.
6258 + /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
6259 + if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) {
6260 + if (mac->autoneg_failed == 0) {
6261 + mac->autoneg_failed = 1;
6264 + DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n");
6266 + /* Disable auto-negotiation in the TXCW register */
6267 + E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE));
6269 + /* Force link-up and also force full-duplex. */
6270 + ctrl = E1000_READ_REG(hw, E1000_CTRL);
6271 + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
6272 + E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
6274 + /* Configure Flow Control after forcing link up. */
6275 + ret_val = e1000_config_fc_after_link_up_generic(hw);
6277 + DEBUGOUT("Error configuring flow control\n");
6280 + } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
6282 + * If we are forcing link and we are receiving /C/ ordered
6283 + * sets, re-enable auto-negotiation in the TXCW register
6284 + * and disable forced link in the Device Control register
6285 + * in an attempt to auto-negotiate with our link partner.
6287 + DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n");
6288 + E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw);
6289 + E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU));
6291 + mac->serdes_has_link = true;
6292 + } else if (!(E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW))) {
6294 + * If we force link for non-auto-negotiation switch, check
6295 + * link status based on MAC synchronization for internal
6296 + * serdes media type.
6298 + /* SYNCH bit and IV bit are sticky. */
6300 + rxcw = E1000_READ_REG(hw, E1000_RXCW);
6301 + if (rxcw & E1000_RXCW_SYNCH) {
6302 + if (!(rxcw & E1000_RXCW_IV)) {
6303 + mac->serdes_has_link = true;
6304 + DEBUGOUT("SERDES: Link up - forced.\n");
6307 + mac->serdes_has_link = false;
6308 + DEBUGOUT("SERDES: Link down - force failed.\n");
6312 + if (E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW)) {
6313 + status = E1000_READ_REG(hw, E1000_STATUS);
6314 + if (status & E1000_STATUS_LU) {
6315 + /* SYNCH bit and IV bit are sticky, so reread rxcw. */
6317 + rxcw = E1000_READ_REG(hw, E1000_RXCW);
6318 + if (rxcw & E1000_RXCW_SYNCH) {
6319 + if (!(rxcw & E1000_RXCW_IV)) {
6320 + mac->serdes_has_link = true;
6321 + DEBUGOUT("SERDES: Link up - autoneg "
6322 + "completed successfully.\n");
6324 + mac->serdes_has_link = false;
6325 + DEBUGOUT("SERDES: Link down - invalid "
6326 + "codewords detected in autoneg.\n");
6329 + mac->serdes_has_link = false;
6330 + DEBUGOUT("SERDES: Link down - no sync.\n");
6333 + mac->serdes_has_link = false;
6334 + DEBUGOUT("SERDES: Link down - autoneg failed\n");
6343 + * e1000_setup_link_generic - Setup flow control and link settings
6344 + * @hw: pointer to the HW structure
6346 + * Determines which flow control settings to use, then configures flow
6347 + * control. Calls the appropriate media-specific link configuration
6348 + * function. Assuming the adapter has a valid link partner, a valid link
6349 + * should be established. Assumes the hardware has previously been reset
6350 + * and the transmitter and receiver are not enabled.
6352 +s32 e1000_setup_link_generic(struct e1000_hw *hw)
6354 + s32 ret_val = E1000_SUCCESS;
6356 + DEBUGFUNC("e1000_setup_link_generic");
6359 + * In the case of the phy reset being blocked, we already have a link.
6360 + * We do not need to set it up again.
6362 + if (hw->phy.ops.check_reset_block)
6363 + if (hw->phy.ops.check_reset_block(hw))
6367 + * If requested flow control is set to default, set flow control
6368 + * based on the EEPROM flow control settings.
6370 + if (hw->fc.requested_mode == e1000_fc_default) {
6371 + ret_val = e1000_set_default_fc_generic(hw);
6377 + * Save off the requested flow control mode for use later. Depending
6378 + * on the link partner's capabilities, we may or may not use this mode.
6380 + hw->fc.current_mode = hw->fc.requested_mode;
6382 + DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
6383 + hw->fc.current_mode);
6385 + /* Call the necessary media_type subroutine to configure the link. */
6386 + ret_val = hw->mac.ops.setup_physical_interface(hw);
6391 + * Initialize the flow control address, type, and PAUSE timer
6392 + * registers to their default values. This is done even if flow
6393 + * control is disabled, because it does not hurt anything to
6394 + * initialize these registers.
6396 + DEBUGOUT("Initializing the Flow Control address, type and timer regs\n");
6397 + E1000_WRITE_REG(hw, E1000_FCT, FLOW_CONTROL_TYPE);
6398 + E1000_WRITE_REG(hw, E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
6399 + E1000_WRITE_REG(hw, E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);
6401 + E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
6403 + ret_val = e1000_set_fc_watermarks_generic(hw);
6410 + * e1000_setup_fiber_serdes_link_generic - Setup link for fiber/serdes
6411 + * @hw: pointer to the HW structure
6413 + * Configures collision distance and flow control for fiber and serdes
6414 + * links. Upon successful setup, poll for link.
6416 +s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw)
6419 + s32 ret_val = E1000_SUCCESS;
6421 + DEBUGFUNC("e1000_setup_fiber_serdes_link_generic");
6423 + ctrl = E1000_READ_REG(hw, E1000_CTRL);
6425 + /* Take the link out of reset */
6426 + ctrl &= ~E1000_CTRL_LRST;
6428 + e1000_config_collision_dist_generic(hw);
6430 + ret_val = e1000_commit_fc_settings_generic(hw);
6435 + * Since auto-negotiation is enabled, take the link out of reset (the
6436 + * link will be in reset, because we previously reset the chip). This
6437 + * will restart auto-negotiation. If auto-negotiation is successful
6438 + * then the link-up status bit will be set and the flow control enable
6439 + * bits (RFCE and TFCE) will be set according to their negotiated value.
6441 + DEBUGOUT("Auto-negotiation enabled\n");
6443 + E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
6444 + E1000_WRITE_FLUSH(hw);
6448 + * For these adapters, the SW definable pin 1 is set when the optics
6449 + * detect a signal. If we have a signal, then poll for a "Link-Up"
6452 + if (hw->phy.media_type == e1000_media_type_internal_serdes ||
6453 + (E1000_READ_REG(hw, E1000_CTRL) & E1000_CTRL_SWDPIN1)) {
6454 + ret_val = e1000_poll_fiber_serdes_link_generic(hw);
6456 + DEBUGOUT("No signal detected\n");
6464 + * e1000_config_collision_dist_generic - Configure collision distance
6465 + * @hw: pointer to the HW structure
6467 + * Configures the collision distance to the default value and is used
6468 + * during link setup. Currently no func pointer exists and all
6469 + * implementations are handled in the generic version of this function.
6471 +void e1000_config_collision_dist_generic(struct e1000_hw *hw)
6475 + DEBUGFUNC("e1000_config_collision_dist_generic");
6477 + tctl = E1000_READ_REG(hw, E1000_TCTL);
6479 + tctl &= ~E1000_TCTL_COLD;
6480 + tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
6482 + E1000_WRITE_REG(hw, E1000_TCTL, tctl);
6483 + E1000_WRITE_FLUSH(hw);
6487 + * e1000_poll_fiber_serdes_link_generic - Poll for link up
6488 + * @hw: pointer to the HW structure
6490 + * Polls for link up by reading the status register, if link fails to come
6491 + * up with auto-negotiation, then the link is forced if a signal is detected.
6493 +static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
6495 + struct e1000_mac_info *mac = &hw->mac;
6497 + s32 ret_val = E1000_SUCCESS;
6499 + DEBUGFUNC("e1000_poll_fiber_serdes_link_generic");
6502 + * If we have a signal (the cable is plugged in, or assumed true for
6503 + * serdes media) then poll for a "Link-Up" indication in the Device
6504 + * Status Register. Time-out if a link isn't seen in 500 milliseconds
6505 + * (Auto-negotiation should complete in less than 500
6506 + * milliseconds even if the other end is doing it in SW).
6508 + for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) {
6510 + status = E1000_READ_REG(hw, E1000_STATUS);
6511 + if (status & E1000_STATUS_LU)
6514 + if (i == FIBER_LINK_UP_LIMIT) {
6515 + DEBUGOUT("Never got a valid link from auto-neg!!!\n");
6516 + mac->autoneg_failed = 1;
6518 + * AutoNeg failed to achieve a link, so we'll call
6519 + * mac->check_for_link. This routine will force the
6520 + * link up if we detect a signal. This will allow us to
6521 + * communicate with non-autonegotiating link partners.
6523 + ret_val = hw->mac.ops.check_for_link(hw);
6525 + DEBUGOUT("Error while checking for link\n");
6528 + mac->autoneg_failed = 0;
6530 + mac->autoneg_failed = 0;
6531 + DEBUGOUT("Valid Link Found\n");
6539 + * e1000_commit_fc_settings_generic - Configure flow control
6540 + * @hw: pointer to the HW structure
6542 + * Write the flow control settings to the Transmit Config Word Register (TXCW)
6543 + * based on the flow control settings in e1000_mac_info.
6545 +static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
6547 + struct e1000_mac_info *mac = &hw->mac;
6549 + s32 ret_val = E1000_SUCCESS;
6551 + DEBUGFUNC("e1000_commit_fc_settings_generic");
6554 + * Check for a software override of the flow control settings, and
6555 + * setup the device accordingly. If auto-negotiation is enabled, then
6556 + * software will have to set the "PAUSE" bits to the correct value in
6557 + * the Transmit Config Word Register (TXCW) and re-start auto-
6558 + * negotiation. However, if auto-negotiation is disabled, then
6559 + * software will have to manually configure the two flow control enable
6560 + * bits in the CTRL register.
6562 + * The possible values of the "fc" parameter are:
6563 + * 0: Flow control is completely disabled
6564 + * 1: Rx flow control is enabled (we can receive pause frames,
6565 + * but not send pause frames).
6566 + * 2: Tx flow control is enabled (we can send pause frames but we
6567 + * do not support receiving pause frames).
6568 + * 3: Both Rx and Tx flow control (symmetric) are enabled.
6570 + switch (hw->fc.current_mode) {
6571 + case e1000_fc_none:
6572 + /* Flow control completely disabled by a software over-ride. */
6573 + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
6575 + case e1000_fc_rx_pause:
6577 + * Rx Flow control is enabled and Tx Flow control is disabled
6578 + * by a software over-ride. Since there really isn't a way to
6579 + * advertise that we are capable of Rx Pause ONLY, we will
6580 + * advertise that we support both symmetric and asymmetric RX
6581 + * PAUSE. Later, we will disable the adapter's ability to send
6584 + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
6586 + case e1000_fc_tx_pause:
6588 + * Tx Flow control is enabled, and Rx Flow control is disabled,
6589 + * by a software over-ride.
6591 + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
6593 + case e1000_fc_full:
6595 + * Flow control (both Rx and Tx) is enabled by a software
6598 + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
6601 + DEBUGOUT("Flow control param set incorrectly\n");
6602 + ret_val = -E1000_ERR_CONFIG;
6607 + E1000_WRITE_REG(hw, E1000_TXCW, txcw);
6615 + * e1000_set_fc_watermarks_generic - Set flow control high/low watermarks
6616 + * @hw: pointer to the HW structure
6618 + * Sets the flow control high/low threshold (watermark) registers. If
6619 + * flow control XON frame transmission is enabled, then set XON frame
6620 + * transmission as well.
6622 +s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw)
6624 + s32 ret_val = E1000_SUCCESS;
6625 + u32 fcrtl = 0, fcrth = 0;
6627 + DEBUGFUNC("e1000_set_fc_watermarks_generic");
6630 + * Set the flow control receive threshold registers. Normally,
6631 + * these registers will be set to a default threshold that may be
6632 + * adjusted later by the driver's runtime code. However, if the
6633 + * ability to transmit pause frames is not enabled, then these
6634 + * registers will be set to 0.
6636 + if (hw->fc.current_mode & e1000_fc_tx_pause) {
6638 + * We need to set up the Receive Threshold high and low water
6639 + * marks as well as (optionally) enabling the transmission of
6642 + fcrtl = hw->fc.low_water;
6643 + if (hw->fc.send_xon)
6644 + fcrtl |= E1000_FCRTL_XONE;
6646 + fcrth = hw->fc.high_water;
6648 + E1000_WRITE_REG(hw, E1000_FCRTL, fcrtl);
6649 + E1000_WRITE_REG(hw, E1000_FCRTH, fcrth);
6655 + * e1000_set_default_fc_generic - Set flow control default values
6656 + * @hw: pointer to the HW structure
6658 + * Read the EEPROM for the default values for flow control and store the
6661 +static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
6663 + s32 ret_val = E1000_SUCCESS;
6666 + DEBUGFUNC("e1000_set_default_fc_generic");
6669 + * Read and store word 0x0F of the EEPROM. This word contains bits
6670 + * that determine the hardware's default PAUSE (flow control) mode,
6671 + * a bit that determines whether the HW defaults to enabling or
6672 + * disabling auto-negotiation, and the direction of the
6673 + * SW defined pins. If there is no SW over-ride of the flow
6674 + * control setting, then the variable hw->fc will
6675 + * be initialized based on a value in the EEPROM.
6677 + ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data);
6680 + DEBUGOUT("NVM Read Error\n");
6684 + if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
6685 + hw->fc.requested_mode = e1000_fc_none;
6686 + else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
6687 + NVM_WORD0F_ASM_DIR)
6688 + hw->fc.requested_mode = e1000_fc_tx_pause;
6690 + hw->fc.requested_mode = e1000_fc_full;
6697 + * e1000_force_mac_fc_generic - Force the MAC's flow control settings
6698 + * @hw: pointer to the HW structure
6700 + * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the
6701 + * device control register to reflect the adapter settings. TFCE and RFCE
6702 + * need to be explicitly set by software when a copper PHY is used because
6703 + * autonegotiation is managed by the PHY rather than the MAC. Software must
6704 + * also configure these bits when link is forced on a fiber connection.
6706 +s32 e1000_force_mac_fc_generic(struct e1000_hw *hw)
6709 + s32 ret_val = E1000_SUCCESS;
6711 + DEBUGFUNC("e1000_force_mac_fc_generic");
6713 + ctrl = E1000_READ_REG(hw, E1000_CTRL);
6716 + * Because we didn't get link via the internal auto-negotiation
6717 + * mechanism (we either forced link or we got link via PHY
6718 + * auto-neg), we have to manually enable/disable transmit and
6719 + * receive flow control.
6721 + * The "Case" statement below enables/disable flow control
6722 + * according to the "hw->fc.current_mode" parameter.
6724 + * The possible values of the "fc" parameter are:
6725 + * 0: Flow control is completely disabled
6726 + * 1: Rx flow control is enabled (we can receive pause
6727 + * frames but not send pause frames).
6728 + * 2: Tx flow control is enabled (we can send pause frames
6729 + * but we do not receive pause frames).
6730 + * 3: Both Rx and Tx flow control (symmetric) is enabled.
6731 + * other: No other values should be possible at this point.
6733 + DEBUGOUT1("hw->fc.current_mode = %u\n", hw->fc.current_mode);
6735 + switch (hw->fc.current_mode) {
6736 + case e1000_fc_none:
6737 + ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
6739 + case e1000_fc_rx_pause:
6740 + ctrl &= (~E1000_CTRL_TFCE);
6741 + ctrl |= E1000_CTRL_RFCE;
6743 + case e1000_fc_tx_pause:
6744 + ctrl &= (~E1000_CTRL_RFCE);
6745 + ctrl |= E1000_CTRL_TFCE;
6747 + case e1000_fc_full:
6748 + ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
6751 + DEBUGOUT("Flow control param set incorrectly\n");
6752 + ret_val = -E1000_ERR_CONFIG;
6756 + E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
6763 + * e1000_config_fc_after_link_up_generic - Configures flow control after link
6764 + * @hw: pointer to the HW structure
6766 + * Checks the status of auto-negotiation after link up to ensure that the
6767 + * speed and duplex were not forced. If the link needed to be forced, then
6768 + * flow control needs to be forced also. If auto-negotiation is enabled
6769 + * and did not fail, then we configure flow control based on our link
6772 +s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw)
6774 + struct e1000_mac_info *mac = &hw->mac;
6775 + s32 ret_val = E1000_SUCCESS;
6776 + u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
6777 + u16 speed, duplex;
6779 + DEBUGFUNC("e1000_config_fc_after_link_up_generic");
6782 + * Check for the case where we have fiber media and auto-neg failed
6783 + * so we had to force link. In this case, we need to force the
6784 + * configuration of the MAC to match the "fc" parameter.
6786 + if (mac->autoneg_failed) {
6787 + if (hw->phy.media_type == e1000_media_type_fiber ||
6788 + hw->phy.media_type == e1000_media_type_internal_serdes)
6789 + ret_val = e1000_force_mac_fc_generic(hw);
6791 + if (hw->phy.media_type == e1000_media_type_copper)
6792 + ret_val = e1000_force_mac_fc_generic(hw);
6796 + DEBUGOUT("Error forcing flow control settings\n");
6801 + * Check for the case where we have copper media and auto-neg is
6802 + * enabled. In this case, we need to check and see if Auto-Neg
6803 + * has completed, and if so, how the PHY and link partner has
6804 + * flow control configured.
6806 + if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
6808 + * Read the MII Status Register and check to see if AutoNeg
6809 + * has completed. We read this twice because this reg has
6810 + * some "sticky" (latched) bits.
6812 + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg);
6815 + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg);
6819 + if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
6820 + DEBUGOUT("Copper PHY and Auto Neg "
6821 + "has not completed.\n");
6826 + * The AutoNeg process has completed, so we now need to
6827 + * read both the Auto Negotiation Advertisement
6828 + * Register (Address 4) and the Auto_Negotiation Base
6829 + * Page Ability Register (Address 5) to determine how
6830 + * flow control was negotiated.
6832 + ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
6833 + &mii_nway_adv_reg);
6836 + ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
6837 + &mii_nway_lp_ability_reg);
6842 + * Two bits in the Auto Negotiation Advertisement Register
6843 + * (Address 4) and two bits in the Auto Negotiation Base
6844 + * Page Ability Register (Address 5) determine flow control
6845 + * for both the PHY and the link partner. The following
6846 + * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
6847 + * 1999, describes these PAUSE resolution bits and how flow
6848 + * control is determined based upon these settings.
6849 + * NOTE: DC = Don't Care
6851 + * LOCAL DEVICE | LINK PARTNER
6852 + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
6853 + *-------|---------|-------|---------|--------------------
6854 + * 0 | 0 | DC | DC | e1000_fc_none
6855 + * 0 | 1 | 0 | DC | e1000_fc_none
6856 + * 0 | 1 | 1 | 0 | e1000_fc_none
6857 + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
6858 + * 1 | 0 | 0 | DC | e1000_fc_none
6859 + * 1 | DC | 1 | DC | e1000_fc_full
6860 + * 1 | 1 | 0 | 0 | e1000_fc_none
6861 + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
6863 + * Are both PAUSE bits set to 1? If so, this implies
6864 + * Symmetric Flow Control is enabled at both ends. The
6865 + * ASM_DIR bits are irrelevant per the spec.
6867 + * For Symmetric Flow Control:
6869 + * LOCAL DEVICE | LINK PARTNER
6870 + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
6871 + *-------|---------|-------|---------|--------------------
6872 + * 1 | DC | 1 | DC | e1000_fc_full
6875 + if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
6876 + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
6878 + * Now we need to check if the user selected Rx ONLY
6879 + * of pause frames. In this case, we had to advertise
6880 + * FULL flow control because we could not advertise RX
6881 + * ONLY. Hence, we must now check to see if we need to
6882 + * turn OFF the TRANSMISSION of PAUSE frames.
6884 + if (hw->fc.requested_mode == e1000_fc_full) {
6885 + hw->fc.current_mode = e1000_fc_full;
6886 + DEBUGOUT("Flow Control = FULL.\r\n");
6888 + hw->fc.current_mode = e1000_fc_rx_pause;
6889 + DEBUGOUT("Flow Control = "
6890 + "RX PAUSE frames only.\r\n");
6894 + * For receiving PAUSE frames ONLY.
6896 + * LOCAL DEVICE | LINK PARTNER
6897 + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
6898 + *-------|---------|-------|---------|--------------------
6899 + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
6901 + else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
6902 + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
6903 + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
6904 + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
6905 + hw->fc.current_mode = e1000_fc_tx_pause;
6906 + DEBUGOUT("Flow Control = TX PAUSE frames only.\r\n");
6909 + * For transmitting PAUSE frames ONLY.
6911 + * LOCAL DEVICE | LINK PARTNER
6912 + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
6913 + *-------|---------|-------|---------|--------------------
6914 + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
6916 + else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
6917 + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
6918 + !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
6919 + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
6920 + hw->fc.current_mode = e1000_fc_rx_pause;
6921 + DEBUGOUT("Flow Control = RX PAUSE frames only.\r\n");
6924 + * Per the IEEE spec, at this point flow control
6925 + * should be disabled.
6927 + hw->fc.current_mode = e1000_fc_none;
6928 + DEBUGOUT("Flow Control = NONE.\r\n");
6932 + * Now we need to do one last check... If we auto-
6933 + * negotiated to HALF DUPLEX, flow control should not be
6934 + * enabled per IEEE 802.3 spec.
6936 + ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex);
6938 + DEBUGOUT("Error getting link speed and duplex\n");
6942 + if (duplex == HALF_DUPLEX)
6943 + hw->fc.current_mode = e1000_fc_none;
6946 + * Now we call a subroutine to actually force the MAC
6947 + * controller to use the correct flow control settings.
6949 + ret_val = e1000_force_mac_fc_generic(hw);
6951 + DEBUGOUT("Error forcing flow control settings\n");
6961 + * e1000_get_speed_and_duplex_copper_generic - Retrieve current speed/duplex
6962 + * @hw: pointer to the HW structure
6963 + * @speed: stores the current speed
6964 + * @duplex: stores the current duplex
6966 + * Read the status register for the current speed/duplex and store the current
6967 + * speed and duplex for copper connections.
6969 +s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed,
6974 + DEBUGFUNC("e1000_get_speed_and_duplex_copper_generic");
6976 + status = E1000_READ_REG(hw, E1000_STATUS);
6977 + if (status & E1000_STATUS_SPEED_1000) {
6978 + *speed = SPEED_1000;
6979 + DEBUGOUT("1000 Mbs, ");
6980 + } else if (status & E1000_STATUS_SPEED_100) {
6981 + *speed = SPEED_100;
6982 + DEBUGOUT("100 Mbs, ");
6984 + *speed = SPEED_10;
6985 + DEBUGOUT("10 Mbs, ");
6988 + if (status & E1000_STATUS_FD) {
6989 + *duplex = FULL_DUPLEX;
6990 + DEBUGOUT("Full Duplex\n");
6992 + *duplex = HALF_DUPLEX;
6993 + DEBUGOUT("Half Duplex\n");
6996 + return E1000_SUCCESS;
7000 + * e1000_get_speed_and_duplex_fiber_generic - Retrieve current speed/duplex
7001 + * @hw: pointer to the HW structure
7002 + * @speed: stores the current speed
7003 + * @duplex: stores the current duplex
7005 + * Sets the speed and duplex to gigabit full duplex (the only possible option)
7006 + * for fiber/serdes links.
7008 +s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw *hw,
7009 + u16 *speed, u16 *duplex)
7011 + DEBUGFUNC("e1000_get_speed_and_duplex_fiber_serdes_generic");
7013 + *speed = SPEED_1000;
7014 + *duplex = FULL_DUPLEX;
7016 + return E1000_SUCCESS;
7020 + * e1000_get_hw_semaphore_generic - Acquire hardware semaphore
7021 + * @hw: pointer to the HW structure
7023 + * Acquire the HW semaphore to access the PHY or NVM
7025 +s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw)
7028 + s32 ret_val = E1000_SUCCESS;
7029 + s32 timeout = hw->nvm.word_size + 1;
7032 + DEBUGFUNC("e1000_get_hw_semaphore_generic");
7034 + /* Get the SW semaphore */
7035 + while (i < timeout) {
7036 + swsm = E1000_READ_REG(hw, E1000_SWSM);
7037 + if (!(swsm & E1000_SWSM_SMBI))
7044 + if (i == timeout) {
7045 + DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
7046 + ret_val = -E1000_ERR_NVM;
7050 + /* Get the FW semaphore. */
7051 + for (i = 0; i < timeout; i++) {
7052 + swsm = E1000_READ_REG(hw, E1000_SWSM);
7053 + E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
7055 + /* Semaphore acquired if bit latched */
7056 + if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI)
7062 + if (i == timeout) {
7063 + /* Release semaphores */
7064 + e1000_put_hw_semaphore_generic(hw);
7065 + DEBUGOUT("Driver can't access the NVM\n");
7066 + ret_val = -E1000_ERR_NVM;
7075 + * e1000_put_hw_semaphore_generic - Release hardware semaphore
7076 + * @hw: pointer to the HW structure
7078 + * Release hardware semaphore used to access the PHY or NVM
7080 +void e1000_put_hw_semaphore_generic(struct e1000_hw *hw)
7084 + DEBUGFUNC("e1000_put_hw_semaphore_generic");
7086 + swsm = E1000_READ_REG(hw, E1000_SWSM);
7088 + swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
7090 + E1000_WRITE_REG(hw, E1000_SWSM, swsm);
7094 + * e1000_get_auto_rd_done_generic - Check for auto read completion
7095 + * @hw: pointer to the HW structure
7097 + * Check EEPROM for Auto Read done bit.
7099 +s32 e1000_get_auto_rd_done_generic(struct e1000_hw *hw)
7102 + s32 ret_val = E1000_SUCCESS;
7104 + DEBUGFUNC("e1000_get_auto_rd_done_generic");
7106 + while (i < AUTO_READ_DONE_TIMEOUT) {
7107 + if (E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_AUTO_RD)
7113 + if (i == AUTO_READ_DONE_TIMEOUT) {
7114 + DEBUGOUT("Auto read by HW from NVM has not completed.\n");
7115 + ret_val = -E1000_ERR_RESET;
7124 + * e1000_valid_led_default_generic - Verify a valid default LED config
7125 + * @hw: pointer to the HW structure
7126 + * @data: pointer to the NVM (EEPROM)
7128 + * Read the EEPROM for the current default LED configuration. If the
7129 + * LED configuration is not valid, set to a valid LED configuration.
7131 +s32 e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data)
7135 + DEBUGFUNC("e1000_valid_led_default_generic");
7137 + ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
7139 + DEBUGOUT("NVM Read Error\n");
7143 + if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
7144 + *data = ID_LED_DEFAULT;
7151 + * e1000_id_led_init_generic - Initialize ID LED settings from the NVM
7152 + * @hw: pointer to the HW structure
7155 +s32 e1000_id_led_init_generic(struct e1000_hw *hw)
7157 + struct e1000_mac_info *mac = &hw->mac;
7159 + const u32 ledctl_mask = 0x000000FF;
7160 + const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
7161 + const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
7162 + u16 data, i, temp;
7163 + const u16 led_mask = 0x0F;
7165 + DEBUGFUNC("e1000_id_led_init_generic");
7167 + ret_val = hw->nvm.ops.valid_led_default(hw, &data);
7171 + mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
7172 + mac->ledctl_mode1 = mac->ledctl_default;
7173 + mac->ledctl_mode2 = mac->ledctl_default;
7175 + for (i = 0; i < 4; i++) {
7176 + temp = (data >> (i << 2)) & led_mask;
7178 + case ID_LED_ON1_DEF2:
7179 + case ID_LED_ON1_ON2:
7180 + case ID_LED_ON1_OFF2:
7181 + mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
7182 + mac->ledctl_mode1 |= ledctl_on << (i << 3);
7184 + case ID_LED_OFF1_DEF2:
7185 + case ID_LED_OFF1_ON2:
7186 + case ID_LED_OFF1_OFF2:
7187 + mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
7188 + mac->ledctl_mode1 |= ledctl_off << (i << 3);
7195 + case ID_LED_DEF1_ON2:
7196 + case ID_LED_ON1_ON2:
7197 + case ID_LED_OFF1_ON2:
7198 + mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
7199 + mac->ledctl_mode2 |= ledctl_on << (i << 3);
7201 + case ID_LED_DEF1_OFF2:
7202 + case ID_LED_ON1_OFF2:
7203 + case ID_LED_OFF1_OFF2:
7204 + mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
7205 + mac->ledctl_mode2 |= ledctl_off << (i << 3);
7218 + * e1000_setup_led_generic - Configures SW controllable LED
7219 + * @hw: pointer to the HW structure
7221 + * This prepares the SW controllable LED for use and saves the current state
7222 + * of the LED so it can be later restored.
7224 +s32 e1000_setup_led_generic(struct e1000_hw *hw)
7227 + s32 ret_val = E1000_SUCCESS;
7229 + DEBUGFUNC("e1000_setup_led_generic");
7231 + if (hw->mac.ops.setup_led != e1000_setup_led_generic) {
7232 + ret_val = -E1000_ERR_CONFIG;
7236 + if (hw->phy.media_type == e1000_media_type_fiber) {
7237 + ledctl = E1000_READ_REG(hw, E1000_LEDCTL);
7238 + hw->mac.ledctl_default = ledctl;
7239 + /* Turn off LED0 */
7240 + ledctl &= ~(E1000_LEDCTL_LED0_IVRT |
7241 + E1000_LEDCTL_LED0_BLINK |
7242 + E1000_LEDCTL_LED0_MODE_MASK);
7243 + ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
7244 + E1000_LEDCTL_LED0_MODE_SHIFT);
7245 + E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl);
7246 + } else if (hw->phy.media_type == e1000_media_type_copper) {
7247 + E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
7255 + * e1000_cleanup_led_generic - Set LED config to default operation
7256 + * @hw: pointer to the HW structure
7258 + * Remove the current LED configuration and set the LED configuration
7259 + * to the default value, saved from the EEPROM.
7261 +s32 e1000_cleanup_led_generic(struct e1000_hw *hw)
7263 + s32 ret_val = E1000_SUCCESS;
7265 + DEBUGFUNC("e1000_cleanup_led_generic");
7267 + if (hw->mac.ops.cleanup_led != e1000_cleanup_led_generic) {
7268 + ret_val = -E1000_ERR_CONFIG;
7272 + E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
7279 + * e1000_blink_led_generic - Blink LED
7280 + * @hw: pointer to the HW structure
7282 + * Blink the LEDs which are set to be on.
7284 +s32 e1000_blink_led_generic(struct e1000_hw *hw)
7286 + u32 ledctl_blink = 0;
7289 + DEBUGFUNC("e1000_blink_led_generic");
7291 + if (hw->phy.media_type == e1000_media_type_fiber) {
7292 + /* always blink LED0 for PCI-E fiber */
7293 + ledctl_blink = E1000_LEDCTL_LED0_BLINK |
7294 + (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
7297 + * set the blink bit for each LED that's "on" (0x0E)
7300 + ledctl_blink = hw->mac.ledctl_mode2;
7301 + for (i = 0; i < 4; i++)
7302 + if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
7303 + E1000_LEDCTL_MODE_LED_ON)
7304 + ledctl_blink |= (E1000_LEDCTL_LED0_BLINK <<
7308 + E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl_blink);
7310 + return E1000_SUCCESS;
7314 + * e1000_led_on_generic - Turn LED on
7315 + * @hw: pointer to the HW structure
7319 +s32 e1000_led_on_generic(struct e1000_hw *hw)
7323 + DEBUGFUNC("e1000_led_on_generic");
7325 + switch (hw->phy.media_type) {
7326 + case e1000_media_type_fiber:
7327 + ctrl = E1000_READ_REG(hw, E1000_CTRL);
7328 + ctrl &= ~E1000_CTRL_SWDPIN0;
7329 + ctrl |= E1000_CTRL_SWDPIO0;
7330 + E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
7332 + case e1000_media_type_copper:
7333 + E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
7339 + return E1000_SUCCESS;
7343 + * e1000_led_off_generic - Turn LED off
7344 + * @hw: pointer to the HW structure
7348 +s32 e1000_led_off_generic(struct e1000_hw *hw)
7352 + DEBUGFUNC("e1000_led_off_generic");
7354 + switch (hw->phy.media_type) {
7355 + case e1000_media_type_fiber:
7356 + ctrl = E1000_READ_REG(hw, E1000_CTRL);
7357 + ctrl |= E1000_CTRL_SWDPIN0;
7358 + ctrl |= E1000_CTRL_SWDPIO0;
7359 + E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
7361 + case e1000_media_type_copper:
7362 + E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
7368 + return E1000_SUCCESS;
7372 + * e1000_set_pcie_no_snoop_generic - Set PCI-express capabilities
7373 + * @hw: pointer to the HW structure
7374 + * @no_snoop: bitmap of snoop events
7376 + * Set the PCI-express register to snoop for events enabled in 'no_snoop'.
7378 +void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop)
7382 + DEBUGFUNC("e1000_set_pcie_no_snoop_generic");
7384 + if (hw->bus.type != e1000_bus_type_pci_express)
7388 + gcr = E1000_READ_REG(hw, E1000_GCR);
7389 + gcr &= ~(PCIE_NO_SNOOP_ALL);
7391 + E1000_WRITE_REG(hw, E1000_GCR, gcr);
7398 + * e1000_disable_pcie_master_generic - Disables PCI-express master access
7399 + * @hw: pointer to the HW structure
7401 + * Returns 0 (E1000_SUCCESS) if successful, else returns -10
7402 + * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused
7403 + * the master requests to be disabled.
7405 + * Disables PCI-Express master access and verifies there are no pending
7408 +s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw)
7411 + s32 timeout = MASTER_DISABLE_TIMEOUT;
7412 + s32 ret_val = E1000_SUCCESS;
7414 + DEBUGFUNC("e1000_disable_pcie_master_generic");
7416 + if (hw->bus.type != e1000_bus_type_pci_express)
7419 + ctrl = E1000_READ_REG(hw, E1000_CTRL);
7420 + ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
7421 + E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
7424 + if (!(E1000_READ_REG(hw, E1000_STATUS) &
7425 + E1000_STATUS_GIO_MASTER_ENABLE))
7432 + DEBUGOUT("Master requests are pending.\n");
7433 + ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING;
7442 + * e1000_reset_adaptive_generic - Reset Adaptive Interframe Spacing
7443 + * @hw: pointer to the HW structure
7445 + * Reset the Adaptive Interframe Spacing throttle to default values.
7447 +void e1000_reset_adaptive_generic(struct e1000_hw *hw)
7449 + struct e1000_mac_info *mac = &hw->mac;
7451 + DEBUGFUNC("e1000_reset_adaptive_generic");
7453 + if (!mac->adaptive_ifs) {
7454 + DEBUGOUT("Not in Adaptive IFS mode!\n");
7458 + mac->current_ifs_val = 0;
7459 + mac->ifs_min_val = IFS_MIN;
7460 + mac->ifs_max_val = IFS_MAX;
7461 + mac->ifs_step_size = IFS_STEP;
7462 + mac->ifs_ratio = IFS_RATIO;
7464 + mac->in_ifs_mode = false;
7465 + E1000_WRITE_REG(hw, E1000_AIT, 0);
7471 + * e1000_update_adaptive_generic - Update Adaptive Interframe Spacing
7472 + * @hw: pointer to the HW structure
7474 + * Update the Adaptive Interframe Spacing Throttle value based on the
7475 + * time between transmitted packets and time between collisions.
7477 +void e1000_update_adaptive_generic(struct e1000_hw *hw)
7479 + struct e1000_mac_info *mac = &hw->mac;
7481 + DEBUGFUNC("e1000_update_adaptive_generic");
7483 + if (!mac->adaptive_ifs) {
7484 + DEBUGOUT("Not in Adaptive IFS mode!\n");
7488 + if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
7489 + if (mac->tx_packet_delta > MIN_NUM_XMITS) {
7490 + mac->in_ifs_mode = true;
7491 + if (mac->current_ifs_val < mac->ifs_max_val) {
7492 + if (!mac->current_ifs_val)
7493 + mac->current_ifs_val = mac->ifs_min_val;
7495 + mac->current_ifs_val +=
7496 + mac->ifs_step_size;
7497 + E1000_WRITE_REG(hw, E1000_AIT, mac->current_ifs_val);
7501 + if (mac->in_ifs_mode &&
7502 + (mac->tx_packet_delta <= MIN_NUM_XMITS)) {
7503 + mac->current_ifs_val = 0;
7504 + mac->in_ifs_mode = false;
7505 + E1000_WRITE_REG(hw, E1000_AIT, 0);
7513 + * e1000_validate_mdi_setting_generic - Verify MDI/MDIx settings
7514 + * @hw: pointer to the HW structure
7516 + * Verify that when not using auto-negotiation that MDI/MDIx is correctly
7517 + * set, which is forced to MDI mode only.
7519 +static s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw)
7521 + s32 ret_val = E1000_SUCCESS;
7523 + DEBUGFUNC("e1000_validate_mdi_setting_generic");
7525 + if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
7526 + DEBUGOUT("Invalid MDI setting detected\n");
7528 + ret_val = -E1000_ERR_CONFIG;
7537 + * e1000_write_8bit_ctrl_reg_generic - Write a 8bit CTRL register
7538 + * @hw: pointer to the HW structure
7539 + * @reg: 32bit register offset such as E1000_SCTL
7540 + * @offset: register offset to write to
7541 + * @data: data to write at register offset
7543 + * Writes an address/data control type register. There are several of these
7544 + * and they all have the format address << 8 | data and bit 31 is polled for
7547 +s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg,
7548 + u32 offset, u8 data)
7550 + u32 i, regvalue = 0;
7551 + s32 ret_val = E1000_SUCCESS;
7553 + DEBUGFUNC("e1000_write_8bit_ctrl_reg_generic");
7555 + /* Set up the address and data */
7556 + regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT);
7557 + E1000_WRITE_REG(hw, reg, regvalue);
7559 + /* Poll the ready bit to see if the MDI read completed */
7560 + for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
7562 + regvalue = E1000_READ_REG(hw, reg);
7563 + if (regvalue & E1000_GEN_CTL_READY)
7566 + if (!(regvalue & E1000_GEN_CTL_READY)) {
7567 + DEBUGOUT1("Reg %08x did not indicate ready\n", reg);
7568 + ret_val = -E1000_ERR_PHY;
7575 Index: linux-2.6.22/drivers/net/igb/e1000_mac.h
7576 ===================================================================
7577 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
7578 +++ linux-2.6.22/drivers/net/igb/e1000_mac.h 2009-12-18 12:39:22.000000000 -0500
7580 +/*******************************************************************************
7582 + Intel(R) Gigabit Ethernet Linux driver
7583 + Copyright(c) 2007-2009 Intel Corporation.
7585 + This program is free software; you can redistribute it and/or modify it
7586 + under the terms and conditions of the GNU General Public License,
7587 + version 2, as published by the Free Software Foundation.
7589 + This program is distributed in the hope it will be useful, but WITHOUT
7590 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
7591 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
7594 + You should have received a copy of the GNU General Public License along with
7595 + this program; if not, write to the Free Software Foundation, Inc.,
7596 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
7598 + The full GNU General Public License is included in this distribution in
7599 + the file called "COPYING".
7601 + Contact Information:
7602 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
7603 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
7605 +*******************************************************************************/
7607 +#ifndef _E1000_MAC_H_
7608 +#define _E1000_MAC_H_
7611 + * Functions that should not be called directly from drivers but can be used
7612 + * by other files in this 'shared code'
7614 +void e1000_init_mac_ops_generic(struct e1000_hw *hw);
7615 +s32 e1000_blink_led_generic(struct e1000_hw *hw);
7616 +s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw);
7617 +s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw);
7618 +s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw);
7619 +s32 e1000_cleanup_led_generic(struct e1000_hw *hw);
7620 +s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw);
7621 +s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw);
7622 +s32 e1000_force_mac_fc_generic(struct e1000_hw *hw);
7623 +s32 e1000_get_auto_rd_done_generic(struct e1000_hw *hw);
7624 +s32 e1000_get_bus_info_pcie_generic(struct e1000_hw *hw);
7625 +void e1000_set_lan_id_single_port(struct e1000_hw *hw);
7626 +s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw);
7627 +s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed,
7629 +s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw *hw,
7630 + u16 *speed, u16 *duplex);
7631 +s32 e1000_id_led_init_generic(struct e1000_hw *hw);
7632 +s32 e1000_led_on_generic(struct e1000_hw *hw);
7633 +s32 e1000_led_off_generic(struct e1000_hw *hw);
7634 +void e1000_update_mc_addr_list_generic(struct e1000_hw *hw,
7635 + u8 *mc_addr_list, u32 mc_addr_count);
7636 +s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw);
7637 +s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw);
7638 +s32 e1000_setup_led_generic(struct e1000_hw *hw);
7639 +s32 e1000_setup_link_generic(struct e1000_hw *hw);
7640 +s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg,
7641 + u32 offset, u8 data);
7643 +u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr);
7645 +void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw);
7646 +void e1000_clear_vfta_generic(struct e1000_hw *hw);
7647 +void e1000_config_collision_dist_generic(struct e1000_hw *hw);
7648 +void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count);
7649 +void e1000_mta_set_generic(struct e1000_hw *hw, u32 hash_value);
7650 +void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw);
7651 +void e1000_put_hw_semaphore_generic(struct e1000_hw *hw);
7652 +void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
7653 +s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw);
7654 +void e1000_reset_adaptive_generic(struct e1000_hw *hw);
7655 +void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop);
7656 +void e1000_update_adaptive_generic(struct e1000_hw *hw);
7657 +void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
7660 Index: linux-2.6.22/drivers/net/igb/e1000_manage.c
7661 ===================================================================
7662 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
7663 +++ linux-2.6.22/drivers/net/igb/e1000_manage.c 2009-12-18 12:39:22.000000000 -0500
7665 +/*******************************************************************************
7667 + Intel(R) Gigabit Ethernet Linux driver
7668 + Copyright(c) 2007-2009 Intel Corporation.
7670 + This program is free software; you can redistribute it and/or modify it
7671 + under the terms and conditions of the GNU General Public License,
7672 + version 2, as published by the Free Software Foundation.
7674 + This program is distributed in the hope it will be useful, but WITHOUT
7675 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
7676 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
7679 + You should have received a copy of the GNU General Public License along with
7680 + this program; if not, write to the Free Software Foundation, Inc.,
7681 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
7683 + The full GNU General Public License is included in this distribution in
7684 + the file called "COPYING".
7686 + Contact Information:
7687 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
7688 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
7690 +*******************************************************************************/
7692 +#include "e1000_api.h"
7694 +static u8 e1000_calculate_checksum(u8 *buffer, u32 length);
7697 + * e1000_calculate_checksum - Calculate checksum for buffer
7698 + * @buffer: pointer to EEPROM
7699 + * @length: size of EEPROM to calculate a checksum for
7701 + * Calculates the checksum for some buffer on a specified length. The
7702 + * checksum calculated is returned.
7704 +static u8 e1000_calculate_checksum(u8 *buffer, u32 length)
7709 + DEBUGFUNC("e1000_calculate_checksum");
7714 + for (i = 0; i < length; i++)
7717 + return (u8) (0 - sum);
7721 + * e1000_mng_enable_host_if_generic - Checks host interface is enabled
7722 + * @hw: pointer to the HW structure
7724 + * Returns E1000_SUCCESS upon success, else E1000_ERR_HOST_INTERFACE_COMMAND
7726 + * This function checks whether the HOST IF is enabled for command operation
7727 + * and also checks whether the previous command is completed. It busy waits
7728 + * in case the previous command is not completed.
7730 +s32 e1000_mng_enable_host_if_generic(struct e1000_hw *hw)
7733 + s32 ret_val = E1000_SUCCESS;
7736 + DEBUGFUNC("e1000_mng_enable_host_if_generic");
7738 + /* Check that the host interface is enabled. */
7739 + hicr = E1000_READ_REG(hw, E1000_HICR);
7740 + if ((hicr & E1000_HICR_EN) == 0) {
7741 + DEBUGOUT("E1000_HOST_EN bit disabled.\n");
7742 + ret_val = -E1000_ERR_HOST_INTERFACE_COMMAND;
7745 + /* check the previous command is completed */
7746 + for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) {
7747 + hicr = E1000_READ_REG(hw, E1000_HICR);
7748 + if (!(hicr & E1000_HICR_C))
7750 + msec_delay_irq(1);
7753 + if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
7754 + DEBUGOUT("Previous command timeout failed .\n");
7755 + ret_val = -E1000_ERR_HOST_INTERFACE_COMMAND;
7764 + * e1000_check_mng_mode_generic - Generic check management mode
7765 + * @hw: pointer to the HW structure
7767 + * Reads the firmware semaphore register and returns true (>0) if
7768 + * manageability is enabled, else false (0).
7770 +bool e1000_check_mng_mode_generic(struct e1000_hw *hw)
7774 + DEBUGFUNC("e1000_check_mng_mode_generic");
7776 + fwsm = E1000_READ_REG(hw, E1000_FWSM);
7778 + return (fwsm & E1000_FWSM_MODE_MASK) ==
7779 + (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT);
7783 + * e1000_enable_tx_pkt_filtering_generic - Enable packet filtering on TX
7784 + * @hw: pointer to the HW structure
7786 + * Enables packet filtering on transmit packets if manageability is enabled
7787 + * and host interface is enabled.
7789 +bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw)
7791 + struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie;
7792 + u32 *buffer = (u32 *)&hw->mng_cookie;
7794 + s32 ret_val, hdr_csum, csum;
7796 + bool tx_filter = true;
7798 + DEBUGFUNC("e1000_enable_tx_pkt_filtering_generic");
7800 + /* No manageability, no filtering */
7801 + if (!hw->mac.ops.check_mng_mode(hw)) {
7802 + tx_filter = false;
7807 + * If we can't read from the host interface for whatever
7808 + * reason, disable filtering.
7810 + ret_val = hw->mac.ops.mng_enable_host_if(hw);
7811 + if (ret_val != E1000_SUCCESS) {
7812 + tx_filter = false;
7816 + /* Read in the header. Length and offset are in dwords. */
7817 + len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2;
7818 + offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2;
7819 + for (i = 0; i < len; i++) {
7820 + *(buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw,
7824 + hdr_csum = hdr->checksum;
7825 + hdr->checksum = 0;
7826 + csum = e1000_calculate_checksum((u8 *)hdr,
7827 + E1000_MNG_DHCP_COOKIE_LENGTH);
7829 + * If either the checksums or signature don't match, then
7830 + * the cookie area isn't considered valid, in which case we
7831 + * take the safe route of assuming Tx filtering is enabled.
7833 + if (hdr_csum != csum)
7835 + if (hdr->signature != E1000_IAMT_SIGNATURE)
7838 + /* Cookie area is valid, make the final check for filtering. */
7839 + if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING))
7840 + tx_filter = false;
7843 + hw->mac.tx_pkt_filtering = tx_filter;
7848 + * e1000_mng_write_dhcp_info_generic - Writes DHCP info to host interface
7849 + * @hw: pointer to the HW structure
7850 + * @buffer: pointer to the host interface
7851 + * @length: size of the buffer
7853 + * Writes the DHCP information to the host interface.
7855 +s32 e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw, u8 *buffer,
7858 + struct e1000_host_mng_command_header hdr;
7862 + DEBUGFUNC("e1000_mng_write_dhcp_info_generic");
7864 + hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
7865 + hdr.command_length = length;
7866 + hdr.reserved1 = 0;
7867 + hdr.reserved2 = 0;
7870 + /* Enable the host interface */
7871 + ret_val = hw->mac.ops.mng_enable_host_if(hw);
7875 + /* Populate the host interface with the contents of "buffer". */
7876 + ret_val = hw->mac.ops.mng_host_if_write(hw, buffer, length,
7877 + sizeof(hdr), &(hdr.checksum));
7881 + /* Write the manageability command header */
7882 + ret_val = hw->mac.ops.mng_write_cmd_header(hw, &hdr);
7886 + /* Tell the ARC a new command is pending. */
7887 + hicr = E1000_READ_REG(hw, E1000_HICR);
7888 + E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C);
7895 + * e1000_mng_write_cmd_header_generic - Writes manageability command header
7896 + * @hw: pointer to the HW structure
7897 + * @hdr: pointer to the host interface command header
7899 + * Writes the command header after performing the checksum calculation.
7901 +s32 e1000_mng_write_cmd_header_generic(struct e1000_hw *hw,
7902 + struct e1000_host_mng_command_header *hdr)
7904 + u16 i, length = sizeof(struct e1000_host_mng_command_header);
7906 + DEBUGFUNC("e1000_mng_write_cmd_header_generic");
7908 + /* Write the whole command header structure with new checksum. */
7910 + hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length);
7913 + /* Write the relevant command block into the ram area. */
7914 + for (i = 0; i < length; i++) {
7915 + E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, i,
7916 + *((u32 *) hdr + i));
7917 + E1000_WRITE_FLUSH(hw);
7920 + return E1000_SUCCESS;
7924 + * e1000_mng_host_if_write_generic - Write to the manageability host interface
7925 + * @hw: pointer to the HW structure
7926 + * @buffer: pointer to the host interface buffer
7927 + * @length: size of the buffer
7928 + * @offset: location in the buffer to write to
7929 + * @sum: sum of the data (not checksum)
7931 + * This function writes the buffer content at the offset given on the host if.
7932 + * It also does alignment considerations to do the writes in most efficient
7933 + * way. Also fills up the sum of the buffer in *buffer parameter.
7935 +s32 e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer,
7936 + u16 length, u16 offset, u8 *sum)
7939 + u8 *bufptr = buffer;
7941 + s32 ret_val = E1000_SUCCESS;
7942 + u16 remaining, i, j, prev_bytes;
7944 + DEBUGFUNC("e1000_mng_host_if_write_generic");
7946 + /* sum = only sum of the data and it is not checksum */
7948 + if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH) {
7949 + ret_val = -E1000_ERR_PARAM;
7953 + tmp = (u8 *)&data;
7954 + prev_bytes = offset & 0x3;
7958 + data = E1000_READ_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset);
7959 + for (j = prev_bytes; j < sizeof(u32); j++) {
7960 + *(tmp + j) = *bufptr++;
7961 + *sum += *(tmp + j);
7963 + E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset, data);
7964 + length -= j - prev_bytes;
7968 + remaining = length & 0x3;
7969 + length -= remaining;
7971 + /* Calculate length in DWORDs */
7975 + * The device driver writes the relevant command block into the
7978 + for (i = 0; i < length; i++) {
7979 + for (j = 0; j < sizeof(u32); j++) {
7980 + *(tmp + j) = *bufptr++;
7981 + *sum += *(tmp + j);
7984 + E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i,
7988 + for (j = 0; j < sizeof(u32); j++) {
7989 + if (j < remaining)
7990 + *(tmp + j) = *bufptr++;
7994 + *sum += *(tmp + j);
7996 + E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i, data);
8004 + * e1000_enable_mng_pass_thru - Enable processing of ARP's
8005 + * @hw: pointer to the HW structure
8007 + * Verifies the hardware needs to allow ARPs to be processed by the host.
8009 +bool e1000_enable_mng_pass_thru(struct e1000_hw *hw)
8013 + bool ret_val = false;
8015 + DEBUGFUNC("e1000_enable_mng_pass_thru");
8017 + if (!hw->mac.asf_firmware_present)
8020 + manc = E1000_READ_REG(hw, E1000_MANC);
8022 + if (!(manc & E1000_MANC_RCV_TCO_EN) ||
8023 + !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
8026 + if (hw->mac.arc_subsystem_valid) {
8027 + fwsm = E1000_READ_REG(hw, E1000_FWSM);
8028 + factps = E1000_READ_REG(hw, E1000_FACTPS);
8030 + if (!(factps & E1000_FACTPS_MNGCG) &&
8031 + ((fwsm & E1000_FWSM_MODE_MASK) ==
8032 + (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
8037 + if ((manc & E1000_MANC_SMBUS_EN) &&
8038 + !(manc & E1000_MANC_ASF_EN)) {
8048 Index: linux-2.6.22/drivers/net/igb/e1000_manage.h
8049 ===================================================================
8050 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
8051 +++ linux-2.6.22/drivers/net/igb/e1000_manage.h 2009-12-18 12:39:22.000000000 -0500
8053 +/*******************************************************************************
8055 + Intel(R) Gigabit Ethernet Linux driver
8056 + Copyright(c) 2007-2009 Intel Corporation.
8058 + This program is free software; you can redistribute it and/or modify it
8059 + under the terms and conditions of the GNU General Public License,
8060 + version 2, as published by the Free Software Foundation.
8062 + This program is distributed in the hope it will be useful, but WITHOUT
8063 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8064 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
8067 + You should have received a copy of the GNU General Public License along with
8068 + this program; if not, write to the Free Software Foundation, Inc.,
8069 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
8071 + The full GNU General Public License is included in this distribution in
8072 + the file called "COPYING".
8074 + Contact Information:
8075 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
8076 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
8078 +*******************************************************************************/
8080 +#ifndef _E1000_MANAGE_H_
8081 +#define _E1000_MANAGE_H_
8083 +bool e1000_check_mng_mode_generic(struct e1000_hw *hw);
8084 +bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw);
8085 +s32 e1000_mng_enable_host_if_generic(struct e1000_hw *hw);
8086 +s32 e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer,
8087 + u16 length, u16 offset, u8 *sum);
8088 +s32 e1000_mng_write_cmd_header_generic(struct e1000_hw *hw,
8089 + struct e1000_host_mng_command_header *hdr);
8090 +s32 e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw,
8091 + u8 *buffer, u16 length);
8092 +bool e1000_enable_mng_pass_thru(struct e1000_hw *hw);
8094 +enum e1000_mng_mode {
8095 + e1000_mng_mode_none = 0,
8096 + e1000_mng_mode_asf,
8097 + e1000_mng_mode_pt,
8098 + e1000_mng_mode_ipmi,
8099 + e1000_mng_mode_host_if_only
8102 +#define E1000_FACTPS_MNGCG 0x20000000
8104 +#define E1000_FWSM_MODE_MASK 0xE
8105 +#define E1000_FWSM_MODE_SHIFT 1
8107 +#define E1000_MNG_IAMT_MODE 0x3
8108 +#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10
8109 +#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0
8110 +#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10
8111 +#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64
8112 +#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING 0x1
8113 +#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2
8115 +#define E1000_VFTA_ENTRY_SHIFT 5
8116 +#define E1000_VFTA_ENTRY_MASK 0x7F
8117 +#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
8119 +#define E1000_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */
8120 +#define E1000_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */
8121 +#define E1000_HI_COMMAND_TIMEOUT 500 /* Process HI command limit */
8123 +#define E1000_HICR_EN 0x01 /* Enable bit - RO */
8124 +/* Driver sets this bit when done to put command in RAM */
8125 +#define E1000_HICR_C 0x02
8126 +#define E1000_HICR_SV 0x04 /* Status Validity */
8127 +#define E1000_HICR_FW_RESET_ENABLE 0x40
8128 +#define E1000_HICR_FW_RESET 0x80
8130 +/* Intel(R) Active Management Technology signature */
8131 +#define E1000_IAMT_SIGNATURE 0x544D4149
8134 Index: linux-2.6.22/drivers/net/igb/e1000_mbx.c
8135 ===================================================================
8136 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
8137 +++ linux-2.6.22/drivers/net/igb/e1000_mbx.c 2009-12-18 12:39:22.000000000 -0500
8139 +/*******************************************************************************
8141 + Intel(R) Gigabit Ethernet Linux driver
8142 + Copyright(c) 2007-2009 Intel Corporation.
8144 + This program is free software; you can redistribute it and/or modify it
8145 + under the terms and conditions of the GNU General Public License,
8146 + version 2, as published by the Free Software Foundation.
8148 + This program is distributed in the hope it will be useful, but WITHOUT
8149 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8150 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
8153 + You should have received a copy of the GNU General Public License along with
8154 + this program; if not, write to the Free Software Foundation, Inc.,
8155 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
8157 + The full GNU General Public License is included in this distribution in
8158 + the file called "COPYING".
8160 + Contact Information:
8161 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
8162 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
8164 +*******************************************************************************/
8166 +#include "e1000_mbx.h"
8169 + * e1000_read_mbx - Reads a message from the mailbox
8170 + * @hw: pointer to the HW structure
8171 + * @msg: The message buffer
8172 + * @size: Length of buffer
8173 + * @mbx_id: id of mailbox to read
8175 + * returns SUCCESS if it successfully read a message from the buffer
8177 +s32 e1000_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
8179 + struct e1000_mbx_info *mbx = &hw->mbx;
8180 + s32 ret_val = -E1000_ERR_MBX;
8182 + DEBUGFUNC("e1000_read_mbx");
8184 + /* limit read to size of mailbox */
8185 + if (size > mbx->size)
8188 + if (mbx->ops.read)
8189 + ret_val = mbx->ops.read(hw, msg, size, mbx_id);
8195 + * e1000_write_mbx - Write a message to the mailbox
8196 + * @hw: pointer to the HW structure
8197 + * @msg: The message buffer
8198 + * @size: Length of buffer
8199 + * @mbx_id: id of mailbox to write
8201 + * returns SUCCESS if it successfully copied message into the buffer
8203 +s32 e1000_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
8205 + struct e1000_mbx_info *mbx = &hw->mbx;
8206 + s32 ret_val = E1000_SUCCESS;
8208 + DEBUGFUNC("e1000_write_mbx");
8210 + if (size > mbx->size)
8211 + ret_val = -E1000_ERR_MBX;
8213 + else if (mbx->ops.write)
8214 + ret_val = mbx->ops.write(hw, msg, size, mbx_id);
8220 + * e1000_check_for_msg - checks to see if someone sent us mail
8221 + * @hw: pointer to the HW structure
8222 + * @mbx_id: id of mailbox to check
8224 + * returns SUCCESS if the Status bit was found or else ERR_MBX
8226 +s32 e1000_check_for_msg(struct e1000_hw *hw, u16 mbx_id)
8228 + struct e1000_mbx_info *mbx = &hw->mbx;
8229 + s32 ret_val = -E1000_ERR_MBX;
8231 + DEBUGFUNC("e1000_check_for_msg");
8233 + if (mbx->ops.check_for_msg)
8234 + ret_val = mbx->ops.check_for_msg(hw, mbx_id);
8240 + * e1000_check_for_ack - checks to see if someone sent us ACK
8241 + * @hw: pointer to the HW structure
8242 + * @mbx_id: id of mailbox to check
8244 + * returns SUCCESS if the Status bit was found or else ERR_MBX
8246 +s32 e1000_check_for_ack(struct e1000_hw *hw, u16 mbx_id)
8248 + struct e1000_mbx_info *mbx = &hw->mbx;
8249 + s32 ret_val = -E1000_ERR_MBX;
8251 + DEBUGFUNC("e1000_check_for_ack");
8253 + if (mbx->ops.check_for_ack)
8254 + ret_val = mbx->ops.check_for_ack(hw, mbx_id);
8260 + * e1000_check_for_rst - checks to see if other side has reset
8261 + * @hw: pointer to the HW structure
8262 + * @mbx_id: id of mailbox to check
8264 + * returns SUCCESS if the Status bit was found or else ERR_MBX
8266 +s32 e1000_check_for_rst(struct e1000_hw *hw, u16 mbx_id)
8268 + struct e1000_mbx_info *mbx = &hw->mbx;
8269 + s32 ret_val = -E1000_ERR_MBX;
8271 + DEBUGFUNC("e1000_check_for_rst");
8273 + if (mbx->ops.check_for_rst)
8274 + ret_val = mbx->ops.check_for_rst(hw, mbx_id);
8280 + * e1000_poll_for_msg - Wait for message notification
8281 + * @hw: pointer to the HW structure
8282 + * @mbx_id: id of mailbox to write
8284 + * returns SUCCESS if it successfully received a message notification
8286 +static s32 e1000_poll_for_msg(struct e1000_hw *hw, u16 mbx_id)
8288 + struct e1000_mbx_info *mbx = &hw->mbx;
8289 + int countdown = mbx->timeout;
8291 + DEBUGFUNC("e1000_poll_for_msg");
8293 + if (!countdown || !mbx->ops.check_for_msg)
8296 + while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
8300 + usec_delay(mbx->usec_delay);
8303 + /* if we failed, all future posted messages fail until reset */
8307 + return countdown ? E1000_SUCCESS : -E1000_ERR_MBX;
8311 + * e1000_poll_for_ack - Wait for message acknowledgement
8312 + * @hw: pointer to the HW structure
8313 + * @mbx_id: id of mailbox to write
8315 + * returns SUCCESS if it successfully received a message acknowledgement
8317 +static s32 e1000_poll_for_ack(struct e1000_hw *hw, u16 mbx_id)
8319 + struct e1000_mbx_info *mbx = &hw->mbx;
8320 + int countdown = mbx->timeout;
8322 + DEBUGFUNC("e1000_poll_for_ack");
8324 + if (!countdown || !mbx->ops.check_for_ack)
8327 + while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
8331 + usec_delay(mbx->usec_delay);
8334 + /* if we failed, all future posted messages fail until reset */
8338 + return countdown ? E1000_SUCCESS : -E1000_ERR_MBX;
8342 + * e1000_read_posted_mbx - Wait for message notification and receive message
8343 + * @hw: pointer to the HW structure
8344 + * @msg: The message buffer
8345 + * @size: Length of buffer
8346 + * @mbx_id: id of mailbox to write
8348 + * returns SUCCESS if it successfully received a message notification and
8349 + * copied it into the receive buffer.
8351 +s32 e1000_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
8353 + struct e1000_mbx_info *mbx = &hw->mbx;
8354 + s32 ret_val = -E1000_ERR_MBX;
8356 + DEBUGFUNC("e1000_read_posted_mbx");
8358 + if (!mbx->ops.read)
8361 + ret_val = e1000_poll_for_msg(hw, mbx_id);
8363 + /* if ack received read message, otherwise we timed out */
8365 + ret_val = mbx->ops.read(hw, msg, size, mbx_id);
8371 + * e1000_write_posted_mbx - Write a message to the mailbox, wait for ack
8372 + * @hw: pointer to the HW structure
8373 + * @msg: The message buffer
8374 + * @size: Length of buffer
8375 + * @mbx_id: id of mailbox to write
8377 + * returns SUCCESS if it successfully copied message into the buffer and
8378 + * received an ack to that message within delay * timeout period
8380 +s32 e1000_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
8382 + struct e1000_mbx_info *mbx = &hw->mbx;
8383 + s32 ret_val = -E1000_ERR_MBX;
8385 + DEBUGFUNC("e1000_write_posted_mbx");
8387 + /* exit if either we can't write or there isn't a defined timeout */
8388 + if (!mbx->ops.write || !mbx->timeout)
8392 + ret_val = mbx->ops.write(hw, msg, size, mbx_id);
8394 + /* if msg sent wait until we receive an ack */
8396 + ret_val = e1000_poll_for_ack(hw, mbx_id);
8402 + * e1000_init_mbx_ops_generic - Initialize mailbox function pointers
8403 + * @hw: pointer to the HW structure
8405 + * Sets up the posted read/write function pointers to the generic implementations
8407 +void e1000_init_mbx_ops_generic(struct e1000_hw *hw)
8409 + struct e1000_mbx_info *mbx = &hw->mbx;
8410 + mbx->ops.read_posted = e1000_read_posted_mbx;
8411 + mbx->ops.write_posted = e1000_write_posted_mbx;
8414 +static s32 e1000_check_for_bit_pf(struct e1000_hw *hw, u32 mask)
8416 + u32 mbvficr = E1000_READ_REG(hw, E1000_MBVFICR);
8417 + s32 ret_val = -E1000_ERR_MBX;
8419 + if (mbvficr & mask) {
8420 + ret_val = E1000_SUCCESS;
8421 + E1000_WRITE_REG(hw, E1000_MBVFICR, mask);
8428 + * e1000_check_for_msg_pf - checks to see if the VF has sent mail
8429 + * @hw: pointer to the HW structure
8430 + * @vf_number: the VF index
8432 + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
8434 +static s32 e1000_check_for_msg_pf(struct e1000_hw *hw, u16 vf_number)
8436 + s32 ret_val = -E1000_ERR_MBX;
8438 + DEBUGFUNC("e1000_check_for_msg_pf");
8440 + if (!e1000_check_for_bit_pf(hw, E1000_MBVFICR_VFREQ_VF1 << vf_number)) {
8441 + ret_val = E1000_SUCCESS;
8442 + hw->mbx.stats.reqs++;
8449 + * e1000_check_for_ack_pf - checks to see if the VF has ACKed
8450 + * @hw: pointer to the HW structure
8451 + * @vf_number: the VF index
8453 + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
8455 +static s32 e1000_check_for_ack_pf(struct e1000_hw *hw, u16 vf_number)
8457 + s32 ret_val = -E1000_ERR_MBX;
8459 + DEBUGFUNC("e1000_check_for_ack_pf");
8461 + if (!e1000_check_for_bit_pf(hw, E1000_MBVFICR_VFACK_VF1 << vf_number)) {
8462 + ret_val = E1000_SUCCESS;
8463 + hw->mbx.stats.acks++;
8470 + * e1000_check_for_rst_pf - checks to see if the VF has reset
8471 + * @hw: pointer to the HW structure
8472 + * @vf_number: the VF index
8474 + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
8476 +static s32 e1000_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number)
8478 + u32 vflre = E1000_READ_REG(hw, E1000_VFLRE);
8479 + s32 ret_val = -E1000_ERR_MBX;
8481 + DEBUGFUNC("e1000_check_for_rst_pf");
8483 + if (vflre & (1 << vf_number)) {
8484 + ret_val = E1000_SUCCESS;
8485 + E1000_WRITE_REG(hw, E1000_VFLRE, (1 << vf_number));
8486 + hw->mbx.stats.rsts++;
8493 + * e1000_obtain_mbx_lock_pf - obtain mailbox lock
8494 + * @hw: pointer to the HW structure
8495 + * @vf_number: the VF index
8497 + * return SUCCESS if we obtained the mailbox lock
8499 +static s32 e1000_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number)
8501 + s32 ret_val = -E1000_ERR_MBX;
8504 + DEBUGFUNC("e1000_obtain_mbx_lock_pf");
8506 + /* Take ownership of the buffer */
8507 + E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU);
8509 + /* reserve mailbox for vf use */
8510 + p2v_mailbox = E1000_READ_REG(hw, E1000_P2VMAILBOX(vf_number));
8511 + if (p2v_mailbox & E1000_P2VMAILBOX_PFU)
8512 + ret_val = E1000_SUCCESS;
8518 + * e1000_write_mbx_pf - Places a message in the mailbox
8519 + * @hw: pointer to the HW structure
8520 + * @msg: The message buffer
8521 + * @size: Length of buffer
8522 + * @vf_number: the VF index
8524 + * returns SUCCESS if it successfully copied message into the buffer
8526 +static s32 e1000_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
8532 + DEBUGFUNC("e1000_write_mbx_pf");
8534 + /* lock the mailbox to prevent pf/vf race condition */
8535 + ret_val = e1000_obtain_mbx_lock_pf(hw, vf_number);
8537 + goto out_no_write;
8539 + /* flush msg and acks as we are overwriting the message buffer */
8540 + e1000_check_for_msg_pf(hw, vf_number);
8541 + e1000_check_for_ack_pf(hw, vf_number);
8543 + /* copy the caller specified message to the mailbox memory buffer */
8544 + for (i = 0; i < size; i++)
8545 + E1000_WRITE_REG_ARRAY(hw, E1000_VMBMEM(vf_number), i, msg[i]);
8547 + /* Interrupt VF to tell it a message has been sent and release buffer*/
8548 + E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_STS);
8550 + /* update stats */
8551 + hw->mbx.stats.msgs_tx++;
8559 + * e1000_read_mbx_pf - Read a message from the mailbox
8560 + * @hw: pointer to the HW structure
8561 + * @msg: The message buffer
8562 + * @size: Length of buffer
8563 + * @vf_number: the VF index
8565 + * This function copies a message from the mailbox buffer to the caller's
8566 + * memory buffer. The presumption is that the caller knows that there was
8567 + * a message due to a VF request so no polling for message is needed.
8569 +static s32 e1000_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
8575 + DEBUGFUNC("e1000_read_mbx_pf");
8577 + /* lock the mailbox to prevent pf/vf race condition */
8578 + ret_val = e1000_obtain_mbx_lock_pf(hw, vf_number);
8582 + /* copy the message to the mailbox memory buffer */
8583 + for (i = 0; i < size; i++)
8584 + msg[i] = E1000_READ_REG_ARRAY(hw, E1000_VMBMEM(vf_number), i);
8586 + /* Acknowledge the message and release buffer */
8587 + E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK);
8589 + /* update stats */
8590 + hw->mbx.stats.msgs_rx++;
8597 + * e1000_init_mbx_params_pf - set initial values for pf mailbox
8598 + * @hw: pointer to the HW structure
8600 + * Initializes the hw->mbx struct to correct values for pf mailbox
8602 +s32 e1000_init_mbx_params_pf(struct e1000_hw *hw)
8604 + struct e1000_mbx_info *mbx = &hw->mbx;
8606 + if (hw->mac.type == e1000_82576) {
8608 + mbx->usec_delay = 0;
8610 + mbx->size = E1000_VFMAILBOX_SIZE;
8612 + mbx->ops.read = e1000_read_mbx_pf;
8613 + mbx->ops.write = e1000_write_mbx_pf;
8614 + mbx->ops.read_posted = e1000_read_posted_mbx;
8615 + mbx->ops.write_posted = e1000_write_posted_mbx;
8616 + mbx->ops.check_for_msg = e1000_check_for_msg_pf;
8617 + mbx->ops.check_for_ack = e1000_check_for_ack_pf;
8618 + mbx->ops.check_for_rst = e1000_check_for_rst_pf;
8620 + mbx->stats.msgs_tx = 0;
8621 + mbx->stats.msgs_rx = 0;
8622 + mbx->stats.reqs = 0;
8623 + mbx->stats.acks = 0;
8624 + mbx->stats.rsts = 0;
8627 + return E1000_SUCCESS;
8630 Index: linux-2.6.22/drivers/net/igb/e1000_mbx.h
8631 ===================================================================
8632 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
8633 +++ linux-2.6.22/drivers/net/igb/e1000_mbx.h 2009-12-18 12:39:22.000000000 -0500
8635 +/*******************************************************************************
8637 + Intel(R) Gigabit Ethernet Linux driver
8638 + Copyright(c) 2007-2009 Intel Corporation.
8640 + This program is free software; you can redistribute it and/or modify it
8641 + under the terms and conditions of the GNU General Public License,
8642 + version 2, as published by the Free Software Foundation.
8644 + This program is distributed in the hope it will be useful, but WITHOUT
8645 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8646 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
8649 + You should have received a copy of the GNU General Public License along with
8650 + this program; if not, write to the Free Software Foundation, Inc.,
8651 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
8653 + The full GNU General Public License is included in this distribution in
8654 + the file called "COPYING".
8656 + Contact Information:
8657 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
8658 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
8660 +*******************************************************************************/
8662 +#ifndef _E1000_MBX_H_
8663 +#define _E1000_MBX_H_
8665 +#include "e1000_api.h"
8667 +#define E1000_P2VMAILBOX_STS 0x00000001 /* Initiate message send to VF */
8668 +#define E1000_P2VMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
8669 +#define E1000_P2VMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
8670 +#define E1000_P2VMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
8671 +#define E1000_P2VMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
8673 +#define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */
8674 +#define E1000_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
8675 +#define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */
8676 +#define E1000_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
8678 +#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
8680 +/* If it's a E1000_VF_* msg then it originates in the VF and is sent to the
8681 + * PF. The reverse is true if it is E1000_PF_*.
8682 + * Message ACK's are the value or'd with 0xF0000000
8684 +#define E1000_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
8685 + * this are the ACK */
8686 +#define E1000_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
8687 + * this are the NACK */
8688 +#define E1000_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
8689 + clear to send requests */
8690 +#define E1000_VT_MSGINFO_SHIFT 16
8691 +/* bits 23:16 are used for extra info for certain messages */
8692 +#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT)
8694 +#define E1000_VF_RESET 0x01 /* VF requests reset */
8695 +#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */
8696 +#define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */
8697 +#define E1000_VF_SET_MULTICAST_COUNT_MASK (0x1F << E1000_VT_MSGINFO_SHIFT)
8698 +#define E1000_VF_SET_MULTICAST_OVERFLOW (0x80 << E1000_VT_MSGINFO_SHIFT)
8699 +#define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */
8700 +#define E1000_VF_SET_VLAN_ADD (0x01 << E1000_VT_MSGINFO_SHIFT)
8701 +#define E1000_VF_SET_LPE 0x05 /* VF requests to set VMOLR.LPE */
8702 +#define E1000_VF_SET_PROMISC 0x06 /*VF requests to clear VMOLR.ROPE/MPME*/
8703 +#define E1000_VF_SET_PROMISC_UNICAST (0x01 << E1000_VT_MSGINFO_SHIFT)
8704 +#define E1000_VF_SET_PROMISC_MULTICAST (0x02 << E1000_VT_MSGINFO_SHIFT)
8706 +#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */
8708 +#define E1000_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
8709 +#define E1000_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
8711 +s32 e1000_read_mbx(struct e1000_hw *, u32 *, u16, u16);
8712 +s32 e1000_write_mbx(struct e1000_hw *, u32 *, u16, u16);
8713 +s32 e1000_read_posted_mbx(struct e1000_hw *, u32 *, u16, u16);
8714 +s32 e1000_write_posted_mbx(struct e1000_hw *, u32 *, u16, u16);
8715 +s32 e1000_check_for_msg(struct e1000_hw *, u16);
8716 +s32 e1000_check_for_ack(struct e1000_hw *, u16);
8717 +s32 e1000_check_for_rst(struct e1000_hw *, u16);
8718 +void e1000_init_mbx_ops_generic(struct e1000_hw *hw);
8719 +s32 e1000_init_mbx_params_pf(struct e1000_hw *);
8721 +#endif /* _E1000_MBX_H_ */
8722 Index: linux-2.6.22/drivers/net/igb/e1000_nvm.c
8723 ===================================================================
8724 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
8725 +++ linux-2.6.22/drivers/net/igb/e1000_nvm.c 2009-12-18 12:39:22.000000000 -0500
8727 +/*******************************************************************************
8729 + Intel(R) Gigabit Ethernet Linux driver
8730 + Copyright(c) 2007-2009 Intel Corporation.
8732 + This program is free software; you can redistribute it and/or modify it
8733 + under the terms and conditions of the GNU General Public License,
8734 + version 2, as published by the Free Software Foundation.
8736 + This program is distributed in the hope it will be useful, but WITHOUT
8737 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8738 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
8741 + You should have received a copy of the GNU General Public License along with
8742 + this program; if not, write to the Free Software Foundation, Inc.,
8743 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
8745 + The full GNU General Public License is included in this distribution in
8746 + the file called "COPYING".
8748 + Contact Information:
8749 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
8750 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
8752 +*******************************************************************************/
8754 +#include "e1000_api.h"
8756 +static void e1000_stop_nvm(struct e1000_hw *hw);
8757 +static void e1000_reload_nvm_generic(struct e1000_hw *hw);
8760 + * e1000_init_nvm_ops_generic - Initialize NVM function pointers
8761 + * @hw: pointer to the HW structure
8763 + * Sets up the function pointers to no-op functions
8765 +void e1000_init_nvm_ops_generic(struct e1000_hw *hw)
8767 + struct e1000_nvm_info *nvm = &hw->nvm;
8768 + DEBUGFUNC("e1000_init_nvm_ops_generic");
8770 + /* Initialize function pointers */
8771 + nvm->ops.reload = e1000_reload_nvm_generic;
8775 + * e1000_raise_eec_clk - Raise EEPROM clock
8776 + * @hw: pointer to the HW structure
8777 + * @eecd: pointer to the EEPROM
8779 + * Enable/Raise the EEPROM clock bit.
8781 +static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
8783 + *eecd = *eecd | E1000_EECD_SK;
8784 + E1000_WRITE_REG(hw, E1000_EECD, *eecd);
8785 + E1000_WRITE_FLUSH(hw);
8786 + usec_delay(hw->nvm.delay_usec);
8790 + * e1000_lower_eec_clk - Lower EEPROM clock
8791 + * @hw: pointer to the HW structure
8792 + * @eecd: pointer to the EEPROM
8794 + * Clear/Lower the EEPROM clock bit.
8796 +static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
8798 + *eecd = *eecd & ~E1000_EECD_SK;
8799 + E1000_WRITE_REG(hw, E1000_EECD, *eecd);
8800 + E1000_WRITE_FLUSH(hw);
8801 + usec_delay(hw->nvm.delay_usec);
8805 + * e1000_shift_out_eec_bits - Shift data bits out to the EEPROM
8806 + * @hw: pointer to the HW structure
8807 + * @data: data to send to the EEPROM
8808 + * @count: number of bits to shift out
8810 + * We need to shift 'count' bits out to the EEPROM. So, the value in the
8811 + * "data" parameter will be shifted out to the EEPROM one bit at a time.
8812 + * In order to do this, "data" must be broken down into bits.
8814 +static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
8816 + struct e1000_nvm_info *nvm = &hw->nvm;
8817 + u32 eecd = E1000_READ_REG(hw, E1000_EECD);
8820 + DEBUGFUNC("e1000_shift_out_eec_bits");
8822 + mask = 0x01 << (count - 1);
8823 + if (nvm->type == e1000_nvm_eeprom_spi)
8824 + eecd |= E1000_EECD_DO;
8827 + eecd &= ~E1000_EECD_DI;
8830 + eecd |= E1000_EECD_DI;
8832 + E1000_WRITE_REG(hw, E1000_EECD, eecd);
8833 + E1000_WRITE_FLUSH(hw);
8835 + usec_delay(nvm->delay_usec);
8837 + e1000_raise_eec_clk(hw, &eecd);
8838 + e1000_lower_eec_clk(hw, &eecd);
8843 + eecd &= ~E1000_EECD_DI;
8844 + E1000_WRITE_REG(hw, E1000_EECD, eecd);
8848 + * e1000_shift_in_eec_bits - Shift data bits in from the EEPROM
8849 + * @hw: pointer to the HW structure
8850 + * @count: number of bits to shift in
8852 + * In order to read a register from the EEPROM, we need to shift 'count' bits
8853 + * in from the EEPROM. Bits are "shifted in" by raising the clock input to
8854 + * the EEPROM (setting the SK bit), and then reading the value of the data out
8855 + * "DO" bit. During this "shifting in" process the data in "DI" bit should
8856 + * always be clear.
8858 +static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
8864 + DEBUGFUNC("e1000_shift_in_eec_bits");
8866 + eecd = E1000_READ_REG(hw, E1000_EECD);
8868 + eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
8871 + for (i = 0; i < count; i++) {
8873 + e1000_raise_eec_clk(hw, &eecd);
8875 + eecd = E1000_READ_REG(hw, E1000_EECD);
8877 + eecd &= ~E1000_EECD_DI;
8878 + if (eecd & E1000_EECD_DO)
8881 + e1000_lower_eec_clk(hw, &eecd);
8888 + * e1000_poll_eerd_eewr_done - Poll for EEPROM read/write completion
8889 + * @hw: pointer to the HW structure
8890 + * @ee_reg: EEPROM flag for polling
8892 + * Polls the EEPROM status bit for either read or write completion based
8893 + * upon the value of 'ee_reg'.
8895 +s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
8897 + u32 attempts = 100000;
8899 + s32 ret_val = -E1000_ERR_NVM;
8901 + DEBUGFUNC("e1000_poll_eerd_eewr_done");
8903 + for (i = 0; i < attempts; i++) {
8904 + if (ee_reg == E1000_NVM_POLL_READ)
8905 + reg = E1000_READ_REG(hw, E1000_EERD);
8907 + reg = E1000_READ_REG(hw, E1000_EEWR);
8909 + if (reg & E1000_NVM_RW_REG_DONE) {
8910 + ret_val = E1000_SUCCESS;
8921 + * e1000_acquire_nvm_generic - Generic request for access to EEPROM
8922 + * @hw: pointer to the HW structure
8924 + * Set the EEPROM access request bit and wait for EEPROM access grant bit.
8925 + * Return successful if access grant bit set, else clear the request for
8926 + * EEPROM access and return -E1000_ERR_NVM (-1).
8928 +s32 e1000_acquire_nvm_generic(struct e1000_hw *hw)
8930 + u32 eecd = E1000_READ_REG(hw, E1000_EECD);
8931 + s32 timeout = E1000_NVM_GRANT_ATTEMPTS;
8932 + s32 ret_val = E1000_SUCCESS;
8934 + DEBUGFUNC("e1000_acquire_nvm_generic");
8936 + E1000_WRITE_REG(hw, E1000_EECD, eecd | E1000_EECD_REQ);
8937 + eecd = E1000_READ_REG(hw, E1000_EECD);
8940 + if (eecd & E1000_EECD_GNT)
8943 + eecd = E1000_READ_REG(hw, E1000_EECD);
8948 + eecd &= ~E1000_EECD_REQ;
8949 + E1000_WRITE_REG(hw, E1000_EECD, eecd);
8950 + DEBUGOUT("Could not acquire NVM grant\n");
8951 + ret_val = -E1000_ERR_NVM;
8958 + * e1000_standby_nvm - Return EEPROM to standby state
8959 + * @hw: pointer to the HW structure
8961 + * Return the EEPROM to a standby state.
8963 +static void e1000_standby_nvm(struct e1000_hw *hw)
8965 + struct e1000_nvm_info *nvm = &hw->nvm;
8966 + u32 eecd = E1000_READ_REG(hw, E1000_EECD);
8968 + DEBUGFUNC("e1000_standby_nvm");
8970 + if (nvm->type == e1000_nvm_eeprom_spi) {
8971 + /* Toggle CS to flush commands */
8972 + eecd |= E1000_EECD_CS;
8973 + E1000_WRITE_REG(hw, E1000_EECD, eecd);
8974 + E1000_WRITE_FLUSH(hw);
8975 + usec_delay(nvm->delay_usec);
8976 + eecd &= ~E1000_EECD_CS;
8977 + E1000_WRITE_REG(hw, E1000_EECD, eecd);
8978 + E1000_WRITE_FLUSH(hw);
8979 + usec_delay(nvm->delay_usec);
8984 + * e1000_stop_nvm - Terminate EEPROM command
8985 + * @hw: pointer to the HW structure
8987 + * Terminates the current command by inverting the EEPROM's chip select pin.
8989 +static void e1000_stop_nvm(struct e1000_hw *hw)
8993 + DEBUGFUNC("e1000_stop_nvm");
8995 + eecd = E1000_READ_REG(hw, E1000_EECD);
8996 + if (hw->nvm.type == e1000_nvm_eeprom_spi) {
8997 + /* Pull CS high */
8998 + eecd |= E1000_EECD_CS;
8999 + e1000_lower_eec_clk(hw, &eecd);
9004 + * e1000_release_nvm_generic - Release exclusive access to EEPROM
9005 + * @hw: pointer to the HW structure
9007 + * Stop any current commands to the EEPROM and clear the EEPROM request bit.
9009 +void e1000_release_nvm_generic(struct e1000_hw *hw)
9013 + DEBUGFUNC("e1000_release_nvm_generic");
9015 + e1000_stop_nvm(hw);
9017 + eecd = E1000_READ_REG(hw, E1000_EECD);
9018 + eecd &= ~E1000_EECD_REQ;
9019 + E1000_WRITE_REG(hw, E1000_EECD, eecd);
9023 + * e1000_ready_nvm_eeprom - Prepares EEPROM for read/write
9024 + * @hw: pointer to the HW structure
9026 + * Sets up the EEPROM for reading and writing.
9028 +static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
9030 + struct e1000_nvm_info *nvm = &hw->nvm;
9031 + u32 eecd = E1000_READ_REG(hw, E1000_EECD);
9032 + s32 ret_val = E1000_SUCCESS;
9036 + DEBUGFUNC("e1000_ready_nvm_eeprom");
9038 + if (nvm->type == e1000_nvm_eeprom_spi) {
9039 + /* Clear SK and CS */
9040 + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
9041 + E1000_WRITE_REG(hw, E1000_EECD, eecd);
9043 + timeout = NVM_MAX_RETRY_SPI;
9046 + * Read "Status Register" repeatedly until the LSB is cleared.
9047 + * The EEPROM will signal that the command has been completed
9048 + * by clearing bit 0 of the internal status register. If it's
9049 + * not cleared within 'timeout', then error out.
9052 + e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
9053 + hw->nvm.opcode_bits);
9054 + spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8);
9055 + if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
9059 + e1000_standby_nvm(hw);
9064 + DEBUGOUT("SPI NVM Status error\n");
9065 + ret_val = -E1000_ERR_NVM;
9075 + * e1000_read_nvm_eerd - Reads EEPROM using EERD register
9076 + * @hw: pointer to the HW structure
9077 + * @offset: offset of word in the EEPROM to read
9078 + * @words: number of words to read
9079 + * @data: word read from the EEPROM
9081 + * Reads a 16 bit word from the EEPROM using the EERD register.
9083 +s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
9085 + struct e1000_nvm_info *nvm = &hw->nvm;
9087 + s32 ret_val = E1000_SUCCESS;
9089 + DEBUGFUNC("e1000_read_nvm_eerd");
9092 + * A check for invalid values: offset too large, too many words,
9093 + * too many words for the offset, and not enough words.
9095 + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
9097 + DEBUGOUT("nvm parameter(s) out of bounds\n");
9098 + ret_val = -E1000_ERR_NVM;
9102 + for (i = 0; i < words; i++) {
9103 + eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) +
9104 + E1000_NVM_RW_REG_START;
9106 + E1000_WRITE_REG(hw, E1000_EERD, eerd);
9107 + ret_val = e1000_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
9111 + data[i] = (E1000_READ_REG(hw, E1000_EERD) >>
9112 + E1000_NVM_RW_REG_DATA);
9120 + * e1000_write_nvm_spi - Write to EEPROM using SPI
9121 + * @hw: pointer to the HW structure
9122 + * @offset: offset within the EEPROM to be written to
9123 + * @words: number of words to write
9124 + * @data: 16 bit word(s) to be written to the EEPROM
9126 + * Writes data to EEPROM at offset using SPI interface.
9128 + * If e1000_update_nvm_checksum is not called after this function, the
9129 + * EEPROM will most likely contain an invalid checksum.
9131 +s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
9133 + struct e1000_nvm_info *nvm = &hw->nvm;
9137 + DEBUGFUNC("e1000_write_nvm_spi");
9140 + * A check for invalid values: offset too large, too many words,
9141 + * and not enough words.
9143 + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
9145 + DEBUGOUT("nvm parameter(s) out of bounds\n");
9146 + ret_val = -E1000_ERR_NVM;
9150 + ret_val = nvm->ops.acquire(hw);
9154 + while (widx < words) {
9155 + u8 write_opcode = NVM_WRITE_OPCODE_SPI;
9157 + ret_val = e1000_ready_nvm_eeprom(hw);
9161 + e1000_standby_nvm(hw);
9163 + /* Send the WRITE ENABLE command (8 bit opcode) */
9164 + e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
9165 + nvm->opcode_bits);
9167 + e1000_standby_nvm(hw);
9170 + * Some SPI eeproms use the 8th address bit embedded in the
9173 + if ((nvm->address_bits == 8) && (offset >= 128))
9174 + write_opcode |= NVM_A8_OPCODE_SPI;
9176 + /* Send the Write command (8-bit opcode + addr) */
9177 + e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
9178 + e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
9179 + nvm->address_bits);
9181 + /* Loop to allow for up to whole page write of eeprom */
9182 + while (widx < words) {
9183 + u16 word_out = data[widx];
9184 + word_out = (word_out >> 8) | (word_out << 8);
9185 + e1000_shift_out_eec_bits(hw, word_out, 16);
9188 + if ((((offset + widx) * 2) % nvm->page_size) == 0) {
9189 + e1000_standby_nvm(hw);
9197 + nvm->ops.release(hw);
9204 + * e1000_read_pba_num_generic - Read device part number
9205 + * @hw: pointer to the HW structure
9206 + * @pba_num: pointer to device part number
9208 + * Reads the product board assembly (PBA) number from the EEPROM and stores
9209 + * the value in pba_num.
9211 +s32 e1000_read_pba_num_generic(struct e1000_hw *hw, u32 *pba_num)
9216 + DEBUGFUNC("e1000_read_pba_num_generic");
9218 + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
9220 + DEBUGOUT("NVM Read Error\n");
9223 + *pba_num = (u32)(nvm_data << 16);
9225 + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &nvm_data);
9227 + DEBUGOUT("NVM Read Error\n");
9230 + *pba_num |= nvm_data;
9237 + * e1000_read_mac_addr_generic - Read device MAC address
9238 + * @hw: pointer to the HW structure
9240 + * Reads the device MAC address from the EEPROM and stores the value.
9241 + * Since devices with two ports use the same EEPROM, we increment the
9242 + * last bit in the MAC address for the second port.
9244 +s32 e1000_read_mac_addr_generic(struct e1000_hw *hw)
9250 + rar_high = E1000_READ_REG(hw, E1000_RAH(0));
9251 + rar_low = E1000_READ_REG(hw, E1000_RAL(0));
9253 + for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++)
9254 + hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8));
9256 + for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++)
9257 + hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8));
9259 + for (i = 0; i < ETH_ADDR_LEN; i++)
9260 + hw->mac.addr[i] = hw->mac.perm_addr[i];
9262 + return E1000_SUCCESS;
9266 + * e1000_validate_nvm_checksum_generic - Validate EEPROM checksum
9267 + * @hw: pointer to the HW structure
9269 + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
9270 + * and then verifies that the sum of the EEPROM is equal to 0xBABA.
9272 +s32 e1000_validate_nvm_checksum_generic(struct e1000_hw *hw)
9274 + s32 ret_val = E1000_SUCCESS;
9278 + DEBUGFUNC("e1000_validate_nvm_checksum_generic");
9280 + for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
9281 + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
9283 + DEBUGOUT("NVM Read Error\n");
9286 + checksum += nvm_data;
9289 + if (checksum != (u16) NVM_SUM) {
9290 + DEBUGOUT("NVM Checksum Invalid\n");
9291 + ret_val = -E1000_ERR_NVM;
9300 + * e1000_update_nvm_checksum_generic - Update EEPROM checksum
9301 + * @hw: pointer to the HW structure
9303 + * Updates the EEPROM checksum by reading/adding each word of the EEPROM
9304 + * up to the checksum. Then calculates the EEPROM checksum and writes the
9305 + * value to the EEPROM.
9307 +s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw)
9313 + DEBUGFUNC("e1000_update_nvm_checksum");
9315 + for (i = 0; i < NVM_CHECKSUM_REG; i++) {
9316 + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
9318 + DEBUGOUT("NVM Read Error while updating checksum.\n");
9321 + checksum += nvm_data;
9323 + checksum = (u16) NVM_SUM - checksum;
9324 + ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum);
9326 + DEBUGOUT("NVM Write Error while updating checksum.\n");
9333 + * e1000_reload_nvm_generic - Reloads EEPROM
9334 + * @hw: pointer to the HW structure
9336 + * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
9337 + * extended control register.
9339 +static void e1000_reload_nvm_generic(struct e1000_hw *hw)
9343 + DEBUGFUNC("e1000_reload_nvm_generic");
9346 + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
9347 + ctrl_ext |= E1000_CTRL_EXT_EE_RST;
9348 + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
9349 + E1000_WRITE_FLUSH(hw);
9352 Index: linux-2.6.22/drivers/net/igb/e1000_nvm.h
9353 ===================================================================
9354 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
9355 +++ linux-2.6.22/drivers/net/igb/e1000_nvm.h 2009-12-18 12:39:22.000000000 -0500
9357 +/*******************************************************************************
9359 + Intel(R) Gigabit Ethernet Linux driver
9360 + Copyright(c) 2007-2009 Intel Corporation.
9362 + This program is free software; you can redistribute it and/or modify it
9363 + under the terms and conditions of the GNU General Public License,
9364 + version 2, as published by the Free Software Foundation.
9366 + This program is distributed in the hope it will be useful, but WITHOUT
9367 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
9368 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
9371 + You should have received a copy of the GNU General Public License along with
9372 + this program; if not, write to the Free Software Foundation, Inc.,
9373 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
9375 + The full GNU General Public License is included in this distribution in
9376 + the file called "COPYING".
9378 + Contact Information:
9379 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
9380 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
9382 +*******************************************************************************/
9384 +#ifndef _E1000_NVM_H_
9385 +#define _E1000_NVM_H_
9387 +void e1000_init_nvm_ops_generic(struct e1000_hw *hw);
9388 +s32 e1000_acquire_nvm_generic(struct e1000_hw *hw);
9390 +s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg);
9391 +s32 e1000_read_mac_addr_generic(struct e1000_hw *hw);
9392 +s32 e1000_read_pba_num_generic(struct e1000_hw *hw, u32 *pba_num);
9393 +s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words,
9395 +s32 e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data);
9396 +s32 e1000_validate_nvm_checksum_generic(struct e1000_hw *hw);
9397 +s32 e1000_write_nvm_eewr(struct e1000_hw *hw, u16 offset,
9398 + u16 words, u16 *data);
9399 +s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words,
9401 +s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw);
9402 +void e1000_release_nvm_generic(struct e1000_hw *hw);
9404 +#define E1000_STM_OPCODE 0xDB00
9407 Index: linux-2.6.22/drivers/net/igb/e1000_osdep.h
9408 ===================================================================
9409 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
9410 +++ linux-2.6.22/drivers/net/igb/e1000_osdep.h 2009-12-18 12:39:22.000000000 -0500
9412 +/*******************************************************************************
9414 + Intel(R) Gigabit Ethernet Linux driver
9415 + Copyright(c) 2007-2009 Intel Corporation.
9417 + This program is free software; you can redistribute it and/or modify it
9418 + under the terms and conditions of the GNU General Public License,
9419 + version 2, as published by the Free Software Foundation.
9421 + This program is distributed in the hope it will be useful, but WITHOUT
9422 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
9423 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
9426 + You should have received a copy of the GNU General Public License along with
9427 + this program; if not, write to the Free Software Foundation, Inc.,
9428 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
9430 + The full GNU General Public License is included in this distribution in
9431 + the file called "COPYING".
9433 + Contact Information:
9434 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
9435 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
9437 +*******************************************************************************/
9440 +/* glue for the OS independent part of e1000
9441 + * includes register access macros
9444 +#ifndef _E1000_OSDEP_H_
9445 +#define _E1000_OSDEP_H_
9447 +#include <linux/pci.h>
9448 +#include <linux/delay.h>
9449 +#include <linux/interrupt.h>
9450 +#include <linux/if_ether.h>
9451 +#include <linux/sched.h>
9452 +#include "kcompat.h"
9454 +#define usec_delay(x) udelay(x)
9456 +#define msec_delay(x) do { \
9457 + /* Don't mdelay in interrupt context! */ \
9458 + if (in_interrupt()) \
9464 +/* Some workarounds require millisecond delays and are run during interrupt
9465 + * context. Most notably, when establishing link, the phy may need tweaking
9466 + * but cannot process phy register reads/writes faster than millisecond
9467 + * intervals...and we establish link due to a "link status change" interrupt.
9469 +#define msec_delay_irq(x) mdelay(x)
9472 +#define PCI_COMMAND_REGISTER PCI_COMMAND
9473 +#define CMD_MEM_WRT_INVALIDATE PCI_COMMAND_INVALIDATE
9474 +#define ETH_ADDR_LEN ETH_ALEN
9476 +#ifdef __BIG_ENDIAN
9477 +#define E1000_BIG_ENDIAN __BIG_ENDIAN
9481 +#define DEBUGOUT(S)
9482 +#define DEBUGOUT1(S, A...)
9484 +#define DEBUGFUNC(F) DEBUGOUT(F "\n")
9485 +#define DEBUGOUT2 DEBUGOUT1
9486 +#define DEBUGOUT3 DEBUGOUT2
9487 +#define DEBUGOUT7 DEBUGOUT3
9489 +#define E1000_REGISTER(a, reg) reg
9491 +#define E1000_WRITE_REG(a, reg, value) ( \
9492 + writel((value), ((a)->hw_addr + E1000_REGISTER(a, reg))))
9494 +#define E1000_READ_REG(a, reg) (readl((a)->hw_addr + E1000_REGISTER(a, reg)))
9496 +#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) ( \
9497 + writel((value), ((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 2))))
9499 +#define E1000_READ_REG_ARRAY(a, reg, offset) ( \
9500 + readl((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 2)))
9502 +#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY
9503 +#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY
9505 +#define E1000_WRITE_REG_ARRAY_WORD(a, reg, offset, value) ( \
9506 + writew((value), ((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 1))))
9508 +#define E1000_READ_REG_ARRAY_WORD(a, reg, offset) ( \
9509 + readw((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 1)))
9511 +#define E1000_WRITE_REG_ARRAY_BYTE(a, reg, offset, value) ( \
9512 + writeb((value), ((a)->hw_addr + E1000_REGISTER(a, reg) + (offset))))
9514 +#define E1000_READ_REG_ARRAY_BYTE(a, reg, offset) ( \
9515 + readb((a)->hw_addr + E1000_REGISTER(a, reg) + (offset)))
9517 +#define E1000_WRITE_REG_IO(a, reg, offset) do { \
9518 + outl(reg, ((a)->io_base)); \
9519 + outl(offset, ((a)->io_base + 4)); } while (0)
9521 +#define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, E1000_STATUS)
9523 +#define E1000_WRITE_FLASH_REG(a, reg, value) ( \
9524 + writel((value), ((a)->flash_address + reg)))
9526 +#define E1000_WRITE_FLASH_REG16(a, reg, value) ( \
9527 + writew((value), ((a)->flash_address + reg)))
9529 +#define E1000_READ_FLASH_REG(a, reg) (readl((a)->flash_address + reg))
9531 +#define E1000_READ_FLASH_REG16(a, reg) (readw((a)->flash_address + reg))
9533 +#endif /* _E1000_OSDEP_H_ */
9534 Index: linux-2.6.22/drivers/net/igb/e1000_phy.c
9535 ===================================================================
9536 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
9537 +++ linux-2.6.22/drivers/net/igb/e1000_phy.c 2009-12-18 12:39:22.000000000 -0500
9539 +/*******************************************************************************
9541 + Intel(R) Gigabit Ethernet Linux driver
9542 + Copyright(c) 2007-2009 Intel Corporation.
9544 + This program is free software; you can redistribute it and/or modify it
9545 + under the terms and conditions of the GNU General Public License,
9546 + version 2, as published by the Free Software Foundation.
9548 + This program is distributed in the hope it will be useful, but WITHOUT
9549 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
9550 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
9553 + You should have received a copy of the GNU General Public License along with
9554 + this program; if not, write to the Free Software Foundation, Inc.,
9555 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
9557 + The full GNU General Public License is included in this distribution in
9558 + the file called "COPYING".
9560 + Contact Information:
9561 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
9562 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
9564 +*******************************************************************************/
9566 +#include "e1000_api.h"
9568 +static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw);
9569 +/* Cable length tables */
9570 +static const u16 e1000_m88_cable_length_table[] =
9571 + { 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
9572 +#define M88E1000_CABLE_LENGTH_TABLE_SIZE \
9573 + (sizeof(e1000_m88_cable_length_table) / \
9574 + sizeof(e1000_m88_cable_length_table[0]))
9576 +static const u16 e1000_igp_2_cable_length_table[] =
9577 + { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21,
9578 + 0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41,
9579 + 6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61,
9580 + 21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82,
9581 + 40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104,
9582 + 60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121,
9583 + 83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124,
9584 + 104, 109, 114, 118, 121, 124};
9585 +#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
9586 + (sizeof(e1000_igp_2_cable_length_table) / \
9587 + sizeof(e1000_igp_2_cable_length_table[0]))
9590 + * e1000_check_reset_block_generic - Check if PHY reset is blocked
9591 + * @hw: pointer to the HW structure
9593 + * Read the PHY management control register and check whether a PHY reset
9594 + * is blocked. If a reset is not blocked return E1000_SUCCESS, otherwise
9595 + * return E1000_BLK_PHY_RESET (12).
9597 +s32 e1000_check_reset_block_generic(struct e1000_hw *hw)
9601 + DEBUGFUNC("e1000_check_reset_block");
9603 + manc = E1000_READ_REG(hw, E1000_MANC);
9605 + return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
9606 + E1000_BLK_PHY_RESET : E1000_SUCCESS;
9610 + * e1000_get_phy_id - Retrieve the PHY ID and revision
9611 + * @hw: pointer to the HW structure
9613 + * Reads the PHY registers and stores the PHY ID and possibly the PHY
9614 + * revision in the hardware structure.
9616 +s32 e1000_get_phy_id(struct e1000_hw *hw)
9618 + struct e1000_phy_info *phy = &hw->phy;
9619 + s32 ret_val = E1000_SUCCESS;
9622 + DEBUGFUNC("e1000_get_phy_id");
9624 + if (!(phy->ops.read_reg))
9627 + ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id);
9631 + phy->id = (u32)(phy_id << 16);
9633 + ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id);
9637 + phy->id |= (u32)(phy_id & PHY_REVISION_MASK);
9638 + phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
9645 + * e1000_phy_reset_dsp_generic - Reset PHY DSP
9646 + * @hw: pointer to the HW structure
9648 + * Reset the digital signal processor.
9650 +s32 e1000_phy_reset_dsp_generic(struct e1000_hw *hw)
9652 + s32 ret_val = E1000_SUCCESS;
9654 + DEBUGFUNC("e1000_phy_reset_dsp_generic");
9656 + if (!(hw->phy.ops.write_reg))
9659 + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1);
9663 + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0);
9670 + * e1000_read_phy_reg_mdic - Read MDI control register
9671 + * @hw: pointer to the HW structure
9672 + * @offset: register offset to be read
9673 + * @data: pointer to the read data
9675 + * Reads the MDI control register in the PHY at offset and stores the
9676 + * information read to data.
9678 +s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
9680 + struct e1000_phy_info *phy = &hw->phy;
9682 + s32 ret_val = E1000_SUCCESS;
9684 + DEBUGFUNC("e1000_read_phy_reg_mdic");
9687 + * Set up Op-code, Phy Address, and register offset in the MDI
9688 + * Control register. The MAC will take care of interfacing with the
9689 + * PHY to retrieve the desired data.
9691 + mdic = ((offset << E1000_MDIC_REG_SHIFT) |
9692 + (phy->addr << E1000_MDIC_PHY_SHIFT) |
9693 + (E1000_MDIC_OP_READ));
9695 + E1000_WRITE_REG(hw, E1000_MDIC, mdic);
9698 + * Poll the ready bit to see if the MDI read completed
9699 + * Increasing the time out as testing showed failures with
9700 + * the lower time out
9702 + for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
9704 + mdic = E1000_READ_REG(hw, E1000_MDIC);
9705 + if (mdic & E1000_MDIC_READY)
9708 + if (!(mdic & E1000_MDIC_READY)) {
9709 + DEBUGOUT("MDI Read did not complete\n");
9710 + ret_val = -E1000_ERR_PHY;
9713 + if (mdic & E1000_MDIC_ERROR) {
9714 + DEBUGOUT("MDI Error\n");
9715 + ret_val = -E1000_ERR_PHY;
9718 + *data = (u16) mdic;
9725 + * e1000_write_phy_reg_mdic - Write MDI control register
9726 + * @hw: pointer to the HW structure
9727 + * @offset: register offset to write to
9728 + * @data: data to write to register at offset
9730 + * Writes data to MDI control register in the PHY at offset.
9732 +s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
9734 + struct e1000_phy_info *phy = &hw->phy;
9736 + s32 ret_val = E1000_SUCCESS;
9738 + DEBUGFUNC("e1000_write_phy_reg_mdic");
9741 + * Set up Op-code, Phy Address, and register offset in the MDI
9742 + * Control register. The MAC will take care of interfacing with the
9743 + * PHY to retrieve the desired data.
9745 + mdic = (((u32)data) |
9746 + (offset << E1000_MDIC_REG_SHIFT) |
9747 + (phy->addr << E1000_MDIC_PHY_SHIFT) |
9748 + (E1000_MDIC_OP_WRITE));
9750 + E1000_WRITE_REG(hw, E1000_MDIC, mdic);
9753 + * Poll the ready bit to see if the MDI read completed
9754 + * Increasing the time out as testing showed failures with
9755 + * the lower time out
9757 + for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
9759 + mdic = E1000_READ_REG(hw, E1000_MDIC);
9760 + if (mdic & E1000_MDIC_READY)
9763 + if (!(mdic & E1000_MDIC_READY)) {
9764 + DEBUGOUT("MDI Write did not complete\n");
9765 + ret_val = -E1000_ERR_PHY;
9768 + if (mdic & E1000_MDIC_ERROR) {
9769 + DEBUGOUT("MDI Error\n");
9770 + ret_val = -E1000_ERR_PHY;
9779 + * e1000_read_phy_reg_i2c - Read PHY register using i2c
9780 + * @hw: pointer to the HW structure
9781 + * @offset: register offset to be read
9782 + * @data: pointer to the read data
9784 + * Reads the PHY register at offset using the i2c interface and stores the
9785 + * retrieved information in data.
9787 +s32 e1000_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data)
9789 + struct e1000_phy_info *phy = &hw->phy;
9790 + u32 i, i2ccmd = 0;
9792 + DEBUGFUNC("e1000_read_phy_reg_i2c");
9795 + * Set up Op-code, Phy Address, and register address in the I2CCMD
9796 + * register. The MAC will take care of interfacing with the
9797 + * PHY to retrieve the desired data.
9799 + i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
9800 + (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
9801 + (E1000_I2CCMD_OPCODE_READ));
9803 + E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
9805 + /* Poll the ready bit to see if the I2C read completed */
9806 + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
9808 + i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD);
9809 + if (i2ccmd & E1000_I2CCMD_READY)
9812 + if (!(i2ccmd & E1000_I2CCMD_READY)) {
9813 + DEBUGOUT("I2CCMD Read did not complete\n");
9814 + return -E1000_ERR_PHY;
9816 + if (i2ccmd & E1000_I2CCMD_ERROR) {
9817 + DEBUGOUT("I2CCMD Error bit set\n");
9818 + return -E1000_ERR_PHY;
9821 + /* Need to byte-swap the 16-bit value. */
9822 + *data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00);
9824 + return E1000_SUCCESS;
9828 + * e1000_write_phy_reg_i2c - Write PHY register using i2c
9829 + * @hw: pointer to the HW structure
9830 + * @offset: register offset to write to
9831 + * @data: data to write at register offset
9833 + * Writes the data to PHY register at the offset using the i2c interface.
9835 +s32 e1000_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data)
9837 + struct e1000_phy_info *phy = &hw->phy;
9838 + u32 i, i2ccmd = 0;
9839 + u16 phy_data_swapped;
9841 + DEBUGFUNC("e1000_write_phy_reg_i2c");
9843 + /* Swap the data bytes for the I2C interface */
9844 + phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00);
9847 + * Set up Op-code, Phy Address, and register address in the I2CCMD
9848 + * register. The MAC will take care of interfacing with the
9849 + * PHY to retrieve the desired data.
9851 + i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
9852 + (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
9853 + E1000_I2CCMD_OPCODE_WRITE |
9854 + phy_data_swapped);
9856 + E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
9858 + /* Poll the ready bit to see if the I2C read completed */
9859 + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
9861 + i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD);
9862 + if (i2ccmd & E1000_I2CCMD_READY)
9865 + if (!(i2ccmd & E1000_I2CCMD_READY)) {
9866 + DEBUGOUT("I2CCMD Write did not complete\n");
9867 + return -E1000_ERR_PHY;
9869 + if (i2ccmd & E1000_I2CCMD_ERROR) {
9870 + DEBUGOUT("I2CCMD Error bit set\n");
9871 + return -E1000_ERR_PHY;
9874 + return E1000_SUCCESS;
9878 + * e1000_read_phy_reg_m88 - Read m88 PHY register
9879 + * @hw: pointer to the HW structure
9880 + * @offset: register offset to be read
9881 + * @data: pointer to the read data
9883 + * Acquires semaphore, if necessary, then reads the PHY register at offset
9884 + * and storing the retrieved information in data. Release any acquired
9885 + * semaphores before exiting.
9887 +s32 e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data)
9889 + s32 ret_val = E1000_SUCCESS;
9891 + DEBUGFUNC("e1000_read_phy_reg_m88");
9893 + if (!(hw->phy.ops.acquire))
9896 + ret_val = hw->phy.ops.acquire(hw);
9900 + ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
9903 + hw->phy.ops.release(hw);
9910 + * e1000_write_phy_reg_m88 - Write m88 PHY register
9911 + * @hw: pointer to the HW structure
9912 + * @offset: register offset to write to
9913 + * @data: data to write at register offset
9915 + * Acquires semaphore, if necessary, then writes the data to PHY register
9916 + * at the offset. Release any acquired semaphores before exiting.
9918 +s32 e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data)
9920 + s32 ret_val = E1000_SUCCESS;
9922 + DEBUGFUNC("e1000_write_phy_reg_m88");
9924 + if (!(hw->phy.ops.acquire))
9927 + ret_val = hw->phy.ops.acquire(hw);
9931 + ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
9934 + hw->phy.ops.release(hw);
9941 + * __e1000_read_phy_reg_igp - Read igp PHY register
9942 + * @hw: pointer to the HW structure
9943 + * @offset: register offset to be read
9944 + * @data: pointer to the read data
9945 + * @locked: semaphore has already been acquired or not
9947 + * Acquires semaphore, if necessary, then reads the PHY register at offset
9948 + * and stores the retrieved information in data. Release any acquired
9949 + * semaphores before exiting.
9951 +static s32 __e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data,
9954 + s32 ret_val = E1000_SUCCESS;
9956 + DEBUGFUNC("__e1000_read_phy_reg_igp");
9959 + if (!(hw->phy.ops.acquire))
9962 + ret_val = hw->phy.ops.acquire(hw);
9967 + if (offset > MAX_PHY_MULTI_PAGE_REG) {
9968 + ret_val = e1000_write_phy_reg_mdic(hw,
9969 + IGP01E1000_PHY_PAGE_SELECT,
9975 + ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
9980 + hw->phy.ops.release(hw);
9985 + * e1000_read_phy_reg_igp - Read igp PHY register
9986 + * @hw: pointer to the HW structure
9987 + * @offset: register offset to be read
9988 + * @data: pointer to the read data
9990 + * Acquires semaphore then reads the PHY register at offset and stores the
9991 + * retrieved information in data.
9992 + * Release the acquired semaphore before exiting.
9994 +s32 e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
9996 + return __e1000_read_phy_reg_igp(hw, offset, data, false);
10000 + * e1000_read_phy_reg_igp_locked - Read igp PHY register
10001 + * @hw: pointer to the HW structure
10002 + * @offset: register offset to be read
10003 + * @data: pointer to the read data
10005 + * Reads the PHY register at offset and stores the retrieved information
10006 + * in data. Assumes semaphore already acquired.
10008 +s32 e1000_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data)
10010 + return __e1000_read_phy_reg_igp(hw, offset, data, true);
10014 + * e1000_write_phy_reg_igp - Write igp PHY register
10015 + * @hw: pointer to the HW structure
10016 + * @offset: register offset to write to
10017 + * @data: data to write at register offset
10018 + * @locked: semaphore has already been acquired or not
10020 + * Acquires semaphore, if necessary, then writes the data to PHY register
10021 + * at the offset. Release any acquired semaphores before exiting.
10023 +static s32 __e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data,
10026 + s32 ret_val = E1000_SUCCESS;
10028 + DEBUGFUNC("e1000_write_phy_reg_igp");
10031 + if (!(hw->phy.ops.acquire))
10034 + ret_val = hw->phy.ops.acquire(hw);
10039 + if (offset > MAX_PHY_MULTI_PAGE_REG) {
10040 + ret_val = e1000_write_phy_reg_mdic(hw,
10041 + IGP01E1000_PHY_PAGE_SELECT,
10047 + ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
10052 + hw->phy.ops.release(hw);
10059 + * e1000_write_phy_reg_igp - Write igp PHY register
10060 + * @hw: pointer to the HW structure
10061 + * @offset: register offset to write to
10062 + * @data: data to write at register offset
10064 + * Acquires semaphore then writes the data to PHY register
10065 + * at the offset. Release any acquired semaphores before exiting.
10067 +s32 e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
10069 + return __e1000_write_phy_reg_igp(hw, offset, data, false);
10073 + * e1000_write_phy_reg_igp_locked - Write igp PHY register
10074 + * @hw: pointer to the HW structure
10075 + * @offset: register offset to write to
10076 + * @data: data to write at register offset
10078 + * Writes the data to PHY register at the offset.
10079 + * Assumes semaphore already acquired.
10081 +s32 e1000_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data)
10083 + return __e1000_write_phy_reg_igp(hw, offset, data, true);
10087 + * __e1000_read_kmrn_reg - Read kumeran register
10088 + * @hw: pointer to the HW structure
10089 + * @offset: register offset to be read
10090 + * @data: pointer to the read data
10091 + * @locked: semaphore has already been acquired or not
10093 + * Acquires semaphore, if necessary. Then reads the PHY register at offset
10094 + * using the kumeran interface. The information retrieved is stored in data.
10095 + * Release any acquired semaphores before exiting.
10097 +static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data,
10101 + s32 ret_val = E1000_SUCCESS;
10103 + DEBUGFUNC("__e1000_read_kmrn_reg");
10106 + if (!(hw->phy.ops.acquire))
10109 + ret_val = hw->phy.ops.acquire(hw);
10114 + kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
10115 + E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
10116 + E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta);
10120 + kmrnctrlsta = E1000_READ_REG(hw, E1000_KMRNCTRLSTA);
10121 + *data = (u16)kmrnctrlsta;
10124 + hw->phy.ops.release(hw);
10131 + * e1000_read_kmrn_reg_generic - Read kumeran register
10132 + * @hw: pointer to the HW structure
10133 + * @offset: register offset to be read
10134 + * @data: pointer to the read data
10136 + * Acquires semaphore then reads the PHY register at offset using the
10137 + * kumeran interface. The information retrieved is stored in data.
10138 + * Release the acquired semaphore before exiting.
10140 +s32 e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data)
10142 + return __e1000_read_kmrn_reg(hw, offset, data, false);
10146 + * e1000_read_kmrn_reg_locked - Read kumeran register
10147 + * @hw: pointer to the HW structure
10148 + * @offset: register offset to be read
10149 + * @data: pointer to the read data
10151 + * Reads the PHY register at offset using the kumeran interface. The
10152 + * information retrieved is stored in data.
10153 + * Assumes semaphore already acquired.
10155 +s32 e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data)
10157 + return __e1000_read_kmrn_reg(hw, offset, data, true);
10161 + * __e1000_write_kmrn_reg - Write kumeran register
10162 + * @hw: pointer to the HW structure
10163 + * @offset: register offset to write to
10164 + * @data: data to write at register offset
10165 + * @locked: semaphore has already been acquired or not
10167 + * Acquires semaphore, if necessary. Then write the data to PHY register
10168 + * at the offset using the kumeran interface. Release any acquired semaphores
10169 + * before exiting.
10171 +static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data,
10175 + s32 ret_val = E1000_SUCCESS;
10177 + DEBUGFUNC("e1000_write_kmrn_reg_generic");
10180 + if (!(hw->phy.ops.acquire))
10183 + ret_val = hw->phy.ops.acquire(hw);
10188 + kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
10189 + E1000_KMRNCTRLSTA_OFFSET) | data;
10190 + E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta);
10195 + hw->phy.ops.release(hw);
10202 + * e1000_write_kmrn_reg_generic - Write kumeran register
10203 + * @hw: pointer to the HW structure
10204 + * @offset: register offset to write to
10205 + * @data: data to write at register offset
10207 + * Acquires semaphore then writes the data to the PHY register at the offset
10208 + * using the kumeran interface. Release the acquired semaphore before exiting.
10210 +s32 e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data)
10212 + return __e1000_write_kmrn_reg(hw, offset, data, false);
10216 + * e1000_write_kmrn_reg_locked - Write kumeran register
10217 + * @hw: pointer to the HW structure
10218 + * @offset: register offset to write to
10219 + * @data: data to write at register offset
10221 + * Write the data to PHY register at the offset using the kumeran interface.
10222 + * Assumes semaphore already acquired.
10224 +s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data)
10226 + return __e1000_write_kmrn_reg(hw, offset, data, true);
10230 + * e1000_copper_link_setup_m88 - Setup m88 PHY's for copper link
10231 + * @hw: pointer to the HW structure
10233 + * Sets up MDI/MDI-X and polarity for m88 PHY's. If necessary, transmit clock
10234 + * and downshift values are set also.
10236 +s32 e1000_copper_link_setup_m88(struct e1000_hw *hw)
10238 + struct e1000_phy_info *phy = &hw->phy;
10242 + DEBUGFUNC("e1000_copper_link_setup_m88");
10244 + if (phy->reset_disable) {
10245 + ret_val = E1000_SUCCESS;
10249 + /* Enable CRS on TX. This must be set for half-duplex operation. */
10250 + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
10254 + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
10258 + * MDI/MDI-X = 0 (default)
10259 + * 0 - Auto for all speeds
10262 + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
10264 + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
10266 + switch (phy->mdix) {
10268 + phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
10271 + phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
10274 + phy_data |= M88E1000_PSCR_AUTO_X_1000T;
10278 + phy_data |= M88E1000_PSCR_AUTO_X_MODE;
10284 + * disable_polarity_correction = 0 (default)
10285 + * Automatic Correction for Reversed Cable Polarity
10289 + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
10290 + if (phy->disable_polarity_correction == 1)
10291 + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
10293 + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
10297 + if (phy->revision < E1000_REVISION_4) {
10299 + * Force TX_CLK in the Extended PHY Specific Control Register
10300 + * to 25MHz clock.
10302 + ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
10307 + phy_data |= M88E1000_EPSCR_TX_CLK_25;
10309 + if ((phy->revision == E1000_REVISION_2) &&
10310 + (phy->id == M88E1111_I_PHY_ID)) {
10311 + /* 82573L PHY - set the downshift counter to 5x. */
10312 + phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK;
10313 + phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X;
10315 + /* Configure Master and Slave downshift values */
10316 + phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK |
10317 + M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK);
10318 + phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
10319 + M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
10321 + ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
10327 + /* Commit the changes. */
10328 + ret_val = phy->ops.commit(hw);
10330 + DEBUGOUT("Error committing the PHY changes\n");
10339 + * e1000_copper_link_setup_igp - Setup igp PHY's for copper link
10340 + * @hw: pointer to the HW structure
10342 + * Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for
10345 +s32 e1000_copper_link_setup_igp(struct e1000_hw *hw)
10347 + struct e1000_phy_info *phy = &hw->phy;
10351 + DEBUGFUNC("e1000_copper_link_setup_igp");
10353 + if (phy->reset_disable) {
10354 + ret_val = E1000_SUCCESS;
10358 + ret_val = hw->phy.ops.reset(hw);
10360 + DEBUGOUT("Error resetting the PHY.\n");
10365 + * Wait 100ms for MAC to configure PHY from NVM settings, to avoid
10366 + * timeout issues when LFS is enabled.
10371 + * The NVM settings will configure LPLU in D3 for
10374 + if (phy->type == e1000_phy_igp) {
10375 + /* disable lplu d3 during driver init */
10376 + ret_val = hw->phy.ops.set_d3_lplu_state(hw, false);
10378 + DEBUGOUT("Error Disabling LPLU D3\n");
10383 + /* disable lplu d0 during driver init */
10384 + if (hw->phy.ops.set_d0_lplu_state) {
10385 + ret_val = hw->phy.ops.set_d0_lplu_state(hw, false);
10387 + DEBUGOUT("Error Disabling LPLU D0\n");
10391 + /* Configure mdi-mdix settings */
10392 + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &data);
10396 + data &= ~IGP01E1000_PSCR_AUTO_MDIX;
10398 + switch (phy->mdix) {
10400 + data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
10403 + data |= IGP01E1000_PSCR_FORCE_MDI_MDIX;
10407 + data |= IGP01E1000_PSCR_AUTO_MDIX;
10410 + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, data);
10414 + /* set auto-master slave resolution settings */
10415 + if (hw->mac.autoneg) {
10417 + * when autonegotiation advertisement is only 1000Mbps then we
10418 + * should disable SmartSpeed and enable Auto MasterSlave
10419 + * resolution as hardware default.
10421 + if (phy->autoneg_advertised == ADVERTISE_1000_FULL) {
10422 + /* Disable SmartSpeed */
10423 + ret_val = phy->ops.read_reg(hw,
10424 + IGP01E1000_PHY_PORT_CONFIG,
10429 + data &= ~IGP01E1000_PSCFR_SMART_SPEED;
10430 + ret_val = phy->ops.write_reg(hw,
10431 + IGP01E1000_PHY_PORT_CONFIG,
10436 + /* Set auto Master/Slave resolution process */
10437 + ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data);
10441 + data &= ~CR_1000T_MS_ENABLE;
10442 + ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data);
10447 + ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data);
10451 + /* load defaults for future use */
10452 + phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ?
10453 + ((data & CR_1000T_MS_VALUE) ?
10454 + e1000_ms_force_master :
10455 + e1000_ms_force_slave) :
10458 + switch (phy->ms_type) {
10459 + case e1000_ms_force_master:
10460 + data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
10462 + case e1000_ms_force_slave:
10463 + data |= CR_1000T_MS_ENABLE;
10464 + data &= ~(CR_1000T_MS_VALUE);
10466 + case e1000_ms_auto:
10467 + data &= ~CR_1000T_MS_ENABLE;
10471 + ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data);
10481 + * e1000_copper_link_autoneg - Setup/Enable autoneg for copper link
10482 + * @hw: pointer to the HW structure
10484 + * Performs initial bounds checking on autoneg advertisement parameter, then
10485 + * configure to advertise the full capability. Setup the PHY to autoneg
10486 + * and restart the negotiation process between the link partner. If
10487 + * autoneg_wait_to_complete, then wait for autoneg to complete before exiting.
10489 +s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
10491 + struct e1000_phy_info *phy = &hw->phy;
10495 + DEBUGFUNC("e1000_copper_link_autoneg");
10498 + * Perform some bounds checking on the autoneg advertisement
10501 + phy->autoneg_advertised &= phy->autoneg_mask;
10504 + * If autoneg_advertised is zero, we assume it was not defaulted
10505 + * by the calling code so we set to advertise full capability.
10507 + if (phy->autoneg_advertised == 0)
10508 + phy->autoneg_advertised = phy->autoneg_mask;
10510 + DEBUGOUT("Reconfiguring auto-neg advertisement params\n");
10511 + ret_val = e1000_phy_setup_autoneg(hw);
10513 + DEBUGOUT("Error Setting up Auto-Negotiation\n");
10516 + DEBUGOUT("Restarting Auto-Neg\n");
10519 + * Restart auto-negotiation by setting the Auto Neg Enable bit and
10520 + * the Auto Neg Restart bit in the PHY control register.
10522 + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
10526 + phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
10527 + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
10532 + * Does the user want to wait for Auto-Neg to complete here, or
10533 + * check at a later time (for example, callback routine).
10535 + if (phy->autoneg_wait_to_complete) {
10536 + ret_val = hw->mac.ops.wait_autoneg(hw);
10538 + DEBUGOUT("Error while waiting for "
10539 + "autoneg to complete\n");
10544 + hw->mac.get_link_status = true;
10551 + * e1000_phy_setup_autoneg - Configure PHY for auto-negotiation
10552 + * @hw: pointer to the HW structure
10554 + * Reads the MII auto-neg advertisement register and/or the 1000T control
10555 + * register and if the PHY is already setup for auto-negotiation, then
10556 + * return successful. Otherwise, setup advertisement and flow control to
10557 + * the appropriate values for the wanted auto-negotiation.
10559 +static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
10561 + struct e1000_phy_info *phy = &hw->phy;
10563 + u16 mii_autoneg_adv_reg;
10564 + u16 mii_1000t_ctrl_reg = 0;
10566 + DEBUGFUNC("e1000_phy_setup_autoneg");
10568 + phy->autoneg_advertised &= phy->autoneg_mask;
10570 + /* Read the MII Auto-Neg Advertisement Register (Address 4). */
10571 + ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
10575 + if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
10576 + /* Read the MII 1000Base-T Control Register (Address 9). */
10577 + ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL,
10578 + &mii_1000t_ctrl_reg);
10584 + * Need to parse both autoneg_advertised and fc and set up
10585 + * the appropriate PHY registers. First we will parse for
10586 + * autoneg_advertised software override. Since we can advertise
10587 + * a plethora of combinations, we need to check each bit
10592 + * First we clear all the 10/100 mb speed bits in the Auto-Neg
10593 + * Advertisement Register (Address 4) and the 1000 mb speed bits in
10594 + * the 1000Base-T Control Register (Address 9).
10596 + mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS |
10597 + NWAY_AR_100TX_HD_CAPS |
10598 + NWAY_AR_10T_FD_CAPS |
10599 + NWAY_AR_10T_HD_CAPS);
10600 + mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS);
10602 + DEBUGOUT1("autoneg_advertised %x\n", phy->autoneg_advertised);
10604 + /* Do we want to advertise 10 Mb Half Duplex? */
10605 + if (phy->autoneg_advertised & ADVERTISE_10_HALF) {
10606 + DEBUGOUT("Advertise 10mb Half duplex\n");
10607 + mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
10610 + /* Do we want to advertise 10 Mb Full Duplex? */
10611 + if (phy->autoneg_advertised & ADVERTISE_10_FULL) {
10612 + DEBUGOUT("Advertise 10mb Full duplex\n");
10613 + mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
10616 + /* Do we want to advertise 100 Mb Half Duplex? */
10617 + if (phy->autoneg_advertised & ADVERTISE_100_HALF) {
10618 + DEBUGOUT("Advertise 100mb Half duplex\n");
10619 + mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
10622 + /* Do we want to advertise 100 Mb Full Duplex? */
10623 + if (phy->autoneg_advertised & ADVERTISE_100_FULL) {
10624 + DEBUGOUT("Advertise 100mb Full duplex\n");
10625 + mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
10628 + /* We do not allow the Phy to advertise 1000 Mb Half Duplex */
10629 + if (phy->autoneg_advertised & ADVERTISE_1000_HALF)
10630 + DEBUGOUT("Advertise 1000mb Half duplex request denied!\n");
10632 + /* Do we want to advertise 1000 Mb Full Duplex? */
10633 + if (phy->autoneg_advertised & ADVERTISE_1000_FULL) {
10634 + DEBUGOUT("Advertise 1000mb Full duplex\n");
10635 + mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
10639 + * Check for a software override of the flow control settings, and
10640 + * setup the PHY advertisement registers accordingly. If
10641 + * auto-negotiation is enabled, then software will have to set the
10642 + * "PAUSE" bits to the correct value in the Auto-Negotiation
10643 + * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-
10646 + * The possible values of the "fc" parameter are:
10647 + * 0: Flow control is completely disabled
10648 + * 1: Rx flow control is enabled (we can receive pause frames
10649 + * but not send pause frames).
10650 + * 2: Tx flow control is enabled (we can send pause frames
10651 + * but we do not support receiving pause frames).
10652 + * 3: Both Rx and Tx flow control (symmetric) are enabled.
10653 + * other: No software override. The flow control configuration
10654 + * in the EEPROM is used.
10656 + switch (hw->fc.current_mode) {
10657 + case e1000_fc_none:
10659 + * Flow control (Rx & Tx) is completely disabled by a
10660 + * software over-ride.
10662 + mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
10664 + case e1000_fc_rx_pause:
10666 + * Rx Flow control is enabled, and Tx Flow control is
10667 + * disabled, by a software over-ride.
10669 + * Since there really isn't a way to advertise that we are
10670 + * capable of Rx Pause ONLY, we will advertise that we
10671 + * support both symmetric and asymmetric Rx PAUSE. Later
10672 + * (in e1000_config_fc_after_link_up) we will disable the
10673 + * hw's ability to send PAUSE frames.
10675 + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
10677 + case e1000_fc_tx_pause:
10679 + * Tx Flow control is enabled, and Rx Flow control is
10680 + * disabled, by a software over-ride.
10682 + mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
10683 + mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
10685 + case e1000_fc_full:
10687 + * Flow control (both Rx and Tx) is enabled by a software
10690 + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
10693 + DEBUGOUT("Flow control param set incorrectly\n");
10694 + ret_val = -E1000_ERR_CONFIG;
10698 + ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
10702 + DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
10704 + if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
10705 + ret_val = phy->ops.write_reg(hw,
10707 + mii_1000t_ctrl_reg);
10717 + * e1000_setup_copper_link_generic - Configure copper link settings
10718 + * @hw: pointer to the HW structure
10720 + * Calls the appropriate function to configure the link for auto-neg or forced
10721 + * speed and duplex. Then we check for link, once link is established calls
10722 + * to configure collision distance and flow control are called. If link is
10723 + * not established, we return -E1000_ERR_PHY (-2).
10725 +s32 e1000_setup_copper_link_generic(struct e1000_hw *hw)
10730 + DEBUGFUNC("e1000_setup_copper_link_generic");
10732 + if (hw->mac.autoneg) {
10734 + * Setup autoneg and flow control advertisement and perform
10735 + * autonegotiation.
10737 + ret_val = e1000_copper_link_autoneg(hw);
10742 + * PHY will be set to 10H, 10F, 100H or 100F
10743 + * depending on user settings.
10745 + DEBUGOUT("Forcing Speed and Duplex\n");
10746 + ret_val = hw->phy.ops.force_speed_duplex(hw);
10748 + DEBUGOUT("Error Forcing Speed and Duplex\n");
10754 + * Check link status. Wait up to 100 microseconds for link to become
10757 + ret_val = e1000_phy_has_link_generic(hw,
10758 + COPPER_LINK_UP_LIMIT,
10765 + DEBUGOUT("Valid link established!!!\n");
10766 + e1000_config_collision_dist_generic(hw);
10767 + ret_val = e1000_config_fc_after_link_up_generic(hw);
10769 + DEBUGOUT("Unable to establish link!!!\n");
10777 + * e1000_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY
10778 + * @hw: pointer to the HW structure
10780 + * Calls the PHY setup function to force speed and duplex. Clears the
10781 + * auto-crossover to force MDI manually. Waits for link and returns
10782 + * successful if link up is successful, else -E1000_ERR_PHY (-2).
10784 +s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw)
10786 + struct e1000_phy_info *phy = &hw->phy;
10791 + DEBUGFUNC("e1000_phy_force_speed_duplex_igp");
10793 + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
10797 + e1000_phy_force_speed_duplex_setup(hw, &phy_data);
10799 + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
10804 + * Clear Auto-Crossover to force MDI manually. IGP requires MDI
10805 + * forced whenever speed and duplex are forced.
10807 + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
10811 + phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
10812 + phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
10814 + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
10818 + DEBUGOUT1("IGP PSCR: %X\n", phy_data);
10822 + if (phy->autoneg_wait_to_complete) {
10823 + DEBUGOUT("Waiting for forced speed/duplex link on IGP phy.\n");
10825 + ret_val = e1000_phy_has_link_generic(hw,
10833 + DEBUGOUT("Link taking longer than expected.\n");
10835 + /* Try once more */
10836 + ret_val = e1000_phy_has_link_generic(hw,
10849 + * e1000_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY
10850 + * @hw: pointer to the HW structure
10852 + * Calls the PHY setup function to force speed and duplex. Clears the
10853 + * auto-crossover to force MDI manually. Resets the PHY to commit the
10854 + * changes. If time expires while waiting for link up, we reset the DSP.
10855 + * After reset, TX_CLK and CRS on Tx must be set. Return successful upon
10856 + * successful completion, else return corresponding error code.
10858 +s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw)
10860 + struct e1000_phy_info *phy = &hw->phy;
10865 + DEBUGFUNC("e1000_phy_force_speed_duplex_m88");
10868 + * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
10869 + * forced whenever speed and duplex are forced.
10871 + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
10875 + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
10876 + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
10880 + DEBUGOUT1("M88E1000 PSCR: %X\n", phy_data);
10882 + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
10886 + e1000_phy_force_speed_duplex_setup(hw, &phy_data);
10888 + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
10892 + /* Reset the phy to commit changes. */
10893 + ret_val = hw->phy.ops.commit(hw);
10897 + if (phy->autoneg_wait_to_complete) {
10898 + DEBUGOUT("Waiting for forced speed/duplex link on M88 phy.\n");
10900 + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
10907 + * We didn't get link.
10908 + * Reset the DSP and cross our fingers.
10910 + ret_val = phy->ops.write_reg(hw,
10911 + M88E1000_PHY_PAGE_SELECT,
10915 + ret_val = e1000_phy_reset_dsp_generic(hw);
10920 + /* Try once more */
10921 + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
10927 + ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
10932 + * Resetting the phy means we need to re-force TX_CLK in the
10933 + * Extended PHY Specific Control Register to 25MHz clock from
10934 + * the reset value of 2.5MHz.
10936 + phy_data |= M88E1000_EPSCR_TX_CLK_25;
10937 + ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
10942 + * In addition, we must re-enable CRS on Tx for both half and full
10945 + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
10949 + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
10950 + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
10957 + * e1000_phy_force_speed_duplex_ife - Force PHY speed & duplex
10958 + * @hw: pointer to the HW structure
10960 + * Forces the speed and duplex settings of the PHY.
10961 + * This is a function pointer entry point only called by
10962 + * PHY setup routines.
10964 +s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw)
10966 + struct e1000_phy_info *phy = &hw->phy;
10971 + DEBUGFUNC("e1000_phy_force_speed_duplex_ife");
10973 + if (phy->type != e1000_phy_ife) {
10974 + ret_val = e1000_phy_force_speed_duplex_igp(hw);
10978 + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &data);
10982 + e1000_phy_force_speed_duplex_setup(hw, &data);
10984 + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, data);
10988 + /* Disable MDI-X support for 10/100 */
10989 + ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data);
10993 + data &= ~IFE_PMC_AUTO_MDIX;
10994 + data &= ~IFE_PMC_FORCE_MDIX;
10996 + ret_val = phy->ops.write_reg(hw, IFE_PHY_MDIX_CONTROL, data);
11000 + DEBUGOUT1("IFE PMC: %X\n", data);
11004 + if (phy->autoneg_wait_to_complete) {
11005 + DEBUGOUT("Waiting for forced speed/duplex link on IFE phy.\n");
11007 + ret_val = e1000_phy_has_link_generic(hw,
11015 + DEBUGOUT("Link taking longer than expected.\n");
11017 + /* Try once more */
11018 + ret_val = e1000_phy_has_link_generic(hw,
11031 + * e1000_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex
11032 + * @hw: pointer to the HW structure
11033 + * @phy_ctrl: pointer to current value of PHY_CONTROL
11035 + * Forces speed and duplex on the PHY by doing the following: disable flow
11036 + * control, force speed/duplex on the MAC, disable auto speed detection,
11037 + * disable auto-negotiation, configure duplex, configure speed, configure
11038 + * the collision distance, write configuration to CTRL register. The
11039 + * caller must write to the PHY_CONTROL register for these settings to
11042 +void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl)
11044 + struct e1000_mac_info *mac = &hw->mac;
11047 + DEBUGFUNC("e1000_phy_force_speed_duplex_setup");
11049 + /* Turn off flow control when forcing speed/duplex */
11050 + hw->fc.current_mode = e1000_fc_none;
11052 + /* Force speed/duplex on the mac */
11053 + ctrl = E1000_READ_REG(hw, E1000_CTRL);
11054 + ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
11055 + ctrl &= ~E1000_CTRL_SPD_SEL;
11057 + /* Disable Auto Speed Detection */
11058 + ctrl &= ~E1000_CTRL_ASDE;
11060 + /* Disable autoneg on the phy */
11061 + *phy_ctrl &= ~MII_CR_AUTO_NEG_EN;
11063 + /* Forcing Full or Half Duplex? */
11064 + if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) {
11065 + ctrl &= ~E1000_CTRL_FD;
11066 + *phy_ctrl &= ~MII_CR_FULL_DUPLEX;
11067 + DEBUGOUT("Half Duplex\n");
11069 + ctrl |= E1000_CTRL_FD;
11070 + *phy_ctrl |= MII_CR_FULL_DUPLEX;
11071 + DEBUGOUT("Full Duplex\n");
11074 + /* Forcing 10mb or 100mb? */
11075 + if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) {
11076 + ctrl |= E1000_CTRL_SPD_100;
11077 + *phy_ctrl |= MII_CR_SPEED_100;
11078 + *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
11079 + DEBUGOUT("Forcing 100mb\n");
11081 + ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
11082 + *phy_ctrl |= MII_CR_SPEED_10;
11083 + *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
11084 + DEBUGOUT("Forcing 10mb\n");
11087 + e1000_config_collision_dist_generic(hw);
11089 + E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
11093 + * e1000_set_d3_lplu_state_generic - Sets low power link up state for D3
11094 + * @hw: pointer to the HW structure
11095 + * @active: boolean used to enable/disable lplu
11097 + * Success returns 0, Failure returns 1
11099 + * The low power link up (lplu) state is set to the power management level D3
11100 + * and SmartSpeed is disabled when active is true, else clear lplu for D3
11101 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU
11102 + * is used during Dx states where the power conservation is most important.
11103 + * During driver activity, SmartSpeed should be enabled so performance is
11106 +s32 e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active)
11108 + struct e1000_phy_info *phy = &hw->phy;
11109 + s32 ret_val = E1000_SUCCESS;
11112 + DEBUGFUNC("e1000_set_d3_lplu_state_generic");
11114 + if (!(hw->phy.ops.read_reg))
11117 + ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
11122 + data &= ~IGP02E1000_PM_D3_LPLU;
11123 + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
11128 + * LPLU and SmartSpeed are mutually exclusive. LPLU is used
11129 + * during Dx states where the power conservation is most
11130 + * important. During driver activity we should enable
11131 + * SmartSpeed, so performance is maintained.
11133 + if (phy->smart_speed == e1000_smart_speed_on) {
11134 + ret_val = phy->ops.read_reg(hw,
11135 + IGP01E1000_PHY_PORT_CONFIG,
11140 + data |= IGP01E1000_PSCFR_SMART_SPEED;
11141 + ret_val = phy->ops.write_reg(hw,
11142 + IGP01E1000_PHY_PORT_CONFIG,
11146 + } else if (phy->smart_speed == e1000_smart_speed_off) {
11147 + ret_val = phy->ops.read_reg(hw,
11148 + IGP01E1000_PHY_PORT_CONFIG,
11153 + data &= ~IGP01E1000_PSCFR_SMART_SPEED;
11154 + ret_val = phy->ops.write_reg(hw,
11155 + IGP01E1000_PHY_PORT_CONFIG,
11160 + } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
11161 + (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
11162 + (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
11163 + data |= IGP02E1000_PM_D3_LPLU;
11164 + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
11169 + /* When LPLU is enabled, we should disable SmartSpeed */
11170 + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
11175 + data &= ~IGP01E1000_PSCFR_SMART_SPEED;
11176 + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
11185 + * e1000_check_downshift_generic - Checks whether a downshift in speed occurred
11186 + * @hw: pointer to the HW structure
11188 + * Success returns 0, Failure returns 1
11190 + * A downshift is detected by querying the PHY link health.
11192 +s32 e1000_check_downshift_generic(struct e1000_hw *hw)
11194 + struct e1000_phy_info *phy = &hw->phy;
11196 + u16 phy_data, offset, mask;
11198 + DEBUGFUNC("e1000_check_downshift_generic");
11200 + switch (phy->type) {
11201 + case e1000_phy_m88:
11202 + case e1000_phy_gg82563:
11203 + offset = M88E1000_PHY_SPEC_STATUS;
11204 + mask = M88E1000_PSSR_DOWNSHIFT;
11206 + case e1000_phy_igp_2:
11207 + case e1000_phy_igp:
11208 + case e1000_phy_igp_3:
11209 + offset = IGP01E1000_PHY_LINK_HEALTH;
11210 + mask = IGP01E1000_PLHR_SS_DOWNGRADE;
11213 + /* speed downshift not supported */
11214 + phy->speed_downgraded = false;
11215 + ret_val = E1000_SUCCESS;
11219 + ret_val = phy->ops.read_reg(hw, offset, &phy_data);
11222 + phy->speed_downgraded = (phy_data & mask) ? true : false;
11229 + * e1000_check_polarity_m88 - Checks the polarity.
11230 + * @hw: pointer to the HW structure
11232 + * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
11234 + * Polarity is determined based on the PHY specific status register.
11236 +s32 e1000_check_polarity_m88(struct e1000_hw *hw)
11238 + struct e1000_phy_info *phy = &hw->phy;
11242 + DEBUGFUNC("e1000_check_polarity_m88");
11244 + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &data);
11247 + phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY)
11248 + ? e1000_rev_polarity_reversed
11249 + : e1000_rev_polarity_normal;
11255 + * e1000_check_polarity_igp - Checks the polarity.
11256 + * @hw: pointer to the HW structure
11258 + * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
11260 + * Polarity is determined based on the PHY port status register, and the
11261 + * current speed (since there is no polarity at 100Mbps).
11263 +s32 e1000_check_polarity_igp(struct e1000_hw *hw)
11265 + struct e1000_phy_info *phy = &hw->phy;
11267 + u16 data, offset, mask;
11269 + DEBUGFUNC("e1000_check_polarity_igp");
11272 + * Polarity is determined based on the speed of
11273 + * our connection.
11275 + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
11279 + if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
11280 + IGP01E1000_PSSR_SPEED_1000MBPS) {
11281 + offset = IGP01E1000_PHY_PCS_INIT_REG;
11282 + mask = IGP01E1000_PHY_POLARITY_MASK;
11285 + * This really only applies to 10Mbps since
11286 + * there is no polarity for 100Mbps (always 0).
11288 + offset = IGP01E1000_PHY_PORT_STATUS;
11289 + mask = IGP01E1000_PSSR_POLARITY_REVERSED;
11292 + ret_val = phy->ops.read_reg(hw, offset, &data);
11295 + phy->cable_polarity = (data & mask)
11296 + ? e1000_rev_polarity_reversed
11297 + : e1000_rev_polarity_normal;
11304 + * e1000_check_polarity_ife - Check cable polarity for IFE PHY
11305 + * @hw: pointer to the HW structure
11307 + * Polarity is determined on the polarity reversal feature being enabled.
11309 +s32 e1000_check_polarity_ife(struct e1000_hw *hw)
11311 + struct e1000_phy_info *phy = &hw->phy;
11313 + u16 phy_data, offset, mask;
11315 + DEBUGFUNC("e1000_check_polarity_ife");
11318 + * Polarity is determined based on the reversal feature being enabled.
11320 + if (phy->polarity_correction) {
11321 + offset = IFE_PHY_EXTENDED_STATUS_CONTROL;
11322 + mask = IFE_PESC_POLARITY_REVERSED;
11324 + offset = IFE_PHY_SPECIAL_CONTROL;
11325 + mask = IFE_PSC_FORCE_POLARITY;
11328 + ret_val = phy->ops.read_reg(hw, offset, &phy_data);
11331 + phy->cable_polarity = (phy_data & mask)
11332 + ? e1000_rev_polarity_reversed
11333 + : e1000_rev_polarity_normal;
11339 + * e1000_wait_autoneg_generic - Wait for auto-neg completion
11340 + * @hw: pointer to the HW structure
11342 + * Waits for auto-negotiation to complete or for the auto-negotiation time
11343 + * limit to expire, whichever happens first.
11345 +s32 e1000_wait_autoneg_generic(struct e1000_hw *hw)
11347 + s32 ret_val = E1000_SUCCESS;
11348 + u16 i, phy_status;
11350 + DEBUGFUNC("e1000_wait_autoneg_generic");
11352 + if (!(hw->phy.ops.read_reg))
11353 + return E1000_SUCCESS;
11355 + /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */
11356 + for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) {
11357 + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
11360 + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
11363 + if (phy_status & MII_SR_AUTONEG_COMPLETE)
11369 + * PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
11376 + * e1000_phy_has_link_generic - Polls PHY for link
11377 + * @hw: pointer to the HW structure
11378 + * @iterations: number of times to poll for link
11379 + * @usec_interval: delay between polling attempts
11380 + * @success: pointer to whether polling was successful or not
11382 + * Polls the PHY status register for link, 'iterations' number of times.
11384 +s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
11385 + u32 usec_interval, bool *success)
11387 + s32 ret_val = E1000_SUCCESS;
11388 + u16 i, phy_status;
11390 + DEBUGFUNC("e1000_phy_has_link_generic");
11392 + if (!(hw->phy.ops.read_reg))
11393 + return E1000_SUCCESS;
11395 + for (i = 0; i < iterations; i++) {
11397 + * Some PHYs require the PHY_STATUS register to be read
11398 + * twice due to the link bit being sticky. No harm doing
11399 + * it across the board.
11401 + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
11404 + * If the first read fails, another entity may have
11405 + * ownership of the resources, wait and try again to
11406 + * see if they have relinquished the resources yet.
11408 + usec_delay(usec_interval);
11410 + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
11413 + if (phy_status & MII_SR_LINK_STATUS)
11415 + if (usec_interval >= 1000)
11416 + msec_delay_irq(usec_interval/1000);
11418 + usec_delay(usec_interval);
11421 + *success = (i < iterations) ? true : false;
11427 + * e1000_get_cable_length_m88 - Determine cable length for m88 PHY
11428 + * @hw: pointer to the HW structure
11430 + * Reads the PHY specific status register to retrieve the cable length
11431 + * information. The cable length is determined by averaging the minimum and
11432 + * maximum values to get the "average" cable length. The m88 PHY has four
11433 + * possible cable length values, which are:
11434 + * Register Value Cable Length
11436 + * 1 50 - 80 meters
11437 + * 2 80 - 110 meters
11438 + * 3 110 - 140 meters
11441 +s32 e1000_get_cable_length_m88(struct e1000_hw *hw)
11443 + struct e1000_phy_info *phy = &hw->phy;
11445 + u16 phy_data, index;
11447 + DEBUGFUNC("e1000_get_cable_length_m88");
11449 + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
11453 + index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
11454 + M88E1000_PSSR_CABLE_LENGTH_SHIFT;
11455 + if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
11456 + ret_val = -E1000_ERR_PHY;
11460 + phy->min_cable_length = e1000_m88_cable_length_table[index];
11461 + phy->max_cable_length = e1000_m88_cable_length_table[index + 1];
11463 + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
11470 + * e1000_get_cable_length_igp_2 - Determine cable length for igp2 PHY
11471 + * @hw: pointer to the HW structure
11473 + * The automatic gain control (agc) normalizes the amplitude of the
11474 + * received signal, adjusting for the attenuation produced by the
11475 + * cable. By reading the AGC registers, which represent the
11476 + * combination of coarse and fine gain value, the value can be put
11477 + * into a lookup table to obtain the approximate cable length
11478 + * for each channel.
11480 +s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw)
11482 + struct e1000_phy_info *phy = &hw->phy;
11483 + s32 ret_val = E1000_SUCCESS;
11484 + u16 phy_data, i, agc_value = 0;
11485 + u16 cur_agc_index, max_agc_index = 0;
11486 + u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
11487 + u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] =
11488 + {IGP02E1000_PHY_AGC_A,
11489 + IGP02E1000_PHY_AGC_B,
11490 + IGP02E1000_PHY_AGC_C,
11491 + IGP02E1000_PHY_AGC_D};
11493 + DEBUGFUNC("e1000_get_cable_length_igp_2");
11495 + /* Read the AGC registers for all channels */
11496 + for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
11497 + ret_val = phy->ops.read_reg(hw, agc_reg_array[i], &phy_data);
11502 + * Getting bits 15:9, which represent the combination of
11503 + * coarse and fine gain values. The result is a number
11504 + * that can be put into the lookup table to obtain the
11505 + * approximate cable length.
11507 + cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
11508 + IGP02E1000_AGC_LENGTH_MASK;
11510 + /* Array index bound check. */
11511 + if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) ||
11512 + (cur_agc_index == 0)) {
11513 + ret_val = -E1000_ERR_PHY;
11517 + /* Remove min & max AGC values from calculation. */
11518 + if (e1000_igp_2_cable_length_table[min_agc_index] >
11519 + e1000_igp_2_cable_length_table[cur_agc_index])
11520 + min_agc_index = cur_agc_index;
11521 + if (e1000_igp_2_cable_length_table[max_agc_index] <
11522 + e1000_igp_2_cable_length_table[cur_agc_index])
11523 + max_agc_index = cur_agc_index;
11525 + agc_value += e1000_igp_2_cable_length_table[cur_agc_index];
11528 + agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] +
11529 + e1000_igp_2_cable_length_table[max_agc_index]);
11530 + agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
11532 + /* Calculate cable length with the error range of +/- 10 meters. */
11533 + phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
11534 + (agc_value - IGP02E1000_AGC_RANGE) : 0;
11535 + phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE;
11537 + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
11544 + * e1000_get_phy_info_m88 - Retrieve PHY information
11545 + * @hw: pointer to the HW structure
11547 + * Valid for only copper links. Read the PHY status register (sticky read)
11548 + * to verify that link is up. Read the PHY special control register to
11549 + * determine the polarity and 10base-T extended distance. Read the PHY
11550 + * special status register to determine MDI/MDIx and current speed. If
11551 + * speed is 1000, then determine cable length, local and remote receiver.
11553 +s32 e1000_get_phy_info_m88(struct e1000_hw *hw)
11555 + struct e1000_phy_info *phy = &hw->phy;
11560 + DEBUGFUNC("e1000_get_phy_info_m88");
11562 + if (phy->media_type != e1000_media_type_copper) {
11563 + DEBUGOUT("Phy info is only valid for copper media\n");
11564 + ret_val = -E1000_ERR_CONFIG;
11568 + ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
11573 + DEBUGOUT("Phy info is only valid if link is up\n");
11574 + ret_val = -E1000_ERR_CONFIG;
11578 + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
11582 + phy->polarity_correction = (phy_data & M88E1000_PSCR_POLARITY_REVERSAL)
11585 + ret_val = e1000_check_polarity_m88(hw);
11589 + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
11593 + phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX) ? true : false;
11595 + if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
11596 + ret_val = hw->phy.ops.get_cable_length(hw);
11600 + ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &phy_data);
11604 + phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS)
11605 + ? e1000_1000t_rx_status_ok
11606 + : e1000_1000t_rx_status_not_ok;
11608 + phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS)
11609 + ? e1000_1000t_rx_status_ok
11610 + : e1000_1000t_rx_status_not_ok;
11612 + /* Set values to "undefined" */
11613 + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
11614 + phy->local_rx = e1000_1000t_rx_status_undefined;
11615 + phy->remote_rx = e1000_1000t_rx_status_undefined;
11623 + * e1000_get_phy_info_igp - Retrieve igp PHY information
11624 + * @hw: pointer to the HW structure
11626 + * Read PHY status to determine if link is up. If link is up, then
11627 + * set/determine 10base-T extended distance and polarity correction. Read
11628 + * PHY port status to determine MDI/MDIx and speed. Based on the speed,
11629 + * determine on the cable length, local and remote receiver.
11631 +s32 e1000_get_phy_info_igp(struct e1000_hw *hw)
11633 + struct e1000_phy_info *phy = &hw->phy;
11638 + DEBUGFUNC("e1000_get_phy_info_igp");
11640 + ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
11645 + DEBUGOUT("Phy info is only valid if link is up\n");
11646 + ret_val = -E1000_ERR_CONFIG;
11650 + phy->polarity_correction = true;
11652 + ret_val = e1000_check_polarity_igp(hw);
11656 + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
11660 + phy->is_mdix = (data & IGP01E1000_PSSR_MDIX) ? true : false;
11662 + if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
11663 + IGP01E1000_PSSR_SPEED_1000MBPS) {
11664 + ret_val = phy->ops.get_cable_length(hw);
11668 + ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
11672 + phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
11673 + ? e1000_1000t_rx_status_ok
11674 + : e1000_1000t_rx_status_not_ok;
11676 + phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
11677 + ? e1000_1000t_rx_status_ok
11678 + : e1000_1000t_rx_status_not_ok;
11680 + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
11681 + phy->local_rx = e1000_1000t_rx_status_undefined;
11682 + phy->remote_rx = e1000_1000t_rx_status_undefined;
11690 + * e1000_phy_sw_reset_generic - PHY software reset
11691 + * @hw: pointer to the HW structure
11693 + * Does a software reset of the PHY by reading the PHY control register and
11694 + * setting/write the control register reset bit to the PHY.
11696 +s32 e1000_phy_sw_reset_generic(struct e1000_hw *hw)
11698 + s32 ret_val = E1000_SUCCESS;
11701 + DEBUGFUNC("e1000_phy_sw_reset_generic");
11703 + if (!(hw->phy.ops.read_reg))
11706 + ret_val = hw->phy.ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
11710 + phy_ctrl |= MII_CR_RESET;
11711 + ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
11722 + * e1000_phy_hw_reset_generic - PHY hardware reset
11723 + * @hw: pointer to the HW structure
11725 + * Verify the reset block is not blocking us from resetting. Acquire
11726 + * semaphore (if necessary) and read/set/write the device control reset
11727 + * bit in the PHY. Wait the appropriate delay time for the device to
11728 + * reset and release the semaphore (if necessary).
11730 +s32 e1000_phy_hw_reset_generic(struct e1000_hw *hw)
11732 + struct e1000_phy_info *phy = &hw->phy;
11733 + s32 ret_val = E1000_SUCCESS;
11736 + DEBUGFUNC("e1000_phy_hw_reset_generic");
11738 + ret_val = phy->ops.check_reset_block(hw);
11740 + ret_val = E1000_SUCCESS;
11744 + ret_val = phy->ops.acquire(hw);
11748 + ctrl = E1000_READ_REG(hw, E1000_CTRL);
11749 + E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_PHY_RST);
11750 + E1000_WRITE_FLUSH(hw);
11752 + usec_delay(phy->reset_delay_us);
11754 + E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
11755 + E1000_WRITE_FLUSH(hw);
11759 + phy->ops.release(hw);
11761 + ret_val = phy->ops.get_cfg_done(hw);
11768 + * e1000_get_cfg_done_generic - Generic configuration done
11769 + * @hw: pointer to the HW structure
11771 + * Generic function to wait 10 milli-seconds for configuration to complete
11772 + * and return success.
11774 +s32 e1000_get_cfg_done_generic(struct e1000_hw *hw)
11776 + DEBUGFUNC("e1000_get_cfg_done_generic");
11778 + msec_delay_irq(10);
11780 + return E1000_SUCCESS;
11784 + * e1000_phy_init_script_igp3 - Inits the IGP3 PHY
11785 + * @hw: pointer to the HW structure
11787 + * Initializes an Intel Gigabit PHY3 when an EEPROM is not present.
11789 +s32 e1000_phy_init_script_igp3(struct e1000_hw *hw)
11791 + DEBUGOUT("Running IGP 3 PHY init script\n");
11793 + /* PHY init IGP 3 */
11794 + /* Enable rise/fall, 10-mode work in class-A */
11795 + hw->phy.ops.write_reg(hw, 0x2F5B, 0x9018);
11796 + /* Remove all caps from Replica path filter */
11797 + hw->phy.ops.write_reg(hw, 0x2F52, 0x0000);
11798 + /* Bias trimming for ADC, AFE and Driver (Default) */
11799 + hw->phy.ops.write_reg(hw, 0x2FB1, 0x8B24);
11800 + /* Increase Hybrid poly bias */
11801 + hw->phy.ops.write_reg(hw, 0x2FB2, 0xF8F0);
11802 + /* Add 4% to Tx amplitude in Gig mode */
11803 + hw->phy.ops.write_reg(hw, 0x2010, 0x10B0);
11804 + /* Disable trimming (TTT) */
11805 + hw->phy.ops.write_reg(hw, 0x2011, 0x0000);
11806 + /* Poly DC correction to 94.6% + 2% for all channels */
11807 + hw->phy.ops.write_reg(hw, 0x20DD, 0x249A);
11808 + /* ABS DC correction to 95.9% */
11809 + hw->phy.ops.write_reg(hw, 0x20DE, 0x00D3);
11810 + /* BG temp curve trim */
11811 + hw->phy.ops.write_reg(hw, 0x28B4, 0x04CE);
11812 + /* Increasing ADC OPAMP stage 1 currents to max */
11813 + hw->phy.ops.write_reg(hw, 0x2F70, 0x29E4);
11814 +	/* Force 1000 (required for enabling PHY regs configuration) */
11815 + hw->phy.ops.write_reg(hw, 0x0000, 0x0140);
11816 + /* Set upd_freq to 6 */
11817 + hw->phy.ops.write_reg(hw, 0x1F30, 0x1606);
11818 + /* Disable NPDFE */
11819 + hw->phy.ops.write_reg(hw, 0x1F31, 0xB814);
11820 + /* Disable adaptive fixed FFE (Default) */
11821 + hw->phy.ops.write_reg(hw, 0x1F35, 0x002A);
11822 + /* Enable FFE hysteresis */
11823 + hw->phy.ops.write_reg(hw, 0x1F3E, 0x0067);
11824 + /* Fixed FFE for short cable lengths */
11825 + hw->phy.ops.write_reg(hw, 0x1F54, 0x0065);
11826 + /* Fixed FFE for medium cable lengths */
11827 + hw->phy.ops.write_reg(hw, 0x1F55, 0x002A);
11828 + /* Fixed FFE for long cable lengths */
11829 + hw->phy.ops.write_reg(hw, 0x1F56, 0x002A);
11830 + /* Enable Adaptive Clip Threshold */
11831 + hw->phy.ops.write_reg(hw, 0x1F72, 0x3FB0);
11832 + /* AHT reset limit to 1 */
11833 + hw->phy.ops.write_reg(hw, 0x1F76, 0xC0FF);
11834 + /* Set AHT master delay to 127 msec */
11835 + hw->phy.ops.write_reg(hw, 0x1F77, 0x1DEC);
11836 + /* Set scan bits for AHT */
11837 + hw->phy.ops.write_reg(hw, 0x1F78, 0xF9EF);
11838 + /* Set AHT Preset bits */
11839 + hw->phy.ops.write_reg(hw, 0x1F79, 0x0210);
11840 + /* Change integ_factor of channel A to 3 */
11841 + hw->phy.ops.write_reg(hw, 0x1895, 0x0003);
11842 + /* Change prop_factor of channels BCD to 8 */
11843 + hw->phy.ops.write_reg(hw, 0x1796, 0x0008);
11844 + /* Change cg_icount + enable integbp for channels BCD */
11845 + hw->phy.ops.write_reg(hw, 0x1798, 0xD008);
11847 + * Change cg_icount + enable integbp + change prop_factor_master
11848 + * to 8 for channel A
11850 + hw->phy.ops.write_reg(hw, 0x1898, 0xD918);
11851 + /* Disable AHT in Slave mode on channel A */
11852 + hw->phy.ops.write_reg(hw, 0x187A, 0x0800);
11854 + * Enable LPLU and disable AN to 1000 in non-D0a states,
11857 + hw->phy.ops.write_reg(hw, 0x0019, 0x008D);
11858 + /* Enable restart AN on an1000_dis change */
11859 + hw->phy.ops.write_reg(hw, 0x001B, 0x2080);
11860 + /* Enable wh_fifo read clock in 10/100 modes */
11861 + hw->phy.ops.write_reg(hw, 0x0014, 0x0045);
11862 + /* Restart AN, Speed selection is 1000 */
11863 + hw->phy.ops.write_reg(hw, 0x0000, 0x1340);
11865 + return E1000_SUCCESS;
11869 + * e1000_get_phy_type_from_id - Get PHY type from id
11870 + * @phy_id: phy_id read from the phy
11872 + * Returns the phy type from the id.
11874 +enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id)
11876 + enum e1000_phy_type phy_type = e1000_phy_unknown;
11878 + switch (phy_id) {
11879 + case M88E1000_I_PHY_ID:
11880 + case M88E1000_E_PHY_ID:
11881 + case M88E1111_I_PHY_ID:
11882 + case M88E1011_I_PHY_ID:
11883 + phy_type = e1000_phy_m88;
11885 + case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */
11886 + phy_type = e1000_phy_igp_2;
11888 + case GG82563_E_PHY_ID:
11889 + phy_type = e1000_phy_gg82563;
11891 + case IGP03E1000_E_PHY_ID:
11892 + phy_type = e1000_phy_igp_3;
11894 + case IFE_E_PHY_ID:
11895 + case IFE_PLUS_E_PHY_ID:
11896 + case IFE_C_E_PHY_ID:
11897 + phy_type = e1000_phy_ife;
11900 + phy_type = e1000_phy_unknown;
11907 + * e1000_determine_phy_address - Determines PHY address.
11908 + * @hw: pointer to the HW structure
11910 + * This uses a trial and error method to loop through possible PHY
11911 + * addresses. It tests each by reading the PHY ID registers and
11912 + * checking for a match.
11914 +s32 e1000_determine_phy_address(struct e1000_hw *hw)
11916 + s32 ret_val = -E1000_ERR_PHY_TYPE;
11917 + u32 phy_addr = 0;
11919 + enum e1000_phy_type phy_type = e1000_phy_unknown;
11921 + hw->phy.id = phy_type;
11923 + for (phy_addr = 0; phy_addr < E1000_MAX_PHY_ADDR; phy_addr++) {
11924 + hw->phy.addr = phy_addr;
11928 + e1000_get_phy_id(hw);
11929 + phy_type = e1000_get_phy_type_from_id(hw->phy.id);
11932 + * If phy_type is valid, break - we found our
11935 + if (phy_type != e1000_phy_unknown) {
11936 + ret_val = E1000_SUCCESS;
11941 + } while (i < 10);
11949 + * e1000_power_up_phy_copper - Restore copper link in case of PHY power down
11950 + * @hw: pointer to the HW structure
11952 + * In the case of a PHY power down to save power, or to turn off link during a
11953 + * driver unload, or wake on lan is not enabled, restore the link to previous
11956 +void e1000_power_up_phy_copper(struct e1000_hw *hw)
11960 + /* The PHY will retain its settings across a power down/up cycle */
11961 + hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
11962 + mii_reg &= ~MII_CR_POWER_DOWN;
11963 + hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
11967 + * e1000_power_down_phy_copper - Power down copper PHY
11968 + * @hw: pointer to the HW structure
11970 + * In the case of a PHY power down to save power, or to turn off link during a
11971 + * driver unload, or wake on lan is not enabled, restore the link to previous
11974 +void e1000_power_down_phy_copper(struct e1000_hw *hw)
11978 + /* The PHY will retain its settings across a power down/up cycle */
11979 + hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
11980 + mii_reg |= MII_CR_POWER_DOWN;
11981 + hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
11984 Index: linux-2.6.22/drivers/net/igb/e1000_phy.h
11985 ===================================================================
11986 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
11987 +++ linux-2.6.22/drivers/net/igb/e1000_phy.h 2009-12-18 12:39:22.000000000 -0500
11989 +/*******************************************************************************
11991 + Intel(R) Gigabit Ethernet Linux driver
11992 + Copyright(c) 2007-2009 Intel Corporation.
11994 + This program is free software; you can redistribute it and/or modify it
11995 + under the terms and conditions of the GNU General Public License,
11996 + version 2, as published by the Free Software Foundation.
11998 + This program is distributed in the hope it will be useful, but WITHOUT
11999 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12000 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12003 + You should have received a copy of the GNU General Public License along with
12004 + this program; if not, write to the Free Software Foundation, Inc.,
12005 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
12007 + The full GNU General Public License is included in this distribution in
12008 + the file called "COPYING".
12010 + Contact Information:
12011 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
12012 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
12014 +*******************************************************************************/
12016 +#ifndef _E1000_PHY_H_
12017 +#define _E1000_PHY_H_
12019 +void e1000_init_phy_ops_generic(struct e1000_hw *hw);
12020 +s32 e1000_check_downshift_generic(struct e1000_hw *hw);
12021 +s32 e1000_check_polarity_m88(struct e1000_hw *hw);
12022 +s32 e1000_check_polarity_igp(struct e1000_hw *hw);
12023 +s32 e1000_check_polarity_ife(struct e1000_hw *hw);
12024 +s32 e1000_check_reset_block_generic(struct e1000_hw *hw);
12025 +s32 e1000_copper_link_autoneg(struct e1000_hw *hw);
12026 +s32 e1000_copper_link_setup_igp(struct e1000_hw *hw);
12027 +s32 e1000_copper_link_setup_m88(struct e1000_hw *hw);
12028 +s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw);
12029 +s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw);
12030 +s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw);
12031 +s32 e1000_get_cable_length_m88(struct e1000_hw *hw);
12032 +s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw);
12033 +s32 e1000_get_cfg_done_generic(struct e1000_hw *hw);
12034 +s32 e1000_get_phy_id(struct e1000_hw *hw);
12035 +s32 e1000_get_phy_info_igp(struct e1000_hw *hw);
12036 +s32 e1000_get_phy_info_m88(struct e1000_hw *hw);
12037 +s32 e1000_phy_sw_reset_generic(struct e1000_hw *hw);
12038 +void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
12039 +s32 e1000_phy_hw_reset_generic(struct e1000_hw *hw);
12040 +s32 e1000_phy_reset_dsp_generic(struct e1000_hw *hw);
12041 +s32 e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data);
12042 +s32 e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data);
12043 +s32 e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
12044 +s32 e1000_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data);
12045 +s32 e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data);
12046 +s32 e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active);
12047 +s32 e1000_setup_copper_link_generic(struct e1000_hw *hw);
12048 +s32 e1000_wait_autoneg_generic(struct e1000_hw *hw);
12049 +s32 e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data);
12050 +s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data);
12051 +s32 e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
12052 +s32 e1000_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data);
12053 +s32 e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data);
12054 +s32 e1000_phy_reset_dsp(struct e1000_hw *hw);
12055 +s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
12056 + u32 usec_interval, bool *success);
12057 +s32 e1000_phy_init_script_igp3(struct e1000_hw *hw);
12058 +enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id);
12059 +s32 e1000_determine_phy_address(struct e1000_hw *hw);
12060 +void e1000_power_up_phy_copper(struct e1000_hw *hw);
12061 +void e1000_power_down_phy_copper(struct e1000_hw *hw);
12062 +s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
12063 +s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
12064 +s32 e1000_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data);
12065 +s32 e1000_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data);
12067 +#define E1000_MAX_PHY_ADDR 4
12069 +/* IGP01E1000 Specific Registers */
12070 +#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */
12071 +#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */
12072 +#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */
12073 +#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */
12074 +#define IGP01E1000_GMII_FIFO 0x14 /* GMII FIFO */
12075 +#define IGP01E1000_PHY_CHANNEL_QUALITY 0x15 /* PHY Channel Quality */
12076 +#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */
12077 +#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */
12078 +#define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */
12079 +#define IGP_PAGE_SHIFT 5
12080 +#define PHY_REG_MASK 0x1F
12082 +#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4
12083 +#define IGP01E1000_PHY_POLARITY_MASK 0x0078
12085 +#define IGP01E1000_PSCR_AUTO_MDIX 0x1000
12086 +#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */
12088 +#define IGP01E1000_PSCFR_SMART_SPEED 0x0080
12090 +/* Enable flexible speed on link-up */
12091 +#define IGP01E1000_GMII_FLEX_SPD 0x0010
12092 +#define IGP01E1000_GMII_SPD 0x0020 /* Enable SPD */
12094 +#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */
12095 +#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */
12096 +#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */
12098 +#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000
12100 +#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002
12101 +#define IGP01E1000_PSSR_MDIX 0x0800
12102 +#define IGP01E1000_PSSR_SPEED_MASK 0xC000
12103 +#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000
12105 +#define IGP02E1000_PHY_CHANNEL_NUM 4
12106 +#define IGP02E1000_PHY_AGC_A 0x11B1
12107 +#define IGP02E1000_PHY_AGC_B 0x12B1
12108 +#define IGP02E1000_PHY_AGC_C 0x14B1
12109 +#define IGP02E1000_PHY_AGC_D 0x18B1
12111 +#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Coarse - 15:13, Fine - 12:9 */
12112 +#define IGP02E1000_AGC_LENGTH_MASK 0x7F
12113 +#define IGP02E1000_AGC_RANGE 15
12115 +#define IGP03E1000_PHY_MISC_CTRL 0x1B
12116 +#define IGP03E1000_PHY_MISC_DUPLEX_MANUAL_SET 0x1000 /* Manually Set Duplex */
12118 +#define E1000_CABLE_LENGTH_UNDEFINED 0xFF
12120 +#define E1000_KMRNCTRLSTA_OFFSET 0x001F0000
12121 +#define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16
12122 +#define E1000_KMRNCTRLSTA_REN 0x00200000
12123 +#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */
12124 +#define E1000_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */
12125 +#define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */
12126 +#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */
12128 +#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10
12129 +#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Control */
12130 +#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Control */
12131 +#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control */
12133 +/* IFE PHY Extended Status Control */
12134 +#define IFE_PESC_POLARITY_REVERSED 0x0100
12136 +/* IFE PHY Special Control */
12137 +#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010
12138 +#define IFE_PSC_FORCE_POLARITY 0x0020
12139 +#define IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN 0x0100
12141 +/* IFE PHY Special Control and LED Control */
12142 +#define IFE_PSCL_PROBE_MODE 0x0020
12143 +#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */
12144 +#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */
12146 +/* IFE PHY MDIX Control */
12147 +#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */
12148 +#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */
12149 +#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable auto MDI/MDI-X, 0=disable */
12152 Index: linux-2.6.22/drivers/net/igb/e1000_regs.h
12153 ===================================================================
12154 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
12155 +++ linux-2.6.22/drivers/net/igb/e1000_regs.h 2009-12-18 12:39:22.000000000 -0500
12157 +/*******************************************************************************
12159 + Intel(R) Gigabit Ethernet Linux driver
12160 + Copyright(c) 2007-2009 Intel Corporation.
12162 + This program is free software; you can redistribute it and/or modify it
12163 + under the terms and conditions of the GNU General Public License,
12164 + version 2, as published by the Free Software Foundation.
12166 + This program is distributed in the hope it will be useful, but WITHOUT
12167 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12168 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12171 + You should have received a copy of the GNU General Public License along with
12172 + this program; if not, write to the Free Software Foundation, Inc.,
12173 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
12175 + The full GNU General Public License is included in this distribution in
12176 + the file called "COPYING".
12178 + Contact Information:
12179 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
12180 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
12182 +*******************************************************************************/
12184 +#ifndef _E1000_REGS_H_
12185 +#define _E1000_REGS_H_
12187 +#define E1000_CTRL 0x00000 /* Device Control - RW */
12188 +#define E1000_CTRL_DUP 0x00004 /* Device Control Duplicate (Shadow) - RW */
12189 +#define E1000_STATUS 0x00008 /* Device Status - RO */
12190 +#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */
12191 +#define E1000_EERD 0x00014 /* EEPROM Read - RW */
12192 +#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */
12193 +#define E1000_FLA 0x0001C /* Flash Access - RW */
12194 +#define E1000_MDIC 0x00020 /* MDI Control - RW */
12195 +#define E1000_SCTL 0x00024 /* SerDes Control - RW */
12196 +#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */
12197 +#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */
12198 +#define E1000_FEXT 0x0002C /* Future Extended - RW */
12199 +#define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */
12200 +#define E1000_FCT 0x00030 /* Flow Control Type - RW */
12201 +#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */
12202 +#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
12203 +#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */
12204 +#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */
12205 +#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */
12206 +#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */
12207 +#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */
12208 +#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */
12209 +#define E1000_RCTL 0x00100 /* Rx Control - RW */
12210 +#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */
12211 +#define E1000_TXCW 0x00178 /* Tx Configuration Word - RW */
12212 +#define E1000_RXCW 0x00180 /* Rx Configuration Word - RO */
12213 +#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */
12214 +#define E1000_EITR(_n) (0x01680 + (0x4 * (_n)))
12215 +#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - WO */
12216 +#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */
12217 +#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */
12218 +#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */
12219 +#define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */
12220 +#define E1000_GPIE 0x01514 /* General Purpose Interrupt Enable - RW */
12221 +#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */
12222 +#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */
12223 +#define E1000_TCTL 0x00400 /* Tx Control - RW */
12224 +#define E1000_TCTL_EXT 0x00404 /* Extended Tx Control - RW */
12225 +#define E1000_TIPG 0x00410 /* Tx Inter-packet gap -RW */
12226 +#define E1000_TBT 0x00448 /* Tx Burst Timer - RW */
12227 +#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */
12228 +#define E1000_LEDCTL 0x00E00 /* LED Control - RW */
12229 +#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */
12230 +#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */
12231 +#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */
12232 +#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
12233 +#define E1000_PBS 0x01008 /* Packet Buffer Size */
12234 +#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
12235 +#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */
12236 +#define E1000_FLASHT 0x01028 /* FLASH Timer Register */
12237 +#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */
12238 +#define E1000_FLSWCTL 0x01030 /* FLASH control register */
12239 +#define E1000_FLSWDATA 0x01034 /* FLASH data register */
12240 +#define E1000_FLSWCNT 0x01038 /* FLASH Access Counter */
12241 +#define E1000_FLOP 0x0103C /* FLASH Opcode Register */
12242 +#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */
12243 +#define E1000_I2CPARAMS 0x0102C /* SFPI2C Parameters Register - RW */
12244 +#define E1000_WDSTP 0x01040 /* Watchdog Setup - RW */
12245 +#define E1000_SWDSTS 0x01044 /* SW Device Status - RW */
12246 +#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */
12247 +#define E1000_TCPTIMER 0x0104C /* TCP Timer - RW */
12248 +#define E1000_VPDDIAG 0x01060 /* VPD Diagnostic - RO */
12249 +#define E1000_ICR_V2 0x01500 /* Interrupt Cause - new location - RC */
12250 +#define E1000_ICS_V2 0x01504 /* Interrupt Cause Set - new location - WO */
12251 +#define E1000_IMS_V2 0x01508 /* Interrupt Mask Set/Read - new location - RW */
12252 +#define E1000_IMC_V2 0x0150C /* Interrupt Mask Clear - new location - WO */
12253 +#define E1000_IAM_V2 0x01510 /* Interrupt Ack Auto Mask - new location - RW */
12254 +#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */
12255 +#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */
12256 +#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */
12257 +#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */
12258 +#define E1000_RDFPCQ(_n) (0x02430 + (0x4 * (_n)))
12259 +#define E1000_PBRTH 0x02458 /* PB Rx Arbitration Threshold - RW */
12260 +#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */
12261 +/* Split and Replication Rx Control - RW */
12262 +#define E1000_RDPUMB 0x025CC /* DMA Rx Descriptor uC Mailbox - RW */
12263 +#define E1000_RDPUAD 0x025D0 /* DMA Rx Descriptor uC Addr Command - RW */
12264 +#define E1000_RDPUWD 0x025D4 /* DMA Rx Descriptor uC Data Write - RW */
12265 +#define E1000_RDPURD 0x025D8 /* DMA Rx Descriptor uC Data Read - RW */
12266 +#define E1000_RDPUCTL 0x025DC /* DMA Rx Descriptor uC Control - RW */
12267 +#define E1000_PBDIAG 0x02458 /* Packet Buffer Diagnostic - RW */
12268 +#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
12269 +#define E1000_RDTR 0x02820 /* Rx Delay Timer - RW */
12270 +#define E1000_RADV 0x0282C /* Rx Interrupt Absolute Delay Timer - RW */
12272 + * Convenience macros
12274 + * Note: "_n" is the queue number of the register to be written to.
12277 + * E1000_RDBAL_REG(current_rx_queue)
12279 +#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \
12280 + (0x0C000 + ((_n) * 0x40)))
12281 +#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \
12282 + (0x0C004 + ((_n) * 0x40)))
12283 +#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \
12284 + (0x0C008 + ((_n) * 0x40)))
12285 +#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \
12286 + (0x0C00C + ((_n) * 0x40)))
12287 +#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \
12288 + (0x0C010 + ((_n) * 0x40)))
12289 +#define E1000_RXCTL(_n) ((_n) < 4 ? (0x02814 + ((_n) * 0x100)) : \
12290 + (0x0C014 + ((_n) * 0x40)))
12291 +#define E1000_DCA_RXCTRL(_n) E1000_RXCTL(_n)
12292 +#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \
12293 + (0x0C018 + ((_n) * 0x40)))
12294 +#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \
12295 + (0x0C028 + ((_n) * 0x40)))
12296 +#define E1000_RQDPC(_n) ((_n) < 4 ? (0x02830 + ((_n) * 0x100)) : \
12297 + (0x0C030 + ((_n) * 0x40)))
12298 +#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \
12299 + (0x0E000 + ((_n) * 0x40)))
12300 +#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \
12301 + (0x0E004 + ((_n) * 0x40)))
12302 +#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \
12303 + (0x0E008 + ((_n) * 0x40)))
12304 +#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \
12305 + (0x0E010 + ((_n) * 0x40)))
12306 +#define E1000_TXCTL(_n) ((_n) < 4 ? (0x03814 + ((_n) * 0x100)) : \
12307 + (0x0E014 + ((_n) * 0x40)))
12308 +#define E1000_DCA_TXCTRL(_n) E1000_TXCTL(_n)
12309 +#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \
12310 + (0x0E018 + ((_n) * 0x40)))
12311 +#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \
12312 + (0x0E028 + ((_n) * 0x40)))
12313 +#define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) : \
12314 + (0x0E038 + ((_n) * 0x40)))
12315 +#define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) : \
12316 + (0x0E03C + ((_n) * 0x40)))
12317 +#define E1000_TARC(_n) (0x03840 + ((_n) * 0x100))
12318 +#define E1000_RSRPD 0x02C00 /* Rx Small Packet Detect - RW */
12319 +#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */
12320 +#define E1000_TXDMAC 0x03000 /* Tx DMA Control - RW */
12321 +#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */
12322 +#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4))
12323 +#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
12324 + (0x054E0 + ((_i - 16) * 8)))
12325 +#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
12326 + (0x054E4 + ((_i - 16) * 8)))
12327 +#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8))
12328 +#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4))
12329 +#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4))
12330 +#define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8))
12331 +#define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8))
12332 +#define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8))
12333 +#define E1000_PBSLAC 0x03100 /* Packet Buffer Slave Access Control */
12334 +#define E1000_PBSLAD(_n) (0x03110 + (0x4 * (_n))) /* Packet Buffer DWORD (_n) */
12335 +#define E1000_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */
12336 +#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */
12337 +#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
12338 +#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
12339 +#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */
12340 +#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */
12341 +#define E1000_TDPUMB 0x0357C /* DMA Tx Descriptor uC Mail Box - RW */
12342 +#define E1000_TDPUAD 0x03580 /* DMA Tx Descriptor uC Addr Command - RW */
12343 +#define E1000_TDPUWD 0x03584 /* DMA Tx Descriptor uC Data Write - RW */
12344 +#define E1000_TDPURD 0x03588 /* DMA Tx Descriptor uC Data Read - RW */
12345 +#define E1000_TDPUCTL 0x0358C /* DMA Tx Descriptor uC Control - RW */
12346 +#define E1000_DTXCTL 0x03590 /* DMA Tx Control - RW */
12347 +#define E1000_DTXTCPFLGL 0x0359C /* DMA Tx Control flag low - RW */
12348 +#define E1000_DTXTCPFLGH 0x035A0 /* DMA Tx Control flag high - RW */
12349 +#define E1000_DTXMXSZRQ 0x03540 /* DMA Tx Max Total Allow Size Requests - RW */
12350 +#define E1000_TIDV 0x03820 /* Tx Interrupt Delay Value - RW */
12351 +#define E1000_TADV 0x0382C /* Tx Interrupt Absolute Delay Val - RW */
12352 +#define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */
12353 +#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */
12354 +#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
12355 +#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */
12356 +#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */
12357 +#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */
12358 +#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */
12359 +#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */
12360 +#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */
12361 +#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */
12362 +#define E1000_COLC 0x04028 /* Collision Count - R/clr */
12363 +#define E1000_DC 0x04030 /* Defer Count - R/clr */
12364 +#define E1000_TNCRS 0x04034 /* Tx-No CRS - R/clr */
12365 +#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */
12366 +#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */
12367 +#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */
12368 +#define E1000_XONRXC 0x04048 /* XON Rx Count - R/clr */
12369 +#define E1000_XONTXC 0x0404C /* XON Tx Count - R/clr */
12370 +#define E1000_XOFFRXC 0x04050 /* XOFF Rx Count - R/clr */
12371 +#define E1000_XOFFTXC 0x04054 /* XOFF Tx Count - R/clr */
12372 +#define E1000_FCRUC 0x04058 /* Flow Control Rx Unsupported Count- R/clr */
12373 +#define E1000_PRC64 0x0405C /* Packets Rx (64 bytes) - R/clr */
12374 +#define E1000_PRC127 0x04060 /* Packets Rx (65-127 bytes) - R/clr */
12375 +#define E1000_PRC255 0x04064 /* Packets Rx (128-255 bytes) - R/clr */
12376 +#define E1000_PRC511 0x04068 /* Packets Rx (256-511 bytes) - R/clr */
12377 +#define E1000_PRC1023 0x0406C /* Packets Rx (512-1023 bytes) - R/clr */
12378 +#define E1000_PRC1522 0x04070 /* Packets Rx (1024-1522 bytes) - R/clr */
12379 +#define E1000_GPRC 0x04074 /* Good Packets Rx Count - R/clr */
12380 +#define E1000_BPRC 0x04078 /* Broadcast Packets Rx Count - R/clr */
12381 +#define E1000_MPRC 0x0407C /* Multicast Packets Rx Count - R/clr */
12382 +#define E1000_GPTC 0x04080 /* Good Packets Tx Count - R/clr */
12383 +#define E1000_GORCL 0x04088 /* Good Octets Rx Count Low - R/clr */
12384 +#define E1000_GORCH 0x0408C /* Good Octets Rx Count High - R/clr */
12385 +#define E1000_GOTCL 0x04090 /* Good Octets Tx Count Low - R/clr */
12386 +#define E1000_GOTCH 0x04094 /* Good Octets Tx Count High - R/clr */
12387 +#define E1000_RNBC 0x040A0 /* Rx No Buffers Count - R/clr */
12388 +#define E1000_RUC 0x040A4 /* Rx Undersize Count - R/clr */
12389 +#define E1000_RFC 0x040A8 /* Rx Fragment Count - R/clr */
12390 +#define E1000_ROC 0x040AC /* Rx Oversize Count - R/clr */
12391 +#define E1000_RJC 0x040B0 /* Rx Jabber Count - R/clr */
12392 +#define E1000_MGTPRC 0x040B4 /* Management Packets Rx Count - R/clr */
12393 +#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */
12394 +#define E1000_MGTPTC 0x040BC /* Management Packets Tx Count - R/clr */
12395 +#define E1000_TORL 0x040C0 /* Total Octets Rx Low - R/clr */
12396 +#define E1000_TORH 0x040C4 /* Total Octets Rx High - R/clr */
12397 +#define E1000_TOTL 0x040C8 /* Total Octets Tx Low - R/clr */
12398 +#define E1000_TOTH 0x040CC /* Total Octets Tx High - R/clr */
12399 +#define E1000_TPR 0x040D0 /* Total Packets Rx - R/clr */
12400 +#define E1000_TPT 0x040D4 /* Total Packets Tx - R/clr */
12401 +#define E1000_PTC64 0x040D8 /* Packets Tx (64 bytes) - R/clr */
12402 +#define E1000_PTC127 0x040DC /* Packets Tx (65-127 bytes) - R/clr */
12403 +#define E1000_PTC255 0x040E0 /* Packets Tx (128-255 bytes) - R/clr */
12404 +#define E1000_PTC511 0x040E4 /* Packets Tx (256-511 bytes) - R/clr */
12405 +#define E1000_PTC1023 0x040E8 /* Packets Tx (512-1023 bytes) - R/clr */
12406 +#define E1000_PTC1522 0x040EC /* Packets Tx (1024-1522 Bytes) - R/clr */
12407 +#define E1000_MPTC 0x040F0 /* Multicast Packets Tx Count - R/clr */
12408 +#define E1000_BPTC 0x040F4 /* Broadcast Packets Tx Count - R/clr */
12409 +#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */
12410 +#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context Tx Fail - R/clr */
12411 +#define E1000_IAC 0x04100 /* Interrupt Assertion Count */
12412 +#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Pkt Timer Expire Count */
12413 +#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Abs Timer Expire Count */
12414 +#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Pkt Timer Expire Count */
12415 +#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Abs Timer Expire Count */
12416 +#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */
12417 +#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Min Thresh Count */
12418 +#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Desc Min Thresh Count */
12419 +#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */
12421 +#define E1000_LSECTXUT 0x04300 /* LinkSec Tx Untagged Packet Count - OutPktsUntagged */
12422 +#define E1000_LSECTXPKTE 0x04304 /* LinkSec Encrypted Tx Packets Count - OutPktsEncrypted */
12423 +#define E1000_LSECTXPKTP 0x04308 /* LinkSec Protected Tx Packet Count - OutPktsProtected */
12424 +#define E1000_LSECTXOCTE 0x0430C /* LinkSec Encrypted Tx Octets Count - OutOctetsEncrypted */
12425 +#define E1000_LSECTXOCTP 0x04310 /* LinkSec Protected Tx Octets Count - OutOctetsProtected */
12426 +#define E1000_LSECRXUT 0x04314 /* LinkSec Untagged non-Strict Rx Packet Count - InPktsUntagged/InPktsNoTag */
12427 +#define E1000_LSECRXOCTD 0x0431C /* LinkSec Rx Octets Decrypted Count - InOctetsDecrypted */
12428 +#define E1000_LSECRXOCTV 0x04320 /* LinkSec Rx Octets Validated - InOctetsValidated */
12429 +#define E1000_LSECRXBAD 0x04324 /* LinkSec Rx Bad Tag - InPktsBadTag */
12430 +#define E1000_LSECRXNOSCI 0x04328 /* LinkSec Rx Packet No SCI Count - InPktsNoSci */
12431 +#define E1000_LSECRXUNSCI 0x0432C /* LinkSec Rx Packet Unknown SCI Count - InPktsUnknownSci */
12432 +#define E1000_LSECRXUNCH 0x04330 /* LinkSec Rx Unchecked Packets Count - InPktsUnchecked */
12433 +#define E1000_LSECRXDELAY 0x04340 /* LinkSec Rx Delayed Packet Count - InPktsDelayed */
12434 +#define E1000_LSECRXLATE 0x04350 /* LinkSec Rx Late Packets Count - InPktsLate */
12435 +#define E1000_LSECRXOK(_n) (0x04360 + (0x04 * (_n))) /* LinkSec Rx Packet OK Count - InPktsOk */
12436 +#define E1000_LSECRXINV(_n) (0x04380 + (0x04 * (_n))) /* LinkSec Rx Invalid Count - InPktsInvalid */
12437 +#define E1000_LSECRXNV(_n) (0x043A0 + (0x04 * (_n))) /* LinkSec Rx Not Valid Count - InPktsNotValid */
12438 +#define E1000_LSECRXUNSA 0x043C0 /* LinkSec Rx Unused SA Count - InPktsUnusedSa */
12439 +#define E1000_LSECRXNUSA 0x043D0 /* LinkSec Rx Not Using SA Count - InPktsNotUsingSa */
12440 +#define E1000_LSECTXCAP 0x0B000 /* LinkSec Tx Capabilities Register - RO */
12441 +#define E1000_LSECRXCAP 0x0B300 /* LinkSec Rx Capabilities Register - RO */
12442 +#define E1000_LSECTXCTRL 0x0B004 /* LinkSec Tx Control - RW */
12443 +#define E1000_LSECRXCTRL 0x0B304 /* LinkSec Rx Control - RW */
12444 +#define E1000_LSECTXSCL 0x0B008 /* LinkSec Tx SCI Low - RW */
12445 +#define E1000_LSECTXSCH 0x0B00C /* LinkSec Tx SCI High - RW */
12446 +#define E1000_LSECTXSA 0x0B010 /* LinkSec Tx SA0 - RW */
12447 +#define E1000_LSECTXPN0 0x0B018 /* LinkSec Tx SA PN 0 - RW */
12448 +#define E1000_LSECTXPN1 0x0B01C /* LinkSec Tx SA PN 1 - RW */
12449 +#define E1000_LSECRXSCL 0x0B3D0 /* LinkSec Rx SCI Low - RW */
12450 +#define E1000_LSECRXSCH 0x0B3E0 /* LinkSec Rx SCI High - RW */
12451 +#define E1000_LSECTXKEY0(_n) (0x0B020 + (0x04 * (_n))) /* LinkSec Tx 128-bit Key 0 - WO */
12452 +#define E1000_LSECTXKEY1(_n) (0x0B030 + (0x04 * (_n))) /* LinkSec Tx 128-bit Key 1 - WO */
12453 +#define E1000_LSECRXSA(_n) (0x0B310 + (0x04 * (_n))) /* LinkSec Rx SAs - RW */
12454 +#define E1000_LSECRXPN(_n) (0x0B330 + (0x04 * (_n))) /* LinkSec Rx SAs - RW */
12456 + * LinkSec Rx Keys - where _n is the SA no. and _m the 4 dwords of the 128 bit
12459 +#define E1000_LSECRXKEY(_n, _m) (0x0B350 + (0x10 * (_n)) + (0x04 * (_m)))
12461 +#define E1000_SSVPC 0x041A0 /* Switch Security Violation Packet Count */
12462 +#define E1000_IPSCTRL 0xB430 /* IpSec Control Register */
12463 +#define E1000_IPSRXCMD 0x0B408 /* IPSec Rx Command Register - RW */
12464 +#define E1000_IPSRXIDX 0x0B400 /* IPSec Rx Index - RW */
12465 +#define E1000_IPSRXIPADDR(_n) (0x0B420+ (0x04 * (_n))) /* IPSec Rx IPv4/v6 Address - RW */
12466 +#define E1000_IPSRXKEY(_n) (0x0B410 + (0x04 * (_n))) /* IPSec Rx 128-bit Key - RW */
12467 +#define E1000_IPSRXSALT 0x0B404 /* IPSec Rx Salt - RW */
12468 +#define E1000_IPSRXSPI 0x0B40C /* IPSec Rx SPI - RW */
12469 +#define E1000_IPSTXKEY(_n) (0x0B460 + (0x04 * (_n))) /* IPSec Tx 128-bit Key - RW */
12470 +#define E1000_IPSTXSALT 0x0B454 /* IPSec Tx Salt - RW */
12471 +#define E1000_IPSTXIDX 0x0B450 /* IPSec Tx SA IDX - RW */
12472 +#define E1000_PCS_CFG0 0x04200 /* PCS Configuration 0 - RW */
12473 +#define E1000_PCS_LCTL 0x04208 /* PCS Link Control - RW */
12474 +#define E1000_PCS_LSTAT 0x0420C /* PCS Link Status - RO */
12475 +#define E1000_CBTMPC 0x0402C /* Circuit Breaker Tx Packet Count */
12476 +#define E1000_HTDPMC 0x0403C /* Host Transmit Discarded Packets */
12477 +#define E1000_CBRDPC 0x04044 /* Circuit Breaker Rx Dropped Count */
12478 +#define E1000_CBRMPC 0x040FC /* Circuit Breaker Rx Packet Count */
12479 +#define E1000_RPTHC 0x04104 /* Rx Packets To Host */
12480 +#define E1000_HGPTC 0x04118 /* Host Good Packets Tx Count */
12481 +#define E1000_HTCBDPC 0x04124 /* Host Tx Circuit Breaker Dropped Count */
12482 +#define E1000_HGORCL 0x04128 /* Host Good Octets Received Count Low */
12483 +#define E1000_HGORCH 0x0412C /* Host Good Octets Received Count High */
12484 +#define E1000_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */
12485 +#define E1000_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */
12486 +#define E1000_LENERRS 0x04138 /* Length Errors Count */
12487 +#define E1000_SCVPC 0x04228 /* SerDes/SGMII Code Violation Pkt Count */
12488 +#define E1000_HRMPC 0x0A018 /* Header Redirection Missed Packet Count */
12489 +#define E1000_PCS_ANADV 0x04218 /* AN advertisement - RW */
12490 +#define E1000_PCS_LPAB 0x0421C /* Link Partner Ability - RW */
12491 +#define E1000_PCS_NPTX 0x04220 /* AN Next Page Transmit - RW */
12492 +#define E1000_PCS_LPABNP 0x04224 /* Link Partner Ability Next Page - RW */
12493 +#define E1000_1GSTAT_RCV 0x04228 /* 1GSTAT Code Violation Packet Count - RW */
12494 +#define E1000_RXCSUM 0x05000 /* Rx Checksum Control - RW */
12495 +#define E1000_RLPML 0x05004 /* Rx Long Packet Max Length */
12496 +#define E1000_RFCTL 0x05008 /* Receive Filter Control */
12497 +#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */
12498 +#define E1000_RA 0x05400 /* Receive Address - RW Array */
12499 +#define E1000_RA2 0x054E0 /* 2nd half of receive address array - RW Array */
12500 +#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */
12501 +#define E1000_VT_CTL 0x0581C /* VMDq Control - RW */
12502 +#define E1000_VFQA0 0x0B000 /* VLAN Filter Queue Array 0 - RW Array */
12503 +#define E1000_VFQA1 0x0B200 /* VLAN Filter Queue Array 1 - RW Array */
12504 +#define E1000_WUC 0x05800 /* Wakeup Control - RW */
12505 +#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */
12506 +#define E1000_WUS 0x05810 /* Wakeup Status - RO */
12507 +#define E1000_MANC 0x05820 /* Management Control - RW */
12508 +#define E1000_IPAV 0x05838 /* IP Address Valid - RW */
12509 +#define E1000_IP4AT 0x05840 /* IPv4 Address Table - RW Array */
12510 +#define E1000_IP6AT 0x05880 /* IPv6 Address Table - RW Array */
12511 +#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */
12512 +#define E1000_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */
12513 +#define E1000_PBACL 0x05B68 /* MSIx PBA Clear - Read/Write 1's to clear */
12514 +#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */
12515 +#define E1000_HOST_IF 0x08800 /* Host Interface */
12516 +#define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */
12517 +#define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */
12518 +#define E1000_FHFT(_n) (0x09000 + (_n * 0x100)) /* Flexible Host Filter Table */
12519 +#define E1000_FHFT_EXT(_n) (0x09A00 + (_n * 0x100)) /* Ext Flexible Host Filter Table */
12522 +#define E1000_KMRNCTRLSTA 0x00034 /* MAC-PHY interface - RW */
12523 +#define E1000_MDPHYA 0x0003C /* PHY address - RW */
12524 +#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */
12525 +#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */
12526 +#define E1000_CCMCTL 0x05B48 /* CCM Control Register */
12527 +#define E1000_GIOCTL 0x05B44 /* GIO Analog Control Register */
12528 +#define E1000_SCCTL 0x05B4C /* PCIc PLL Configuration Register */
12529 +#define E1000_GCR 0x05B00 /* PCI-Ex Control */
12530 +#define E1000_GCR2 0x05B64 /* PCI-Ex Control #2 */
12531 +#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */
12532 +#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */
12533 +#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */
12534 +#define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */
12535 +#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */
12536 +#define E1000_SWSM 0x05B50 /* SW Semaphore */
12537 +#define E1000_FWSM 0x05B54 /* FW Semaphore */
12538 +#define E1000_SWSM2 0x05B58 /* Driver-only SW semaphore (not used by BOOT agents) */
12539 +#define E1000_DCA_ID 0x05B70 /* DCA Requester ID Information - RO */
12540 +#define E1000_DCA_CTRL 0x05B74 /* DCA Control - RW */
12541 +#define E1000_FFLT_DBG 0x05F04 /* Debug Register */
12542 +#define E1000_HICR 0x08F00 /* Host Interface Control */
12544 +/* RSS registers */
12545 +#define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */
12546 +#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */
12547 +#define E1000_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */
12548 +#define E1000_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate Interrupt Ext*/
12549 +#define E1000_IMIRVP 0x05AC0 /* Immediate Interrupt Rx VLAN Priority - RW */
12550 +#define E1000_MSIXBM(_i) (0x01600 + ((_i) * 4)) /* MSI-X Allocation Register
12552 +#define E1000_MSIXTADD(_i) (0x0C000 + ((_i) * 0x10)) /* MSI-X Table entry addr
12553 + * low reg - RW */
12554 +#define E1000_MSIXTUADD(_i) (0x0C004 + ((_i) * 0x10)) /* MSI-X Table entry addr
12555 + * upper reg - RW */
12556 +#define E1000_MSIXTMSG(_i) (0x0C008 + ((_i) * 0x10)) /* MSI-X Table entry
12557 + * message reg - RW */
12558 +#define E1000_MSIXVCTRL(_i) (0x0C00C + ((_i) * 0x10)) /* MSI-X Table entry
12559 + * vector ctrl reg - RW */
12560 +#define E1000_MSIXPBA 0x0E000 /* MSI-X Pending bit array */
12561 +#define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) /* Redirection Table - RW */
12562 +#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW */
12563 +#define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */
12564 +#define E1000_RSSIR 0x05868 /* RSS Interrupt Request */
12565 +/* VT Registers */
12566 +#define E1000_SWPBS 0x03004 /* Switch Packet Buffer Size - RW */
12567 +#define E1000_MBVFICR 0x00C80 /* Mailbox VF Cause - RWC */
12568 +#define E1000_MBVFIMR 0x00C84 /* Mailbox VF int Mask - RW */
12569 +#define E1000_VFLRE 0x00C88 /* VF Register Events - RWC */
12570 +#define E1000_VFRE 0x00C8C /* VF Receive Enables */
12571 +#define E1000_VFTE 0x00C90 /* VF Transmit Enables */
12572 +#define E1000_QDE 0x02408 /* Queue Drop Enable - RW */
12573 +#define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */
12574 +#define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */
12575 +#define E1000_UTA 0x0A000 /* Unicast Table Array - RW */
12576 +#define E1000_IOVTCL 0x05BBC /* IOV Control Register */
12577 +#define E1000_VMRCTL 0x05D80 /* Virtual Mirror Rule Control */
12578 +/* These act per VF so an array friendly macro is used */
12579 +#define E1000_V2PMAILBOX(_n) (0x00C40 + (4 * (_n)))
12580 +#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n)))
12581 +#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n)))
12582 +#define E1000_VFVMBMEM(_n) (0x00800 + (_n))
12583 +#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n)))
12584 +#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN Virtual Machine
12587 +#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */
12588 +#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */
12589 +#define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */
12590 +#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */
12591 +#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */
12592 +#define E1000_RXSATRL 0x0B62C /* Rx timestamp attribute low - RO */
12593 +#define E1000_RXSATRH 0x0B630 /* Rx timestamp attribute high - RO */
12594 +#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */
12595 +#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */
12596 +#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */
12597 +#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */
12598 +#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */
12600 +/* Filtering Registers */
12601 +#define E1000_SAQF(_n) (0x05980 + (4 * (_n))) /* Source Address Queue Fltr */
12602 +#define E1000_DAQF(_n) (0x059A0 + (4 * (_n))) /* Dest Address Queue Fltr */
12603 +#define E1000_SPQF(_n) (0x059C0 + (4 * (_n))) /* Source Port Queue Fltr */
12604 +#define E1000_FTQF(_n) (0x059E0 + (4 * (_n))) /* 5-tuple Queue Fltr */
12605 +#define E1000_TTQF(_n) (0x059E0 + (4 * (_n))) /* 2-tuple Queue Fltr */
12606 +#define E1000_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */
12607 +#define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */
12609 +#define E1000_RTTDCS 0x3600 /* Reedtown Tx Desc plane control and status */
12610 +#define E1000_RTTPCS 0x3474 /* Reedtown Tx Packet Plane control and status */
12611 +#define E1000_RTRPCS 0x2474 /* Rx packet plane control and status */
12612 +#define E1000_RTRUP2TC 0x05AC4 /* Rx User Priority to Traffic Class */
12613 +#define E1000_RTTUP2TC 0x0418 /* Transmit User Priority to Traffic Class */
12614 +#define E1000_RTTDTCRC(_n) (0x3610 + ((_n) * 4)) /* Tx Desc plane TC Rate-scheduler config */
12615 +#define E1000_RTTPTCRC(_n) (0x3480 + ((_n) * 4)) /* Tx Packet plane TC Rate-Scheduler Config */
12616 +#define E1000_RTRPTCRC(_n) (0x2480 + ((_n) * 4)) /* Rx Packet plane TC Rate-Scheduler Config */
12617 +#define E1000_RTTDTCRS(_n) (0x3630 + ((_n) * 4)) /* Tx Desc Plane TC Rate-Scheduler Status */
12618 +#define E1000_RTTDTCRM(_n) (0x3650 + ((_n) * 4)) /* Tx Desc Plane TC Rate-Scheduler MMW */
12619 +#define E1000_RTTPTCRS(_n) (0x34A0 + ((_n) * 4)) /* Tx Packet plane TC Rate-Scheduler Status */
12620 +#define E1000_RTTPTCRM(_n) (0x34C0 + ((_n) * 4)) /* Tx Packet plane TC Rate-scheduler MMW */
12621 +#define E1000_RTRPTCRS(_n) (0x24A0 + ((_n) * 4)) /* Rx Packet plane TC Rate-Scheduler Status */
12622 +#define E1000_RTRPTCRM(_n) (0x24C0 + ((_n) * 4)) /* Rx Packet plane TC Rate-Scheduler MMW */
12623 +#define E1000_RTTDVMRM(_n) (0x3670 + ((_n) * 4)) /* Tx Desc plane VM Rate-Scheduler MMW*/
12624 +#define E1000_RTTBCNRM(_n) (0x3690 + ((_n) * 4)) /* Tx BCN Rate-Scheduler MMW */
12625 +#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select */
12626 +#define E1000_RTTDVMRC 0x3608 /* Tx Desc Plane VM Rate-Scheduler Config */
12627 +#define E1000_RTTDVMRS 0x360C /* Tx Desc Plane VM Rate-Scheduler Status */
12628 +#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config */
12629 +#define E1000_RTTBCNRS 0x36B4 /* Tx BCN Rate-Scheduler Status */
12630 +#define E1000_RTTBCNCR 0xB200 /* Tx BCN Control Register */
12631 +#define E1000_RTTBCNTG 0x35A4 /* Tx BCN Tagging */
12632 +#define E1000_RTTBCNCP 0xB208 /* Tx BCN Congestion point */
12633 +#define E1000_RTRBCNCR 0xB20C /* Rx BCN Control Register */
12634 +#define E1000_RTTBCNRD 0x36B8 /* Tx BCN Rate Drift */
12635 +#define E1000_PFCTOP 0x1080 /* Priority Flow Control Type and Opcode */
12636 +#define E1000_RTTBCNIDX 0xB204 /* Tx BCN Congestion Point */
12637 +#define E1000_RTTBCNACH 0x0B214 /* Tx BCN Control High */
12638 +#define E1000_RTTBCNACL 0x0B210 /* Tx BCN Control Low */
12641 Index: linux-2.6.22/drivers/net/igb/igb.h
12642 ===================================================================
12643 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
12644 +++ linux-2.6.22/drivers/net/igb/igb.h 2009-12-18 12:39:22.000000000 -0500
12646 +/*******************************************************************************
12648 + Intel(R) Gigabit Ethernet Linux driver
12649 + Copyright(c) 2007-2009 Intel Corporation.
12651 + This program is free software; you can redistribute it and/or modify it
12652 + under the terms and conditions of the GNU General Public License,
12653 + version 2, as published by the Free Software Foundation.
12655 + This program is distributed in the hope it will be useful, but WITHOUT
12656 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12657 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12660 + You should have received a copy of the GNU General Public License along with
12661 + this program; if not, write to the Free Software Foundation, Inc.,
12662 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
12664 + The full GNU General Public License is included in this distribution in
12665 + the file called "COPYING".
12667 + Contact Information:
12668 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
12669 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
12671 +*******************************************************************************/
12674 +/* Linux PRO/1000 Ethernet Driver main header file */
12679 +#include <linux/pci.h>
12680 +#include <linux/netdevice.h>
12681 +#include <linux/vmalloc.h>
12683 +#ifdef SIOCETHTOOL
12684 +#include <linux/ethtool.h>
12687 +#ifdef SIOCSHWTSTAMP
12688 +#include <linux/clocksource.h>
12689 +#include <linux/timecompare.h>
12690 +#include <linux/net_tstamp.h>
12692 +struct igb_adapter;
12694 +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
12698 +#include <linux/dca.h>
12704 +#ifdef NETIF_F_LRO
12705 +#if defined(CONFIG_INET_LRO) || defined(CONFIG_INET_LRO_MODULE)
12706 +#include <linux/inet_lro.h>
12707 +#define MAX_LRO_DESCRIPTORS 8
12711 +#endif /* IGB_LRO */
12713 +#include "kcompat.h"
12715 +#include "e1000_api.h"
12716 +#include "e1000_82575.h"
12718 +#define IGB_ERR(args...) printk(KERN_ERR "igb: " args)
12720 +#define PFX "igb: "
12721 +#define DPRINTK(nlevel, klevel, fmt, args...) \
12722 + (void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
12723 + printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
12724 + __FUNCTION__ , ## args))
12726 +/* Interrupt defines */
12727 +#define IGB_START_ITR 648 /* ~6000 ints/sec */
12729 +/* Interrupt modes, as used by the IntMode parameter */
12730 +#define IGB_INT_MODE_LEGACY 0
12731 +#define IGB_INT_MODE_MSI 1
12732 +#define IGB_INT_MODE_MSIX 2
12735 +/* TX/RX descriptor defines */
12736 +#define IGB_DEFAULT_TXD 256
12737 +#define IGB_MIN_TXD 80
12738 +#define IGB_MAX_TXD 4096
12740 +#define IGB_DEFAULT_RXD 256
12741 +#define IGB_MIN_RXD 80
12742 +#define IGB_MAX_RXD 4096
12744 +#define IGB_MIN_ITR_USECS 10 /* 100k irq/sec */
12745 +#define IGB_MAX_ITR_USECS 8191 /* 120 irq/sec */
12747 +#define NON_Q_VECTORS 1
12748 +#define MAX_Q_VECTORS 8
12750 +/* Transmit and receive queues */
12751 +#define IGB_MAX_RX_QUEUES (adapter->vfs_allocated_count ? 2 : \
12752 + (hw->mac.type > e1000_82575 ? 8 : 4))
12753 +#define IGB_ABS_MAX_TX_QUEUES 8
12754 +#define IGB_MAX_TX_QUEUES IGB_MAX_RX_QUEUES
12756 +#define IGB_MAX_VF_MC_ENTRIES 30
12757 +#define IGB_MAX_VF_FUNCTIONS 8
12758 +#define IGB_MAX_VFTA_ENTRIES 128
12759 +#define IGB_MAX_UTA_ENTRIES 128
12760 +#define MAX_EMULATION_MAC_ADDRS 16
12763 +struct vf_data_storage {
12764 + unsigned char vf_mac_addresses[ETH_ALEN];
12765 + u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES];
12766 + u16 num_vf_mc_hashes;
12767 + u16 default_vf_vlan_id;
12768 + u16 vlans_enabled;
12769 + unsigned char em_mac_addresses[MAX_EMULATION_MAC_ADDRS * ETH_ALEN];
12770 + u32 uta_table_copy[IGB_MAX_UTA_ENTRIES];
12772 + unsigned long last_nack;
12775 +#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */
12776 +#define IGB_VF_FLAG_UNI_PROMISC 0x00000002 /* VF has unicast promisc */
12777 +#define IGB_VF_FLAG_MULTI_PROMISC 0x00000004 /* VF has multicast promisc */
12779 +/* RX descriptor control thresholds.
12780 + * PTHRESH - MAC will consider prefetch if it has fewer than this number of
12781 + * descriptors available in its onboard memory.
12782 + * Setting this to 0 disables RX descriptor prefetch.
12783 + * HTHRESH - MAC will only prefetch if there are at least this many descriptors
12784 + * available in host memory.
12785 + * If PTHRESH is 0, this should also be 0.
12786 + * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back
12787 + * descriptors until either it has this many to write back, or the
12788 + * ITR timer expires.
12790 +#define IGB_RX_PTHRESH (hw->mac.type <= e1000_82576 ? 16 : 8)
12791 +#define IGB_RX_HTHRESH 8
12792 +#define IGB_RX_WTHRESH 1
12793 +#define IGB_TX_PTHRESH 8
12794 +#define IGB_TX_HTHRESH 1
12795 +#define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \
12796 + adapter->msix_entries) ? 0 : 16)
12798 +/* this is the size past which hardware will drop packets when setting LPE=0 */
12799 +#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
12801 +/* Supported Rx Buffer Sizes */
12802 +#define IGB_RXBUFFER_128 128 /* Used for packet split */
12803 +#define IGB_RXBUFFER_256 256 /* Used for packet split */
12804 +#define IGB_RXBUFFER_512 512
12805 +#define IGB_RXBUFFER_1024 1024
12806 +#define IGB_RXBUFFER_2048 2048
12807 +#define IGB_RXBUFFER_4096 4096
12808 +#define IGB_RXBUFFER_8192 8192
12809 +#define IGB_RXBUFFER_16384 16384
12811 +/* Packet Buffer allocations */
12812 +#define IGB_PBA_BYTES_SHIFT 0xA
12813 +#define IGB_TX_HEAD_ADDR_SHIFT 7
12814 +#define IGB_PBA_TX_MASK 0xFFFF0000
12816 +#define IGB_FC_PAUSE_TIME 0x0680 /* 858 usec */
12818 +/* How many Tx Descriptors do we need to call netif_wake_queue? */
12819 +#define IGB_TX_QUEUE_WAKE 32
12820 +/* How many Rx Buffers do we bundle into one write to the hardware? */
12821 +#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */
12823 +#define AUTO_ALL_MODES 0
12824 +#define IGB_EEPROM_APME 0x0400
12826 +#ifndef IGB_MASTER_SLAVE
12827 +/* Switch to override PHY master/slave setting */
12828 +#define IGB_MASTER_SLAVE e1000_ms_hw_default
12831 +#define IGB_MNG_VLAN_NONE -1
12833 +/* wrapper around a pointer to a socket buffer,
12834 + * so a DMA handle can be stored along with the buffer */
12835 +struct igb_buffer {
12836 + struct sk_buff *skb;
12838 + dma_addr_t page_dma;
12842 + unsigned long time_stamp;
12844 + u16 next_to_watch;
12847 +#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
12850 + unsigned long page_offset;
12851 + struct page *page;
12857 +struct igb_queue_stats {
12862 +struct igb_q_vector {
12863 + struct igb_adapter *adapter; /* backlink */
12864 + struct igb_ring *rx_ring;
12865 + struct igb_ring *tx_ring;
12866 + struct napi_struct napi;
12874 + void __iomem *itr_register;
12876 + char name[IFNAMSIZ + 9];
12877 +#ifndef HAVE_NETDEV_NAPI_LIST
12878 + struct net_device poll_dev;
12883 + struct igb_q_vector *q_vector; /* backlink to q_vector */
12884 + struct pci_dev *pdev; /* pci device for dma mapping */
12885 + dma_addr_t dma; /* phys address of the ring */
12886 + void *desc; /* descriptor ring memory */
12887 + unsigned int size; /* length of desc. ring in bytes */
12888 + u16 count; /* number of desc. in the ring */
12890 + u16 next_to_clean;
12893 + void __iomem *head;
12894 + void __iomem *tail;
12895 + struct igb_buffer *buffer_info; /* array of buffer info structs */
12897 + unsigned int total_bytes;
12898 + unsigned int total_packets;
12900 + struct igb_queue_stats stats;
12905 + unsigned int restart_queue;
12907 + bool detect_tx_hung;
12912 + u64 hw_csum_good;
12913 + u32 rx_buffer_len;
12914 + u16 rx_ps_hdr_size;
12917 + struct net_lro_mgr lro_mgr;
12925 +#define IGB_ADVTXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)
12927 +#define IGB_DESC_UNUSED(R) \
12928 + ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
12929 + (R)->next_to_clean - (R)->next_to_use - 1)
12931 +#define E1000_RX_DESC_ADV(R, i) \
12932 + (&(((union e1000_adv_rx_desc *)((R).desc))[i]))
12933 +#define E1000_TX_DESC_ADV(R, i) \
12934 + (&(((union e1000_adv_tx_desc *)((R).desc))[i]))
12935 +#define E1000_TX_CTXTDESC_ADV(R, i) \
12936 + (&(((struct e1000_adv_tx_context_desc *)((R).desc))[i]))
12937 +#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
12938 +#define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc)
12939 +#define E1000_RX_DESC(R, i) E1000_GET_DESC(R, i, e1000_rx_desc)
12941 +#define MAX_MSIX_COUNT 10
12942 +/* board specific private data structure */
12944 +struct igb_adapter {
12945 + struct timer_list watchdog_timer;
12946 + struct timer_list phy_info_timer;
12947 + struct vlan_group *vlgrp;
12955 + unsigned int total_tx_bytes;
12956 + unsigned int total_tx_packets;
12957 + unsigned int total_rx_bytes;
12958 + unsigned int total_rx_packets;
12959 + /* Interrupt Throttle Rate */
12965 + struct work_struct reset_task;
12966 + struct work_struct watchdog_task;
12968 + u8 tx_timeout_factor;
12969 +#ifdef ETHTOOL_PHYS_ID
12970 + struct timer_list blink_timer;
12971 + unsigned long led_status;
12975 + struct igb_ring *tx_ring; /* One per active queue */
12976 + unsigned int restart_queue;
12977 + unsigned long tx_queue_len;
12978 + u32 tx_timeout_count;
12981 + struct igb_ring *rx_ring; /* One per active queue */
12982 + int num_tx_queues;
12983 + int num_rx_queues;
12986 + u64 hw_csum_good;
12987 + u32 alloc_rx_buff_failed;
12988 + u32 max_frame_size;
12989 + u32 min_frame_size;
12991 + /* OS defined structs */
12992 + struct net_device *netdev;
12993 + struct pci_dev *pdev;
12994 + struct net_device_stats net_stats;
12995 +#ifdef SIOCSHWTSTAMP
12996 + struct cyclecounter cycles;
12997 + struct timecounter clock;
12998 + struct timecompare compare;
12999 + struct hwtstamp_config hwtstamp_config;
13002 + /* structs defined in e1000_hw.h */
13003 + struct e1000_hw hw;
13004 + struct e1000_hw_stats stats;
13005 + struct e1000_phy_info phy_info;
13006 + struct e1000_phy_stats phy_stats;
13008 +#ifdef ETHTOOL_TEST
13010 + struct igb_ring test_tx_ring;
13011 + struct igb_ring test_rx_ring;
13016 + struct msix_entry *msix_entries;
13018 + u32 eims_enable_mask;
13022 + unsigned long state;
13023 + unsigned int flags;
13025 + u32 *config_space;
13027 + struct igb_ring *multi_tx_table[IGB_ABS_MAX_TX_QUEUES];
13028 +#endif /* HAVE_TX_MQ */
13030 + unsigned int lro_max_aggr;
13031 + unsigned int lro_aggregated;
13032 + unsigned int lro_flushed;
13033 + unsigned int lro_no_desc;
13035 + u16 tx_ring_count;
13036 + u16 rx_ring_count;
13037 + unsigned int vfs_allocated_count;
13038 + struct vf_data_storage *vf_data;
13041 + unsigned int num_q_vectors;
13042 + struct igb_q_vector *q_vector[MAX_Q_VECTORS];
13046 +#define IGB_FLAG_HAS_MSI (1 << 0)
13047 +#define IGB_FLAG_MSI_ENABLE (1 << 1)
13048 +#define IGB_FLAG_DCA_ENABLED (1 << 3)
13049 +#define IGB_FLAG_LLI_PUSH (1 << 4)
13050 +#define IGB_FLAG_IN_NETPOLL (1 << 5)
13051 +#define IGB_FLAG_QUAD_PORT_A (1 << 6)
13052 +#define IGB_FLAG_QUEUE_PAIRS (1 << 7)
13054 +#define IGB_82576_TSYNC_SHIFT 19
13055 +enum e1000_state_t {
13061 +extern char igb_driver_name[];
13062 +extern char igb_driver_version[];
13064 +extern int igb_up(struct igb_adapter *);
13065 +extern void igb_down(struct igb_adapter *);
13066 +extern void igb_reinit_locked(struct igb_adapter *);
13067 +extern void igb_reset(struct igb_adapter *);
13068 +extern int igb_set_spd_dplx(struct igb_adapter *, u16);
13069 +extern int igb_setup_tx_resources(struct igb_ring *);
13070 +extern int igb_setup_rx_resources(struct igb_ring *);
13071 +extern void igb_free_tx_resources(struct igb_ring *);
13072 +extern void igb_free_rx_resources(struct igb_ring *);
13073 +extern void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
13074 +extern void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
13075 +extern void igb_setup_tctl(struct igb_adapter *);
13076 +extern void igb_setup_rctl(struct igb_adapter *);
13077 +extern int igb_alloc_rx_buffers_adv(struct igb_ring *, int);
13078 +extern void igb_update_stats(struct igb_adapter *);
13079 +extern void igb_set_ethtool_ops(struct net_device *);
13080 +extern void igb_check_options(struct igb_adapter *);
13081 +#ifdef ETHTOOL_OPS_COMPAT
13082 +extern int ethtool_ioctl(struct ifreq *);
13084 +extern int igb_set_vf_mac(struct igb_adapter *adapter,
13085 + int vf, unsigned char *mac_addr);
13086 +extern s32 igb_vlvf_set(struct igb_adapter *, u32, bool, u32);
13087 +extern void igb_configure_vt_default_pool(struct igb_adapter *adapter);
13089 +#endif /* _IGB_H_ */
13090 Index: linux-2.6.22/drivers/net/igb/igb_ethtool.c
13091 ===================================================================
13092 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
13093 +++ linux-2.6.22/drivers/net/igb/igb_ethtool.c 2009-12-18 12:39:22.000000000 -0500
13095 +/*******************************************************************************
13097 + Intel(R) Gigabit Ethernet Linux driver
13098 + Copyright(c) 2007-2009 Intel Corporation.
13100 + This program is free software; you can redistribute it and/or modify it
13101 + under the terms and conditions of the GNU General Public License,
13102 + version 2, as published by the Free Software Foundation.
13104 + This program is distributed in the hope it will be useful, but WITHOUT
13105 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13106 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13109 + You should have received a copy of the GNU General Public License along with
13110 + this program; if not, write to the Free Software Foundation, Inc.,
13111 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
13113 + The full GNU General Public License is included in this distribution in
13114 + the file called "COPYING".
13116 + Contact Information:
13117 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
13118 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
13120 +*******************************************************************************/
13122 +/* ethtool support for igb */
13124 +#include <linux/netdevice.h>
13125 +#include <linux/vmalloc.h>
13127 +#ifdef SIOCETHTOOL
13128 +#include <linux/ethtool.h>
13131 +#include "igb_regtest.h"
13132 +#include <linux/if_vlan.h>
13134 +#ifdef ETHTOOL_OPS_COMPAT
13135 +#include "kcompat_ethtool.c"
13138 +#ifdef ETHTOOL_GSTATS
13139 +struct igb_stats {
13140 + char stat_string[ETH_GSTRING_LEN];
13145 +#define IGB_STAT(m) sizeof(((struct igb_adapter *)0)->m), \
13146 + offsetof(struct igb_adapter, m)
13147 +static const struct igb_stats igb_gstrings_stats[] = {
13148 + { "rx_packets", IGB_STAT(stats.gprc) },
13149 + { "tx_packets", IGB_STAT(stats.gptc) },
13150 + { "rx_bytes", IGB_STAT(stats.gorc) },
13151 + { "tx_bytes", IGB_STAT(stats.gotc) },
13152 + { "rx_broadcast", IGB_STAT(stats.bprc) },
13153 + { "tx_broadcast", IGB_STAT(stats.bptc) },
13154 + { "rx_multicast", IGB_STAT(stats.mprc) },
13155 + { "tx_multicast", IGB_STAT(stats.mptc) },
13156 + { "rx_errors", IGB_STAT(net_stats.rx_errors) },
13157 + { "tx_errors", IGB_STAT(net_stats.tx_errors) },
13158 + { "tx_dropped", IGB_STAT(net_stats.tx_dropped) },
13159 + { "multicast", IGB_STAT(stats.mprc) },
13160 + { "collisions", IGB_STAT(stats.colc) },
13161 + { "rx_length_errors", IGB_STAT(net_stats.rx_length_errors) },
13162 + { "rx_over_errors", IGB_STAT(net_stats.rx_over_errors) },
13163 + { "rx_crc_errors", IGB_STAT(stats.crcerrs) },
13164 + { "rx_frame_errors", IGB_STAT(net_stats.rx_frame_errors) },
13165 + { "rx_no_buffer_count", IGB_STAT(stats.rnbc) },
13166 + { "rx_missed_errors", IGB_STAT(stats.mpc) },
13167 + { "tx_aborted_errors", IGB_STAT(stats.ecol) },
13168 + { "tx_carrier_errors", IGB_STAT(stats.tncrs) },
13169 + { "tx_fifo_errors", IGB_STAT(net_stats.tx_fifo_errors) },
13170 + { "tx_heartbeat_errors", IGB_STAT(net_stats.tx_heartbeat_errors) },
13171 + { "tx_window_errors", IGB_STAT(stats.latecol) },
13172 + { "tx_abort_late_coll", IGB_STAT(stats.latecol) },
13173 + { "tx_deferred_ok", IGB_STAT(stats.dc) },
13174 + { "tx_single_coll_ok", IGB_STAT(stats.scc) },
13175 + { "tx_multi_coll_ok", IGB_STAT(stats.mcc) },
13176 + { "tx_timeout_count", IGB_STAT(tx_timeout_count) },
13177 + { "tx_restart_queue", IGB_STAT(restart_queue) },
13178 + { "rx_long_length_errors", IGB_STAT(stats.roc) },
13179 + { "rx_short_length_errors", IGB_STAT(stats.ruc) },
13180 + { "rx_align_errors", IGB_STAT(stats.algnerrc) },
13181 + { "tx_tcp_seg_good", IGB_STAT(stats.tsctc) },
13182 + { "tx_tcp_seg_failed", IGB_STAT(stats.tsctfc) },
13183 + { "rx_flow_control_xon", IGB_STAT(stats.xonrxc) },
13184 + { "rx_flow_control_xoff", IGB_STAT(stats.xoffrxc) },
13185 + { "tx_flow_control_xon", IGB_STAT(stats.xontxc) },
13186 + { "tx_flow_control_xoff", IGB_STAT(stats.xofftxc) },
13187 + { "rx_long_byte_count", IGB_STAT(stats.gorc) },
13188 + { "rx_csum_offload_good", IGB_STAT(hw_csum_good) },
13189 + { "rx_csum_offload_errors", IGB_STAT(hw_csum_err) },
13190 + { "tx_dma_out_of_sync", IGB_STAT(stats.doosync) },
13191 + { "alloc_rx_buff_failed", IGB_STAT(alloc_rx_buff_failed) },
13192 + { "tx_smbus", IGB_STAT(stats.mgptc) },
13193 + { "rx_smbus", IGB_STAT(stats.mgprc) },
13194 + { "dropped_smbus", IGB_STAT(stats.mgpdc) },
13196 + { "lro_aggregated", IGB_STAT(lro_aggregated) },
13197 + { "lro_flushed", IGB_STAT(lro_flushed) },
13198 + { "lro_no_desc", IGB_STAT(lro_no_desc) },
13202 +#define IGB_QUEUE_STATS_LEN \
13203 + ((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues + \
13204 + ((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues) * \
13205 + (sizeof(struct igb_queue_stats) / sizeof(u64)))
13206 +#define IGB_GLOBAL_STATS_LEN \
13207 + (sizeof(igb_gstrings_stats) / sizeof(struct igb_stats))
13208 +#define IGB_STATS_LEN (IGB_GLOBAL_STATS_LEN + IGB_QUEUE_STATS_LEN)
13209 +#endif /* ETHTOOL_GSTATS */
13210 +#ifdef ETHTOOL_TEST
13211 +static const char igb_gstrings_test[][ETH_GSTRING_LEN] = {
13212 + "Register test (offline)", "Eeprom test (offline)",
13213 + "Interrupt test (offline)", "Loopback test (offline)",
13214 + "Link test (on/offline)"
13216 +#define IGB_TEST_LEN (sizeof(igb_gstrings_test) / ETH_GSTRING_LEN)
13217 +#endif /* ETHTOOL_TEST */
13219 +static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
13221 + struct igb_adapter *adapter = netdev_priv(netdev);
13222 + struct e1000_hw *hw = &adapter->hw;
13225 + if (hw->phy.media_type == e1000_media_type_copper) {
13227 + ecmd->supported = (SUPPORTED_10baseT_Half |
13228 + SUPPORTED_10baseT_Full |
13229 + SUPPORTED_100baseT_Half |
13230 + SUPPORTED_100baseT_Full |
13231 + SUPPORTED_1000baseT_Full|
13232 + SUPPORTED_Autoneg |
13234 + ecmd->advertising = ADVERTISED_TP;
13236 + if (hw->mac.autoneg == 1) {
13237 + ecmd->advertising |= ADVERTISED_Autoneg;
13238 + /* the e1000 autoneg seems to match ethtool nicely */
13239 + ecmd->advertising |= hw->phy.autoneg_advertised;
13242 + ecmd->port = PORT_TP;
13243 + ecmd->phy_address = hw->phy.addr;
13245 + ecmd->supported = (SUPPORTED_1000baseT_Full |
13246 + SUPPORTED_FIBRE |
13247 + SUPPORTED_Autoneg);
13249 + ecmd->advertising = (ADVERTISED_1000baseT_Full |
13250 + ADVERTISED_FIBRE |
13251 + ADVERTISED_Autoneg);
13253 + ecmd->port = PORT_FIBRE;
13256 + ecmd->transceiver = XCVR_INTERNAL;
13258 + status = E1000_READ_REG(hw, E1000_STATUS);
13260 + if (status & E1000_STATUS_LU) {
13262 + if ((status & E1000_STATUS_SPEED_1000) ||
13263 + hw->phy.media_type != e1000_media_type_copper)
13264 + ecmd->speed = SPEED_1000;
13265 + else if (status & E1000_STATUS_SPEED_100)
13266 + ecmd->speed = SPEED_100;
13268 + ecmd->speed = SPEED_10;
13270 + if ((status & E1000_STATUS_FD) ||
13271 + hw->phy.media_type != e1000_media_type_copper)
13272 + ecmd->duplex = DUPLEX_FULL;
13274 + ecmd->duplex = DUPLEX_HALF;
13276 + ecmd->speed = -1;
13277 + ecmd->duplex = -1;
13280 + ecmd->autoneg = hw->mac.autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
13284 +static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
13286 + struct igb_adapter *adapter = netdev_priv(netdev);
13287 + struct e1000_hw *hw = &adapter->hw;
13289 + /* When SoL/IDER sessions are active, autoneg/speed/duplex
13290 + * cannot be changed */
13291 + if (e1000_check_reset_block(hw)) {
13292 + DPRINTK(DRV, ERR, "Cannot change link characteristics "
13293 + "when SoL/IDER is active.\n");
13297 + while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
13300 + if (ecmd->autoneg == AUTONEG_ENABLE) {
13301 + hw->mac.autoneg = 1;
13302 + hw->phy.autoneg_advertised = ecmd->advertising |
13304 + ADVERTISED_Autoneg;
13305 + ecmd->advertising = hw->phy.autoneg_advertised;
13306 + if (adapter->fc_autoneg)
13307 + hw->fc.requested_mode = e1000_fc_default;
13309 + if (igb_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) {
13310 + clear_bit(__IGB_RESETTING, &adapter->state);
13315 + /* reset the link */
13316 + if (netif_running(adapter->netdev)) {
13317 + igb_down(adapter);
13320 + igb_reset(adapter);
13322 + clear_bit(__IGB_RESETTING, &adapter->state);
13326 +static void igb_get_pauseparam(struct net_device *netdev,
13327 + struct ethtool_pauseparam *pause)
13329 + struct igb_adapter *adapter = netdev_priv(netdev);
13330 + struct e1000_hw *hw = &adapter->hw;
13333 + (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
13335 + if (hw->fc.current_mode == e1000_fc_rx_pause)
13336 + pause->rx_pause = 1;
13337 + else if (hw->fc.current_mode == e1000_fc_tx_pause)
13338 + pause->tx_pause = 1;
13339 + else if (hw->fc.current_mode == e1000_fc_full) {
13340 + pause->rx_pause = 1;
13341 + pause->tx_pause = 1;
13345 +static int igb_set_pauseparam(struct net_device *netdev,
13346 + struct ethtool_pauseparam *pause)
13348 + struct igb_adapter *adapter = netdev_priv(netdev);
13349 + struct e1000_hw *hw = &adapter->hw;
13352 + adapter->fc_autoneg = pause->autoneg;
13354 + while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
13357 + if (adapter->fc_autoneg == AUTONEG_ENABLE) {
13358 + hw->fc.requested_mode = e1000_fc_default;
13359 + if (netif_running(adapter->netdev)) {
13360 + igb_down(adapter);
13363 + igb_reset(adapter);
13366 + if (pause->rx_pause && pause->tx_pause)
13367 + hw->fc.requested_mode = e1000_fc_full;
13368 + else if (pause->rx_pause && !pause->tx_pause)
13369 + hw->fc.requested_mode = e1000_fc_rx_pause;
13370 + else if (!pause->rx_pause && pause->tx_pause)
13371 + hw->fc.requested_mode = e1000_fc_tx_pause;
13372 + else if (!pause->rx_pause && !pause->tx_pause)
13373 + hw->fc.requested_mode = e1000_fc_none;
13375 + hw->fc.current_mode = hw->fc.requested_mode;
13377 + retval = ((hw->phy.media_type == e1000_media_type_copper) ?
13378 + e1000_force_mac_fc(hw) : hw->mac.ops.setup_link(hw));
13381 + clear_bit(__IGB_RESETTING, &adapter->state);
13385 +static u32 igb_get_rx_csum(struct net_device *netdev)
13387 + struct igb_adapter *adapter = netdev_priv(netdev);
13388 + return adapter->rx_ring[0].rx_csum;
13391 +static int igb_set_rx_csum(struct net_device *netdev, u32 data)
13393 + struct igb_adapter *adapter = netdev_priv(netdev);
13396 + for (i = 0; i < adapter->num_rx_queues; i++)
13397 + adapter->rx_ring[i].rx_csum = !!data;
13402 +static u32 igb_get_tx_csum(struct net_device *netdev)
13404 + return (netdev->features & NETIF_F_IP_CSUM) != 0;
13407 +static int igb_set_tx_csum(struct net_device *netdev, u32 data)
13409 + struct igb_adapter *adapter = netdev_priv(netdev);
13412 +#ifdef NETIF_F_IPV6_CSUM
13413 + netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
13414 + if (adapter->hw.mac.type >= e1000_82576)
13415 + netdev->features |= NETIF_F_SCTP_CSUM;
13417 + netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
13418 + NETIF_F_SCTP_CSUM);
13420 + netdev->features |= NETIF_F_IP_CSUM;
13421 + if (adapter->hw.mac.type == e1000_82576)
13422 + netdev->features |= NETIF_F_SCTP_CSUM;
13424 + netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_SCTP_CSUM);
13431 +#ifdef NETIF_F_TSO
13432 +static int igb_set_tso(struct net_device *netdev, u32 data)
13434 + struct igb_adapter *adapter = netdev_priv(netdev);
13436 + struct net_device *v_netdev;
13439 + netdev->features |= NETIF_F_TSO;
13440 +#ifdef NETIF_F_TSO6
13441 + netdev->features |= NETIF_F_TSO6;
13444 + netdev->features &= ~NETIF_F_TSO;
13445 +#ifdef NETIF_F_TSO6
13446 + netdev->features &= ~NETIF_F_TSO6;
13448 + /* disable TSO on all VLANs if they're present */
13449 + if (!adapter->vlgrp)
13451 + for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
13452 + v_netdev = vlan_group_get_device(adapter->vlgrp, i);
13456 + v_netdev->features &= ~NETIF_F_TSO;
13457 +#ifdef NETIF_F_TSO6
13458 + v_netdev->features &= ~NETIF_F_TSO6;
13460 + vlan_group_set_device(adapter->vlgrp, i, v_netdev);
13465 + DPRINTK(PROBE, INFO, "TSO is %s\n", data ? "Enabled" : "Disabled");
13468 +#endif /* NETIF_F_TSO */
13470 +static u32 igb_get_msglevel(struct net_device *netdev)
13472 + struct igb_adapter *adapter = netdev_priv(netdev);
13473 + return adapter->msg_enable;
13476 +static void igb_set_msglevel(struct net_device *netdev, u32 data)
13478 + struct igb_adapter *adapter = netdev_priv(netdev);
13479 + adapter->msg_enable = data;
13482 +static int igb_get_regs_len(struct net_device *netdev)
13484 +#define IGB_REGS_LEN 551
13485 + return IGB_REGS_LEN * sizeof(u32);
13488 +static void igb_get_regs(struct net_device *netdev,
13489 + struct ethtool_regs *regs, void *p)
13491 + struct igb_adapter *adapter = netdev_priv(netdev);
13492 + struct e1000_hw *hw = &adapter->hw;
13493 + u32 *regs_buff = p;
13496 + memset(p, 0, IGB_REGS_LEN * sizeof(u32));
13498 + regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;
13500 + /* General Registers */
13501 + regs_buff[0] = E1000_READ_REG(hw, E1000_CTRL);
13502 + regs_buff[1] = E1000_READ_REG(hw, E1000_STATUS);
13503 + regs_buff[2] = E1000_READ_REG(hw, E1000_CTRL_EXT);
13504 + regs_buff[3] = E1000_READ_REG(hw, E1000_MDIC);
13505 + regs_buff[4] = E1000_READ_REG(hw, E1000_SCTL);
13506 + regs_buff[5] = E1000_READ_REG(hw, E1000_CONNSW);
13507 + regs_buff[6] = E1000_READ_REG(hw, E1000_VET);
13508 + regs_buff[7] = E1000_READ_REG(hw, E1000_LEDCTL);
13509 + regs_buff[8] = E1000_READ_REG(hw, E1000_PBA);
13510 + regs_buff[9] = E1000_READ_REG(hw, E1000_PBS);
13511 + regs_buff[10] = E1000_READ_REG(hw, E1000_FRTIMER);
13512 + regs_buff[11] = E1000_READ_REG(hw, E1000_TCPTIMER);
13514 + /* NVM Register */
13515 + regs_buff[12] = E1000_READ_REG(hw, E1000_EECD);
13518 + /* Reading EICS for EICR because they read the
13519 + * same but EICS does not clear on read */
13520 + regs_buff[13] = E1000_READ_REG(hw, E1000_EICS);
13521 + regs_buff[14] = E1000_READ_REG(hw, E1000_EICS);
13522 + regs_buff[15] = E1000_READ_REG(hw, E1000_EIMS);
13523 + regs_buff[16] = E1000_READ_REG(hw, E1000_EIMC);
13524 + regs_buff[17] = E1000_READ_REG(hw, E1000_EIAC);
13525 + regs_buff[18] = E1000_READ_REG(hw, E1000_EIAM);
13526 + /* Reading ICS for ICR because they read the
13527 + * same but ICS does not clear on read */
13528 + regs_buff[19] = E1000_READ_REG(hw, E1000_ICS);
13529 + regs_buff[20] = E1000_READ_REG(hw, E1000_ICS);
13530 + regs_buff[21] = E1000_READ_REG(hw, E1000_IMS);
13531 + regs_buff[22] = E1000_READ_REG(hw, E1000_IMC);
13532 + regs_buff[23] = E1000_READ_REG(hw, E1000_IAC);
13533 + regs_buff[24] = E1000_READ_REG(hw, E1000_IAM);
13534 + regs_buff[25] = E1000_READ_REG(hw, E1000_IMIRVP);
13536 + /* Flow Control */
13537 + regs_buff[26] = E1000_READ_REG(hw, E1000_FCAL);
13538 + regs_buff[27] = E1000_READ_REG(hw, E1000_FCAH);
13539 + regs_buff[28] = E1000_READ_REG(hw, E1000_FCTTV);
13540 + regs_buff[29] = E1000_READ_REG(hw, E1000_FCRTL);
13541 + regs_buff[30] = E1000_READ_REG(hw, E1000_FCRTH);
13542 + regs_buff[31] = E1000_READ_REG(hw, E1000_FCRTV);
13545 + regs_buff[32] = E1000_READ_REG(hw, E1000_RCTL);
13546 + regs_buff[33] = E1000_READ_REG(hw, E1000_RXCSUM);
13547 + regs_buff[34] = E1000_READ_REG(hw, E1000_RLPML);
13548 + regs_buff[35] = E1000_READ_REG(hw, E1000_RFCTL);
13549 + regs_buff[36] = E1000_READ_REG(hw, E1000_MRQC);
13550 + regs_buff[37] = E1000_READ_REG(hw, E1000_VT_CTL);
13553 + regs_buff[38] = E1000_READ_REG(hw, E1000_TCTL);
13554 + regs_buff[39] = E1000_READ_REG(hw, E1000_TCTL_EXT);
13555 + regs_buff[40] = E1000_READ_REG(hw, E1000_TIPG);
13556 + regs_buff[41] = E1000_READ_REG(hw, E1000_DTXCTL);
13559 + regs_buff[42] = E1000_READ_REG(hw, E1000_WUC);
13560 + regs_buff[43] = E1000_READ_REG(hw, E1000_WUFC);
13561 + regs_buff[44] = E1000_READ_REG(hw, E1000_WUS);
13562 + regs_buff[45] = E1000_READ_REG(hw, E1000_IPAV);
13563 + regs_buff[46] = E1000_READ_REG(hw, E1000_WUPL);
13566 + regs_buff[47] = E1000_READ_REG(hw, E1000_PCS_CFG0);
13567 + regs_buff[48] = E1000_READ_REG(hw, E1000_PCS_LCTL);
13568 + regs_buff[49] = E1000_READ_REG(hw, E1000_PCS_LSTAT);
13569 + regs_buff[50] = E1000_READ_REG(hw, E1000_PCS_ANADV);
13570 + regs_buff[51] = E1000_READ_REG(hw, E1000_PCS_LPAB);
13571 + regs_buff[52] = E1000_READ_REG(hw, E1000_PCS_NPTX);
13572 + regs_buff[53] = E1000_READ_REG(hw, E1000_PCS_LPABNP);
13575 + regs_buff[54] = adapter->stats.crcerrs;
13576 + regs_buff[55] = adapter->stats.algnerrc;
13577 + regs_buff[56] = adapter->stats.symerrs;
13578 + regs_buff[57] = adapter->stats.rxerrc;
13579 + regs_buff[58] = adapter->stats.mpc;
13580 + regs_buff[59] = adapter->stats.scc;
13581 + regs_buff[60] = adapter->stats.ecol;
13582 + regs_buff[61] = adapter->stats.mcc;
13583 + regs_buff[62] = adapter->stats.latecol;
13584 + regs_buff[63] = adapter->stats.colc;
13585 + regs_buff[64] = adapter->stats.dc;
13586 + regs_buff[65] = adapter->stats.tncrs;
13587 + regs_buff[66] = adapter->stats.sec;
13588 + regs_buff[67] = adapter->stats.htdpmc;
13589 + regs_buff[68] = adapter->stats.rlec;
13590 + regs_buff[69] = adapter->stats.xonrxc;
13591 + regs_buff[70] = adapter->stats.xontxc;
13592 + regs_buff[71] = adapter->stats.xoffrxc;
13593 + regs_buff[72] = adapter->stats.xofftxc;
13594 + regs_buff[73] = adapter->stats.fcruc;
13595 + regs_buff[74] = adapter->stats.prc64;
13596 + regs_buff[75] = adapter->stats.prc127;
13597 + regs_buff[76] = adapter->stats.prc255;
13598 + regs_buff[77] = adapter->stats.prc511;
13599 + regs_buff[78] = adapter->stats.prc1023;
13600 + regs_buff[79] = adapter->stats.prc1522;
13601 + regs_buff[80] = adapter->stats.gprc;
13602 + regs_buff[81] = adapter->stats.bprc;
13603 + regs_buff[82] = adapter->stats.mprc;
13604 + regs_buff[83] = adapter->stats.gptc;
13605 + regs_buff[84] = adapter->stats.gorc;
13606 + regs_buff[86] = adapter->stats.gotc;
13607 + regs_buff[88] = adapter->stats.rnbc;
13608 + regs_buff[89] = adapter->stats.ruc;
13609 + regs_buff[90] = adapter->stats.rfc;
13610 + regs_buff[91] = adapter->stats.roc;
13611 + regs_buff[92] = adapter->stats.rjc;
13612 + regs_buff[93] = adapter->stats.mgprc;
13613 + regs_buff[94] = adapter->stats.mgpdc;
13614 + regs_buff[95] = adapter->stats.mgptc;
13615 + regs_buff[96] = adapter->stats.tor;
13616 + regs_buff[98] = adapter->stats.tot;
13617 + regs_buff[100] = adapter->stats.tpr;
13618 + regs_buff[101] = adapter->stats.tpt;
13619 + regs_buff[102] = adapter->stats.ptc64;
13620 + regs_buff[103] = adapter->stats.ptc127;
13621 + regs_buff[104] = adapter->stats.ptc255;
13622 + regs_buff[105] = adapter->stats.ptc511;
13623 + regs_buff[106] = adapter->stats.ptc1023;
13624 + regs_buff[107] = adapter->stats.ptc1522;
13625 + regs_buff[108] = adapter->stats.mptc;
13626 + regs_buff[109] = adapter->stats.bptc;
13627 + regs_buff[110] = adapter->stats.tsctc;
13628 + regs_buff[111] = adapter->stats.iac;
13629 + regs_buff[112] = adapter->stats.rpthc;
13630 + regs_buff[113] = adapter->stats.hgptc;
13631 + regs_buff[114] = adapter->stats.hgorc;
13632 + regs_buff[116] = adapter->stats.hgotc;
13633 + regs_buff[118] = adapter->stats.lenerrs;
13634 + regs_buff[119] = adapter->stats.scvpc;
13635 + regs_buff[120] = adapter->stats.hrmpc;
13637 + for (i = 0; i < 4; i++)
13638 + regs_buff[121 + i] = E1000_READ_REG(hw, E1000_SRRCTL(i));
13639 + for (i = 0; i < 4; i++)
13640 + regs_buff[125 + i] = E1000_READ_REG(hw, E1000_PSRTYPE(i));
13641 + for (i = 0; i < 4; i++)
13642 + regs_buff[129 + i] = E1000_READ_REG(hw, E1000_RDBAL(i));
13643 + for (i = 0; i < 4; i++)
13644 + regs_buff[133 + i] = E1000_READ_REG(hw, E1000_RDBAH(i));
13645 + for (i = 0; i < 4; i++)
13646 + regs_buff[137 + i] = E1000_READ_REG(hw, E1000_RDLEN(i));
13647 + for (i = 0; i < 4; i++)
13648 + regs_buff[141 + i] = E1000_READ_REG(hw, E1000_RDH(i));
13649 + for (i = 0; i < 4; i++)
13650 + regs_buff[145 + i] = E1000_READ_REG(hw, E1000_RDT(i));
13651 + for (i = 0; i < 4; i++)
13652 + regs_buff[149 + i] = E1000_READ_REG(hw, E1000_RXDCTL(i));
13654 + for (i = 0; i < 10; i++)
13655 + regs_buff[153 + i] = E1000_READ_REG(hw, E1000_EITR(i));
13656 + for (i = 0; i < 8; i++)
13657 + regs_buff[163 + i] = E1000_READ_REG(hw, E1000_IMIR(i));
13658 + for (i = 0; i < 8; i++)
13659 + regs_buff[171 + i] = E1000_READ_REG(hw, E1000_IMIREXT(i));
13660 + for (i = 0; i < 16; i++)
13661 + regs_buff[179 + i] = E1000_READ_REG(hw, E1000_RAL(i));
13662 + for (i = 0; i < 16; i++)
13663 + regs_buff[195 + i] = E1000_READ_REG(hw, E1000_RAH(i));
13665 + for (i = 0; i < 4; i++)
13666 + regs_buff[211 + i] = E1000_READ_REG(hw, E1000_TDBAL(i));
13667 + for (i = 0; i < 4; i++)
13668 + regs_buff[215 + i] = E1000_READ_REG(hw, E1000_TDBAH(i));
13669 + for (i = 0; i < 4; i++)
13670 + regs_buff[219 + i] = E1000_READ_REG(hw, E1000_TDLEN(i));
13671 + for (i = 0; i < 4; i++)
13672 + regs_buff[223 + i] = E1000_READ_REG(hw, E1000_TDH(i));
13673 + for (i = 0; i < 4; i++)
13674 + regs_buff[227 + i] = E1000_READ_REG(hw, E1000_TDT(i));
13675 + for (i = 0; i < 4; i++)
13676 + regs_buff[231 + i] = E1000_READ_REG(hw, E1000_TXDCTL(i));
13677 + for (i = 0; i < 4; i++)
13678 + regs_buff[235 + i] = E1000_READ_REG(hw, E1000_TDWBAL(i));
13679 + for (i = 0; i < 4; i++)
13680 + regs_buff[239 + i] = E1000_READ_REG(hw, E1000_TDWBAH(i));
13681 + for (i = 0; i < 4; i++)
13682 + regs_buff[243 + i] = E1000_READ_REG(hw, E1000_DCA_TXCTRL(i));
13684 + for (i = 0; i < 4; i++)
13685 + regs_buff[247 + i] = E1000_READ_REG(hw, E1000_IP4AT_REG(i));
13686 + for (i = 0; i < 4; i++)
13687 + regs_buff[251 + i] = E1000_READ_REG(hw, E1000_IP6AT_REG(i));
13688 + for (i = 0; i < 32; i++)
13689 + regs_buff[255 + i] = E1000_READ_REG(hw, E1000_WUPM_REG(i));
13690 + for (i = 0; i < 128; i++)
13691 + regs_buff[287 + i] = E1000_READ_REG(hw, E1000_FFMT_REG(i));
13692 + for (i = 0; i < 128; i++)
13693 + regs_buff[415 + i] = E1000_READ_REG(hw, E1000_FFVT_REG(i));
13694 + for (i = 0; i < 4; i++)
13695 + regs_buff[543 + i] = E1000_READ_REG(hw, E1000_FFLT_REG(i));
13697 + regs_buff[547] = E1000_READ_REG(hw, E1000_TDFH);
13698 + regs_buff[548] = E1000_READ_REG(hw, E1000_TDFT);
13699 + regs_buff[549] = E1000_READ_REG(hw, E1000_TDFHS);
13700 + regs_buff[550] = E1000_READ_REG(hw, E1000_TDFPC);
13704 +static int igb_get_eeprom_len(struct net_device *netdev)
13706 + struct igb_adapter *adapter = netdev_priv(netdev);
13707 + return adapter->hw.nvm.word_size * 2;
13710 +static int igb_get_eeprom(struct net_device *netdev,
13711 + struct ethtool_eeprom *eeprom, u8 *bytes)
13713 + struct igb_adapter *adapter = netdev_priv(netdev);
13714 + struct e1000_hw *hw = &adapter->hw;
13715 + u16 *eeprom_buff;
13716 + int first_word, last_word;
13720 + if (eeprom->len == 0)
13723 + eeprom->magic = hw->vendor_id | (hw->device_id << 16);
13725 + first_word = eeprom->offset >> 1;
13726 + last_word = (eeprom->offset + eeprom->len - 1) >> 1;
13728 + eeprom_buff = kmalloc(sizeof(u16) *
13729 + (last_word - first_word + 1), GFP_KERNEL);
13730 + if (!eeprom_buff)
13733 + if (hw->nvm.type == e1000_nvm_eeprom_spi)
13734 + ret_val = e1000_read_nvm(hw, first_word,
13735 + last_word - first_word + 1,
13738 + for (i = 0; i < last_word - first_word + 1; i++) {
13739 + ret_val = e1000_read_nvm(hw, first_word + i, 1,
13740 + &eeprom_buff[i]);
13746 + /* Device's eeprom is always little-endian, word addressable */
13747 + for (i = 0; i < last_word - first_word + 1; i++)
13748 + le16_to_cpus(&eeprom_buff[i]);
13750 + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1),
13752 + kfree(eeprom_buff);
13757 +static int igb_set_eeprom(struct net_device *netdev,
13758 + struct ethtool_eeprom *eeprom, u8 *bytes)
13760 + struct igb_adapter *adapter = netdev_priv(netdev);
13761 + struct e1000_hw *hw = &adapter->hw;
13762 + u16 *eeprom_buff;
13764 + int max_len, first_word, last_word, ret_val = 0;
13767 + if (eeprom->len == 0)
13768 + return -EOPNOTSUPP;
13770 + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
13773 + max_len = hw->nvm.word_size * 2;
13775 + first_word = eeprom->offset >> 1;
13776 + last_word = (eeprom->offset + eeprom->len - 1) >> 1;
13777 + eeprom_buff = kmalloc(max_len, GFP_KERNEL);
13778 + if (!eeprom_buff)
13781 + ptr = (void *)eeprom_buff;
13783 + if (eeprom->offset & 1) {
13784 + /* need read/modify/write of first changed EEPROM word */
13785 + /* only the second byte of the word is being modified */
13786 + ret_val = e1000_read_nvm(hw, first_word, 1,
13787 + &eeprom_buff[0]);
13790 + if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
13791 + /* need read/modify/write of last changed EEPROM word */
13792 + /* only the first byte of the word is being modified */
13793 + ret_val = e1000_read_nvm(hw, last_word, 1,
13794 + &eeprom_buff[last_word - first_word]);
13797 + /* Device's eeprom is always little-endian, word addressable */
13798 + for (i = 0; i < last_word - first_word + 1; i++)
13799 + le16_to_cpus(&eeprom_buff[i]);
13801 + memcpy(ptr, bytes, eeprom->len);
13803 + for (i = 0; i < last_word - first_word + 1; i++)
13804 + cpu_to_le16s(&eeprom_buff[i]);
13806 + ret_val = e1000_write_nvm(hw, first_word,
13807 + last_word - first_word + 1, eeprom_buff);
13809 + /* Update the checksum over the first part of the EEPROM if needed
13810 + * and flush shadow RAM for 82573 controllers */
13811 + if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG)))
13812 + e1000_update_nvm_checksum(hw);
13814 + kfree(eeprom_buff);
13818 +static void igb_get_drvinfo(struct net_device *netdev,
13819 + struct ethtool_drvinfo *drvinfo)
13821 + struct igb_adapter *adapter = netdev_priv(netdev);
13824 + strncpy(drvinfo->driver, igb_driver_name, 32);
13825 + strncpy(drvinfo->version, igb_driver_version, 32);
13827 + /* EEPROM image version # is reported as firmware version # for
13828 + * 82575 controllers */
13829 + e1000_read_nvm(&adapter->hw, 5, 1, &eeprom_data);
13830 + snprintf(drvinfo->fw_version, 32, "%d.%d-%d",
13831 + (eeprom_data & 0xF000) >> 12,
13832 + (eeprom_data & 0x0FF0) >> 4,
13833 + eeprom_data & 0x000F);
13835 + strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
13836 + drvinfo->n_stats = IGB_STATS_LEN;
13837 + drvinfo->testinfo_len = IGB_TEST_LEN;
13838 + drvinfo->regdump_len = igb_get_regs_len(netdev);
13839 + drvinfo->eedump_len = igb_get_eeprom_len(netdev);
13842 +static void igb_get_ringparam(struct net_device *netdev,
13843 + struct ethtool_ringparam *ring)
13845 + struct igb_adapter *adapter = netdev_priv(netdev);
13847 + ring->rx_max_pending = IGB_MAX_RXD;
13848 + ring->tx_max_pending = IGB_MAX_TXD;
13849 + ring->rx_mini_max_pending = 0;
13850 + ring->rx_jumbo_max_pending = 0;
13851 + ring->rx_pending = adapter->rx_ring_count;
13852 + ring->tx_pending = adapter->tx_ring_count;
13853 + ring->rx_mini_pending = 0;
13854 + ring->rx_jumbo_pending = 0;
13857 +static int igb_set_ringparam(struct net_device *netdev,
13858 + struct ethtool_ringparam *ring)
13860 + struct igb_adapter *adapter = netdev_priv(netdev);
13861 + struct igb_ring *temp_ring;
13863 + u16 new_rx_count, new_tx_count;
13865 + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
13868 + new_rx_count = min(ring->rx_pending, (u32)IGB_MAX_RXD);
13869 + new_rx_count = max(new_rx_count, (u16)IGB_MIN_RXD);
13870 + new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);
13872 + new_tx_count = min(ring->tx_pending, (u32)IGB_MAX_TXD);
13873 + new_tx_count = max(new_tx_count, (u16)IGB_MIN_TXD);
13874 + new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);
13876 + if ((new_tx_count == adapter->tx_ring_count) &&
13877 + (new_rx_count == adapter->rx_ring_count)) {
13878 + /* nothing to do */
13882 + if (adapter->num_tx_queues > adapter->num_rx_queues)
13883 + temp_ring = vmalloc(adapter->num_tx_queues * sizeof(struct igb_ring));
13885 + temp_ring = vmalloc(adapter->num_rx_queues * sizeof(struct igb_ring));
13889 + while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
13892 + if (netif_running(adapter->netdev))
13893 + igb_down(adapter);
13896 + * We can't just free everything and then setup again,
13897 + * because the ISRs in MSI-X mode get passed pointers
13898 + * to the tx and rx ring structs.
13900 + if (new_tx_count != adapter->tx_ring_count) {
13901 + memcpy(temp_ring, adapter->tx_ring,
13902 + adapter->num_tx_queues * sizeof(struct igb_ring));
13904 + for (i = 0; i < adapter->num_tx_queues; i++) {
13905 + temp_ring[i].count = new_tx_count;
13906 + err = igb_setup_tx_resources(&temp_ring[i]);
13910 + igb_free_tx_resources(&temp_ring[i]);
13916 + for (i = 0; i < adapter->num_tx_queues; i++)
13917 + igb_free_tx_resources(&adapter->tx_ring[i]);
13919 + memcpy(adapter->tx_ring, temp_ring,
13920 + adapter->num_tx_queues * sizeof(struct igb_ring));
13922 + adapter->tx_ring_count = new_tx_count;
13925 + if (new_rx_count != adapter->rx_ring->count) {
13926 + memcpy(temp_ring, adapter->rx_ring,
13927 + adapter->num_rx_queues * sizeof(struct igb_ring));
13929 + for (i = 0; i < adapter->num_rx_queues; i++) {
13930 + temp_ring[i].count = new_rx_count;
13931 + err = igb_setup_rx_resources(&temp_ring[i]);
13935 + igb_free_rx_resources(&temp_ring[i]);
13942 + for (i = 0; i < adapter->num_rx_queues; i++)
13943 + igb_free_rx_resources(&adapter->rx_ring[i]);
13945 + memcpy(adapter->rx_ring, temp_ring,
13946 + adapter->num_rx_queues * sizeof(struct igb_ring));
13948 + adapter->rx_ring_count = new_rx_count;
13953 + if (netif_running(adapter->netdev))
13956 + clear_bit(__IGB_RESETTING, &adapter->state);
13957 + vfree(temp_ring);
13961 +static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
13962 + int reg, u32 mask, u32 write)
13964 + struct e1000_hw *hw = &adapter->hw;
13966 + static const u32 _test[] =
13967 + {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
13968 + for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {
13969 + E1000_WRITE_REG(hw, reg, (_test[pat] & write));
13970 + val = E1000_READ_REG(hw, reg);
13971 + if (val != (_test[pat] & write & mask)) {
13972 + DPRINTK(DRV, ERR, "pattern test reg %04X failed: got "
13973 + "0x%08X expected 0x%08X\n",
13974 + E1000_REGISTER(hw, reg), val,
13975 + (_test[pat] & write & mask));
13976 + *data = E1000_REGISTER(hw, reg);
13984 +static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
13985 + int reg, u32 mask, u32 write)
13987 + struct e1000_hw *hw = &adapter->hw;
13989 + E1000_WRITE_REG(hw, reg, write & mask);
13990 + val = E1000_READ_REG(hw, reg);
13991 + if ((write & mask) != (val & mask)) {
13992 + DPRINTK(DRV, ERR, "set/check reg %04X test failed: got 0x%08X "
13993 + "expected 0x%08X\n", reg, (val & mask), (write & mask));
13994 + *data = E1000_REGISTER(hw, reg);
14001 +#define REG_PATTERN_TEST(reg, mask, write) \
14003 + if (reg_pattern_test(adapter, data, reg, mask, write)) \
14007 +#define REG_SET_AND_CHECK(reg, mask, write) \
14009 + if (reg_set_and_check(adapter, data, reg, mask, write)) \
14013 +static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
14015 + struct e1000_hw *hw = &adapter->hw;
14016 + struct igb_reg_test *test;
14017 + u32 value, before, after;
14020 + switch (adapter->hw.mac.type) {
14021 + case e1000_82576:
14022 + test = reg_test_82576;
14023 + toggle = 0x7FFFF3FF;
14026 + test = reg_test_82575;
14027 + toggle = 0x7FFFF3FF;
14031 + /* Because the status register is such a special case,
14032 + * we handle it separately from the rest of the register
14033 + * tests. Some bits are read-only, some toggle, and some
14034 + * are writable on newer MACs.
14036 + before = E1000_READ_REG(hw, E1000_STATUS);
14037 + value = (E1000_READ_REG(hw, E1000_STATUS) & toggle);
14038 + E1000_WRITE_REG(hw, E1000_STATUS, toggle);
14039 + after = E1000_READ_REG(hw, E1000_STATUS) & toggle;
14040 + if (value != after) {
14041 + DPRINTK(DRV, ERR, "failed STATUS register test got: "
14042 + "0x%08X expected: 0x%08X\n", after, value);
14046 + /* restore previous status */
14047 + E1000_WRITE_REG(hw, E1000_STATUS, before);
14049 + /* Perform the remainder of the register test, looping through
14050 + * the test table until we either fail or reach the null entry.
14052 + while (test->reg) {
14053 + for (i = 0; i < test->array_len; i++) {
14054 + switch (test->test_type) {
14055 + case PATTERN_TEST:
14056 + REG_PATTERN_TEST(test->reg +
14057 + (i * test->reg_offset),
14061 + case SET_READ_TEST:
14062 + REG_SET_AND_CHECK(test->reg +
14063 + (i * test->reg_offset),
14067 + case WRITE_NO_TEST:
14068 + writel(test->write,
14069 + (adapter->hw.hw_addr + test->reg)
14070 + + (i * test->reg_offset));
14072 + case TABLE32_TEST:
14073 + REG_PATTERN_TEST(test->reg + (i * 4),
14077 + case TABLE64_TEST_LO:
14078 + REG_PATTERN_TEST(test->reg + (i * 8),
14082 + case TABLE64_TEST_HI:
14083 + REG_PATTERN_TEST((test->reg + 4) + (i * 8),
14096 +static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data)
14099 + u16 checksum = 0;
14103 + /* Read and add up the contents of the EEPROM */
14104 + for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
14105 + if ((e1000_read_nvm(&adapter->hw, i, 1, &temp)) < 0) {
14109 + checksum += temp;
14112 + /* If Checksum is not Correct return error else test passed */
14113 + if ((checksum != (u16) NVM_SUM) && !(*data))
14119 +static irqreturn_t igb_test_intr(int irq, void *data)
14121 + struct igb_adapter *adapter = (struct igb_adapter *) data;
14122 + struct e1000_hw *hw = &adapter->hw;
14124 + adapter->test_icr |= E1000_READ_REG(hw, E1000_ICR);
14126 + return IRQ_HANDLED;
14129 +static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
14131 + struct e1000_hw *hw = &adapter->hw;
14132 + struct net_device *netdev = adapter->netdev;
14133 + u32 mask, ics_mask, i = 0, shared_int = TRUE;
14134 + u32 irq = adapter->pdev->irq;
14138 + /* Hook up test interrupt handler just for this test */
14139 + if (adapter->msix_entries) {
14140 + if (request_irq(adapter->msix_entries[0].vector,
14141 + &igb_test_intr, 0, netdev->name, adapter)) {
14145 + } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
14146 + shared_int = FALSE;
14147 + if (request_irq(irq, &igb_test_intr, 0, netdev->name, adapter)) {
14151 + } else if (!request_irq(irq, &igb_test_intr, IRQF_PROBE_SHARED,
14152 + netdev->name, adapter)) {
14153 + shared_int = FALSE;
14154 + } else if (request_irq(irq, &igb_test_intr, IRQF_SHARED,
14155 + netdev->name, adapter)) {
14159 + DPRINTK(HW, INFO, "testing %s interrupt\n",
14160 + (shared_int ? "shared" : "unshared"));
14162 + /* Disable all the interrupts */
14163 + E1000_WRITE_REG(hw, E1000_IMC, ~0);
14166 + /* Define all writable bits for ICS */
14167 + switch (hw->mac.type) {
14168 + case e1000_82575:
14169 + ics_mask = 0x37F47EDD;
14171 + case e1000_82576:
14172 + ics_mask = 0x77D4FBFD;
14175 + ics_mask = 0x7FFFFFFF;
14179 + /* Test each interrupt */
14180 + for (; i < 31; i++) {
14181 + /* Interrupt to test */
14184 + if (!(mask & ics_mask))
14187 + if (!shared_int) {
14188 + /* Disable the interrupt to be reported in
14189 + * the cause register and then force the same
14190 + * interrupt and see if one gets posted. If
14191 + * an interrupt was posted to the bus, the
14194 + adapter->test_icr = 0;
14196 + /* Flush any pending interrupts */
14197 + E1000_WRITE_REG(hw, E1000_ICR, ~0);
14199 + E1000_WRITE_REG(hw, E1000_IMC, mask);
14200 + E1000_WRITE_REG(hw, E1000_ICS, mask);
14203 + if (adapter->test_icr & mask) {
14209 + /* Enable the interrupt to be reported in
14210 + * the cause register and then force the same
14211 + * interrupt and see if one gets posted. If
14212 + * an interrupt was not posted to the bus, the
14215 + adapter->test_icr = 0;
14217 + /* Flush any pending interrupts */
14218 + E1000_WRITE_REG(hw, E1000_ICR, ~0);
14220 + E1000_WRITE_REG(hw, E1000_IMS, mask);
14221 + E1000_WRITE_REG(hw, E1000_ICS, mask);
14224 + if (!(adapter->test_icr & mask)) {
14229 + if (!shared_int) {
14230 + /* Disable the other interrupts to be reported in
14231 + * the cause register and then force the other
14232 + * interrupts and see if any get posted. If
14233 + * an interrupt was posted to the bus, the
14236 + adapter->test_icr = 0;
14238 + /* Flush any pending interrupts */
14239 + E1000_WRITE_REG(hw, E1000_ICR, ~0);
14241 + E1000_WRITE_REG(hw, E1000_IMC, ~mask);
14242 + E1000_WRITE_REG(hw, E1000_ICS, ~mask);
14245 + if (adapter->test_icr & mask) {
14252 + /* Disable all the interrupts */
14253 + E1000_WRITE_REG(hw, E1000_IMC, ~0);
14256 + /* Unhook test interrupt handler */
14257 + if (adapter->msix_entries)
14258 + free_irq(adapter->msix_entries[0].vector, adapter);
14260 + free_irq(irq, adapter);
14265 +static void igb_free_desc_rings(struct igb_adapter *adapter)
14267 + igb_free_tx_resources(&adapter->test_tx_ring);
14268 + igb_free_rx_resources(&adapter->test_rx_ring);
14271 +static int igb_setup_desc_rings(struct igb_adapter *adapter)
14273 + struct igb_ring *tx_ring = &adapter->test_tx_ring;
14274 + struct igb_ring *rx_ring = &adapter->test_rx_ring;
14277 + /* Setup Tx descriptor ring and Tx buffers */
14278 + tx_ring->count = IGB_DEFAULT_TXD;
14279 + tx_ring->pdev = adapter->pdev;
14280 + tx_ring->reg_idx = adapter->vfs_allocated_count;
14282 + if (igb_setup_tx_resources(tx_ring)) {
14287 + igb_setup_tctl(adapter);
14288 + igb_configure_tx_ring(adapter, tx_ring);
14290 + for (i = 0; i < tx_ring->count; i++) {
14291 + union e1000_adv_tx_desc *tx_desc;
14292 + unsigned int size = 1024;
14293 + struct sk_buff *skb = alloc_skb(size, GFP_KERNEL);
14299 + skb_put(skb, size);
14300 + tx_ring->buffer_info[i].skb = skb;
14301 + tx_ring->buffer_info[i].length = skb->len;
14302 + tx_ring->buffer_info[i].dma =
14303 + pci_map_single(tx_ring->pdev, skb->data, skb->len,
14304 + PCI_DMA_TODEVICE);
14305 + tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
14306 + tx_desc->read.buffer_addr =
14307 + cpu_to_le64(tx_ring->buffer_info[i].dma);
14308 + tx_desc->read.olinfo_status =
14309 + cpu_to_le32(skb->len << E1000_ADVTXD_PAYLEN_SHIFT);
14310 + tx_desc->read.cmd_type_len = cpu_to_le32(skb->len);
14311 + tx_desc->read.cmd_type_len |=
14312 + cpu_to_le32(E1000_ADVTXD_DTYP_DATA |
14313 + E1000_ADVTXD_DCMD_DEXT);
14314 + tx_desc->read.cmd_type_len |=
14315 + cpu_to_le32(IGB_ADVTXD_DCMD |
14316 + E1000_ADVTXD_DTYP_DATA |
14317 + E1000_ADVTXD_DCMD_IFCS |
14318 + E1000_ADVTXD_DCMD_DEXT);
14321 + /* Setup Rx descriptor ring and Rx buffers */
14322 + rx_ring->count = IGB_DEFAULT_RXD;
14323 + rx_ring->pdev = adapter->pdev;
14324 + rx_ring->rx_buffer_len = IGB_RXBUFFER_2048;
14325 + rx_ring->reg_idx = adapter->vfs_allocated_count;
14327 + if (igb_setup_rx_resources(rx_ring)) {
14332 + /* set the default queue to queue 0 of PF */
14333 + E1000_WRITE_REG(&adapter->hw, E1000_MRQC,
14334 + adapter->vfs_allocated_count << 3);
14336 + /* enable receive ring */
14337 + igb_setup_rctl(adapter);
14338 + igb_configure_rx_ring(adapter, rx_ring);
14340 + if (igb_alloc_rx_buffers_adv(rx_ring, rx_ring->count)) {
14349 + igb_free_desc_rings(adapter);
14353 +static void igb_phy_disable_receiver(struct igb_adapter *adapter)
14355 + /* Write out to PHY registers 29 and 30 to disable the Receiver. */
14356 + e1000_write_phy_reg(&adapter->hw, 29, 0x001F);
14357 + e1000_write_phy_reg(&adapter->hw, 30, 0x8FFC);
14358 + e1000_write_phy_reg(&adapter->hw, 29, 0x001A);
14359 + e1000_write_phy_reg(&adapter->hw, 30, 0x8FF0);
14362 +static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
14364 + struct e1000_hw *hw = &adapter->hw;
14365 + u32 ctrl_reg = 0;
14367 + hw->mac.autoneg = FALSE;
14369 + if (hw->phy.type == e1000_phy_m88) {
14370 + /* Auto-MDI/MDIX Off */
14371 + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
14372 + /* reset to update Auto-MDI/MDIX */
14373 + e1000_write_phy_reg(hw, PHY_CONTROL, 0x9140);
14374 + /* autoneg off */
14375 + e1000_write_phy_reg(hw, PHY_CONTROL, 0x8140);
14378 + ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
14380 + /* force 1000, set loopback */
14381 + e1000_write_phy_reg(hw, PHY_CONTROL, 0x4140);
14383 + /* Now set up the MAC to the same speed/duplex as the PHY. */
14384 + ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
14385 + ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
14386 + ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
14387 + E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
14388 + E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
14389 + E1000_CTRL_FD | /* Force Duplex to FULL */
14390 + E1000_CTRL_SLU); /* Set link up enable bit */
14392 + if (hw->phy.type == e1000_phy_m88)
14393 + ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
14395 + E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
14397 + /* Disable the receiver on the PHY so when a cable is plugged in, the
14398 + * PHY does not begin to autoneg when a cable is reconnected to the NIC.
14400 + if (hw->phy.type == e1000_phy_m88)
14401 + igb_phy_disable_receiver(adapter);
14408 +static int igb_set_phy_loopback(struct igb_adapter *adapter)
14410 + return igb_integrated_phy_loopback(adapter);
14413 +static int igb_setup_loopback_test(struct igb_adapter *adapter)
14415 + struct e1000_hw *hw = &adapter->hw;
14418 + reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
14420 + /* use CTRL_EXT to identify link type as SGMII can appear as copper */
14421 + if (reg & E1000_CTRL_EXT_LINK_MODE_MASK) {
14422 + reg = E1000_READ_REG(hw, E1000_RCTL);
14423 + reg |= E1000_RCTL_LBM_TCVR;
14424 + E1000_WRITE_REG(hw, E1000_RCTL, reg);
14426 + E1000_WRITE_REG(hw, E1000_SCTL, E1000_ENABLE_SERDES_LOOPBACK);
14428 + reg = E1000_READ_REG(hw, E1000_CTRL);
14429 + reg &= ~(E1000_CTRL_RFCE |
14430 + E1000_CTRL_TFCE |
14431 + E1000_CTRL_LRST);
14432 + reg |= E1000_CTRL_SLU |
14434 + E1000_WRITE_REG(hw, E1000_CTRL, reg);
14436 + /* Unset switch control to serdes energy detect */
14437 + reg = E1000_READ_REG(hw, E1000_CONNSW);
14438 + reg &= ~E1000_CONNSW_ENRGSRC;
14439 + E1000_WRITE_REG(hw, E1000_CONNSW, reg);
14441 + /* Set PCS register for forced speed */
14442 + reg = E1000_READ_REG(hw, E1000_PCS_LCTL);
14443 + reg &= ~E1000_PCS_LCTL_AN_ENABLE; /* Disable Autoneg*/
14444 + reg |= E1000_PCS_LCTL_FLV_LINK_UP | /* Force link up */
14445 + E1000_PCS_LCTL_FSV_1000 | /* Force 1000 */
14446 + E1000_PCS_LCTL_FDV_FULL | /* SerDes Full duplex */
14447 + E1000_PCS_LCTL_FSD | /* Force Speed */
14448 + E1000_PCS_LCTL_FORCE_LINK; /* Force Link */
14449 + E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg);
14454 + return igb_set_phy_loopback(adapter);
14457 +static void igb_loopback_cleanup(struct igb_adapter *adapter)
14459 + struct e1000_hw *hw = &adapter->hw;
14463 + rctl = E1000_READ_REG(hw, E1000_RCTL);
14464 + rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
14465 + E1000_WRITE_REG(hw, E1000_RCTL, rctl);
14467 + hw->mac.autoneg = TRUE;
14468 + e1000_read_phy_reg(hw, PHY_CONTROL, &phy_reg);
14469 + if (phy_reg & MII_CR_LOOPBACK) {
14470 + phy_reg &= ~MII_CR_LOOPBACK;
14471 + e1000_write_phy_reg(hw, PHY_CONTROL, phy_reg);
14472 + e1000_phy_commit(hw);
14476 +static void igb_create_lbtest_frame(struct sk_buff *skb,
14477 + unsigned int frame_size)
14479 + memset(skb->data, 0xFF, frame_size);
14480 + frame_size &= ~1;
14481 + memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
14482 + memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
14483 + memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
14486 +static int igb_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
14488 + frame_size &= ~1;
14489 + if (*(skb->data + 3) == 0xFF) {
14490 + if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
14491 + (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
14498 +static int igb_run_loopback_test(struct igb_adapter *adapter)
14500 + struct igb_ring *tx_ring = &adapter->test_tx_ring;
14501 + struct igb_ring *rx_ring = &adapter->test_rx_ring;
14502 + int i, j, k, l, lc, good_cnt, ret_val = 0;
14503 + unsigned long time;
14505 + writel(rx_ring->count - 1, rx_ring->tail);
14507 + /* Calculate the loop count based on the largest descriptor ring
14508 + * The idea is to wrap the largest ring a number of times using 64
14509 + * send/receive pairs during each loop
14512 + if (rx_ring->count <= tx_ring->count)
14513 + lc = ((tx_ring->count / 64) * 2) + 1;
14515 + lc = ((rx_ring->count / 64) * 2) + 1;
14518 + for (j = 0; j <= lc; j++) { /* loop count loop */
14519 + for (i = 0; i < 64; i++) { /* send the packets */
14520 + igb_create_lbtest_frame(tx_ring->buffer_info[k].skb,
14522 + pci_dma_sync_single_for_device(tx_ring->pdev,
14523 + tx_ring->buffer_info[k].dma,
14524 + tx_ring->buffer_info[k].length,
14525 + PCI_DMA_TODEVICE);
14526 + if (unlikely(++k == tx_ring->count))
14529 + writel(k, tx_ring->tail);
14532 + time = jiffies; /* set the start time for the receive */
14534 + do { /* receive the sent packets */
14535 + pci_dma_sync_single_for_cpu(rx_ring->pdev,
14536 + rx_ring->buffer_info[l].dma,
14537 + rx_ring->rx_buffer_len,
14538 + PCI_DMA_FROMDEVICE);
14540 + ret_val = igb_check_lbtest_frame(
14541 + rx_ring->buffer_info[l].skb, 1024);
14544 + if (unlikely(++l == rx_ring->count))
14546 + /* time + 20 msecs (200 msecs on 2.4) is more than
14547 + * enough time to complete the receives, if it's
14548 + * exceeded, break and error off
14550 + } while (good_cnt < 64 && jiffies < (time + 20));
14551 + if (good_cnt != 64) {
14552 + ret_val = 13; /* ret_val is the same as mis-compare */
14555 + if (jiffies >= (time + 20)) {
14556 + ret_val = 14; /* error code for time out error */
14559 + } /* end loop count loop */
14563 +static int igb_loopback_test(struct igb_adapter *adapter, u64 *data)
14565 + /* PHY loopback cannot be performed if SoL/IDER
14566 + * sessions are active */
14567 + if (e1000_check_reset_block(&adapter->hw)) {
14568 + DPRINTK(DRV, ERR, "Cannot do PHY loopback test "
14569 + "when SoL/IDER is active.\n");
14573 + *data = igb_setup_desc_rings(adapter);
14576 + *data = igb_setup_loopback_test(adapter);
14578 + goto err_loopback;
14579 + *data = igb_run_loopback_test(adapter);
14580 + igb_loopback_cleanup(adapter);
14583 + igb_free_desc_rings(adapter);
14588 +static int igb_link_test(struct igb_adapter *adapter, u64 *data)
14590 + struct e1000_hw *hw = &adapter->hw;
14592 + if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
14594 + adapter->hw.mac.serdes_has_link = FALSE;
14596 + /* On some blade server designs, link establishment
14597 + * could take as long as 2-3 minutes */
14599 + e1000_check_for_link(&adapter->hw);
14600 + if (adapter->hw.mac.serdes_has_link)
14603 + } while (i++ < 3750);
14607 + e1000_check_for_link(&adapter->hw);
14608 + if (adapter->hw.mac.autoneg)
14611 + if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU))
14617 +static void igb_diag_test(struct net_device *netdev,
14618 + struct ethtool_test *eth_test, u64 *data)
14620 + struct igb_adapter *adapter = netdev_priv(netdev);
14621 + u16 autoneg_advertised;
14622 + u8 forced_speed_duplex, autoneg;
14623 + bool if_running = netif_running(netdev);
14625 + set_bit(__IGB_TESTING, &adapter->state);
14626 + if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
14627 + /* Offline tests */
14629 + /* save speed, duplex, autoneg settings */
14630 + autoneg_advertised = adapter->hw.phy.autoneg_advertised;
14631 + forced_speed_duplex = adapter->hw.mac.forced_speed_duplex;
14632 + autoneg = adapter->hw.mac.autoneg;
14634 + DPRINTK(HW, INFO, "offline testing starting\n");
14636 + /* Link test performed before hardware reset so autoneg doesn't
14637 + * interfere with test result */
14638 + if (igb_link_test(adapter, &data[4]))
14639 + eth_test->flags |= ETH_TEST_FL_FAILED;
14642 + /* indicate we're in test mode */
14643 + dev_close(netdev);
14645 + igb_reset(adapter);
14647 + if (igb_reg_test(adapter, &data[0]))
14648 + eth_test->flags |= ETH_TEST_FL_FAILED;
14650 + igb_reset(adapter);
14651 + if (igb_eeprom_test(adapter, &data[1]))
14652 + eth_test->flags |= ETH_TEST_FL_FAILED;
14654 + igb_reset(adapter);
14655 + if (igb_intr_test(adapter, &data[2]))
14656 + eth_test->flags |= ETH_TEST_FL_FAILED;
14658 + igb_reset(adapter);
14659 + if (igb_loopback_test(adapter, &data[3]))
14660 + eth_test->flags |= ETH_TEST_FL_FAILED;
14662 + /* restore speed, duplex, autoneg settings */
14663 + adapter->hw.phy.autoneg_advertised = autoneg_advertised;
14664 + adapter->hw.mac.forced_speed_duplex = forced_speed_duplex;
14665 + adapter->hw.mac.autoneg = autoneg;
14667 + /* force this routine to wait until autoneg complete/timeout */
14668 + adapter->hw.phy.autoneg_wait_to_complete = TRUE;
14669 + igb_reset(adapter);
14670 + adapter->hw.phy.autoneg_wait_to_complete = FALSE;
14672 + clear_bit(__IGB_TESTING, &adapter->state);
14674 + dev_open(netdev);
14676 + DPRINTK(HW, INFO, "online testing starting\n");
14677 + /* Online tests */
14678 + if (igb_link_test(adapter, &data[4]))
14679 + eth_test->flags |= ETH_TEST_FL_FAILED;
14681 + /* Online tests aren't run; pass by default */
14687 + clear_bit(__IGB_TESTING, &adapter->state);
14689 + msleep_interruptible(4 * 1000);
14692 +static int igb_wol_exclusion(struct igb_adapter *adapter,
14693 + struct ethtool_wolinfo *wol)
14695 + struct e1000_hw *hw = &adapter->hw;
14696 + int retval = 1; /* fail by default */
14698 + switch (hw->device_id) {
14699 + case E1000_DEV_ID_82575GB_QUAD_COPPER:
14700 + /* WoL not supported */
14701 + wol->supported = 0;
14703 + case E1000_DEV_ID_82575EB_FIBER_SERDES:
14704 + case E1000_DEV_ID_82576_FIBER:
14705 + case E1000_DEV_ID_82576_SERDES:
14706 + /* Wake events not supported on port B */
14707 + if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FUNC_1) {
14708 + wol->supported = 0;
14711 + /* return success for non excluded adapter ports */
14714 + case E1000_DEV_ID_82576_QUAD_COPPER:
14715 + /* quad port adapters only support WoL on port A */
14716 + if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) {
14717 + wol->supported = 0;
14720 + /* return success for non excluded adapter ports */
14724 + /* dual port cards only support WoL on port A from now on
14725 + * unless it was enabled in the eeprom for port B
14726 + * so exclude FUNC_1 ports from having WoL enabled */
14727 + if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FUNC_1 &&
14728 + !adapter->eeprom_wol) {
14729 + wol->supported = 0;
14739 +static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
14741 + struct igb_adapter *adapter = netdev_priv(netdev);
14743 + wol->supported = WAKE_UCAST | WAKE_MCAST |
14744 + WAKE_BCAST | WAKE_MAGIC;
14745 + wol->wolopts = 0;
14747 + /* this function will set ->supported = 0 and return 1 if wol is not
14748 + * supported by this hardware */
14749 + if (igb_wol_exclusion(adapter, wol) ||
14750 + !device_can_wakeup(&adapter->pdev->dev))
14753 + /* apply any specific unsupported masks here */
14754 + switch (adapter->hw.device_id) {
14759 + if (adapter->wol & E1000_WUFC_EX)
14760 + wol->wolopts |= WAKE_UCAST;
14761 + if (adapter->wol & E1000_WUFC_MC)
14762 + wol->wolopts |= WAKE_MCAST;
14763 + if (adapter->wol & E1000_WUFC_BC)
14764 + wol->wolopts |= WAKE_BCAST;
14765 + if (adapter->wol & E1000_WUFC_MAG)
14766 + wol->wolopts |= WAKE_MAGIC;
14771 +static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
14773 + struct igb_adapter *adapter = netdev_priv(netdev);
14775 + if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
14776 + return -EOPNOTSUPP;
14778 + if (igb_wol_exclusion(adapter, wol))
14779 + return wol->wolopts ? -EOPNOTSUPP : 0;
14781 + /* these settings will always override what we currently have */
14782 + adapter->wol = 0;
14784 + if (wol->wolopts & WAKE_UCAST)
14785 + adapter->wol |= E1000_WUFC_EX;
14786 + if (wol->wolopts & WAKE_MCAST)
14787 + adapter->wol |= E1000_WUFC_MC;
14788 + if (wol->wolopts & WAKE_BCAST)
14789 + adapter->wol |= E1000_WUFC_BC;
14790 + if (wol->wolopts & WAKE_MAGIC)
14791 + adapter->wol |= E1000_WUFC_MAG;
14792 + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
14797 +/* bit defines for adapter->led_status */
14798 +#define IGB_LED_ON 0
14800 +static int igb_phys_id(struct net_device *netdev, u32 data)
14802 + struct igb_adapter *adapter = netdev_priv(netdev);
14803 + struct e1000_hw *hw = &adapter->hw;
14804 + unsigned long timeout;
14806 + timeout = data * 1000;
14809 + * msleep_interruptible only accepts unsigned int so we are limited
14810 + * in how long a duration we can wait
14812 + if (!timeout || timeout > UINT_MAX)
14813 + timeout = UINT_MAX;
14815 + e1000_blink_led(hw);
14816 + msleep_interruptible(timeout);
14818 + e1000_led_off(hw);
14819 + clear_bit(IGB_LED_ON, &adapter->led_status);
14820 + e1000_cleanup_led(hw);
14825 +static int igb_set_coalesce(struct net_device *netdev,
14826 + struct ethtool_coalesce *ec)
14828 + struct igb_adapter *adapter = netdev_priv(netdev);
14831 + if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
14832 + ((ec->rx_coalesce_usecs > 3) &&
14833 + (ec->rx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
14834 + (ec->rx_coalesce_usecs == 2))
14837 + /* convert to rate of irq's per second */
14838 + if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) {
14839 + adapter->itr = IGB_START_ITR;
14840 + adapter->itr_setting = ec->rx_coalesce_usecs;
14842 + adapter->itr = ec->rx_coalesce_usecs << 2;
14843 + adapter->itr_setting = adapter->itr;
14846 + for (i = 0; i < adapter->num_q_vectors; i++) {
14847 + struct igb_q_vector *q_vector = adapter->q_vector[i];
14848 + q_vector->itr_val = adapter->itr;
14849 + q_vector->set_itr = 1;
14855 +static int igb_get_coalesce(struct net_device *netdev,
14856 + struct ethtool_coalesce *ec)
14858 + struct igb_adapter *adapter = netdev_priv(netdev);
14860 + if (adapter->itr_setting <= 3)
14861 + ec->rx_coalesce_usecs = adapter->itr_setting;
14863 + ec->rx_coalesce_usecs = adapter->itr_setting >> 2;
14868 +static int igb_nway_reset(struct net_device *netdev)
14870 + struct igb_adapter *adapter = netdev_priv(netdev);
14871 + if (netif_running(netdev))
14872 + igb_reinit_locked(adapter);
14876 +#ifdef HAVE_ETHTOOL_GET_SSET_COUNT
14877 +static int igb_get_sset_count(struct net_device *netdev, int sset)
14880 + case ETH_SS_STATS:
14881 + return IGB_STATS_LEN;
14882 + case ETH_SS_TEST:
14883 + return IGB_TEST_LEN;
14885 + return -ENOTSUPP;
14889 +static int igb_get_stats_count(struct net_device *netdev)
14891 + return IGB_STATS_LEN;
14894 +static int igb_diag_test_count(struct net_device *netdev)
14896 + return IGB_TEST_LEN;
14900 +static void igb_get_ethtool_stats(struct net_device *netdev,
14901 + struct ethtool_stats *stats, u64 *data)
14903 + struct igb_adapter *adapter = netdev_priv(netdev);
14905 + int stat_count = sizeof(struct igb_queue_stats) / sizeof(u64);
14908 + u64 restart_queue = 0, hw_csum_err = 0, hw_csum_good = 0;
14910 + int aggregated = 0, flushed = 0, no_desc = 0;
14913 + /* collect tx ring stats */
14914 + for (i = 0; i < adapter->num_tx_queues; i++)
14915 + restart_queue += adapter->tx_ring[i].restart_queue;
14916 + adapter->restart_queue = restart_queue;
14919 + for (i = 0; i < adapter->num_rx_queues; i++) {
14920 + hw_csum_err += adapter->rx_ring[i].hw_csum_err;
14921 + hw_csum_good += adapter->rx_ring[i].hw_csum_good;
14923 + aggregated += adapter->rx_ring[i].lro_mgr.stats.aggregated;
14924 + flushed += adapter->rx_ring[i].lro_mgr.stats.flushed;
14925 + no_desc += adapter->rx_ring[i].lro_mgr.stats.no_desc;
14927 + adapter->lro_aggregated = aggregated;
14928 + adapter->lro_flushed = flushed;
14929 + adapter->lro_no_desc = no_desc;
14933 + adapter->hw_csum_err = hw_csum_err;
14934 + adapter->hw_csum_good = hw_csum_good;
14936 + igb_update_stats(adapter);
14938 + for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
14939 + char *p = (char *)adapter+igb_gstrings_stats[i].stat_offset;
14940 + data[i] = (igb_gstrings_stats[i].sizeof_stat ==
14941 + sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
14943 + for (j = 0; j < adapter->num_tx_queues; j++) {
14945 + queue_stat = (u64 *)&adapter->tx_ring[j].stats;
14946 + for (k = 0; k < stat_count; k++)
14947 + data[i + k] = queue_stat[k];
14950 + for (j = 0; j < adapter->num_rx_queues; j++) {
14952 + queue_stat = (u64 *)&adapter->rx_ring[j].stats;
14953 + for (k = 0; k < stat_count; k++)
14954 + data[i + k] = queue_stat[k];
14959 +static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
14961 + struct igb_adapter *adapter = netdev_priv(netdev);
14965 + switch (stringset) {
14966 + case ETH_SS_TEST:
14967 + memcpy(data, *igb_gstrings_test,
14968 + IGB_TEST_LEN*ETH_GSTRING_LEN);
14970 + case ETH_SS_STATS:
14971 + for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
14972 + memcpy(p, igb_gstrings_stats[i].stat_string,
14973 + ETH_GSTRING_LEN);
14974 + p += ETH_GSTRING_LEN;
14976 + for (i = 0; i < adapter->num_tx_queues; i++) {
14977 + sprintf(p, "tx_queue_%u_packets", i);
14978 + p += ETH_GSTRING_LEN;
14979 + sprintf(p, "tx_queue_%u_bytes", i);
14980 + p += ETH_GSTRING_LEN;
14982 + for (i = 0; i < adapter->num_rx_queues; i++) {
14983 + sprintf(p, "rx_queue_%u_packets", i);
14984 + p += ETH_GSTRING_LEN;
14985 + sprintf(p, "rx_queue_%u_bytes", i);
14986 + p += ETH_GSTRING_LEN;
14988 +/* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */
14993 +static struct ethtool_ops igb_ethtool_ops = {
14994 + .get_settings = igb_get_settings,
14995 + .set_settings = igb_set_settings,
14996 + .get_drvinfo = igb_get_drvinfo,
14997 + .get_regs_len = igb_get_regs_len,
14998 + .get_regs = igb_get_regs,
14999 + .get_wol = igb_get_wol,
15000 + .set_wol = igb_set_wol,
15001 + .get_msglevel = igb_get_msglevel,
15002 + .set_msglevel = igb_set_msglevel,
15003 + .nway_reset = igb_nway_reset,
15004 + .get_link = ethtool_op_get_link,
15005 + .get_eeprom_len = igb_get_eeprom_len,
15006 + .get_eeprom = igb_get_eeprom,
15007 + .set_eeprom = igb_set_eeprom,
15008 + .get_ringparam = igb_get_ringparam,
15009 + .set_ringparam = igb_set_ringparam,
15010 + .get_pauseparam = igb_get_pauseparam,
15011 + .set_pauseparam = igb_set_pauseparam,
15012 + .get_rx_csum = igb_get_rx_csum,
15013 + .set_rx_csum = igb_set_rx_csum,
15014 + .get_tx_csum = igb_get_tx_csum,
15015 + .set_tx_csum = igb_set_tx_csum,
15016 + .get_sg = ethtool_op_get_sg,
15017 + .set_sg = ethtool_op_set_sg,
15018 +#ifdef NETIF_F_TSO
15019 + .get_tso = ethtool_op_get_tso,
15020 + .set_tso = igb_set_tso,
15022 +#ifdef HAVE_ETHTOOL_GET_SSET_COUNT
15023 + .get_sset_count = igb_get_sset_count,
15025 + .get_stats_count = igb_get_stats_count,
15026 + .self_test_count = igb_diag_test_count,
15028 + .self_test = igb_diag_test,
15029 + .get_strings = igb_get_strings,
15030 + .phys_id = igb_phys_id,
15031 + .get_ethtool_stats = igb_get_ethtool_stats,
15032 +#ifdef ETHTOOL_GPERMADDR
15033 + .get_perm_addr = ethtool_op_get_perm_addr,
15035 + .get_coalesce = igb_get_coalesce,
15036 + .set_coalesce = igb_set_coalesce,
15037 +#ifdef NETIF_F_LRO
15038 + .get_flags = ethtool_op_get_flags,
15039 + .set_flags = ethtool_op_set_flags,
15043 +void igb_set_ethtool_ops(struct net_device *netdev)
15045 + SET_ETHTOOL_OPS(netdev, &igb_ethtool_ops);
15047 +#endif /* SIOCETHTOOL */
15048 Index: linux-2.6.22/drivers/net/igb/igb_main.c
15049 ===================================================================
15050 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
15051 +++ linux-2.6.22/drivers/net/igb/igb_main.c 2009-12-18 12:39:22.000000000 -0500
15053 +/*******************************************************************************
15055 + Intel(R) Gigabit Ethernet Linux driver
15056 + Copyright(c) 2007-2009 Intel Corporation.
15058 + This program is free software; you can redistribute it and/or modify it
15059 + under the terms and conditions of the GNU General Public License,
15060 + version 2, as published by the Free Software Foundation.
15062 + This program is distributed in the hope it will be useful, but WITHOUT
15063 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15064 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15067 + You should have received a copy of the GNU General Public License along with
15068 + this program; if not, write to the Free Software Foundation, Inc.,
15069 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
15071 + The full GNU General Public License is included in this distribution in
15072 + the file called "COPYING".
15074 + Contact Information:
15075 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
15076 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
15078 +*******************************************************************************/
15080 +#include <linux/module.h>
15081 +#include <linux/types.h>
15082 +#include <linux/init.h>
15083 +#include <linux/vmalloc.h>
15084 +#include <linux/pagemap.h>
15085 +#include <linux/netdevice.h>
15086 +#include <linux/tcp.h>
15087 +#ifdef NETIF_F_TSO
15088 +#include <net/checksum.h>
15089 +#ifdef NETIF_F_TSO6
15090 +#include <linux/ipv6.h>
15091 +#include <net/ip6_checksum.h>
15094 +#ifdef SIOCGMIIPHY
15095 +#include <linux/mii.h>
15097 +#ifdef SIOCETHTOOL
15098 +#include <linux/ethtool.h>
15100 +#include <linux/if_vlan.h>
15105 +#define DRV_HW_PERF
15106 +#define VERSION_SUFFIX
15108 +#define DRV_VERSION "2.0.6" VERSION_SUFFIX DRV_DEBUG DRV_HW_PERF
15110 +char igb_driver_name[] = "igb";
15111 +char igb_driver_version[] = DRV_VERSION;
15112 +static const char igb_driver_string[] =
15113 + "Intel(R) Gigabit Ethernet Network Driver";
15114 +static const char igb_copyright[] = "Copyright (c) 2007-2009 Intel Corporation.";
15116 +static struct pci_device_id igb_pci_tbl[] = {
15117 + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576) },
15118 + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS) },
15119 + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES) },
15120 + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER) },
15121 + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES) },
15122 + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD) },
15123 + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER) },
15124 + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER) },
15125 + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES) },
15126 + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER) },
15127 + /* required last entry */
15131 +MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
15133 +void igb_reset(struct igb_adapter *);
15134 +static int igb_setup_all_tx_resources(struct igb_adapter *);
15135 +static int igb_setup_all_rx_resources(struct igb_adapter *);
15136 +static void igb_free_all_tx_resources(struct igb_adapter *);
15137 +static void igb_free_all_rx_resources(struct igb_adapter *);
15138 +static void igb_setup_mrqc(struct igb_adapter *);
15139 +void igb_update_stats(struct igb_adapter *);
15140 +static int igb_probe(struct pci_dev *, const struct pci_device_id *);
15141 +static void __devexit igb_remove(struct pci_dev *pdev);
15142 +static int igb_sw_init(struct igb_adapter *);
15143 +static int igb_open(struct net_device *);
15144 +static int igb_close(struct net_device *);
15145 +static void igb_configure_tx(struct igb_adapter *);
15146 +static void igb_configure_rx(struct igb_adapter *);
15147 +static void igb_clean_all_tx_rings(struct igb_adapter *);
15148 +static void igb_clean_all_rx_rings(struct igb_adapter *);
15149 +static void igb_clean_tx_ring(struct igb_ring *);
15150 +static void igb_clean_rx_ring(struct igb_ring *);
15151 +static void igb_set_rx_mode(struct net_device *);
15152 +static void igb_update_phy_info(unsigned long);
15153 +static void igb_watchdog(unsigned long);
15154 +static void igb_watchdog_task(struct work_struct *);
15155 +static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *, struct net_device *,
15156 + struct igb_ring *);
15157 +static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
15158 +static struct net_device_stats *igb_get_stats(struct net_device *);
15159 +static int igb_change_mtu(struct net_device *, int);
15160 +static int igb_set_mac(struct net_device *, void *);
15161 +static void igb_set_uta(struct igb_adapter *adapter);
15162 +static irqreturn_t igb_intr(int irq, void *);
15163 +static irqreturn_t igb_intr_msi(int irq, void *);
15164 +static irqreturn_t igb_msix_other(int irq, void *);
15165 +static irqreturn_t igb_msix_ring(int irq, void *);
15167 +static void igb_update_dca(struct igb_q_vector *);
15168 +static void igb_setup_dca(struct igb_adapter *);
15169 +#endif /* IGB_DCA */
15170 +static bool igb_clean_tx_irq(struct igb_q_vector *);
15171 +static int igb_poll(struct napi_struct *, int);
15172 +static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int *, int);
15174 +static int igb_get_skb_hdr(struct sk_buff *skb, void **, void **, u64 *, void *);
15176 +static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
15177 +static void igb_tx_timeout(struct net_device *);
15178 +static void igb_reset_task(struct work_struct *);
15179 +static void igb_vlan_rx_register(struct net_device *, struct vlan_group *);
15180 +static void igb_vlan_rx_add_vid(struct net_device *, u16);
15181 +static void igb_vlan_rx_kill_vid(struct net_device *, u16);
15182 +static void igb_restore_vlan(struct igb_adapter *);
15183 +static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8);
15184 +static void igb_ping_all_vfs(struct igb_adapter *);
15185 +static void igb_msg_task(struct igb_adapter *);
15186 +static void igb_vmm_control(struct igb_adapter *);
15187 +static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
15188 +static void igb_vf_configuration(struct pci_dev *, unsigned int);
15191 +static int igb_suspend(struct pci_dev *, pm_message_t);
15192 +static int igb_resume(struct pci_dev *);
15194 +#ifndef USE_REBOOT_NOTIFIER
15195 +static void igb_shutdown(struct pci_dev *);
15197 +static int igb_notify_reboot(struct notifier_block *, unsigned long, void *);
15198 +static struct notifier_block igb_notifier_reboot = {
15199 + .notifier_call = igb_notify_reboot,
15205 +static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
15206 +static struct notifier_block dca_notifier = {
15207 + .notifier_call = igb_notify_dca,
15213 +#ifdef CONFIG_NET_POLL_CONTROLLER
15214 +/* for netdump / net console */
15215 +static void igb_netpoll (struct net_device *);
15218 +#ifdef HAVE_PCI_ERS
15219 +static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
15220 + pci_channel_state_t);
15221 +static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
15222 +static void igb_io_resume(struct pci_dev *);
15224 +static struct pci_error_handlers igb_err_handler = {
15225 + .error_detected = igb_io_error_detected,
15226 + .slot_reset = igb_io_slot_reset,
15227 + .resume = igb_io_resume,
15232 +static struct pci_driver igb_driver = {
15233 + .name = igb_driver_name,
15234 + .id_table = igb_pci_tbl,
15235 + .probe = igb_probe,
15236 + .remove = __devexit_p(igb_remove),
15238 + /* Power Management Hooks */
15239 + .suspend = igb_suspend,
15240 + .resume = igb_resume,
15242 +#ifndef USE_REBOOT_NOTIFIER
15243 + .shutdown = igb_shutdown,
15245 +#ifdef HAVE_PCI_ERS
15246 + .err_handler = &igb_err_handler,
15250 +MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
15251 +MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
15252 +MODULE_LICENSE("GPL");
15253 +MODULE_VERSION(DRV_VERSION);
15255 +static void igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add)
15257 + struct e1000_host_mng_dhcp_cookie *mng_cookie = &hw->mng_cookie;
15258 + u32 index = (vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK;
15259 + u32 mask = 1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
15263 + * if this is the management vlan the only option is to add it in so
15264 + * that the management pass through will continue to work
15266 + if ((mng_cookie->status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
15267 + (vid == mng_cookie->vlan_id))
15270 + vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
15276 + e1000_write_vfta(hw, index, vfta);
15279 +#ifdef SIOCSHWTSTAMP
15281 + * igb_read_clock - read raw cycle counter (to be used by time counter)
15283 +static cycle_t igb_read_clock(const struct cyclecounter *tc)
15285 + struct igb_adapter *adapter =
15286 + container_of(tc, struct igb_adapter, cycles);
15287 + struct e1000_hw *hw = &adapter->hw;
15291 + stamp |= (u64)E1000_READ_REG(hw, E1000_SYSTIML) << shift;
15292 + stamp |= (u64)E1000_READ_REG(hw, E1000_SYSTIMH) << (shift + 32);
15296 +#endif /* SIOCSHWTSTAMP */
15297 +static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
15298 +module_param(debug, int, 0);
15299 +MODULE_PARM_DESC(debug, "Debug level (0=none, ..., 16=all)");
15302 + * igb_init_module - Driver Registration Routine
15304 + * igb_init_module is the first routine called when the driver is
15305 + * loaded. All it does is register with the PCI subsystem.
15307 +static int __init igb_init_module(void)
15310 + printk(KERN_INFO "%s - version %s\n",
15311 + igb_driver_string, igb_driver_version);
15313 + printk(KERN_INFO "%s\n", igb_copyright);
15316 + dca_register_notify(&dca_notifier);
15318 + ret = pci_register_driver(&igb_driver);
15319 +#ifdef USE_REBOOT_NOTIFIER
15321 + register_reboot_notifier(&igb_notifier_reboot);
15327 +module_init(igb_init_module);
15330 + * igb_exit_module - Driver Exit Cleanup Routine
15332 + * igb_exit_module is called just before the driver is removed
15335 +static void __exit igb_exit_module(void)
15338 + dca_unregister_notify(&dca_notifier);
15340 +#ifdef USE_REBOOT_NOTIFIER
15341 + unregister_reboot_notifier(&igb_notifier_reboot);
15343 + pci_unregister_driver(&igb_driver);
15346 +module_exit(igb_exit_module);
15349 + * igb_cache_ring_register - Descriptor ring to register mapping
15350 + * @adapter: board private structure to initialize
15352 + * Once we know the feature-set enabled for the device, we'll cache
15353 + * the register offset the descriptor ring is assigned to.
15355 +static void igb_cache_ring_register(struct igb_adapter *adapter)
15357 + int i = 0, j = 0;
15358 + u32 rbase_offset = adapter->vfs_allocated_count;
15360 + switch (adapter->hw.mac.type) {
15361 + case e1000_82576:
15362 + /* The queues are allocated for virtualization such that VF 0
15363 + * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
15364 + * In order to avoid collision we start at the first free queue
15365 + * and continue consuming queues in the same sequence
15367 + if ((adapter->RSS_queues > 1) && adapter->VMDQ_queues) {
15368 + for (; i < adapter->RSS_queues; i++)
15369 + adapter->rx_ring[i].reg_idx = rbase_offset +
15370 + ((i & 0x1) << 3) + (i >> 1);
15372 + for (; j < adapter->RSS_queues; j++)
15373 + adapter->tx_ring[j].reg_idx = rbase_offset +
15374 + ((j & 0x1) << 3) + (j >> 1);
15377 + case e1000_82575:
15379 + for (; i < adapter->num_rx_queues; i++)
15380 + adapter->rx_ring[i].reg_idx = rbase_offset + i;
15381 + for (; j < adapter->num_tx_queues; j++)
15382 + adapter->tx_ring[j].reg_idx = rbase_offset + j;
15387 +static void igb_free_queues(struct igb_adapter *adapter)
15389 + kfree(adapter->tx_ring);
15390 + kfree(adapter->rx_ring);
15392 + adapter->tx_ring = NULL;
15393 + adapter->rx_ring = NULL;
15395 + adapter->num_rx_queues = 0;
15396 + adapter->num_tx_queues = 0;
15401 + * igb_alloc_queues - Allocate memory for all rings
15402 + * @adapter: board private structure to initialize
15404 + * We allocate one ring per queue at run-time since we don't know the
15405 + * number of queues at compile-time.
15407 +static int igb_alloc_queues(struct igb_adapter *adapter)
15411 + adapter->tx_ring = kcalloc(adapter->num_tx_queues,
15412 + sizeof(struct igb_ring), GFP_KERNEL);
15413 + if (!adapter->tx_ring)
15416 + adapter->rx_ring = kcalloc(adapter->num_rx_queues,
15417 + sizeof(struct igb_ring), GFP_KERNEL);
15418 + if (!adapter->rx_ring)
15421 + for (i = 0; i < adapter->num_tx_queues; i++) {
15422 + struct igb_ring *ring = &(adapter->tx_ring[i]);
15423 + ring->count = adapter->tx_ring_count;
15424 + ring->queue_index = i;
15425 + ring->pdev = adapter->pdev;
15426 + /* For 82575, context index must be unique per ring. */
15427 + if (adapter->hw.mac.type == e1000_82575)
15428 + ring->ctx_idx = i << 4;
15431 + for (i = 0; i < adapter->num_rx_queues; i++) {
15432 + struct igb_ring *ring = &(adapter->rx_ring[i]);
15433 + ring->count = adapter->rx_ring_count;
15434 + ring->queue_index = i;
15435 + ring->pdev = adapter->pdev;
15436 + ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
15437 + ring->rx_ps_hdr_size = 0; /* disable packet split */
15438 + ring->rx_csum = true; /* enable rx checksum */
15441 + /* Initial LRO Settings */
15442 + ring->lro_mgr.max_aggr = adapter->lro_max_aggr;
15443 + ring->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS;
15444 + ring->lro_mgr.get_skb_header = igb_get_skb_hdr;
15445 + ring->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
15446 + ring->lro_mgr.dev = adapter->netdev;
15447 + ring->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
15448 + ring->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
15452 + igb_cache_ring_register(adapter);
15454 + return E1000_SUCCESS;
15457 + igb_free_queues(adapter);
15462 +static void igb_configure_lli(struct igb_adapter *adapter)
15464 + struct e1000_hw *hw = &adapter->hw;
15467 + /* LLI should only be enabled for MSI-X or MSI interrupts */
15468 + if (!adapter->msix_entries && !(adapter->flags & IGB_FLAG_HAS_MSI))
15471 + if (adapter->lli_port) {
15472 + /* use filter 0 for port */
15473 + port = htons((u16)adapter->lli_port);
15474 + E1000_WRITE_REG(hw, E1000_IMIR(0),
15475 + (port | E1000_IMIR_PORT_IM_EN));
15476 + E1000_WRITE_REG(hw, E1000_IMIREXT(0),
15477 + (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
15480 + if (adapter->flags & IGB_FLAG_LLI_PUSH) {
15481 + /* use filter 1 for push flag */
15482 + E1000_WRITE_REG(hw, E1000_IMIR(1),
15483 + (E1000_IMIR_PORT_BP | E1000_IMIR_PORT_IM_EN));
15484 + E1000_WRITE_REG(hw, E1000_IMIREXT(1),
15485 + (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_PSH));
15488 + if (adapter->lli_size) {
15489 + /* use filter 2 for size */
15490 + E1000_WRITE_REG(hw, E1000_IMIR(2),
15491 + (E1000_IMIR_PORT_BP | E1000_IMIR_PORT_IM_EN));
15492 + E1000_WRITE_REG(hw, E1000_IMIREXT(2),
15493 + (adapter->lli_size | E1000_IMIREXT_CTRL_BP));
15498 +#define IGB_N0_QUEUE -1
15499 +static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
15502 + struct igb_adapter *adapter = q_vector->adapter;
15503 + struct e1000_hw *hw = &adapter->hw;
15505 + int rx_queue = IGB_N0_QUEUE;
15506 + int tx_queue = IGB_N0_QUEUE;
15508 + if (q_vector->rx_ring)
15509 + rx_queue = q_vector->rx_ring->reg_idx;
15510 + if (q_vector->tx_ring)
15511 + tx_queue = q_vector->tx_ring->reg_idx;
15513 + switch (hw->mac.type) {
15514 + case e1000_82575:
15515 + /* The 82575 assigns vectors using a bitmask, which matches the
15516 + bitmask for the EICR/EIMS/EIMC registers. To assign one
15517 + or more queues to a vector, we write the appropriate bits
15518 + into the MSIXBM register for that vector. */
15519 + if (rx_queue > IGB_N0_QUEUE)
15520 + msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
15521 + if (tx_queue > IGB_N0_QUEUE)
15522 + msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
15523 + E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), msix_vector, msixbm);
15524 + q_vector->eims_value = msixbm;
15526 + case e1000_82576:
15527 + /* 82576 uses a table-based method for assigning vectors.
15528 + Each queue has a single entry in the table to which we write
15529 + a vector number along with a "valid" bit. Sadly, the layout
15530 + of the table is somewhat counterintuitive. */
15531 + if (rx_queue > IGB_N0_QUEUE) {
15532 + index = (rx_queue & 0x7);
15533 + ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
15534 + if (rx_queue < 8) {
15535 + /* vector goes into low byte of register */
15536 + ivar = ivar & 0xFFFFFF00;
15537 + ivar |= msix_vector | E1000_IVAR_VALID;
15539 + /* vector goes into third byte of register */
15540 + ivar = ivar & 0xFF00FFFF;
15541 + ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
15543 + E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
15545 + if (tx_queue > IGB_N0_QUEUE) {
15546 + index = (tx_queue & 0x7);
15547 + ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
15548 + if (tx_queue < 8) {
15549 + /* vector goes into second byte of register */
15550 + ivar = ivar & 0xFFFF00FF;
15551 + ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
15553 + /* vector goes into high byte of register */
15554 + ivar = ivar & 0x00FFFFFF;
15555 + ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
15557 + E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
15559 + q_vector->eims_value = 1 << msix_vector;
15568 + * igb_configure_msix - Configure MSI-X hardware
15570 + * igb_configure_msix sets up the hardware to properly
15571 + * generate MSI-X interrupts.
15573 +static void igb_configure_msix(struct igb_adapter *adapter)
15576 + int i, vector = 0;
15577 + struct e1000_hw *hw = &adapter->hw;
15579 + adapter->eims_enable_mask = 0;
15581 + /* set vector for other causes, i.e. link changes */
15582 + switch (hw->mac.type) {
15583 + case e1000_82575:
15584 + tmp = E1000_READ_REG(hw, E1000_CTRL_EXT);
15585 + /* enable MSI-X PBA support*/
15586 + tmp |= E1000_CTRL_EXT_PBA_CLR;
15588 + /* Auto-Mask interrupts upon ICR read. */
15589 + tmp |= E1000_CTRL_EXT_EIAME;
15590 + tmp |= E1000_CTRL_EXT_IRCA;
15592 + E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmp);
15594 + /* enable msix_other interrupt */
15595 + E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), vector++,
15596 + E1000_EIMS_OTHER);
15597 + adapter->eims_other = E1000_EIMS_OTHER;
15601 + case e1000_82576:
15602 + /* Turn on MSI-X capability first, or our settings
15603 + * won't stick. And it will take days to debug. */
15604 + E1000_WRITE_REG(hw, E1000_GPIE, E1000_GPIE_MSIX_MODE |
15605 + E1000_GPIE_PBA | E1000_GPIE_EIAME |
15606 + E1000_GPIE_NSICR);
15608 + /* enable msix_other interrupt */
15609 + adapter->eims_other = 1 << vector;
15610 + tmp = (vector++ | E1000_IVAR_VALID) << 8;
15612 + E1000_WRITE_REG(hw, E1000_IVAR_MISC, tmp);
15615 + /* do nothing, since nothing else supports MSI-X */
15617 + } /* switch (hw->mac.type) */
15619 + adapter->eims_enable_mask |= adapter->eims_other;
15621 + for (i = 0; i < adapter->num_q_vectors; i++) {
15622 + struct igb_q_vector *q_vector = adapter->q_vector[i];
15623 + igb_assign_vector(q_vector, vector++);
15624 + adapter->eims_enable_mask |= q_vector->eims_value;
15627 + E1000_WRITE_FLUSH(hw);
15631 + * igb_request_msix - Initialize MSI-X interrupts
15633 + * igb_request_msix allocates MSI-X vectors and requests interrupts from the
15636 +static int igb_request_msix(struct igb_adapter *adapter)
15638 + struct net_device *netdev = adapter->netdev;
15639 + struct e1000_hw *hw = &adapter->hw;
15640 + int i, err = 0, vector = 0;
15642 + err = request_irq(adapter->msix_entries[vector].vector,
15643 + &igb_msix_other, 0, netdev->name, adapter);
15648 + for (i = 0; i < adapter->num_q_vectors; i++) {
15649 + struct igb_q_vector *q_vector = adapter->q_vector[i];
15651 + q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);
15653 + if (q_vector->rx_ring && q_vector->tx_ring)
15654 + sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
15655 + q_vector->rx_ring->queue_index);
15656 + else if (q_vector->tx_ring)
15657 + sprintf(q_vector->name, "%s-tx-%u", netdev->name,
15658 + q_vector->tx_ring->queue_index);
15659 + else if (q_vector->rx_ring)
15660 + sprintf(q_vector->name, "%s-rx-%u", netdev->name,
15661 + q_vector->rx_ring->queue_index);
15663 + sprintf(q_vector->name, "%s-unused", netdev->name);
15665 + err = request_irq(adapter->msix_entries[vector].vector,
15666 + &igb_msix_ring, 0, q_vector->name,
15673 + igb_configure_msix(adapter);
15679 +static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
15681 + if (adapter->msix_entries) {
15682 + pci_disable_msix(adapter->pdev);
15683 + kfree(adapter->msix_entries);
15684 + adapter->msix_entries = NULL;
15685 + } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
15686 + pci_disable_msi(adapter->pdev);
15689 + adapter->num_rx_queues = 0;
15690 + adapter->num_tx_queues = 0;
15696 + * igb_free_q_vectors - Free memory allocated for interrupt vectors
15697 + * @adapter: board private structure to initialize
15699 + * This function frees the memory allocated to the q_vectors. In addition if
15700 + * NAPI is enabled it will delete any references to the NAPI struct prior
15701 + * to freeing the q_vector.
15703 +static void igb_free_q_vectors(struct igb_adapter *adapter)
15707 + for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
15708 + struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
15709 + adapter->q_vector[v_idx] = NULL;
15710 + netif_napi_del(&q_vector->napi);
15713 + adapter->num_q_vectors = 0;
15717 + * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
15719 + * This function resets the device so that it has 0 rx queues, tx queues, and
15720 + * MSI-X interrupts allocated.
15722 +static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
15724 + igb_free_queues(adapter);
15725 + igb_free_q_vectors(adapter);
15726 + igb_reset_interrupt_capability(adapter);
15730 + * igb_set_interrupt_capability - set MSI or MSI-X if supported
15732 + * Attempt to configure interrupts using the best available
15733 + * capabilities of the hardware and kernel.
15735 +static void igb_set_interrupt_capability(struct igb_adapter *adapter)
15740 + /* Number of supported queues. */
15741 + adapter->num_rx_queues = adapter->RSS_queues;
15743 + if (adapter->VMDQ_queues > 1)
15744 + adapter->num_rx_queues += adapter->VMDQ_queues - 1;
15747 + adapter->num_tx_queues = adapter->num_rx_queues;
15749 + adapter->num_tx_queues = max_t(u32, 1, adapter->VMDQ_queues);
15752 + switch (adapter->int_mode) {
15753 + case IGB_INT_MODE_MSIX:
15754 + /* start with one vector for every rx queue */
15755 + numvecs = adapter->num_rx_queues;
15757 + /* if tx handler is separate add 1 for every tx queue */
15758 + if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
15759 + numvecs += adapter->num_tx_queues;
15761 + /* store the number of vectors reserved for queues */
15762 + adapter->num_q_vectors = numvecs;
15764 + /* add 1 vector for link status interrupts */
15766 + adapter->msix_entries = kcalloc(numvecs,
15767 + sizeof(struct msix_entry),
15769 + if (adapter->msix_entries) {
15770 + for (i = 0; i < numvecs; i++)
15771 + adapter->msix_entries[i].entry = i;
15773 + err = pci_enable_msix(adapter->pdev,
15774 + adapter->msix_entries, numvecs);
15778 + /* MSI-X failed, so fall through and try MSI */
15779 + DPRINTK(PROBE, WARNING, "Failed to initialize MSI-X interrupts."
15780 + " Falling back to MSI interrupts.\n");
15781 + igb_reset_interrupt_capability(adapter);
15782 + case IGB_INT_MODE_MSI:
15783 + if (!pci_enable_msi(adapter->pdev))
15784 + adapter->flags |= IGB_FLAG_HAS_MSI;
15786 + DPRINTK(PROBE, WARNING, "Failed to initialize MSI "
15787 + "interrupts. Falling back to legacy interrupts.\n");
15788 + /* Fall through */
15789 + case IGB_INT_MODE_LEGACY:
15790 + /* disable advanced features and set number of queues to 1 */
15791 + adapter->vfs_allocated_count = 0;
15792 + adapter->VMDQ_queues = 0;
15793 + adapter->RSS_queues = 1;
15794 + adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
15795 + adapter->num_rx_queues = 1;
15796 + adapter->num_tx_queues = 1;
15797 + adapter->num_q_vectors = 1;
15798 + /* Don't do anything; this is system default */
15803 + /* Notify the stack of the (possibly) reduced Tx Queue count. */
15804 +#ifdef CONFIG_NETDEVICES_MULTIQUEUE
15805 + adapter->netdev->egress_subqueue_count =
15806 + min_t(u32, adapter->num_tx_queues, adapter->RSS_queues);
15808 + adapter->netdev->real_num_tx_queues =
15809 + min_t(u32, adapter->num_tx_queues, adapter->RSS_queues);
15817 + * igb_alloc_q_vectors - Allocate memory for interrupt vectors
15818 + * @adapter: board private structure to initialize
15820 + * We allocate one q_vector per queue interrupt. If allocation fails we
15821 + * return -ENOMEM.
15823 +static int igb_alloc_q_vectors(struct igb_adapter *adapter)
15825 + struct igb_q_vector *q_vector;
15826 + struct e1000_hw *hw = &adapter->hw;
15829 + for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
15830 + q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL);
15833 + q_vector->adapter = adapter;
15834 + q_vector->itr_val = adapter->itr;
15835 + q_vector->itr_shift = (hw->mac.type == e1000_82575) ? 16 : 0;
15836 + q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
15837 + q_vector->set_itr = 1;
15838 + netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
15839 + adapter->q_vector[v_idx] = q_vector;
15846 + q_vector = adapter->q_vector[v_idx];
15847 + netif_napi_del(&q_vector->napi);
15849 + adapter->q_vector[v_idx] = NULL;
15855 + * igb_map_ring_to_vector - maps allocated queues to vectors
15857 + * This function maps the recently allocated queues to vectors.
15859 +static int igb_map_ring_to_vector(struct igb_adapter *adapter)
15861 + struct igb_q_vector *q_vector;
15865 + if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
15866 + (adapter->num_q_vectors < adapter->num_tx_queues))
15869 + if (adapter->num_q_vectors == (adapter->num_rx_queues + adapter->num_tx_queues)) {
15870 + for (i = 0; i < adapter->num_tx_queues; i++) {
15871 + q_vector = adapter->q_vector[v_idx++];
15872 + adapter->tx_ring[i].q_vector = q_vector;
15873 + q_vector->tx_ring = &adapter->tx_ring[i];
15875 + for (i = 0; i < adapter->num_rx_queues; i++) {
15876 + q_vector = adapter->q_vector[v_idx++];
15877 + adapter->rx_ring[i].q_vector = q_vector;
15878 + q_vector->rx_ring = &adapter->rx_ring[i];
15879 + q_vector->rx_ring->q_vector = q_vector;
15882 + for (i = 0; i < adapter->num_rx_queues; i++) {
15883 + q_vector = adapter->q_vector[v_idx++];
15884 + adapter->rx_ring[i].q_vector = q_vector;
15885 + q_vector->rx_ring = &adapter->rx_ring[i];
15886 + if (i < adapter->num_tx_queues) {
15887 + adapter->tx_ring[i].q_vector = q_vector;
15888 + q_vector->tx_ring = &adapter->tx_ring[i];
15891 + for (; i < adapter->num_tx_queues; i++) {
15892 + q_vector = adapter->q_vector[v_idx++];
15893 + adapter->tx_ring[i].q_vector = q_vector;
15894 + q_vector->tx_ring = &adapter->tx_ring[i];
15901 + * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
15903 + * This function initializes the interrupts and allocates all of the queues.
15905 +static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
15909 + igb_set_interrupt_capability(adapter);
15911 + err = igb_alloc_q_vectors(adapter);
15913 + DPRINTK(PROBE, ERR, "Unable to allocate memory for q_vectors\n");
15914 + goto err_alloc_q_vectors;
15917 + err = igb_alloc_queues(adapter);
15919 + DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
15920 + goto err_alloc_queues;
15923 + err = igb_map_ring_to_vector(adapter);
15925 + DPRINTK(PROBE, ERR, "Invalid q_vector to ring mapping\n");
15926 + goto err_map_queues;
15932 + igb_free_queues(adapter);
15934 + igb_free_q_vectors(adapter);
15935 +err_alloc_q_vectors:
15936 + igb_reset_interrupt_capability(adapter);
15941 + * igb_request_irq - initialize interrupts
15943 + * Attempts to configure interrupts using the best available
15944 + * capabilities of the hardware and kernel.
15946 +static int igb_request_irq(struct igb_adapter *adapter)
15948 + struct net_device *netdev = adapter->netdev;
15949 + struct e1000_hw *hw = &adapter->hw;
15952 + if (adapter->msix_entries) {
15953 + err = igb_request_msix(adapter);
15955 + goto request_done;
15956 + /* fall back to MSI */
15957 + igb_clear_interrupt_scheme(adapter);
15958 + if (!pci_enable_msi(adapter->pdev))
15959 + adapter->flags |= IGB_FLAG_HAS_MSI;
15960 + igb_free_all_tx_resources(adapter);
15961 + igb_free_all_rx_resources(adapter);
15962 + adapter->num_tx_queues = 1;
15963 + adapter->num_rx_queues = 1;
15964 + adapter->num_q_vectors = 1;
15965 + err = igb_alloc_q_vectors(adapter);
15967 + DPRINTK(PROBE, ERR, "Unable to allocate memory for q_vectors\n");
15968 + goto request_done;
15970 + err = igb_alloc_queues(adapter);
15972 + DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
15973 + igb_free_q_vectors(adapter);
15974 + goto request_done;
15976 + igb_setup_all_tx_resources(adapter);
15977 + igb_setup_all_rx_resources(adapter);
15979 + switch (hw->mac.type) {
15980 + case e1000_82575:
15981 + E1000_WRITE_REG(hw, E1000_MSIXBM(0),
15982 + (E1000_EICR_RX_QUEUE0 |
15983 + E1000_EICR_TX_QUEUE0 |
15984 + E1000_EIMS_OTHER));
15986 + case e1000_82576:
15987 + E1000_WRITE_REG(hw, E1000_IVAR0, E1000_IVAR_VALID);
15993 + if (adapter->flags & IGB_FLAG_HAS_MSI) {
15994 + err = request_irq(adapter->pdev->irq, &igb_intr_msi, 0,
15995 + netdev->name, adapter);
15997 + goto request_done;
15999 + /* fall back to legacy interrupts */
16000 + igb_reset_interrupt_capability(adapter);
16001 + adapter->flags &= ~IGB_FLAG_HAS_MSI;
16004 + err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED,
16005 + netdev->name, adapter);
16008 + DPRINTK(PROBE, ERR, "Error %d getting interrupt\n", err);
16009 + goto request_done;
16016 +static void igb_free_irq(struct igb_adapter *adapter)
16018 + if (adapter->msix_entries) {
16019 + int vector = 0, i;
16021 + free_irq(adapter->msix_entries[vector++].vector, adapter);
16023 + for (i = 0; i < adapter->num_q_vectors; i++) {
16024 + struct igb_q_vector *q_vector = adapter->q_vector[i];
16025 + free_irq(adapter->msix_entries[vector++].vector,
16029 + free_irq(adapter->pdev->irq, adapter);
16034 + * igb_irq_disable - Mask off interrupt generation on the NIC
16035 + * @adapter: board private structure
16037 +static void igb_irq_disable(struct igb_adapter *adapter)
16039 + struct e1000_hw *hw = &adapter->hw;
16042 + * we need to be careful when disabling interrupts. The VFs are also
16043 + * mapped into these registers and so clearing the bits can cause
16044 + * issues on the VF drivers so we only need to clear what we set
16046 + if (adapter->msix_entries) {
16047 + u32 regval = E1000_READ_REG(hw, E1000_EIAM);
16048 + regval &= ~adapter->eims_enable_mask;
16049 + E1000_WRITE_REG(hw, E1000_EIAM, regval);
16050 + E1000_WRITE_REG(hw, E1000_EIMC, adapter->eims_enable_mask);
16051 + regval = E1000_READ_REG(hw, E1000_EIAC);
16052 + regval &= ~adapter->eims_enable_mask;
16053 + E1000_WRITE_REG(hw, E1000_EIAC, regval);
16056 + E1000_WRITE_REG(hw, E1000_IAM, 0);
16057 + E1000_WRITE_REG(hw, E1000_IMC, ~0);
16058 + E1000_WRITE_FLUSH(hw);
16060 + synchronize_irq(adapter->pdev->irq);
16064 + * igb_irq_enable - Enable default interrupt generation settings
16065 + * @adapter: board private structure
16067 +static void igb_irq_enable(struct igb_adapter *adapter)
16069 + struct e1000_hw *hw = &adapter->hw;
16071 + if (adapter->msix_entries) {
16072 + u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC;
16073 + u32 regval = E1000_READ_REG(hw, E1000_EIAC);
16074 + E1000_WRITE_REG(hw, E1000_EIAC,
16075 + regval | adapter->eims_enable_mask);
16076 + regval = E1000_READ_REG(hw, E1000_EIAM);
16077 + E1000_WRITE_REG(hw, E1000_EIAM,
16078 + regval | adapter->eims_enable_mask);
16079 + E1000_WRITE_REG(hw, E1000_EIMS, adapter->eims_enable_mask);
16080 + if (adapter->vfs_allocated_count) {
16081 + E1000_WRITE_REG(hw, E1000_MBVFIMR, 0xFF);
16082 + ims |= E1000_IMS_VMMB;
16084 + E1000_WRITE_REG(hw, E1000_IMS, ims);
16086 + E1000_WRITE_REG(hw, E1000_IMS, IMS_ENABLE_MASK);
16087 + E1000_WRITE_REG(hw, E1000_IAM, IMS_ENABLE_MASK);
16091 +static void igb_update_mng_vlan(struct igb_adapter *adapter)
16093 + struct e1000_hw *hw = &adapter->hw;
16094 + u16 vid = adapter->hw.mng_cookie.vlan_id;
16095 + u16 old_vid = adapter->mng_vlan_id;
16097 + if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
16098 + /* add VID to filter table */
16099 + igb_vfta_set(hw, vid, TRUE);
16100 + adapter->mng_vlan_id = vid;
16102 + adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
16105 + if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
16106 + (vid != old_vid) &&
16107 + !vlan_group_get_device(adapter->vlgrp, old_vid)) {
16108 + /* remove VID from filter table */
16109 + igb_vfta_set(hw, old_vid, FALSE);
16114 + * igb_release_hw_control - release control of the h/w to f/w
16115 + * @adapter: address of board private structure
16117 + * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
16118 + * For ASF and Pass Through versions of f/w this means that the
16119 + * driver is no longer loaded.
16122 +static void igb_release_hw_control(struct igb_adapter *adapter)
16124 + struct e1000_hw *hw = &adapter->hw;
16127 + /* Let firmware take over control of h/w */
16128 + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
16129 + E1000_WRITE_REG(hw, E1000_CTRL_EXT,
16130 + ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
16134 + * igb_get_hw_control - get control of the h/w from f/w
16135 + * @adapter: address of board private structure
16137 + * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
16138 + * For ASF and Pass Through versions of f/w this means that
16139 + * the driver is loaded.
16142 +static void igb_get_hw_control(struct igb_adapter *adapter)
16144 + struct e1000_hw *hw = &adapter->hw;
16147 + /* Let firmware know the driver has taken over */
16148 + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
16149 + E1000_WRITE_REG(hw, E1000_CTRL_EXT,
16150 + ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
16154 + * igb_configure - configure the hardware for RX and TX
16155 + * @adapter: private board structure
16157 +static void igb_configure(struct igb_adapter *adapter)
16159 + struct net_device *netdev = adapter->netdev;
16162 + igb_get_hw_control(adapter);
16163 + igb_set_rx_mode(netdev);
16165 + igb_restore_vlan(adapter);
16167 + igb_setup_tctl(adapter);
16168 + igb_setup_mrqc(adapter);
16169 + igb_setup_rctl(adapter);
16171 + igb_configure_tx(adapter);
16172 + igb_configure_rx(adapter);
16174 + e1000_rx_fifo_flush_82575(&adapter->hw);
16175 +#ifdef CONFIG_NETDEVICES_MULTIQUEUE
16176 + if (adapter->num_tx_queues > 1)
16177 + netdev->features |= NETIF_F_MULTI_QUEUE;
16179 + netdev->features &= ~NETIF_F_MULTI_QUEUE;
16182 + /* call IGB_DESC_UNUSED which always leaves
16183 + * at least 1 descriptor unused to make sure
16184 + * next_to_use != next_to_clean */
16185 + for (i = 0; i < adapter->num_rx_queues; i++) {
16186 + struct igb_ring *ring = &adapter->rx_ring[i];
16187 + if (igb_alloc_rx_buffers_adv(ring, IGB_DESC_UNUSED(ring)))
16188 + adapter->alloc_rx_buff_failed++;
16192 + adapter->tx_queue_len = netdev->tx_queue_len;
16197 + * igb_up - Open the interface and prepare it to handle traffic
16198 + * @adapter: board private structure
16200 +int igb_up(struct igb_adapter *adapter)
16202 + struct e1000_hw *hw = &adapter->hw;
16205 + /* hardware has been reset, we need to reload some things */
16206 + igb_configure(adapter);
16208 + clear_bit(__IGB_DOWN, &adapter->state);
16210 + for (i = 0; i < adapter->num_q_vectors; i++) {
16211 + struct igb_q_vector *q_vector = adapter->q_vector[i];
16212 + napi_enable(&q_vector->napi);
16214 + if (adapter->msix_entries)
16215 + igb_configure_msix(adapter);
16217 + igb_configure_lli(adapter);
16219 + /* Clear any pending interrupts. */
16220 + E1000_READ_REG(hw, E1000_ICR);
16221 + igb_irq_enable(adapter);
16223 + /* notify VFs that reset has been completed */
16224 + if (adapter->vfs_allocated_count) {
16225 + u32 reg_data = E1000_READ_REG(hw, E1000_CTRL_EXT);
16226 + reg_data |= E1000_CTRL_EXT_PFRSTD;
16227 + E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg_data);
16230 + /* start the watchdog. */
16231 + hw->mac.get_link_status = 1;
16232 + mod_timer(&adapter->watchdog_timer, jiffies + 1);
16237 +void igb_down(struct igb_adapter *adapter)
16239 + struct net_device *netdev = adapter->netdev;
16240 + struct e1000_hw *hw = &adapter->hw;
16244 + /* signal that we're down so the interrupt handler does not
16245 + * reschedule our watchdog timer */
16246 + set_bit(__IGB_DOWN, &adapter->state);
16248 + /* disable receives in the hardware */
16249 + rctl = E1000_READ_REG(hw, E1000_RCTL);
16250 + E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
16251 + /* flush and sleep below */
16253 + netif_tx_stop_all_queues(netdev);
16255 + /* disable transmits in the hardware */
16256 + tctl = E1000_READ_REG(hw, E1000_TCTL);
16257 + tctl &= ~E1000_TCTL_EN;
16258 + E1000_WRITE_REG(hw, E1000_TCTL, tctl);
16259 + /* flush both disables and wait for them to finish */
16260 + E1000_WRITE_FLUSH(hw);
16263 + for (i = 0; i < adapter->num_q_vectors; i++) {
16264 + struct igb_q_vector *q_vector = adapter->q_vector[i];
16265 + napi_disable(&q_vector->napi);
16268 + igb_irq_disable(adapter);
16270 + del_timer_sync(&adapter->watchdog_timer);
16271 + del_timer_sync(&adapter->phy_info_timer);
16273 + netdev->tx_queue_len = adapter->tx_queue_len;
16274 + netif_carrier_off(netdev);
16276 + /* record the stats before reset*/
16277 + igb_update_stats(adapter);
16279 + adapter->link_speed = 0;
16280 + adapter->link_duplex = 0;
16281 +#ifdef HAVE_PCI_ERS
16282 + if (!pci_channel_offline(adapter->pdev))
16283 + igb_reset(adapter);
16285 + igb_reset(adapter);
16287 + igb_clean_all_tx_rings(adapter);
16288 + igb_clean_all_rx_rings(adapter);
16291 + /* since we reset the hardware DCA settings were cleared */
16292 + igb_setup_dca(adapter);
16296 +void igb_reinit_locked(struct igb_adapter *adapter)
16298 + WARN_ON(in_interrupt());
16299 + while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
16301 + igb_down(adapter);
16303 + clear_bit(__IGB_RESETTING, &adapter->state);
16306 +void igb_reset(struct igb_adapter *adapter)
16308 + struct e1000_hw *hw = &adapter->hw;
16309 + struct e1000_mac_info *mac = &hw->mac;
16310 + struct e1000_fc_info *fc = &hw->fc;
16311 + u32 pba = 0, tx_space, min_tx_space, min_rx_space;
16314 + /* Repartition Pba for greater than 9k mtu
16315 + * To take effect CTRL.RST is required.
16317 + switch (mac->type) {
16318 + case e1000_82576:
16319 + pba = E1000_READ_REG(hw, E1000_RXPBS);
16320 + pba &= E1000_RXPBS_SIZE_MASK_82576;
16322 + case e1000_82575:
16324 + pba = E1000_PBA_34K;
16328 + if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
16329 + (mac->type < e1000_82576)) {
16330 + /* adjust PBA for jumbo frames */
16331 + E1000_WRITE_REG(hw, E1000_PBA, pba);
16333 + /* To maintain wire speed transmits, the Tx FIFO should be
16334 + * large enough to accommodate two full transmit packets,
16335 + * rounded up to the next 1KB and expressed in KB. Likewise,
16336 + * the Rx FIFO should be large enough to accommodate at least
16337 + * one full receive packet and is similarly rounded up and
16338 + * expressed in KB. */
16339 + pba = E1000_READ_REG(hw, E1000_PBA);
16340 + /* upper 16 bits has Tx packet buffer allocation size in KB */
16341 + tx_space = pba >> 16;
16342 + /* lower 16 bits has Rx packet buffer allocation size in KB */
16344 + /* the tx fifo also stores 16 bytes of information about the tx
16345 + * but don't include ethernet FCS because hardware appends it */
16346 + min_tx_space = (adapter->max_frame_size +
16347 + sizeof(struct e1000_tx_desc) -
16348 + ETH_FCS_LEN) * 2;
16349 + min_tx_space = ALIGN(min_tx_space, 1024);
16350 + min_tx_space >>= 10;
16351 + /* software strips receive CRC, so leave room for it */
16352 + min_rx_space = adapter->max_frame_size;
16353 + min_rx_space = ALIGN(min_rx_space, 1024);
16354 + min_rx_space >>= 10;
16356 + /* If current Tx allocation is less than the min Tx FIFO size,
16357 + * and the min Tx FIFO size is less than the current Rx FIFO
16358 + * allocation, take space away from current Rx allocation */
16359 + if (tx_space < min_tx_space &&
16360 + ((min_tx_space - tx_space) < pba)) {
16361 + pba = pba - (min_tx_space - tx_space);
16363 + /* if short on rx space, rx wins and must trump tx
16365 + if (pba < min_rx_space)
16366 + pba = min_rx_space;
16368 + E1000_WRITE_REG(hw, E1000_PBA, pba);
16371 + /* flow control settings */
16372 + /* The high water mark must be low enough to fit one full frame
16373 + * (or the size used for early receive) above it in the Rx FIFO.
16374 + * Set it to the lower of:
16375 + * - 90% of the Rx FIFO size, or
16376 + * - the full Rx FIFO size minus one full frame */
16377 + hwm = min(((pba << 10) * 9 / 10),
16378 + ((pba << 10) - 2 * adapter->max_frame_size));
16380 + if (mac->type < e1000_82576) {
16381 + fc->high_water = hwm & 0xFFF8; /* 8-byte granularity */
16382 + fc->low_water = fc->high_water - 8;
16384 + fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
16385 + fc->low_water = fc->high_water - 16;
16387 + fc->pause_time = 0xFFFF;
16388 + fc->send_xon = 1;
16389 + fc->current_mode = fc->requested_mode;
16391 + /* disable receive for all VFs and wait one second */
16392 + if (adapter->vfs_allocated_count) {
16394 + for (i = 0 ; i < adapter->vfs_allocated_count; i++)
16395 + adapter->vf_data[i].flags = 0;
16397 + /* ping all the active vfs to let them know we are going down */
16398 + igb_ping_all_vfs(adapter);
16400 + /* disable transmits and receives */
16401 + E1000_WRITE_REG(hw, E1000_VFRE, 0);
16402 + E1000_WRITE_REG(hw, E1000_VFTE, 0);
16405 + /* Allow time for pending master requests to run */
16406 + e1000_reset_hw(hw);
16407 + E1000_WRITE_REG(hw, E1000_WUC, 0);
16409 + if (e1000_init_hw(hw))
16410 + DPRINTK(PROBE, ERR, "Hardware Error\n");
16412 + igb_update_mng_vlan(adapter);
16414 + /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
16415 + E1000_WRITE_REG(hw, E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
16417 + e1000_get_phy_info(hw);
16420 +#ifdef HAVE_NET_DEVICE_OPS
16421 +static const struct net_device_ops igb_netdev_ops = {
16422 + .ndo_open = igb_open,
16423 + .ndo_stop = igb_close,
16424 + .ndo_start_xmit = igb_xmit_frame_adv,
16425 + .ndo_get_stats = igb_get_stats,
16426 + .ndo_set_rx_mode = igb_set_rx_mode,
16427 + .ndo_set_multicast_list = igb_set_rx_mode,
16428 + .ndo_set_mac_address = igb_set_mac,
16429 + .ndo_change_mtu = igb_change_mtu,
16430 + .ndo_do_ioctl = igb_ioctl,
16431 + .ndo_tx_timeout = igb_tx_timeout,
16432 + .ndo_validate_addr = eth_validate_addr,
16433 + .ndo_vlan_rx_register = igb_vlan_rx_register,
16434 + .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
16435 + .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
16436 +#ifdef CONFIG_NET_POLL_CONTROLLER
16437 + .ndo_poll_controller = igb_netpoll,
16440 +#endif /* HAVE_NET_DEVICE_OPS */
16443 + * igb_probe - Device Initialization Routine
16444 + * @pdev: PCI device information struct
16445 + * @ent: entry in igb_pci_tbl
16447 + * Returns 0 on success, negative on failure
16449 + * igb_probe initializes an adapter identified by a pci_dev structure.
16450 + * The OS initialization, configuring of the adapter private structure,
16451 + * and a hardware reset occur.
16453 +static int __devinit igb_probe(struct pci_dev *pdev,
16454 + const struct pci_device_id *ent)
16456 + struct net_device *netdev;
16457 + struct igb_adapter *adapter;
16458 + struct e1000_hw *hw;
16459 + int i, err, pci_using_dac;
16460 + u16 eeprom_data = 0;
16461 + static int cards_found;
16462 + static int global_quad_port_a; /* global quad port a indication */
16464 + err = pci_enable_device_mem(pdev);
16468 + pci_using_dac = 0;
16469 + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
16471 + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
16473 + pci_using_dac = 1;
16475 + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
16477 + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
16479 + IGB_ERR("No usable DMA configuration, "
16486 +#ifndef HAVE_ASPM_QUIRKS
16487 + /* 82575 requires that the pci-e link partner disable the L0s state */
16488 + switch (pdev->device) {
16489 + case E1000_DEV_ID_82575EB_COPPER:
16490 + case E1000_DEV_ID_82575EB_FIBER_SERDES:
16491 + case E1000_DEV_ID_82575GB_QUAD_COPPER:
16492 + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);
16497 +#endif /* HAVE_ASPM_QUIRKS */
16498 + err = pci_request_selected_regions(pdev,
16499 + pci_select_bars(pdev,
16501 + igb_driver_name);
16503 + goto err_pci_reg;
16505 + pci_enable_pcie_error_reporting(pdev);
16507 + pci_set_master(pdev);
16511 + netdev = alloc_etherdev_mq(sizeof(struct igb_adapter), IGB_ABS_MAX_TX_QUEUES);
16513 + netdev = alloc_etherdev(sizeof(struct igb_adapter));
16514 +#endif /* HAVE_TX_MQ */
16516 + goto err_alloc_etherdev;
16518 + SET_MODULE_OWNER(netdev);
16519 + SET_NETDEV_DEV(netdev, &pdev->dev);
16521 + pci_set_drvdata(pdev, netdev);
16522 + adapter = netdev_priv(netdev);
16523 + adapter->netdev = netdev;
16524 + adapter->pdev = pdev;
16525 + hw = &adapter->hw;
16526 + hw->back = adapter;
16527 + adapter->msg_enable = (1 << debug) - 1;
16529 +#ifdef HAVE_PCI_ERS
16530 + err = pci_save_state(pdev);
16532 + goto err_ioremap;
16535 + hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
16536 + pci_resource_len(pdev, 0));
16537 + if (!hw->hw_addr)
16538 + goto err_ioremap;
16540 +#ifdef HAVE_NET_DEVICE_OPS
16541 + netdev->netdev_ops = &igb_netdev_ops;
16542 +#else /* HAVE_NET_DEVICE_OPS */
16543 + netdev->open = &igb_open;
16544 + netdev->stop = &igb_close;
16545 + netdev->get_stats = &igb_get_stats;
16546 +#ifdef HAVE_SET_RX_MODE
16547 + netdev->set_rx_mode = &igb_set_rx_mode;
16549 + netdev->set_multicast_list = &igb_set_rx_mode;
16550 + netdev->set_mac_address = &igb_set_mac;
16551 + netdev->change_mtu = &igb_change_mtu;
16552 + netdev->do_ioctl = &igb_ioctl;
16553 +#ifdef HAVE_TX_TIMEOUT
16554 + netdev->tx_timeout = &igb_tx_timeout;
16556 + netdev->vlan_rx_register = igb_vlan_rx_register;
16557 + netdev->vlan_rx_add_vid = igb_vlan_rx_add_vid;
16558 + netdev->vlan_rx_kill_vid = igb_vlan_rx_kill_vid;
16559 +#ifdef CONFIG_NET_POLL_CONTROLLER
16560 + netdev->poll_controller = igb_netpoll;
16562 + netdev->hard_start_xmit = &igb_xmit_frame_adv;
16563 +#endif /* HAVE_NET_DEVICE_OPS */
16564 + igb_set_ethtool_ops(netdev);
16565 +#ifdef HAVE_TX_TIMEOUT
16566 + netdev->watchdog_timeo = 5 * HZ;
16569 + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
16571 + adapter->bd_number = cards_found;
16573 + /* setup the private structure */
16574 + err = igb_sw_init(adapter);
16576 + goto err_sw_init;
16578 + e1000_get_bus_info(hw);
16580 + hw->phy.autoneg_wait_to_complete = FALSE;
16581 + hw->mac.adaptive_ifs = FALSE;
16583 + /* Copper options */
16584 + if (hw->phy.media_type == e1000_media_type_copper) {
16585 + hw->phy.mdix = AUTO_ALL_MODES;
16586 + hw->phy.disable_polarity_correction = FALSE;
16587 + hw->phy.ms_type = e1000_ms_hw_default;
16590 + if (e1000_check_reset_block(hw))
16591 + DPRINTK(PROBE, INFO,
16592 + "PHY reset is blocked due to SOL/IDER session.\n");
16594 + netdev->features = NETIF_F_SG |
16595 + NETIF_F_IP_CSUM |
16596 + NETIF_F_HW_VLAN_TX |
16597 + NETIF_F_HW_VLAN_RX |
16598 + NETIF_F_HW_VLAN_FILTER;
16600 +#ifdef NETIF_F_IPV6_CSUM
16601 + netdev->features |= NETIF_F_IPV6_CSUM;
16603 +#ifdef NETIF_F_TSO
16604 + netdev->features |= NETIF_F_TSO;
16605 +#ifdef NETIF_F_TSO6
16606 + netdev->features |= NETIF_F_TSO6;
16608 +#endif /* NETIF_F_TSO */
16611 + netdev->features |= NETIF_F_LRO;
16613 +#ifdef NETIF_F_GRO
16614 + netdev->features |= NETIF_F_GRO;
16617 +#ifdef HAVE_NETDEV_VLAN_FEATURES
16618 + netdev->vlan_features |= NETIF_F_TSO;
16619 + netdev->vlan_features |= NETIF_F_TSO6;
16620 + netdev->vlan_features |= NETIF_F_IP_CSUM;
16621 + netdev->vlan_features |= NETIF_F_IPV6_CSUM;
16622 + netdev->vlan_features |= NETIF_F_SG;
16625 + if (pci_using_dac)
16626 + netdev->features |= NETIF_F_HIGHDMA;
16628 + if (hw->mac.type >= e1000_82576)
16629 + netdev->features |= NETIF_F_SCTP_CSUM;
16631 + adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
16633 + /* before reading the NVM, reset the controller to put the device in a
16634 + * known good starting state */
16635 + e1000_reset_hw(hw);
16637 + /* make sure the NVM is good */
16638 + if (e1000_validate_nvm_checksum(hw) < 0) {
16639 + DPRINTK(PROBE, ERR, "The NVM Checksum Is Not Valid\n");
16644 + /* copy the MAC address out of the NVM */
16645 + if (e1000_read_mac_addr(hw))
16646 + DPRINTK(PROBE, ERR, "NVM Read Error\n");
16647 + memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
16648 +#ifdef ETHTOOL_GPERMADDR
16649 + memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);
16651 + if (!is_valid_ether_addr(netdev->perm_addr)) {
16653 + if (!is_valid_ether_addr(netdev->dev_addr)) {
16655 + DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
16660 + init_timer(&adapter->watchdog_timer);
16661 + adapter->watchdog_timer.function = &igb_watchdog;
16662 + adapter->watchdog_timer.data = (unsigned long) adapter;
16664 + init_timer(&adapter->phy_info_timer);
16665 + adapter->phy_info_timer.function = &igb_update_phy_info;
16666 + adapter->phy_info_timer.data = (unsigned long) adapter;
16668 + INIT_WORK(&adapter->reset_task, igb_reset_task);
16669 + INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
16671 + /* Initialize link properties that are user-changeable */
16672 + adapter->fc_autoneg = true;
16673 + hw->mac.autoneg = true;
16674 + hw->phy.autoneg_advertised = 0x2f;
16676 + hw->fc.requested_mode = e1000_fc_default;
16677 + hw->fc.current_mode = e1000_fc_default;
16679 + e1000_validate_mdi_setting(hw);
16681 + /* Initial Wake on LAN setting If APM wake is enabled in the EEPROM,
16682 + * enable the ACPI Magic Packet filter
16685 + if (hw->bus.func == 0)
16686 + e1000_read_nvm(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
16687 + else if (hw->bus.func == 1)
16688 + e1000_read_nvm(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
16690 + if (eeprom_data & IGB_EEPROM_APME)
16691 + adapter->eeprom_wol |= E1000_WUFC_MAG;
16693 + /* now that we have the eeprom settings, apply the special cases where
16694 + * the eeprom may be wrong or the board simply won't support wake on
16695 + * lan on a particular port */
16696 + switch (pdev->device) {
16697 + case E1000_DEV_ID_82575GB_QUAD_COPPER:
16698 + adapter->eeprom_wol = 0;
16700 + case E1000_DEV_ID_82575EB_FIBER_SERDES:
16701 + case E1000_DEV_ID_82576_FIBER:
16702 + case E1000_DEV_ID_82576_SERDES:
16703 + /* Wake events only supported on port A for dual fiber
16704 + * regardless of eeprom setting */
16705 + if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FUNC_1)
16706 + adapter->eeprom_wol = 0;
16708 + case E1000_DEV_ID_82576_QUAD_COPPER:
16709 + /* if quad port adapter, disable WoL on all but port A */
16710 + if (global_quad_port_a != 0)
16711 + adapter->eeprom_wol = 0;
16713 + adapter->flags |= IGB_FLAG_QUAD_PORT_A;
16714 + /* Reset for multiple quad port adapters */
16715 + if (++global_quad_port_a == 4)
16716 + global_quad_port_a = 0;
16720 + /* initialize the wol settings based on the eeprom settings */
16721 + adapter->wol = adapter->eeprom_wol;
16722 + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
16724 + /* reset the hardware with the new settings */
16725 + igb_reset(adapter);
16727 + /* let the f/w know that the h/w is now under the control of the
16729 + igb_get_hw_control(adapter);
16731 + /* tell the stack to leave us alone until igb_open() is called */
16732 + netif_carrier_off(netdev);
16733 + netif_tx_stop_all_queues(netdev);
16735 + strncpy(netdev->name, "eth%d", IFNAMSIZ);
16736 + err = register_netdev(netdev);
16738 + goto err_register;
16741 + if (dca_add_requester(&pdev->dev) == E1000_SUCCESS) {
16742 + adapter->flags |= IGB_FLAG_DCA_ENABLED;
16743 + DPRINTK(PROBE, INFO, "DCA enabled\n");
16744 + igb_setup_dca(adapter);
16748 +#ifdef SIOCSHWTSTAMP
16749 + switch (hw->mac.type) {
16750 + case e1000_82576:
16752 + * Initialize hardware timer: we keep it running just in case
16753 + * that some program needs it later on.
16755 + memset(&adapter->cycles, 0, sizeof(adapter->cycles));
16756 + adapter->cycles.read = igb_read_clock;
16757 + adapter->cycles.mask = CLOCKSOURCE_MASK(64);
16758 + adapter->cycles.mult = 1;
16760 + * Scale the NIC clock cycle by a large factor so that
16761 + * relatively small clock corrections can be added or
16762 + * subtracted at each clock tick. The drawbacks of a large
16763 + * factor are a) that the clock register overflows more quickly
16764 + * (not such a big deal) and b) that the increment per tick has
16765 + * to fit into 24 bits. As a result we need to use a shift of
16766 + * 19 so we can fit a value of 16 into the TIMINCA register.
16768 + adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
16769 + E1000_WRITE_REG(hw, E1000_TIMINCA,
16770 + (1 << E1000_TIMINCA_16NS_SHIFT) |
16771 + (16 << IGB_82576_TSYNC_SHIFT));
16773 + /* Set registers so that rollover occurs soon to test this. */
16774 + E1000_WRITE_REG(hw, E1000_SYSTIML, 0x00000000);
16775 + E1000_WRITE_REG(hw, E1000_SYSTIMH, 0xFF800000);
16776 + E1000_WRITE_FLUSH(hw);
16778 + timecounter_init(&adapter->clock,
16779 + &adapter->cycles,
16780 + ktime_to_ns(ktime_get_real()));
16782 + * Synchronize our NIC clock against system wall clock. NIC
16783 + * time stamp reading requires ~3us per sample, each sample
16784 + * was pretty stable even under load => only require 10
16785 + * samples for each offset comparison.
16787 + memset(&adapter->compare, 0, sizeof(adapter->compare));
16788 + adapter->compare.source = &adapter->clock;
16789 + adapter->compare.target = ktime_get_real;
16790 + adapter->compare.num_samples = 10;
16791 + timecompare_update(&adapter->compare, 0);
16793 + case e1000_82575:
16794 + /* 82575 does not support timesync */
16799 +#endif /* SIOCSHWTSTAMP */
16800 + DPRINTK(PROBE, INFO, "Intel(R) Gigabit Ethernet Network Connection\n");
16801 + /* print bus type/speed/width info */
16802 + DPRINTK(PROBE, INFO, "(PCIe:%s:%s) ",
16803 + ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" : "unknown"),
16804 + ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
16805 + (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
16806 + (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
16809 + for (i = 0; i < 6; i++)
16810 + printk("%2.2x%c", netdev->dev_addr[i], i == 5 ? '\n' : ':');
16812 + for (i = 0; i < adapter->vfs_allocated_count; i++)
16813 + igb_vf_configuration(pdev, (i | 0x10000000));
16815 + DPRINTK(PROBE, INFO,
16816 + "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
16817 + adapter->msix_entries ? "MSI-X" :
16818 + adapter->flags & IGB_FLAG_HAS_MSI ? "MSI" :
16820 + adapter->num_rx_queues, adapter->num_tx_queues);
16826 + igb_release_hw_control(adapter);
16828 + if (!e1000_check_reset_block(hw))
16829 + e1000_phy_hw_reset(hw);
16831 + if (hw->flash_address)
16832 + iounmap(hw->flash_address);
16834 + igb_clear_interrupt_scheme(adapter);
16835 + iounmap(hw->hw_addr);
16837 + free_netdev(netdev);
16838 +err_alloc_etherdev:
16839 + pci_release_selected_regions(pdev,
16840 + pci_select_bars(pdev, IORESOURCE_MEM));
16843 + pci_disable_device(pdev);
16848 + * igb_remove - Device Removal Routine
16849 + * @pdev: PCI device information struct
16851 + * igb_remove is called by the PCI subsystem to alert the driver
16852 + * that it should release a PCI device. This could be caused by a
16853 + * Hot-Plug event, or because the driver is going to be removed from
16856 +static void __devexit igb_remove(struct pci_dev *pdev)
16858 + struct net_device *netdev = pci_get_drvdata(pdev);
16859 + struct igb_adapter *adapter = netdev_priv(netdev);
16860 + struct e1000_hw *hw = &adapter->hw;
16862 + /* flush_scheduled_work may reschedule our watchdog task, so
16863 + * explicitly disable watchdog tasks from being rescheduled */
16864 + set_bit(__IGB_DOWN, &adapter->state);
16865 + del_timer_sync(&adapter->watchdog_timer);
16866 + del_timer_sync(&adapter->phy_info_timer);
16868 + flush_scheduled_work();
16872 + if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
16873 + DPRINTK(PROBE, INFO, "DCA disabled\n");
16874 + dca_remove_requester(&pdev->dev);
16875 + adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
16876 + E1000_WRITE_REG(hw, E1000_DCA_CTRL, 1);
16880 + /* Release control of h/w to f/w. If f/w is AMT enabled, this
16881 + * would have already happened in close and is redundant. */
16882 + igb_release_hw_control(adapter);
16884 + unregister_netdev(netdev);
16886 + if (!e1000_check_reset_block(hw))
16887 + e1000_phy_hw_reset(hw);
16889 + igb_clear_interrupt_scheme(adapter);
16891 +#ifdef CONFIG_PCI_IOV
16892 + if (adapter->vf_data) {
16893 + /* disable iov and allow time for transactions to clear */
16894 + pci_disable_sriov(pdev);
16897 + kfree(adapter->vf_data);
16898 + adapter->vf_data = NULL;
16899 + E1000_WRITE_REG(&adapter->hw, E1000_IOVCTL,
16900 + E1000_IOVCTL_REUSE_VFQ);
16902 + dev_info(&adapter->pdev->dev, "IOV Disabled\n");
16906 + iounmap(hw->hw_addr);
16907 + if (hw->flash_address)
16908 + iounmap(adapter->hw.flash_address);
16909 + pci_release_selected_regions(pdev,
16910 + pci_select_bars(pdev, IORESOURCE_MEM));
16912 + free_netdev(netdev);
16914 + pci_disable_pcie_error_reporting(pdev);
16916 + pci_disable_device(pdev);
16920 + * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
16921 + * @adapter: board private structure to initialize
16923 + * This function initializes the vf specific data storage and then attempts to
16924 + * allocate the VFs. The reason for ordering it this way is because it is much
16925 + * more expensive time wise to disable SR-IOV than it is to allocate and free
16926 + * the memory for the VFs.
16928 +static void __devinit igb_probe_vfs(struct igb_adapter *adapter)
16930 +#ifdef CONFIG_PCI_IOV
16931 + struct pci_dev *pdev = adapter->pdev;
16933 + if (adapter->vfs_allocated_count) {
16934 + adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
16935 + sizeof(struct vf_data_storage),
16937 + /* if allocation failed then we do not support SR-IOV */
16938 + if (!adapter->vf_data) {
16939 + adapter->vfs_allocated_count = 0;
16940 + dev_err(&pdev->dev, "Unable to allocate memory for VF "
16941 + "Data Storage\n");
16945 + if (pci_enable_sriov(pdev, adapter->vfs_allocated_count)) {
16946 + kfree(adapter->vf_data);
16947 + adapter->vf_data = NULL;
16948 +#endif /* CONFIG_PCI_IOV */
16949 + adapter->vfs_allocated_count = 0;
16950 +#ifdef CONFIG_PCI_IOV
16952 + dev_info(&pdev->dev, "IOV1 VFs enabled := %d\n",
16953 + adapter->vfs_allocated_count);
16956 +#endif /* CONFIG_PCI_IOV */
16959 + * igb_sw_init - Initialize general software structures (struct igb_adapter)
16960 + * @adapter: board private structure to initialize
16962 + * igb_sw_init initializes the Adapter private data structure.
16963 + * Fields are initialized based on PCI device information and
16964 + * OS network device settings (MTU size).
16966 +static int __devinit igb_sw_init(struct igb_adapter *adapter)
16968 + struct e1000_hw *hw = &adapter->hw;
16969 + struct net_device *netdev = adapter->netdev;
16970 + struct pci_dev *pdev = adapter->pdev;
16972 + /* PCI config space info */
16974 + hw->vendor_id = pdev->vendor;
16975 + hw->device_id = pdev->device;
16976 + hw->subsystem_vendor_id = pdev->subsystem_vendor;
16977 + hw->subsystem_device_id = pdev->subsystem_device;
16979 + pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
16981 + pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
16983 + adapter->tx_ring_count = IGB_DEFAULT_TXD;
16984 + adapter->rx_ring_count = IGB_DEFAULT_RXD;
16985 + adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
16986 + adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
16988 + /* Initialize the hardware-specific values */
16989 + if (e1000_setup_init_funcs(hw, TRUE)) {
16990 + DPRINTK(PROBE, ERR, "Hardware Initialization Failure\n");
16994 + igb_check_options(adapter);
16996 + if (igb_init_interrupt_scheme(adapter)) {
16997 + DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
17001 + igb_probe_vfs(adapter);
17003 + /* Explicitly disable IRQ since the NIC can be in any state. */
17004 + igb_irq_disable(adapter);
17006 + set_bit(__IGB_DOWN, &adapter->state);
17011 + * igb_open - Called when a network interface is made active
17012 + * @netdev: network interface device structure
17014 + * Returns 0 on success, negative value on failure
17016 + * The open entry point is called when a network interface is made
17017 + * active by the system (IFF_UP). At this point all resources needed
17018 + * for transmit and receive operations are allocated, the interrupt
17019 + * handler is registered with the OS, the watchdog timer is started,
17020 + * and the stack is notified that the interface is ready.
17022 +static int igb_open(struct net_device *netdev)
17024 + struct igb_adapter *adapter = netdev_priv(netdev);
17025 + struct e1000_hw *hw = &adapter->hw;
17029 + /* disallow open during test */
17030 + if (test_bit(__IGB_TESTING, &adapter->state))
17033 + /* allocate transmit descriptors */
17034 + err = igb_setup_all_tx_resources(adapter);
17036 + goto err_setup_tx;
17038 + /* allocate receive descriptors */
17039 + err = igb_setup_all_rx_resources(adapter);
17041 + goto err_setup_rx;
17043 + /* e1000_power_up_phy(adapter); */
17045 + /* before we allocate an interrupt, we must be ready to handle it.
17046 + * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
17047 + * as soon as we call pci_request_irq, so we have to setup our
17048 + * clean_rx handler before we do so. */
17049 + igb_configure(adapter);
17051 + err = igb_request_irq(adapter);
17053 + goto err_req_irq;
17055 + /* From here on the code is the same as igb_up() */
17056 + clear_bit(__IGB_DOWN, &adapter->state);
17058 + for (i = 0; i < adapter->num_q_vectors; i++) {
17059 + struct igb_q_vector *q_vector = adapter->q_vector[i];
17060 + napi_enable(&q_vector->napi);
17062 + igb_configure_lli(adapter);
17064 + /* Clear any pending interrupts. */
17065 + E1000_READ_REG(hw, E1000_ICR);
17067 + igb_irq_enable(adapter);
17069 + /* notify VFs that reset has been completed */
17070 + if (adapter->vfs_allocated_count) {
17071 + u32 reg_data = E1000_READ_REG(hw, E1000_CTRL_EXT);
17072 + reg_data |= E1000_CTRL_EXT_PFRSTD;
17073 + E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg_data);
17076 + netif_tx_start_all_queues(netdev);
17078 + /* start the watchdog. */
17079 + hw->mac.get_link_status = 1;
17080 + mod_timer(&adapter->watchdog_timer, jiffies + 1);
17082 + return E1000_SUCCESS;
17085 + igb_release_hw_control(adapter);
17086 + /* e1000_power_down_phy(adapter); */
17087 + igb_free_all_rx_resources(adapter);
17089 + igb_free_all_tx_resources(adapter);
17091 + igb_reset(adapter);
17097 + * igb_close - Disables a network interface
17098 + * @netdev: network interface device structure
17100 + * Returns 0, this is not allowed to fail
17102 + * The close entry point is called when an interface is de-activated
17103 + * by the OS. The hardware is still under the driver's control, but
17104 + * needs to be disabled. A global MAC reset is issued to stop the
17105 + * hardware, and all transmit and receive resources are freed.
17107 +static int igb_close(struct net_device *netdev)
17109 + struct igb_adapter *adapter = netdev_priv(netdev);
17111 + WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
17112 + igb_down(adapter);
17114 + igb_free_irq(adapter);
17116 + igb_free_all_tx_resources(adapter);
17117 + igb_free_all_rx_resources(adapter);
17123 + * igb_setup_tx_resources - allocate Tx resources (Descriptors)
17124 + * @tx_ring: tx descriptor ring (for a specific queue) to setup
17126 + * Return 0 on success, negative on failure
17128 +int igb_setup_tx_resources(struct igb_ring *tx_ring)
17130 + struct pci_dev *pdev = tx_ring->pdev;
17133 + size = sizeof(struct igb_buffer) * tx_ring->count;
17134 + tx_ring->buffer_info = vmalloc(size);
17135 + if (!tx_ring->buffer_info)
17137 + memset(tx_ring->buffer_info, 0, size);
17139 + /* round up to nearest 4K */
17140 + tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
17141 + tx_ring->size = ALIGN(tx_ring->size, 4096);
17143 + tx_ring->desc = pci_alloc_consistent(pdev,
17147 + if (!tx_ring->desc)
17150 + tx_ring->next_to_use = 0;
17151 + tx_ring->next_to_clean = 0;
17155 + vfree(tx_ring->buffer_info);
17156 + dev_err(&pdev->dev, "Unable to allocate memory for the "
17157 + "transmit descriptor ring\n");
17162 + * igb_setup_all_tx_resources - wrapper to allocate Tx resources
17163 + * (Descriptors) for all queues
17164 + * @adapter: board private structure
17166 + * Return 0 on success, negative on failure
17168 +static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
17172 + for (i = 0; i < adapter->num_tx_queues; i++) {
17173 + err = igb_setup_tx_resources(&adapter->tx_ring[i]);
17175 + DPRINTK(PROBE, ERR,
17176 + "Allocation for Tx Queue %u failed\n", i);
17177 + for (i--; i >= 0; i--)
17178 + igb_free_tx_resources(&adapter->tx_ring[i]);
17184 + for (i = 0; i < IGB_ABS_MAX_TX_QUEUES; i++) {
17185 +#ifdef CONFIG_NETDEVICES_MULTIQUEUE
17186 + int r_idx = i % adapter->netdev->egress_subqueue_count;
17188 + int r_idx = i % adapter->netdev->real_num_tx_queues;
17190 + adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx];
17197 + * igb_setup_tctl - configure the transmit control registers
17198 + * @adapter: Board private structure
17200 +void igb_setup_tctl(struct igb_adapter *adapter)
17202 + struct e1000_hw *hw = &adapter->hw;
17205 + /* disable queue 0 which is enabled by default on 82575 and 82576 */
17206 + E1000_WRITE_REG(hw, E1000_TXDCTL(0), 0);
17208 + /* Program the Transmit Control Register */
17209 + tctl = E1000_READ_REG(hw, E1000_TCTL);
17210 + tctl &= ~E1000_TCTL_CT;
17211 + tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
17212 + (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
17214 + e1000_config_collision_dist(hw);
17216 + /* Enable transmits */
17217 + tctl |= E1000_TCTL_EN;
17219 + E1000_WRITE_REG(hw, E1000_TCTL, tctl);
17223 + * igb_configure_tx_ring - Configure transmit ring after Reset
17224 + * @adapter: board private structure
17225 + * @ring: tx ring to configure
17227 + * Configure a transmit ring after a reset.
17229 +void igb_configure_tx_ring(struct igb_adapter *adapter,
17230 + struct igb_ring *ring)
17232 + struct e1000_hw *hw = &adapter->hw;
17234 + u64 tdba = ring->dma;
17235 + int reg_idx = ring->reg_idx;
17237 + /* disable the queue */
17238 + txdctl = E1000_READ_REG(hw, E1000_TXDCTL(reg_idx));
17239 + E1000_WRITE_REG(hw, E1000_TXDCTL(reg_idx),
17240 + txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
17241 + E1000_WRITE_FLUSH(hw);
17244 + E1000_WRITE_REG(hw, E1000_TDLEN(reg_idx),
17245 + ring->count * sizeof(struct e1000_tx_desc));
17246 + E1000_WRITE_REG(hw, E1000_TDBAL(reg_idx),
17247 + tdba & 0x00000000ffffffffULL);
17248 + E1000_WRITE_REG(hw, E1000_TDBAH(reg_idx), tdba >> 32);
17250 + ring->head = hw->hw_addr + E1000_TDH(reg_idx);
17251 + ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
17252 + writel(0, ring->head);
17253 + writel(0, ring->tail);
17255 + txdctl |= IGB_TX_PTHRESH;
17256 + txdctl |= IGB_TX_HTHRESH << 8;
17257 + txdctl |= IGB_TX_WTHRESH << 16;
17259 + txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
17260 + E1000_WRITE_REG(hw, E1000_TXDCTL(reg_idx), txdctl);
17264 + * igb_configure_tx - Configure transmit Unit after Reset
17265 + * @adapter: board private structure
17267 + * Configure the Tx unit of the MAC after a reset.
17269 +static void igb_configure_tx(struct igb_adapter *adapter)
17273 + for (i = 0; i < adapter->num_tx_queues; i++)
17274 + igb_configure_tx_ring(adapter, &adapter->tx_ring[i]);
17279 + * igb_setup_rx_resources - allocate Rx resources (Descriptors)
17280 + * @rx_ring: rx descriptor ring (for a specific queue) to setup
17282 + * Returns 0 on success, negative on failure
17284 +int igb_setup_rx_resources(struct igb_ring *rx_ring)
17286 + struct pci_dev *pdev = rx_ring->pdev;
17287 + int size, desc_len;
17290 + size = sizeof(struct net_lro_desc) * MAX_LRO_DESCRIPTORS;
17291 + rx_ring->lro_mgr.lro_arr = vmalloc(size);
17292 + if (!rx_ring->lro_mgr.lro_arr)
17294 + memset(rx_ring->lro_mgr.lro_arr, 0, size);
17295 +#endif /* IGB_LRO */
17297 + size = sizeof(struct igb_buffer) * rx_ring->count;
17298 + rx_ring->buffer_info = vmalloc(size);
17299 + if (!rx_ring->buffer_info)
17301 + memset(rx_ring->buffer_info, 0, size);
17303 + desc_len = sizeof(union e1000_adv_rx_desc);
17305 + /* Round up to nearest 4K */
17306 + rx_ring->size = rx_ring->count * desc_len;
17307 + rx_ring->size = ALIGN(rx_ring->size, 4096);
17309 + rx_ring->desc = pci_alloc_consistent(pdev,
17313 + if (!rx_ring->desc)
17316 + rx_ring->next_to_clean = 0;
17317 + rx_ring->next_to_use = 0;
17324 + vfree(rx_ring->lro_mgr.lro_arr);
17325 + rx_ring->lro_mgr.lro_arr = NULL;
17327 + vfree(rx_ring->buffer_info);
17328 + rx_ring->buffer_info = NULL;
17329 + dev_err(&pdev->dev, "Unable to allocate memory for the "
17330 + "receive descriptor ring\n");
17335 + * igb_setup_all_rx_resources - wrapper to allocate Rx resources
17336 + * (Descriptors) for all queues
17337 + * @adapter: board private structure
17339 + * Return 0 on success, negative on failure
17341 +static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
17345 + for (i = 0; i < adapter->num_rx_queues; i++) {
17346 + err = igb_setup_rx_resources(&adapter->rx_ring[i]);
17348 + DPRINTK(PROBE, ERR,
17349 + "Allocation for Rx Queue %u failed\n", i);
17350 + for (i--; i >= 0; i--)
17351 + igb_free_rx_resources(&adapter->rx_ring[i]);
17360 + * igb_setup_mrqc - configure the multiple receive queue control registers
17361 + * @adapter: Board private structure
17363 +static void igb_setup_mrqc(struct igb_adapter *adapter)
17365 + struct e1000_hw *hw = &adapter->hw;
17366 + u32 mrqc, rxcsum;
17367 + u32 j, num_rx_queues, shift = 0, shift2 = 0;
17368 + union e1000_reta {
17372 + static const u8 rsshash[40] = {
17373 + 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
17374 + 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
17375 + 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
17376 + 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
17378 + /* Fill out hash function seeds */
17379 + for (j = 0; j < 10; j++) {
17380 + u32 rsskey = rsshash[(j * 4)];
17381 + rsskey |= rsshash[(j * 4) + 1] << 8;
17382 + rsskey |= rsshash[(j * 4) + 2] << 16;
17383 + rsskey |= rsshash[(j * 4) + 3] << 24;
17384 + E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), j, rsskey);
17387 + num_rx_queues = adapter->RSS_queues;
17389 + if (adapter->vfs_allocated_count || adapter->VMDQ_queues) {
17390 + /* 82575 and 82576 supports 2 RSS queues for VMDq */
17391 + switch (hw->mac.type) {
17392 + case e1000_82576:
17394 + num_rx_queues = 2;
17396 + case e1000_82575:
17403 + if (hw->mac.type == e1000_82575)
17407 + for (j = 0; j < (32 * 4); j++) {
17408 + reta.bytes[j & 3] = (j % num_rx_queues) << shift;
17410 + reta.bytes[j & 3] |= num_rx_queues << shift2;
17411 + if ((j & 3) == 3)
17412 + E1000_WRITE_REG(hw, E1000_RETA(j >> 2), reta.dword);
17416 + * Disable raw packet checksumming so that RSS hash is placed in
17417 + * descriptor on writeback. No need to enable TCP/UDP/IP checksum
17418 + * offloads as they are enabled by default
17420 + rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
17421 + rxcsum |= E1000_RXCSUM_PCSD;
17423 + if (adapter->hw.mac.type >= e1000_82576)
17424 + /* Enable Receive Checksum Offload for SCTP */
17425 + rxcsum |= E1000_RXCSUM_CRCOFL;
17427 + /* Don't need to set TUOFL or IPOFL, they default to 1 */
17428 + E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
17430 + /* If VMDq is enabled then we set the appropriate mode for that, else
17431 + * we default to RSS so that an RSS hash is calculated per packet even
17432 + * if we are only using one queue */
17433 + if (adapter->vfs_allocated_count || adapter->VMDQ_queues) {
17434 + if (hw->mac.type > e1000_82575) {
17435 + /* Set the default pool for the PF's first queue */
17436 + u32 vtctl = E1000_READ_REG(hw, E1000_VT_CTL);
17437 + vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
17438 + E1000_VT_CTL_DISABLE_DEF_POOL);
17439 + vtctl |= adapter->vfs_allocated_count <<
17440 + E1000_VT_CTL_DEFAULT_POOL_SHIFT;
17441 + E1000_WRITE_REG(hw, E1000_VT_CTL, vtctl);
17442 + } else if (adapter->RSS_queues > 1) {
17443 + /* set default queue for pool 1 to queue 2 */
17444 + E1000_WRITE_REG(hw, E1000_VT_CTL,
17445 + adapter->RSS_queues << 7);
17447 + if (adapter->RSS_queues > 1)
17448 + mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
17450 + mrqc = E1000_MRQC_ENABLE_VMDQ;
17452 + mrqc = E1000_MRQC_ENABLE_RSS_4Q;
17454 + igb_vmm_control(adapter);
17456 + mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
17457 + E1000_MRQC_RSS_FIELD_IPV4_TCP);
17458 + mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
17459 + E1000_MRQC_RSS_FIELD_IPV6_TCP);
17460 + mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
17461 + E1000_MRQC_RSS_FIELD_IPV6_UDP);
17462 + mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
17463 + E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
17465 + E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
17469 + * igb_setup_rctl - configure the receive control registers
17470 + * @adapter: Board private structure
17472 +void igb_setup_rctl(struct igb_adapter *adapter)
17474 + struct e1000_hw *hw = &adapter->hw;
17477 + rctl = E1000_READ_REG(hw, E1000_RCTL);
17479 + rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
17480 + rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
17482 + rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
17483 + (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
17486 + * enable stripping of CRC. It's unlikely this will break BMC
17487 + * redirection as it did with e1000. Newer features require
17488 + * that the HW strips the CRC.
17490 + rctl |= E1000_RCTL_SECRC;
17493 + /* disable store bad packets and clear size bits. */
17494 + rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
17496 + /* enable LPE to prevent packets larger than max_frame_size */
17497 + rctl |= E1000_RCTL_LPE;
17499 + /* disable rx queue 0 which is enabled by default on 82575 and 82576 */
17500 + E1000_WRITE_REG(hw, E1000_RXDCTL(0), 0);
17502 + /* Attention!!! For SR-IOV PF driver operations you must enable
17503 + * queue drop for all VF and PF queues to prevent head of line blocking
17504 + * if an un-trusted VF does not provide descriptors to hardware.
17506 + if (adapter->vfs_allocated_count) {
17507 + /* set all queue drop enable bits */
17508 + E1000_WRITE_REG(hw, E1000_QDE, 0xFF);
17512 + E1000_WRITE_REG(hw, E1000_RCTL, rctl);
17515 +static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size, int vfn)
17517 + struct e1000_hw *hw = &adapter->hw;
17520 + /* if it isn't the PF check to see if VFs are enabled and
17521 + * increase the size to support vlan tags */
17522 + if (vfn < adapter->vfs_allocated_count &&
17523 + adapter->vf_data[vfn].vlans_enabled)
17524 + size += VLAN_TAG_SIZE;
17526 + vmolr = E1000_READ_REG(hw, E1000_VMOLR(vfn));
17527 + vmolr &= ~E1000_VMOLR_RLPML_MASK;
17528 + vmolr |= size | E1000_VMOLR_LPE;
17529 + E1000_WRITE_REG(hw, E1000_VMOLR(vfn), vmolr);
17536 + * igb_set_rlpml - set receive large packet maximum length
17537 + * @adapter: board private structure
17539 + * Configure the maximum size of packets that will be received
17541 +static void igb_set_rlpml(struct igb_adapter *adapter)
17543 + int max_frame_size = adapter->max_frame_size;
17544 + struct e1000_hw *hw = &adapter->hw;
17545 + u16 pf_id = adapter->vfs_allocated_count;
17547 + if (adapter->vlgrp)
17548 + max_frame_size += VLAN_TAG_SIZE;
17549 + if (adapter->VMDQ_queues) {
17551 + for (i = 0; i < adapter->VMDQ_queues; i++)
17552 + igb_set_vf_rlpml(adapter, max_frame_size, pf_id + i);
17553 + max_frame_size = MAX_JUMBO_FRAME_SIZE;
17555 + E1000_WRITE_REG(hw, E1000_RLPML, max_frame_size);
17559 +static inline void igb_set_vmolr(struct igb_adapter *adapter, int vfn)
17561 + struct e1000_hw *hw = &adapter->hw;
17565 + * This register exists only on 82576 and newer so if we are older then
17566 + * we should exit and do nothing
17568 + if (hw->mac.type < e1000_82576)
17571 + vmolr = E1000_READ_REG(hw, E1000_VMOLR(vfn));
17572 + vmolr |= E1000_VMOLR_AUPE | /* Accept untagged packets */
17573 + E1000_VMOLR_STRVLAN; /* Strip vlan tags */
17575 + /* clear all bits that might not be set */
17576 + vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
17578 + if (adapter->RSS_queues > 1 && vfn == adapter->vfs_allocated_count)
17579 + vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
17581 + * for VMDq only allow the VFs and pool 0 to accept broadcast and
17582 + * multicast packets
17584 + if (vfn <= adapter->vfs_allocated_count)
17585 + vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
17587 + E1000_WRITE_REG(hw, E1000_VMOLR(vfn), vmolr);
17591 + * igb_configure_rx_ring - Configure a receive ring after Reset
17592 + * @adapter: board private structure
17593 + * @ring: receive ring to be configured
17595 + * Configure the Rx unit of the MAC after a reset.
17597 +void igb_configure_rx_ring(struct igb_adapter *adapter,
17598 + struct igb_ring *ring)
17600 + struct e1000_hw *hw = &adapter->hw;
17601 + u64 rdba = ring->dma;
17602 + int reg_idx = ring->reg_idx;
17603 + u32 srrctl, rxdctl;
17605 + /* disable the queue */
17606 + rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(reg_idx));
17607 + E1000_WRITE_REG(hw, E1000_RXDCTL(reg_idx),
17608 + rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
17610 + /* Set DMA base address registers */
17611 + E1000_WRITE_REG(hw, E1000_RDBAL(reg_idx),
17612 + rdba & 0x00000000ffffffffULL);
17613 + E1000_WRITE_REG(hw, E1000_RDBAH(reg_idx), rdba >> 32);
17614 + E1000_WRITE_REG(hw, E1000_RDLEN(reg_idx),
17615 + ring->count * sizeof(union e1000_adv_rx_desc));
17617 + /* initialize head and tail */
17618 + ring->head = hw->hw_addr + E1000_RDH(reg_idx);
17619 + ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
17620 + writel(0, ring->head);
17621 + writel(0, ring->tail);
17623 + /* set descriptor configuration */
17624 + srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
17625 + E1000_SRRCTL_BSIZEPKT_SHIFT;
17626 +#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
17627 + srrctl |= ALIGN(ring->rx_ps_hdr_size, 64) <<
17628 + E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
17629 + if (ring->rx_ps_hdr_size)
17630 + srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
17632 +#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
17633 + srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
17635 + E1000_WRITE_REG(hw, E1000_SRRCTL(reg_idx), srrctl);
17637 + /* set filtering for VMDQ pools */
17638 + igb_set_vmolr(adapter, reg_idx & 0x7);
17640 + /* enable receive descriptor fetching */
17641 + rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(reg_idx));
17642 + rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
17643 + rxdctl &= 0xFFF00000;
17644 + rxdctl |= IGB_RX_PTHRESH;
17645 + rxdctl |= IGB_RX_HTHRESH << 8;
17646 + rxdctl |= IGB_RX_WTHRESH << 16;
17647 + E1000_WRITE_REG(hw, E1000_RXDCTL(reg_idx), rxdctl);
17650 +static inline void igb_set_vlan_stripping(struct igb_adapter *adapter)
17652 + struct e1000_hw *hw = &adapter->hw;
17655 + /* enable replication vlan tag stripping */
17656 + reg = E1000_READ_REG(hw, E1000_RPLOLR);
17657 + reg |= E1000_RPLOLR_STRVLAN;
17658 + E1000_WRITE_REG(hw, E1000_RPLOLR, reg);
17660 + /* notify HW that the MAC is adding vlan tags */
17661 + reg = E1000_READ_REG(hw, E1000_DTXCTL);
17662 + reg |= E1000_DTXCTL_VLAN_ADDED;
17663 + E1000_WRITE_REG(hw, E1000_DTXCTL, reg);
17667 + * igb_configure_rx - Configure receive Unit after Reset
17668 + * @adapter: board private structure
17670 + * Configure the Rx unit of the MAC after a reset.
17672 +static void igb_configure_rx(struct igb_adapter *adapter)
17676 + /* enable vlan tag stripping for replicated packets */
17677 + igb_set_vlan_stripping(adapter);
17679 + /* set UTA to appropriate mode */
17680 + igb_set_uta(adapter);
17682 + /* set the correct pool for the PF default MAC address in entry 0 */
17683 + igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
17684 + adapter->vfs_allocated_count);
17686 + /* Setup the HW Rx Head and Tail Descriptor Pointers and
17687 + * the Base and Length of the Rx Descriptor Ring */
17688 + for (i = 0; i < adapter->num_rx_queues; i++)
17689 + igb_configure_rx_ring(adapter, &adapter->rx_ring[i]);
17693 + * igb_free_tx_resources - Free Tx Resources per Queue
17694 + * @tx_ring: Tx descriptor ring for a specific queue
17696 + * Free all transmit software resources
17698 +void igb_free_tx_resources(struct igb_ring *tx_ring)
17700 + igb_clean_tx_ring(tx_ring);
17702 + vfree(tx_ring->buffer_info);
17703 + tx_ring->buffer_info = NULL;
17705 + /* if not set, then don't free */
17706 + if (!tx_ring->desc)
17709 + pci_free_consistent(tx_ring->pdev, tx_ring->size,
17710 + tx_ring->desc, tx_ring->dma);
17712 + tx_ring->desc = NULL;
17716 + * igb_free_all_tx_resources - Free Tx Resources for All Queues
17717 + * @adapter: board private structure
17719 + * Free all transmit software resources
17721 +static void igb_free_all_tx_resources(struct igb_adapter *adapter)
17725 + for (i = 0; i < adapter->num_tx_queues; i++)
17726 + igb_free_tx_resources(&adapter->tx_ring[i]);
17729 +static void igb_unmap_and_free_tx_resource(struct pci_dev *pdev,
17730 + struct igb_buffer *buffer_info)
17732 + if (buffer_info->page_dma) {
17733 + pci_unmap_page(pdev,
17734 + buffer_info->page_dma,
17735 + buffer_info->length,
17736 + PCI_DMA_TODEVICE);
17737 + buffer_info->page_dma = 0;
17739 + if (buffer_info->dma) {
17740 + pci_unmap_single(pdev,
17741 + buffer_info->dma,
17742 + buffer_info->length,
17743 + PCI_DMA_TODEVICE);
17744 + buffer_info->dma = 0;
17746 + if (buffer_info->skb) {
17747 + dev_kfree_skb_any(buffer_info->skb);
17748 + buffer_info->skb = NULL;
17750 + buffer_info->time_stamp = 0;
17751 + buffer_info->next_to_watch = 0;
17752 + /* buffer_info must be completely set up in the transmit path */
17756 + * igb_clean_tx_ring - Free Tx Buffers
17757 + * @tx_ring: ring to be cleaned
17759 +static void igb_clean_tx_ring(struct igb_ring *tx_ring)
17761 + struct igb_buffer *buffer_info;
17762 + unsigned long size;
17765 + if (!tx_ring->buffer_info)
17767 + /* Free all the Tx ring sk_buffs */
17769 + for (i = 0; i < tx_ring->count; i++) {
17770 + buffer_info = &tx_ring->buffer_info[i];
17771 + igb_unmap_and_free_tx_resource(tx_ring->pdev, buffer_info);
17774 + size = sizeof(struct igb_buffer) * tx_ring->count;
17775 + memset(tx_ring->buffer_info, 0, size);
17777 + /* Zero out the descriptor ring */
17778 + memset(tx_ring->desc, 0, tx_ring->size);
17780 + tx_ring->next_to_use = 0;
17781 + tx_ring->next_to_clean = 0;
17785 + * igb_clean_all_tx_rings - Free Tx Buffers for all queues
17786 + * @adapter: board private structure
17788 +static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
17792 + for (i = 0; i < adapter->num_tx_queues; i++)
17793 + igb_clean_tx_ring(&adapter->tx_ring[i]);
17797 + * igb_free_rx_resources - Free Rx Resources
17798 + * @rx_ring: ring to clean the resources from
17800 + * Free all receive software resources
17802 +void igb_free_rx_resources(struct igb_ring *rx_ring)
17804 + igb_clean_rx_ring(rx_ring);
17806 + vfree(rx_ring->buffer_info);
17807 + rx_ring->buffer_info = NULL;
17810 + vfree(rx_ring->lro_mgr.lro_arr);
17811 + rx_ring->lro_mgr.lro_arr = NULL;
17812 +#endif /* IGB_LRO */
17814 + /* if not set, then don't free */
17815 + if (!rx_ring->desc)
17818 + pci_free_consistent(rx_ring->pdev, rx_ring->size,
17819 + rx_ring->desc, rx_ring->dma);
17821 + rx_ring->desc = NULL;
17825 + * igb_free_all_rx_resources - Free Rx Resources for All Queues
17826 + * @adapter: board private structure
17828 + * Free all receive software resources
17830 +static void igb_free_all_rx_resources(struct igb_adapter *adapter)
17834 + for (i = 0; i < adapter->num_rx_queues; i++)
17835 + igb_free_rx_resources(&adapter->rx_ring[i]);
17839 + * igb_clean_rx_ring - Free Rx Buffers per Queue
17840 + * @rx_ring: ring to free buffers from
17842 +static void igb_clean_rx_ring(struct igb_ring *rx_ring)
17844 + struct igb_buffer *buffer_info;
17845 + unsigned long size;
17848 + if (!rx_ring->buffer_info)
17851 + /* Free all the Rx ring sk_buffs */
17852 + for (i = 0; i < rx_ring->count; i++) {
17853 + buffer_info = &rx_ring->buffer_info[i];
17854 + if (buffer_info->dma) {
17855 + if (rx_ring->rx_ps_hdr_size)
17856 + pci_unmap_single(rx_ring->pdev,
17857 + buffer_info->dma,
17858 + rx_ring->rx_ps_hdr_size,
17859 + PCI_DMA_FROMDEVICE);
17861 + pci_unmap_single(rx_ring->pdev,
17862 + buffer_info->dma,
17863 + rx_ring->rx_buffer_len,
17864 + PCI_DMA_FROMDEVICE);
17865 + buffer_info->dma = 0;
17868 + if (buffer_info->skb) {
17869 + dev_kfree_skb(buffer_info->skb);
17870 + buffer_info->skb = NULL;
17872 +#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
17873 + if (buffer_info->page) {
17874 + if (buffer_info->page_dma)
17875 + pci_unmap_page(rx_ring->pdev,
17876 + buffer_info->page_dma,
17877 + rx_ring->rx_buffer_len,
17878 + PCI_DMA_FROMDEVICE);
17879 + put_page(buffer_info->page);
17880 + buffer_info->page = NULL;
17881 + buffer_info->page_dma = 0;
17882 + buffer_info->page_offset = 0;
17887 + size = sizeof(struct igb_buffer) * rx_ring->count;
17888 + memset(rx_ring->buffer_info, 0, size);
17890 + /* Zero out the descriptor ring */
17891 + memset(rx_ring->desc, 0, rx_ring->size);
17893 + rx_ring->next_to_clean = 0;
17894 + rx_ring->next_to_use = 0;
17898 + * igb_clean_all_rx_rings - Free Rx Buffers for all queues
17899 + * @adapter: board private structure
17901 +static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
17905 + for (i = 0; i < adapter->num_rx_queues; i++)
17906 + igb_clean_rx_ring(&adapter->rx_ring[i]);
17910 + * igb_set_mac - Change the Ethernet Address of the NIC
17911 + * @netdev: network interface device structure
17912 + * @p: pointer to an address structure
17914 + * Returns 0 on success, negative on failure
17916 +static int igb_set_mac(struct net_device *netdev, void *p)
17918 + struct igb_adapter *adapter = netdev_priv(netdev);
17919 + struct e1000_hw *hw = &adapter->hw;
17920 + struct sockaddr *addr = p;
17922 + if (!is_valid_ether_addr(addr->sa_data))
17923 + return -EADDRNOTAVAIL;
17925 + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
17926 + memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
17928 + /* set the correct pool for the new PF MAC address in entry 0 */
17929 + igb_rar_set_qsel(adapter, hw->mac.addr, 0,
17930 + adapter->vfs_allocated_count);
17936 + * igb_write_mc_addr_list - write multicast addresses to MTA
17937 + * @netdev: network interface device structure
17939 + * Writes multicast address list to the MTA hash table.
17940 + * Returns: -ENOMEM on failure
17941 + * 0 on no addresses written
17942 + * X on writing X addresses to MTA
17944 +static int igb_write_mc_addr_list(struct net_device *netdev)
17946 + struct igb_adapter *adapter = netdev_priv(netdev);
17947 + struct e1000_hw *hw = &adapter->hw;
17948 + struct dev_mc_list *mc_ptr = netdev->mc_list;
17953 + if (!netdev->mc_count) {
17954 + /* nothing to program, so clear mc list */
17955 + e1000_update_mc_addr_list(hw, NULL, 0);
17956 + igb_restore_vf_multicasts(adapter);
17960 + mta_list = kzalloc(netdev->mc_count * 6, GFP_ATOMIC);
17964 + /* set vmolr receive overflow multicast bit */
17965 + vmolr |= E1000_VMOLR_ROMPE;
17967 + /* The shared function expects a packed array of only addresses. */
17968 + mc_ptr = netdev->mc_list;
17970 + for (i = 0; i < netdev->mc_count; i++) {
17973 + memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
17974 + mc_ptr = mc_ptr->next;
17976 + e1000_update_mc_addr_list(hw, mta_list, i);
17979 + return netdev->mc_count;
17982 +#ifdef HAVE_SET_RX_MODE
17984 + * igb_write_uc_addr_list - write unicast addresses to RAR table
17985 + * @netdev: network interface device structure
17987 + * Writes unicast address list to the RAR table.
17988 + * Returns: -ENOMEM on failure/insufficient address space
17989 + * 0 on no addresses written
17990 + * X on writing X addresses to the RAR table
17992 +static int igb_write_uc_addr_list(struct net_device *netdev)
17994 + struct igb_adapter *adapter = netdev_priv(netdev);
17995 + struct e1000_hw *hw = &adapter->hw;
17996 + unsigned int vfn = adapter->vfs_allocated_count;
17997 + unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
17998 +#ifndef HAVE_NETDEV_HW_ADDR
17999 + struct dev_mc_list *uc_ptr = netdev->uc_list;
18003 + /* return ENOMEM indicating insufficient memory for addresses */
18004 +#ifndef HAVE_NETDEV_HW_ADDR
18005 + if (netdev->uc_count > rar_entries)
18007 + if (netdev->uc.count > rar_entries)
18011 +#ifdef HAVE_NETDEV_HW_ADDR
18012 + if (netdev->uc.count && rar_entries) {
18013 + struct netdev_hw_addr *ha;
18014 + list_for_each_entry(ha, &netdev->uc.list, list) {
18015 + if (!rar_entries)
18017 + igb_rar_set_qsel(adapter, ha->addr,
18025 + igb_rar_set_qsel(adapter, uc_ptr->da_addr,
18026 + rar_entries--, vfn);
18027 + uc_ptr = uc_ptr->next;
18031 + /* write the addresses in reverse order to avoid write combining */
18032 + for (; rar_entries > 0 ; rar_entries--) {
18033 + E1000_WRITE_REG(hw, E1000_RAH(rar_entries), 0);
18034 + E1000_WRITE_REG(hw, E1000_RAL(rar_entries), 0);
18036 + E1000_WRITE_FLUSH(hw);
18043 + * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
18044 + * @netdev: network interface device structure
18046 + * The set_rx_mode entry point is called whenever the unicast or multicast
18047 + * address lists or the network interface flags are updated. This routine is
18048 + * responsible for configuring the hardware for proper unicast, multicast,
18049 + * promiscuous mode, and all-multi behavior.
18051 +static void igb_set_rx_mode(struct net_device *netdev)
18053 + struct igb_adapter *adapter = netdev_priv(netdev);
18054 + struct e1000_hw *hw = &adapter->hw;
18055 + unsigned int vfn = adapter->vfs_allocated_count;
18056 + u32 rctl, vmolr = 0;
18059 + /* Check for Promiscuous and All Multicast modes */
18060 + rctl = E1000_READ_REG(hw, E1000_RCTL);
18062 + /* clear the affected bits */
18063 + rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
18065 + if (netdev->flags & IFF_PROMISC) {
18066 + rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
18067 + vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
18069 + if (netdev->flags & IFF_ALLMULTI) {
18070 + rctl |= E1000_RCTL_MPE;
18071 + vmolr |= E1000_VMOLR_MPME;
18074 + * Write addresses to the MTA, if the attempt fails
18075 + * then we should just turn on promiscuous mode so
18076 + * that we can at least receive multicast traffic
18078 + count = igb_write_mc_addr_list(netdev);
18080 + rctl |= E1000_RCTL_MPE;
18081 + vmolr |= E1000_VMOLR_MPME;
18082 + } else if (count) {
18083 + vmolr |= E1000_VMOLR_ROMPE;
18086 +#ifdef HAVE_SET_RX_MODE
18088 + * Write addresses to available RAR registers, if there is not
18089 + * sufficient space to store all the addresses then enable
18090 + * unicast promiscuous mode
18092 + count = igb_write_uc_addr_list(netdev);
18094 + rctl |= E1000_RCTL_UPE;
18095 + vmolr |= E1000_VMOLR_ROPE;
18098 + rctl |= E1000_RCTL_VFE;
18100 + E1000_WRITE_REG(hw, E1000_RCTL, rctl);
18103 + * In order to support SR-IOV and eventually VMDq it is necessary to set
18104 + * the VMOLR to enable the appropriate modes. Without this workaround
18105 + * we will have issues with VLAN tag stripping not being done for frames
18106 + * that are only arriving because we are the default pool
18108 + if (hw->mac.type < e1000_82576)
18111 + vmolr |= E1000_READ_REG(hw, E1000_VMOLR(vfn)) &
18112 + ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
18113 + E1000_WRITE_REG(hw, E1000_VMOLR(vfn), vmolr);
18114 + igb_restore_vf_multicasts(adapter);
18117 +/* Need to wait a few seconds after link up to get diagnostic information from
18119 +static void igb_update_phy_info(unsigned long data)
18121 + struct igb_adapter *adapter = (struct igb_adapter *) data;
18122 + e1000_get_phy_info(&adapter->hw);
18126 + * igb_has_link - check shared code for link and determine up/down
18127 + * @adapter: pointer to driver private info
18129 +static bool igb_has_link(struct igb_adapter *adapter)
18131 + struct e1000_hw *hw = &adapter->hw;
18132 + bool link_active = FALSE;
18135 + /* get_link_status is set on LSC (link status) interrupt or
18136 + * rx sequence error interrupt. get_link_status will stay
18137 + * false until the e1000_check_for_link establishes link
18138 + * for copper adapters ONLY
18140 + switch (hw->phy.media_type) {
18141 + case e1000_media_type_copper:
18142 + if (hw->mac.get_link_status) {
18143 + ret_val = e1000_check_for_link(hw);
18144 + link_active = !hw->mac.get_link_status;
18146 + link_active = TRUE;
18149 + case e1000_media_type_internal_serdes:
18150 + ret_val = e1000_check_for_link(hw);
18151 + link_active = hw->mac.serdes_has_link;
18154 + case e1000_media_type_unknown:
18158 + return link_active;
18162 + * igb_watchdog - Timer Call-back
18163 + * @data: pointer to adapter cast into an unsigned long
18165 +static void igb_watchdog(unsigned long data)
18167 + struct igb_adapter *adapter = (struct igb_adapter *)data;
18168 + /* Do the rest outside of interrupt context */
18169 + schedule_work(&adapter->watchdog_task);
18172 +static void igb_watchdog_task(struct work_struct *work)
18174 + struct igb_adapter *adapter = container_of(work,
18175 + struct igb_adapter, watchdog_task);
18176 + struct e1000_hw *hw = &adapter->hw;
18177 + struct net_device *netdev = adapter->netdev;
18178 + struct igb_ring *tx_ring = adapter->tx_ring;
18182 + link = igb_has_link(adapter);
18185 + if (!netif_carrier_ok(netdev)) {
18187 + e1000_get_speed_and_duplex(hw, &adapter->link_speed,
18188 + &adapter->link_duplex);
18190 + ctrl = E1000_READ_REG(hw, E1000_CTRL);
18191 + DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s, "
18192 + "Flow Control: %s\n",
18193 + adapter->link_speed,
18194 + adapter->link_duplex == FULL_DUPLEX ?
18195 + "Full Duplex" : "Half Duplex",
18196 + ((ctrl & E1000_CTRL_TFCE) && (ctrl &
18197 + E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
18198 + E1000_CTRL_RFCE) ? "RX" : ((ctrl &
18199 + E1000_CTRL_TFCE) ? "TX" : "None")));
18201 + /* tweak tx_queue_len according to speed/duplex and
18202 + * adjust the timeout factor */
18203 + netdev->tx_queue_len = adapter->tx_queue_len;
18204 + adapter->tx_timeout_factor = 1;
18205 + switch (adapter->link_speed) {
18207 + netdev->tx_queue_len = 10;
18208 + adapter->tx_timeout_factor = 14;
18211 + netdev->tx_queue_len = 100;
18212 + /* maybe add some timeout factor ? */
18216 + netif_carrier_on(netdev);
18217 + netif_tx_wake_all_queues(netdev);
18219 + igb_ping_all_vfs(adapter);
18221 + /* link state has changed, schedule phy info update */
18222 + if (!test_bit(__IGB_DOWN, &adapter->state))
18223 + mod_timer(&adapter->phy_info_timer,
18224 + round_jiffies(jiffies + 2 * HZ));
18227 + if (netif_carrier_ok(netdev)) {
18228 + adapter->link_speed = 0;
18229 + adapter->link_duplex = 0;
18230 + DPRINTK(LINK, INFO, "NIC Link is Down\n");
18231 + netif_carrier_off(netdev);
18232 + netif_tx_stop_all_queues(netdev);
18234 + igb_ping_all_vfs(adapter);
18236 + /* link state has changed, schedule phy info update */
18237 + if (!test_bit(__IGB_DOWN, &adapter->state))
18238 + mod_timer(&adapter->phy_info_timer,
18239 + round_jiffies(jiffies + 2 * HZ));
18243 + igb_update_stats(adapter);
18245 + if (!netif_carrier_ok(netdev)) {
18246 + if (IGB_DESC_UNUSED(tx_ring) + 1 < tx_ring->count) {
18247 + /* We've lost link, so the controller stops DMA,
18248 + * but we've got queued Tx work that's never going
18249 + * to get done, so reset controller to flush Tx.
18250 + * (Do the reset outside of interrupt context). */
18251 + adapter->tx_timeout_count++;
18252 + schedule_work(&adapter->reset_task);
18256 + /* Force detection of hung controller every watchdog period */
18257 + for (i = 0; i < adapter->num_tx_queues; i++)
18258 + adapter->tx_ring[i].detect_tx_hung = TRUE;
18260 + /* Cause software interrupt to ensure rx ring is cleaned */
18261 + if (adapter->msix_entries) {
18263 + for (i = 0; i < adapter->num_q_vectors; i++) {
18264 + struct igb_q_vector *q_vector = adapter->q_vector[i];
18265 + eics |= q_vector->eims_value;
18267 + E1000_WRITE_REG(hw, E1000_EICS, eics);
18269 + E1000_WRITE_REG(hw, E1000_ICS, E1000_ICS_RXDMT0);
18272 + /* Reset the timer */
18273 + if (!test_bit(__IGB_DOWN, &adapter->state))
18274 + mod_timer(&adapter->watchdog_timer,
18275 + round_jiffies(jiffies + 2 * HZ));
18278 +enum latency_range {
18279 + lowest_latency = 0,
18281 + bulk_latency = 2,
18282 + latency_invalid = 255
18287 + * igb_update_ring_itr - update the dynamic ITR value based on packet size
18289 + * Stores a new ITR value based on strictly on packet size. This
18290 + * algorithm is less sophisticated than that used in igb_update_itr,
18291 + * due to the difficulty of synchronizing statistics across multiple
18292 + * receive rings. The divisors and thresholds used by this function
18293 + * were determined based on theoretical maximum wire speed and testing
18294 + * data, in order to minimize response time while increasing bulk
18296 + * This functionality is controlled by the InterruptThrottleRate module
18297 + * parameter (see igb_param.c)
18298 + * NOTE: This function is called only when operating in a multiqueue
18299 + * receive environment.
18300 + * @q_vector: pointer to q_vector
18302 +static void igb_update_ring_itr(struct igb_q_vector *q_vector)
18304 + int new_val = q_vector->itr_val;
18305 + int avg_wire_size = 0;
18306 + struct igb_adapter *adapter = q_vector->adapter;
18308 + /* For non-gigabit speeds, just fix the interrupt rate at 4000
18309 + * ints/sec - ITR timer value of 120 ticks.
18311 + if (adapter->link_speed != SPEED_1000) {
18313 + goto set_itr_val;
18316 + if (q_vector->rx_ring && q_vector->rx_ring->total_packets) {
18317 + struct igb_ring *ring = q_vector->rx_ring;
18318 + avg_wire_size = ring->total_bytes / ring->total_packets;
18321 + if (q_vector->tx_ring && q_vector->tx_ring->total_packets) {
18322 + struct igb_ring *ring = q_vector->tx_ring;
18323 + avg_wire_size = max_t(u32, avg_wire_size,
18324 + (ring->total_bytes /
18325 + ring->total_packets));
18328 + /* if avg_wire_size isn't set no work was done */
18329 + if (!avg_wire_size)
18330 + goto clear_counts;
18332 + /* Add 24 bytes to size to account for CRC, preamble, and gap */
18333 + avg_wire_size += 24;
18335 + /* Don't starve jumbo frames */
18336 + avg_wire_size = min(avg_wire_size, 3000);
18338 + /* Give a little boost to mid-size frames */
18339 + if ((avg_wire_size > 300) && (avg_wire_size < 1200))
18340 + new_val = avg_wire_size / 3;
18342 + new_val = avg_wire_size / 2;
18345 + if (new_val != q_vector->itr_val) {
18346 + q_vector->itr_val = new_val;
18347 + q_vector->set_itr = 1;
18350 + if (q_vector->rx_ring) {
18351 + q_vector->rx_ring->total_bytes = 0;
18352 + q_vector->rx_ring->total_packets = 0;
18354 + if (q_vector->tx_ring) {
18355 + q_vector->tx_ring->total_bytes = 0;
18356 + q_vector->tx_ring->total_packets = 0;
18361 + * igb_update_itr - update the dynamic ITR value based on statistics
18362 + * Stores a new ITR value based on packets and byte
18363 + * counts during the last interrupt. The advantage of per interrupt
18364 + * computation is faster updates and more accurate ITR for the current
18365 + * traffic pattern. Constants in this function were computed
18366 + * based on theoretical maximum wire speed and thresholds were set based
18367 + * on testing data as well as attempting to minimize response time
18368 + * while increasing bulk throughput.
18369 + * this functionality is controlled by the InterruptThrottleRate module
18370 + * parameter (see igb_param.c)
18371 + * NOTE: These calculations are only valid when operating in a single-
18372 + * queue environment.
18373 + * @adapter: pointer to adapter
18374 + * @itr_setting: current adapter->itr
18375 + * @packets: the number of packets during this measurement interval
18376 + * @bytes: the number of bytes during this measurement interval
18378 +static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting,
18379 + int packets, int bytes)
18381 + unsigned int retval = itr_setting;
18383 + if (packets == 0)
18384 + goto update_itr_done;
18386 + switch (itr_setting) {
18387 + case lowest_latency:
18388 + /* handle TSO and jumbo frames */
18389 + if (bytes/packets > 8000)
18390 + retval = bulk_latency;
18391 + else if ((packets < 5) && (bytes > 512))
18392 + retval = low_latency;
18394 + case low_latency: /* 50 usec aka 20000 ints/s */
18395 + if (bytes > 10000) {
18396 + /* this if handles the TSO accounting */
18397 + if (bytes/packets > 8000) {
18398 + retval = bulk_latency;
18399 + } else if ((packets < 10) || ((bytes/packets) > 1200)) {
18400 + retval = bulk_latency;
18401 + } else if ((packets > 35)) {
18402 + retval = lowest_latency;
18404 + } else if (bytes/packets > 2000) {
18405 + retval = bulk_latency;
18406 + } else if (packets <= 2 && bytes < 512) {
18407 + retval = lowest_latency;
18410 + case bulk_latency: /* 250 usec aka 4000 ints/s */
18411 + if (bytes > 25000) {
18412 + if (packets > 35)
18413 + retval = low_latency;
18414 + } else if (bytes < 1500) {
18415 + retval = low_latency;
18423 +static void igb_set_itr(struct igb_adapter *adapter)
18426 + u32 new_itr = adapter->itr;
18428 + /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
18429 + if (adapter->link_speed != SPEED_1000) {
18432 + goto set_itr_now;
18435 + adapter->rx_itr = igb_update_itr(adapter,
18437 + adapter->rx_ring->total_packets,
18438 + adapter->rx_ring->total_bytes);
18440 + adapter->tx_itr = igb_update_itr(adapter,
18442 + adapter->tx_ring->total_packets,
18443 + adapter->tx_ring->total_bytes);
18444 + current_itr = max(adapter->rx_itr, adapter->tx_itr);
18446 + /* conservative mode (itr 3) eliminates the lowest_latency setting */
18447 + if (adapter->itr_setting == 3 && current_itr == lowest_latency)
18448 + current_itr = low_latency;
18450 + switch (current_itr) {
18451 + /* counts and packets in update_itr are dependent on these numbers */
18452 + case lowest_latency:
18453 + new_itr = 56; /* aka 70,000 ints/sec */
18455 + case low_latency:
18456 + new_itr = 196; /* aka 20,000 ints/sec */
18458 + case bulk_latency:
18459 + new_itr = 980; /* aka 4,000 ints/sec */
18466 + adapter->rx_ring->total_bytes = 0;
18467 + adapter->rx_ring->total_packets = 0;
18468 + adapter->tx_ring->total_bytes = 0;
18469 + adapter->tx_ring->total_packets = 0;
18471 + if (new_itr != adapter->itr) {
18472 + struct igb_q_vector *q_vector = adapter->q_vector[0];
18473 + /* this attempts to bias the interrupt rate towards Bulk
18474 + * by adding intermediate steps when interrupt rate is
18476 + new_itr = new_itr > adapter->itr ?
18477 + max((new_itr * adapter->itr) /
18478 + (new_itr + (adapter->itr >> 2)), new_itr) :
18480 + /* Don't write the value here; it resets the adapter's
18481 + * internal timer, and causes us to delay far longer than
18482 + * we should between interrupts. Instead, we write the ITR
18483 + * value at the beginning of the next interrupt so the timing
18484 + * ends up being correct.
18486 + adapter->itr = new_itr;
18487 + q_vector->itr_val = new_itr;
18488 + q_vector->set_itr = 1;
18494 +#define IGB_TX_FLAGS_CSUM 0x00000001
18495 +#define IGB_TX_FLAGS_VLAN 0x00000002
18496 +#define IGB_TX_FLAGS_TSO 0x00000004
18497 +#define IGB_TX_FLAGS_IPV4 0x00000008
18498 +#define IGB_TX_FLAGS_TSTAMP 0x00000010
18499 +#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
18500 +#define IGB_TX_FLAGS_VLAN_SHIFT 16
18502 +static inline int igb_tso_adv(struct igb_ring *tx_ring,
18503 + struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
18505 +#ifdef NETIF_F_TSO
18506 + struct e1000_adv_tx_context_desc *context_desc;
18509 + struct igb_buffer *buffer_info;
18510 + u32 info = 0, tu_cmd = 0;
18511 + u32 mss_l4len_idx, l4len;
18514 + if (skb_header_cloned(skb)) {
18515 + err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
18520 + l4len = tcp_hdrlen(skb);
18521 + *hdr_len += l4len;
18523 + if (skb->protocol == htons(ETH_P_IP)) {
18524 + struct iphdr *iph = ip_hdr(skb);
18525 + iph->tot_len = 0;
18527 + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
18531 +#ifdef NETIF_F_TSO6
18532 + } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
18533 + ipv6_hdr(skb)->payload_len = 0;
18534 + tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
18535 + &ipv6_hdr(skb)->daddr,
18536 + 0, IPPROTO_TCP, 0);
18540 + i = tx_ring->next_to_use;
18542 + buffer_info = &tx_ring->buffer_info[i];
18543 + context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
18544 + /* VLAN MACLEN IPLEN */
18545 + if (tx_flags & IGB_TX_FLAGS_VLAN)
18546 + info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
18547 + info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
18548 + *hdr_len += skb_network_offset(skb);
18549 + info |= skb_network_header_len(skb);
18550 + *hdr_len += skb_network_header_len(skb);
18551 + context_desc->vlan_macip_lens = cpu_to_le32(info);
18553 + /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
18554 + tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
18556 + if (skb->protocol == htons(ETH_P_IP))
18557 + tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
18558 + tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
18560 + context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
18562 + /* MSS L4LEN IDX */
18563 + mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
18564 + mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
18565 + mss_l4len_idx |= tx_ring->ctx_idx;
18567 + context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
18568 + context_desc->seqnum_seed = 0;
18570 + buffer_info->time_stamp = jiffies;
18571 + buffer_info->next_to_watch = i;
18572 + buffer_info->dma = 0;
18574 + if (i == tx_ring->count)
18577 + tx_ring->next_to_use = i;
18582 +#endif /* NETIF_F_TSO */
18585 +static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
18586 + struct sk_buff *skb, u32 tx_flags)
18588 + struct e1000_adv_tx_context_desc *context_desc;
18589 + struct pci_dev *pdev = tx_ring->pdev;
18590 + struct igb_buffer *buffer_info;
18591 + u32 info = 0, tu_cmd = 0;
18594 + if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
18595 + (tx_flags & IGB_TX_FLAGS_VLAN)) {
18596 + i = tx_ring->next_to_use;
18597 + buffer_info = &tx_ring->buffer_info[i];
18598 + context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
18600 + if (tx_flags & IGB_TX_FLAGS_VLAN)
18601 + info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
18603 + info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
18604 + if (skb->ip_summed == CHECKSUM_PARTIAL)
18605 + info |= skb_network_header_len(skb);
18607 + context_desc->vlan_macip_lens = cpu_to_le32(info);
18609 + tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
18611 + if (skb->ip_summed == CHECKSUM_PARTIAL) {
18614 + if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
18615 + const struct vlan_ethhdr *vhdr =
18616 + (const struct vlan_ethhdr*)skb->data;
18618 + protocol = vhdr->h_vlan_encapsulated_proto;
18620 + protocol = skb->protocol;
18623 + switch (protocol) {
18624 + case __constant_htons(ETH_P_IP):
18625 + tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
18626 + if (ip_hdr(skb)->protocol == IPPROTO_TCP)
18627 + tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
18628 + else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
18629 + tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
18631 +#ifdef NETIF_F_IPV6_CSUM
18632 + case __constant_htons(ETH_P_IPV6):
18633 + /* XXX what about other V6 headers?? */
18634 + if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
18635 + tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
18636 + else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
18637 + tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
18641 + if (unlikely(net_ratelimit())) {
18642 + dev_warn(&pdev->dev,
18643 + "partial checksum but proto=%x!\n",
18650 + context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
18651 + context_desc->seqnum_seed = 0;
18652 + context_desc->mss_l4len_idx = cpu_to_le32(tx_ring->ctx_idx);
18654 + buffer_info->time_stamp = jiffies;
18655 + buffer_info->next_to_watch = i;
18656 + buffer_info->dma = 0;
18659 + if (i == tx_ring->count)
18661 + tx_ring->next_to_use = i;
18668 +#define IGB_MAX_TXD_PWR 16
18669 +#define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR)
18671 +static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
18672 + unsigned int first)
18674 + struct igb_buffer *buffer_info;
18675 + unsigned int len = skb_headlen(skb);
18676 + unsigned int count = 0, i;
18679 + i = tx_ring->next_to_use;
18681 + buffer_info = &tx_ring->buffer_info[i];
18682 + BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
18683 + buffer_info->length = len;
18684 + /* set time_stamp *before* dma to help avoid a possible race */
18685 + buffer_info->time_stamp = jiffies;
18686 + buffer_info->next_to_watch = i;
18687 + buffer_info->dma = pci_map_single(tx_ring->pdev, skb->data, len,
18688 + PCI_DMA_TODEVICE);
18691 + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
18692 + struct skb_frag_struct *frag;
18694 + frag = &skb_shinfo(skb)->frags[f];
18695 + len = frag->size;
18698 + if (i == tx_ring->count)
18701 + buffer_info = &tx_ring->buffer_info[i];
18702 + BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
18703 + buffer_info->length = len;
18704 + buffer_info->time_stamp = jiffies;
18705 + buffer_info->next_to_watch = i;
18706 + buffer_info->page_dma = pci_map_page(tx_ring->pdev,
18708 + frag->page_offset,
18710 + PCI_DMA_TODEVICE);
18715 + tx_ring->buffer_info[i].skb = skb;
18716 + tx_ring->buffer_info[first].next_to_watch = i;
18721 +static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
18722 + int tx_flags, int count, u32 paylen,
18725 + union e1000_adv_tx_desc *tx_desc;
18726 + struct igb_buffer *buffer_info;
18727 + u32 olinfo_status = 0, cmd_type_len;
18728 + unsigned int i = tx_ring->next_to_use;
18730 + cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
18731 + E1000_ADVTXD_DCMD_DEXT);
18733 + if (tx_flags & IGB_TX_FLAGS_VLAN)
18734 + cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
18736 + if (tx_flags & IGB_TX_FLAGS_TSTAMP)
18737 + cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
18739 + if (tx_flags & IGB_TX_FLAGS_TSO) {
18740 + cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
18742 + /* insert tcp checksum */
18743 + olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
18745 + /* insert ip checksum */
18746 + if (tx_flags & IGB_TX_FLAGS_IPV4)
18747 + olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
18749 + } else if (tx_flags & IGB_TX_FLAGS_CSUM) {
18750 + olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
18753 + if (tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_TSO |
18754 + IGB_TX_FLAGS_VLAN))
18755 + olinfo_status |= tx_ring->ctx_idx;
18757 + olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
18760 + buffer_info = &tx_ring->buffer_info[i];
18761 + tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
18762 + tx_desc->read.buffer_addr = buffer_info->dma ?
18763 + cpu_to_le64(buffer_info->dma) :
18764 + cpu_to_le64(buffer_info->page_dma);
18765 + tx_desc->read.cmd_type_len =
18766 + cpu_to_le32(cmd_type_len | buffer_info->length);
18767 + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
18770 + if (i == tx_ring->count)
18772 + } while (count > 0);
18774 + tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD);
18775 + /* Force memory writes to complete before letting h/w
18776 + * know there are new descriptors to fetch. (Only
18777 + * applicable for weak-ordered memory model archs,
18778 + * such as IA-64). */
18781 + tx_ring->next_to_use = i;
18782 + writel(i, tx_ring->tail);
18783 + /* we need this if more than one processor can write to our tail
18784 + * at a time, it synchronizes IO on IA64/Altix systems */
18788 +static int __igb_maybe_stop_tx(struct net_device *netdev,
18789 + struct igb_ring *tx_ring, int size)
18791 + if (netif_is_multiqueue(netdev))
18792 + netif_stop_subqueue(netdev, tx_ring->queue_index);
18794 + netif_stop_queue(netdev);
18796 + /* Herbert's original patch had:
18797 + * smp_mb__after_netif_stop_queue();
18798 + * but since that doesn't exist yet, just open code it. */
18801 + /* We need to check again in a case another CPU has just
18802 + * made room available. */
18803 + if (IGB_DESC_UNUSED(tx_ring) < size)
18806 + /* A reprieve! */
18807 + if (netif_is_multiqueue(netdev))
18808 + netif_wake_subqueue(netdev, tx_ring->queue_index);
18810 + netif_wake_queue(netdev);
18811 + ++tx_ring->restart_queue;
18815 +static int igb_maybe_stop_tx(struct net_device *netdev,
18816 + struct igb_ring *tx_ring, int size)
18818 + if (IGB_DESC_UNUSED(tx_ring) >= size)
18820 + return __igb_maybe_stop_tx(netdev, tx_ring, size);
18823 +#define TXD_USE_COUNT(S) (((S) >> (IGB_MAX_TXD_PWR)) + 1)
18825 +static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
18826 + struct net_device *netdev,
18827 + struct igb_ring *tx_ring)
18829 + struct igb_adapter *adapter = netdev_priv(netdev);
18830 + unsigned int first;
18831 + unsigned int tx_flags = 0;
18834 +#ifdef SIOCSHWTSTAMP
18835 + union skb_shared_tx *shtx = skb_tx(skb);
18838 + if (test_bit(__IGB_DOWN, &adapter->state)) {
18839 + dev_kfree_skb_any(skb);
18840 + return NETDEV_TX_OK;
18843 + if (skb->len <= 0) {
18844 + dev_kfree_skb_any(skb);
18845 + return NETDEV_TX_OK;
18848 + /* need: 1 descriptor per page,
18849 + * + 2 desc gap to keep tail from touching head,
18850 + * + 1 desc for skb->data,
18851 + * + 1 desc for context descriptor,
18852 + * otherwise try next time */
18853 + if (igb_maybe_stop_tx(netdev, tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
18854 + /* this is a hard error */
18855 + return NETDEV_TX_BUSY;
18858 +#ifdef SIOCSHWTSTAMP
18859 + if (unlikely(shtx->hardware)) {
18860 + shtx->in_progress = 1;
18861 + tx_flags |= IGB_TX_FLAGS_TSTAMP;
18865 + if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
18866 + tx_flags |= IGB_TX_FLAGS_VLAN;
18867 + tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
18870 + if (skb->protocol == htons(ETH_P_IP))
18871 + tx_flags |= IGB_TX_FLAGS_IPV4;
18873 + first = tx_ring->next_to_use;
18874 +#ifdef NETIF_F_TSO
18875 + if (skb_is_gso(skb)) {
18876 + tso = igb_tso_adv(tx_ring, skb, tx_flags, &hdr_len);
18879 + dev_kfree_skb_any(skb);
18880 + return NETDEV_TX_OK;
18886 + tx_flags |= IGB_TX_FLAGS_TSO;
18887 + else if (igb_tx_csum_adv(tx_ring, skb, tx_flags) &&
18888 + (skb->ip_summed == CHECKSUM_PARTIAL))
18889 + tx_flags |= IGB_TX_FLAGS_CSUM;
18891 + igb_tx_queue_adv(tx_ring, tx_flags,
18892 + igb_tx_map_adv(tx_ring, skb, first),
18893 + skb->len, hdr_len);
18895 + netdev->trans_start = jiffies;
18897 + /* Make sure there is space in the ring for the next send. */
18898 + igb_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 4);
18900 + return NETDEV_TX_OK;
18903 +static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *netdev)
18905 + struct igb_adapter *adapter = netdev_priv(netdev);
18906 + struct igb_ring *tx_ring;
18910 + r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1);
18911 + tx_ring = adapter->multi_tx_table[r_idx];
18913 + tx_ring = &adapter->tx_ring[0];
18916 + /* This goes back to the question of how to logically map a tx queue
18917 + * to a flow. Right now, performance is impacted slightly negatively
18918 + * if using multiple tx queues. If the stack breaks away from a
18919 + * single qdisc implementation, we can look at this again. */
18920 + return igb_xmit_frame_ring_adv(skb, netdev, tx_ring);
18924 + * igb_tx_timeout - Respond to a Tx Hang
18925 + * @netdev: network interface device structure
18927 +static void igb_tx_timeout(struct net_device *netdev)
18929 + struct igb_adapter *adapter = netdev_priv(netdev);
18930 + struct e1000_hw *hw = &adapter->hw;
18932 + /* Do the reset outside of interrupt context */
18933 + adapter->tx_timeout_count++;
18935 + schedule_work(&adapter->reset_task);
18936 + E1000_WRITE_REG(hw, E1000_EICS,
18937 + (adapter->eims_enable_mask & ~adapter->eims_other));
18940 +static void igb_reset_task(struct work_struct *work)
18942 + struct igb_adapter *adapter;
18943 + adapter = container_of(work, struct igb_adapter, reset_task);
18945 + igb_reinit_locked(adapter);
18949 + * igb_get_stats - Get System Network Statistics
18950 + * @netdev: network interface device structure
18952 + * Returns the address of the device statistics structure.
18953 + * The statistics are actually updated from the timer callback.
18955 +static struct net_device_stats *igb_get_stats(struct net_device *netdev)
18957 + struct igb_adapter *adapter = netdev_priv(netdev);
18959 + /* only return the current stats */
18960 + return &adapter->net_stats;
18964 + * igb_change_mtu - Change the Maximum Transfer Unit
18965 + * @netdev: network interface device structure
18966 + * @new_mtu: new value for maximum frame size
18968 + * Returns 0 on success, negative on failure
18970 +static int igb_change_mtu(struct net_device *netdev, int new_mtu)
18972 + struct igb_adapter *adapter = netdev_priv(netdev);
18973 + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
18974 + u32 rx_buffer_len, i;
18975 + u16 rx_ps_hdr_size = 0;
18977 + if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
18978 + DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
18982 +#define MAX_STD_JUMBO_FRAME_SIZE 9234
18983 + if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
18984 + DPRINTK(PROBE, ERR, "MTU > 9216 not supported.\n");
18988 + while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
18991 + /* NOTE: dev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
18992 + * means we reserve 2 more, this pushes us to allocate from the next
18993 + * larger slab size.
18994 + * i.e. RXBUFFER_2048 --> size-4096 slab
18997 + /* igb_down has a dependency on max_frame_size */
18998 + adapter->max_frame_size = max_frame;
19000 + if (max_frame <= IGB_RXBUFFER_1024)
19001 + rx_buffer_len = IGB_RXBUFFER_1024;
19002 + else if (max_frame <= IGB_RXBUFFER_2048)
19003 + rx_buffer_len = IGB_RXBUFFER_2048;
19004 +#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
19006 +#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
19007 + rx_buffer_len = IGB_RXBUFFER_16384;
19009 + rx_buffer_len = PAGE_SIZE / 2;
19012 + else if (max_frame <= IGB_RXBUFFER_4096)
19013 + rx_buffer_len = IGB_RXBUFFER_4096;
19014 + else if (max_frame <= IGB_RXBUFFER_8192)
19015 + rx_buffer_len = IGB_RXBUFFER_8192;
19017 + rx_buffer_len = IGB_RXBUFFER_16384;
19020 + /* adjust allocation if LPE protects us, and we aren't using SBP */
19021 + if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
19022 + (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))
19023 + rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
19025 + if (netif_running(netdev))
19026 + igb_down(adapter);
19028 + DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n",
19029 + netdev->mtu, new_mtu);
19030 + netdev->mtu = new_mtu;
19032 +#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
19033 + /* 82575 and greater support packet-split where the protocol
19034 + * header is placed in skb->data and the packet data is
19035 + * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
19036 + * In the case of a non-split, skb->data is linearly filled,
19037 + * followed by the page buffers. Therefore, skb->data is
19038 + * sized to hold the largest protocol header.
19040 + /* allocations using alloc_page take too long for regular MTU
19041 + * so only enable packet split for jumbo frames */
19042 + if (new_mtu > ETH_DATA_LEN)
19043 + rx_ps_hdr_size = IGB_RXBUFFER_128;
19044 +#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
19046 + for (i = 0; i < adapter->num_rx_queues; i++) {
19047 + struct igb_ring *rx_ring = &adapter->rx_ring[i];
19048 + rx_ring->rx_buffer_len = rx_buffer_len;
19049 + rx_ring->rx_ps_hdr_size = rx_ps_hdr_size;
19052 + if (netif_running(netdev))
19055 + igb_reset(adapter);
19057 + clear_bit(__IGB_RESETTING, &adapter->state);
19063 + * igb_update_stats - Update the board statistics counters
19064 + * @adapter: board private structure
19067 +void igb_update_stats(struct igb_adapter *adapter)
19069 + struct e1000_hw *hw = &adapter->hw;
19070 +#ifdef HAVE_PCI_ERS
19071 + struct pci_dev *pdev = adapter->pdev;
19075 +#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
19078 + * Prevent stats update while adapter is being reset, or if the pci
19079 + * connection is down.
19081 + if (adapter->link_speed == 0)
19083 +#ifdef HAVE_PCI_ERS
19084 + if (pci_channel_offline(pdev))
19088 + /* read stats registers */
19089 + adapter->stats.crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
19090 + adapter->stats.gprc += E1000_READ_REG(hw, E1000_GPRC);
19091 + adapter->stats.gorc += E1000_READ_REG(hw, E1000_GORCL);
19092 + E1000_READ_REG(hw, E1000_GORCH); /* clear GORCL */
19093 + adapter->stats.bprc += E1000_READ_REG(hw, E1000_BPRC);
19094 + adapter->stats.mprc += E1000_READ_REG(hw, E1000_MPRC);
19095 + adapter->stats.roc += E1000_READ_REG(hw, E1000_ROC);
19097 + adapter->stats.prc64 += E1000_READ_REG(hw, E1000_PRC64);
19098 + adapter->stats.prc127 += E1000_READ_REG(hw, E1000_PRC127);
19099 + adapter->stats.prc255 += E1000_READ_REG(hw, E1000_PRC255);
19100 + adapter->stats.prc511 += E1000_READ_REG(hw, E1000_PRC511);
19101 + adapter->stats.prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
19102 + adapter->stats.prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
19103 + adapter->stats.symerrs += E1000_READ_REG(hw, E1000_SYMERRS);
19104 + adapter->stats.sec += E1000_READ_REG(hw, E1000_SEC);
19106 + adapter->stats.mpc += E1000_READ_REG(hw, E1000_MPC);
19107 + adapter->stats.scc += E1000_READ_REG(hw, E1000_SCC);
19108 + adapter->stats.ecol += E1000_READ_REG(hw, E1000_ECOL);
19109 + adapter->stats.mcc += E1000_READ_REG(hw, E1000_MCC);
19110 + adapter->stats.latecol += E1000_READ_REG(hw, E1000_LATECOL);
19111 + adapter->stats.dc += E1000_READ_REG(hw, E1000_DC);
19112 + adapter->stats.rlec += E1000_READ_REG(hw, E1000_RLEC);
19113 + adapter->stats.xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
19114 + adapter->stats.xontxc += E1000_READ_REG(hw, E1000_XONTXC);
19115 + adapter->stats.xoffrxc += E1000_READ_REG(hw, E1000_XOFFRXC);
19116 + adapter->stats.xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
19117 + adapter->stats.fcruc += E1000_READ_REG(hw, E1000_FCRUC);
19118 + adapter->stats.gptc += E1000_READ_REG(hw, E1000_GPTC);
19119 + adapter->stats.gotc += E1000_READ_REG(hw, E1000_GOTCL);
19120 + E1000_READ_REG(hw, E1000_GOTCH); /* clear GOTCL */
19121 + adapter->stats.rnbc += E1000_READ_REG(hw, E1000_RNBC);
19122 + adapter->stats.ruc += E1000_READ_REG(hw, E1000_RUC);
19123 + adapter->stats.rfc += E1000_READ_REG(hw, E1000_RFC);
19124 + adapter->stats.rjc += E1000_READ_REG(hw, E1000_RJC);
19125 + adapter->stats.tor += E1000_READ_REG(hw, E1000_TORH);
19126 + adapter->stats.tot += E1000_READ_REG(hw, E1000_TOTH);
19127 + adapter->stats.tpr += E1000_READ_REG(hw, E1000_TPR);
19129 + adapter->stats.ptc64 += E1000_READ_REG(hw, E1000_PTC64);
19130 + adapter->stats.ptc127 += E1000_READ_REG(hw, E1000_PTC127);
19131 + adapter->stats.ptc255 += E1000_READ_REG(hw, E1000_PTC255);
19132 + adapter->stats.ptc511 += E1000_READ_REG(hw, E1000_PTC511);
19133 + adapter->stats.ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
19134 + adapter->stats.ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
19136 + adapter->stats.mptc += E1000_READ_REG(hw, E1000_MPTC);
19137 + adapter->stats.bptc += E1000_READ_REG(hw, E1000_BPTC);
19139 + adapter->stats.tpt += E1000_READ_REG(hw, E1000_TPT);
19140 + adapter->stats.colc += E1000_READ_REG(hw, E1000_COLC);
19142 + adapter->stats.algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
19143 + adapter->stats.rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
19144 + adapter->stats.tncrs += E1000_READ_REG(hw, E1000_TNCRS);
19145 + adapter->stats.tsctc += E1000_READ_REG(hw, E1000_TSCTC);
19146 + adapter->stats.tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);
19148 + adapter->stats.iac += E1000_READ_REG(hw, E1000_IAC);
19149 + adapter->stats.icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);
19150 + adapter->stats.icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
19151 + adapter->stats.icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
19152 + adapter->stats.ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
19153 + adapter->stats.ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
19154 + adapter->stats.ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
19155 + adapter->stats.ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
19156 + adapter->stats.icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
19158 + /* Fill out the OS statistics structure */
19159 + adapter->net_stats.multicast = adapter->stats.mprc;
19160 + adapter->net_stats.collisions = adapter->stats.colc;
19164 + /* RLEC on some newer hardware can be incorrect so build
19165 + * our own version based on RUC and ROC */
19166 + adapter->net_stats.rx_errors = adapter->stats.rxerrc +
19167 + adapter->stats.crcerrs + adapter->stats.algnerrc +
19168 + adapter->stats.ruc + adapter->stats.roc +
19169 + adapter->stats.cexterr;
19170 + adapter->net_stats.rx_length_errors = adapter->stats.ruc +
19171 + adapter->stats.roc;
19172 + adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
19173 + adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
19174 + adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
19177 + adapter->net_stats.tx_errors = adapter->stats.ecol +
19178 + adapter->stats.latecol;
19179 + adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
19180 + adapter->net_stats.tx_window_errors = adapter->stats.latecol;
19181 + adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;
19183 + /* Tx Dropped needs to be maintained elsewhere */
19186 + if (hw->phy.media_type == e1000_media_type_copper) {
19187 + if ((adapter->link_speed == SPEED_1000) &&
19188 + (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
19189 + phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
19190 + adapter->phy_stats.idle_errors += phy_tmp;
19194 + /* Management Stats */
19195 + adapter->stats.mgptc += E1000_READ_REG(hw, E1000_MGTPTC);
19196 + adapter->stats.mgprc += E1000_READ_REG(hw, E1000_MGTPRC);
19197 + adapter->stats.mgpdc += E1000_READ_REG(hw, E1000_MGTPDC);
19200 +static irqreturn_t igb_msix_other(int irq, void *data)
19202 + struct igb_adapter *adapter = data;
19203 + struct e1000_hw *hw = &adapter->hw;
19204 + u32 icr = E1000_READ_REG(hw, E1000_ICR);
19205 + /* reading ICR causes bit 31 of EICR to be cleared */
19207 + if (icr & E1000_ICR_DOUTSYNC) {
19208 + /* HW is reporting DMA is out of sync */
19209 + adapter->stats.doosync++;
19212 + /* Check for a mailbox event */
19213 + if (icr & E1000_ICR_VMMB)
19214 + igb_msg_task(adapter);
19216 + if (!(icr & E1000_ICR_LSC))
19217 + goto no_link_interrupt;
19218 + hw->mac.get_link_status = 1;
19219 + /* guard against interrupt when we're going down */
19220 + if (!test_bit(__IGB_DOWN, &adapter->state))
19221 + mod_timer(&adapter->watchdog_timer, jiffies + 1);
19223 +no_link_interrupt:
19224 + if (adapter->vfs_allocated_count)
19225 + E1000_WRITE_REG(hw, E1000_IMS,
19228 + E1000_IMS_DOUTSYNC);
19230 + E1000_WRITE_REG(hw, E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC);
19231 + E1000_WRITE_REG(hw, E1000_EIMS, adapter->eims_other);
19233 + return IRQ_HANDLED;
19237 +static void igb_write_itr(struct igb_q_vector *q_vector)
19239 + u32 itr_val = q_vector->itr_val & 0x7FFC;
19241 + if (!q_vector->set_itr)
19247 + if (q_vector->itr_shift)
19248 + itr_val |= itr_val << q_vector->itr_shift;
19250 + itr_val |= 0x8000000;
19252 + writel(itr_val, q_vector->itr_register);
19253 + q_vector->set_itr = 0;
19256 +static irqreturn_t igb_msix_ring(int irq, void *data)
19258 + struct igb_q_vector *q_vector = data;
19260 + /* Write the ITR value calculated from the previous interrupt. */
19261 + igb_write_itr(q_vector);
19263 + napi_schedule(&q_vector->napi);
19265 + return IRQ_HANDLED;
19269 +static void igb_update_dca(struct igb_q_vector *q_vector)
19271 + struct igb_adapter *adapter = q_vector->adapter;
19272 + struct e1000_hw *hw = &adapter->hw;
19273 + int cpu = get_cpu();
19275 + if (q_vector->cpu == cpu)
19276 + goto out_no_update;
19278 + if (q_vector->tx_ring) {
19279 + int q = q_vector->tx_ring->reg_idx;
19280 + u32 dca_txctrl = E1000_READ_REG(hw, E1000_DCA_TXCTRL(q));
19281 + if (hw->mac.type == e1000_82575) {
19282 + dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
19283 + dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
19285 + dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
19286 + dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
19287 + E1000_DCA_TXCTRL_CPUID_SHIFT_82576;
19289 + dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
19290 + E1000_WRITE_REG(hw, E1000_DCA_TXCTRL(q), dca_txctrl);
19292 + if (q_vector->rx_ring) {
19293 + int q = q_vector->rx_ring->reg_idx;
19294 + u32 dca_rxctrl = E1000_READ_REG(hw, E1000_DCA_RXCTRL(q));
19295 + if (hw->mac.type == e1000_82575) {
19296 + dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
19297 + dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
19299 + dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
19300 + dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
19301 + E1000_DCA_RXCTRL_CPUID_SHIFT_82576;
19303 + dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
19304 + dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
19305 + dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
19306 + E1000_WRITE_REG(hw, E1000_DCA_RXCTRL(q), dca_rxctrl);
19308 + q_vector->cpu = cpu;
19313 +static void igb_setup_dca(struct igb_adapter *adapter)
19315 + struct e1000_hw *hw = &adapter->hw;
19318 + if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
19321 + /* Always use CB2 mode, difference is masked in the CB driver. */
19322 + E1000_WRITE_REG(hw, E1000_DCA_CTRL, 2);
19324 + for (i = 0; i < adapter->num_q_vectors; i++) {
19325 + struct igb_q_vector *q_vector = adapter->q_vector[i];
19326 + q_vector->cpu = -1;
19327 + igb_update_dca(q_vector);
19331 +static int __igb_notify_dca(struct device *dev, void *data)
19333 + struct net_device *netdev = dev_get_drvdata(dev);
19334 + struct igb_adapter *adapter = netdev_priv(netdev);
19335 + struct e1000_hw *hw = &adapter->hw;
19336 + unsigned long event = *(unsigned long *)data;
19339 + case DCA_PROVIDER_ADD:
19340 + /* if already enabled, don't do it again */
19341 + if (adapter->flags & IGB_FLAG_DCA_ENABLED)
19343 + if (dca_add_requester(dev) == E1000_SUCCESS) {
19344 + adapter->flags |= IGB_FLAG_DCA_ENABLED;
19345 + DPRINTK(PROBE, INFO, "DCA enabled\n");
19346 + igb_setup_dca(adapter);
19349 + /* Fall Through since DCA is disabled. */
19350 + case DCA_PROVIDER_REMOVE:
19351 + if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
19352 + /* without this a class_device is left
19353 + * hanging around in the sysfs model */
19354 + dca_remove_requester(dev);
19355 + DPRINTK(PROBE, INFO, "DCA disabled\n");
19356 + adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
19357 + E1000_WRITE_REG(hw, E1000_DCA_CTRL, 1);
19362 + return E1000_SUCCESS;
19365 +static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
19370 + ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
19371 + __igb_notify_dca);
19373 + return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
19375 +#endif /* IGB_DCA */
19377 +static void igb_ping_all_vfs(struct igb_adapter *adapter)
19379 + struct e1000_hw *hw = &adapter->hw;
19383 + for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
19384 + ping = E1000_PF_CONTROL_MSG;
19385 + if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
19386 + ping |= E1000_VT_MSGTYPE_CTS;
19387 + e1000_write_mbx(hw, &ping, 1, i);
19391 +static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
19394 + struct e1000_hw *hw = &adapter->hw;
19395 + u32 vmolr = E1000_READ_REG(hw, E1000_VMOLR(vf));
19396 + struct vf_data_storage *vf_data = &adapter->vf_data[vf];
19398 + vf_data->flags |= ~(IGB_VF_FLAG_UNI_PROMISC |
19399 + IGB_VF_FLAG_MULTI_PROMISC);
19400 + vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
19402 +#ifdef IGB_ENABLE_VF_PROMISC
19403 + if (*msgbuf & E1000_VF_SET_PROMISC_UNICAST) {
19404 + vmolr |= E1000_VMOLR_ROPE;
19405 + vf_data->flags |= IGB_VF_FLAG_UNI_PROMISC;
19406 + *msgbuf &= ~E1000_VF_SET_PROMISC_UNICAST;
19409 + if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
19410 + vmolr |= E1000_VMOLR_MPME;
19411 + *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
19414 + * if we have hashes and we are clearing a multicast promisc
19415 + * flag we need to write the hashes to the MTA as this step
19416 + * was previously skipped
19418 + if (vf_data->num_vf_mc_hashes > 30) {
19419 + vmolr |= E1000_VMOLR_MPME;
19420 + } else if (vf_data->num_vf_mc_hashes) {
19422 + vmolr |= E1000_VMOLR_ROMPE;
19423 + for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
19424 + hw->mac.ops.mta_set(hw,
19425 + vf_data->vf_mc_hashes[j]);
19429 + E1000_WRITE_REG(hw, E1000_VMOLR(vf), vmolr);
19431 + /* there are flags left unprocessed, likely not supported */
19432 + if (*msgbuf & E1000_VT_MSGINFO_MASK)
19439 +static int igb_set_vf_multicasts(struct igb_adapter *adapter,
19440 + u32 *msgbuf, u32 vf)
19442 + int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
19443 + u16 *hash_list = (u16 *)&msgbuf[1];
19444 + struct vf_data_storage *vf_data = &adapter->vf_data[vf];
19447 + /* salt away the number of multicast addresses assigned
19448 + * to this VF for later use to restore when the PF multicast
19451 + vf_data->num_vf_mc_hashes = n;
19453 + /* only up to 30 hash values supported */
19457 + /* store the hashes for later use */
19458 + for (i = 0; i < n; i++)
19459 + vf_data->vf_mc_hashes[i] = hash_list[i];
19461 + /* Flush and reset the mta with the new values */
19462 + igb_set_rx_mode(adapter->netdev);
19467 +static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
19469 + struct e1000_hw *hw = &adapter->hw;
19470 + struct vf_data_storage *vf_data;
19473 + for (i = 0; i < adapter->vfs_allocated_count; i++) {
19474 + u32 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
19475 + vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
19477 + vf_data = &adapter->vf_data[i];
19479 + if ((vf_data->num_vf_mc_hashes > 30) ||
19480 + (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
19481 + vmolr |= E1000_VMOLR_MPME;
19482 + } else if (vf_data->num_vf_mc_hashes) {
19483 + vmolr |= E1000_VMOLR_ROMPE;
19484 + for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
19485 + hw->mac.ops.mta_set(hw,
19486 + vf_data->vf_mc_hashes[j]);
19488 + E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
19492 +static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
19494 + struct e1000_hw *hw = &adapter->hw;
19495 + u32 pool_mask, reg, vid;
19496 + u16 vlan_default;
19499 + pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
19501 + /* Find the vlan filter for this id */
19502 + for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
19503 + reg = E1000_READ_REG(hw, E1000_VLVF(i));
19505 + /* remove the vf from the pool */
19506 + reg &= ~pool_mask;
19508 + /* if pool is empty then remove entry from vfta */
19509 + if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
19510 + (reg & E1000_VLVF_VLANID_ENABLE)) {
19512 + vid = reg & E1000_VLVF_VLANID_MASK;
19513 + igb_vfta_set(hw, vid, FALSE);
19516 + E1000_WRITE_REG(hw, E1000_VLVF(i), reg);
19519 + adapter->vf_data[vf].vlans_enabled = 0;
19521 + vlan_default = adapter->vf_data[vf].default_vf_vlan_id;
19522 + if (vlan_default)
19523 + igb_vlvf_set(adapter, vlan_default, true, vf);
19526 +s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
19528 + struct e1000_hw *hw = &adapter->hw;
19531 + /* The vlvf table only exists on 82576 hardware and newer */
19532 + if (hw->mac.type < e1000_82576)
19535 + /* we only need to do this if VMDq is enabled */
19536 + if (!adapter->VMDQ_queues)
19539 + /* Find the vlan filter for this id */
19540 + for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
19541 + reg = E1000_READ_REG(hw, E1000_VLVF(i));
19542 + if ((reg & E1000_VLVF_VLANID_ENABLE) &&
19543 + vid == (reg & E1000_VLVF_VLANID_MASK))
19548 + if (i == E1000_VLVF_ARRAY_SIZE) {
19549 + /* Did not find a matching VLAN ID entry that was
19550 + * enabled. Search for a free filter entry, i.e.
19551 + * one without the enable bit set
19553 + for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
19554 + reg = E1000_READ_REG(hw, E1000_VLVF(i));
19555 + if (!(reg & E1000_VLVF_VLANID_ENABLE))
19559 + if (i < E1000_VLVF_ARRAY_SIZE) {
19560 + /* Found an enabled/available entry */
19561 + reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
19563 + /* if !enabled we need to set this up in vfta */
19564 + if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
19565 + /* add VID to filter table */
19566 + igb_vfta_set(hw, vid, TRUE);
19567 + reg |= E1000_VLVF_VLANID_ENABLE;
19569 + reg &= ~E1000_VLVF_VLANID_MASK;
19571 + E1000_WRITE_REG(hw, E1000_VLVF(i), reg);
19573 + printk(KERN_INFO "VLAN Enabled for vf %d\n", vf);
19574 + /* do not modify RLPML for PF devices */
19575 + if (vf >= adapter->vfs_allocated_count)
19576 + return E1000_SUCCESS;
19578 + if (!adapter->vf_data[vf].vlans_enabled) {
19580 + reg = E1000_READ_REG(hw, E1000_VMOLR(vf));
19581 + size = reg & E1000_VMOLR_RLPML_MASK;
19583 + reg &= ~E1000_VMOLR_RLPML_MASK;
19585 + E1000_WRITE_REG(hw, E1000_VMOLR(vf), reg);
19588 + adapter->vf_data[vf].vlans_enabled++;
19589 + return E1000_SUCCESS;
19592 + if (i < E1000_VLVF_ARRAY_SIZE) {
19593 + /* remove vf from the pool */
19594 + reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
19595 + /* if pool is empty then remove entry from vfta */
19596 + if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
19598 + igb_vfta_set(hw, vid, FALSE);
19600 + E1000_WRITE_REG(hw, E1000_VLVF(i), reg);
19602 + /* do not modify RLPML for PF devices */
19603 + if (vf >= adapter->vfs_allocated_count)
19604 + return E1000_SUCCESS;
19606 + adapter->vf_data[vf].vlans_enabled--;
19607 + if (!adapter->vf_data[vf].vlans_enabled) {
19609 + reg = E1000_READ_REG(hw, E1000_VMOLR(vf));
19610 + size = reg & E1000_VMOLR_RLPML_MASK;
19612 + reg &= ~E1000_VMOLR_RLPML_MASK;
19614 + E1000_WRITE_REG(hw, E1000_VMOLR(vf), reg);
19616 + return E1000_SUCCESS;
19622 +static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
19624 + int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
19625 + int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
19627 + return igb_vlvf_set(adapter, vid, add, vf);
19630 +static inline void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
19632 + /* clear all flags */
19633 + adapter->vf_data[vf].flags = 0;
19634 + adapter->vf_data[vf].last_nack = jiffies;
19636 + /* reset offloads to defaults */
19637 + igb_set_vmolr(adapter, vf);
19639 + /* reset vlans for device */
19640 + igb_clear_vf_vfta(adapter, vf);
19642 + /* reset multicast table array for vf */
19643 + adapter->vf_data[vf].num_vf_mc_hashes = 0;
19645 + /* Flush and reset the mta with the new values */
19646 + igb_set_rx_mode(adapter->netdev);
19649 +static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
19651 + struct e1000_hw *hw = &adapter->hw;
19652 + unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
19653 + int rar_entry = hw->mac.rar_entry_count - (vf + 1);
19654 + u32 reg, msgbuf[3];
19655 + u8 *addr = (u8 *)(&msgbuf[1]);
19657 + /* process all the same items cleared in a function level reset */
19658 + igb_vf_reset_event(adapter, vf);
19660 + /* set vf mac address */
19661 + igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);
19663 + /* enable transmit and receive for vf */
19664 + reg = E1000_READ_REG(hw, E1000_VFTE);
19665 + E1000_WRITE_REG(hw, E1000_VFTE, reg | (1 << vf));
19666 + reg = E1000_READ_REG(hw, E1000_VFRE);
19667 + E1000_WRITE_REG(hw, E1000_VFRE, reg | (1 << vf));
19669 + adapter->vf_data[vf].flags = IGB_VF_FLAG_CTS;
19671 + /* reply to reset with ack and vf mac address */
19672 + msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
19673 + memcpy(addr, vf_mac, 6);
19674 + e1000_write_mbx(hw, msgbuf, 3, vf);
19677 +static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
19679 + unsigned char *addr = (char *)&msg[1];
19682 + if (is_valid_ether_addr(addr))
19683 + err = igb_set_vf_mac(adapter, vf, addr);
19688 +static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
19690 + struct e1000_hw *hw = &adapter->hw;
19691 + struct vf_data_storage *vf_data = &adapter->vf_data[vf];
19692 + u32 msg = E1000_VT_MSGTYPE_NACK;
19694 + /* if device isn't clear to send it shouldn't be reading either */
19695 + if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
19696 + time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
19697 + e1000_write_mbx(hw, &msg, 1, vf);
19698 + vf_data->last_nack = jiffies;
19702 +static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
19704 + u32 msgbuf[E1000_VFMAILBOX_SIZE];
19705 + struct e1000_hw *hw = &adapter->hw;
19706 + struct vf_data_storage *vf_data = &adapter->vf_data[vf];
19709 + retval = e1000_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);
19712 + printk(KERN_ERR "Error receiving message from VF\n");
19714 + /* this is a message we already processed, do nothing */
19715 + if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
19719 + * until the vf completes a virtual function reset it should not be
19720 + * allowed to start any configuration.
19723 + if (msgbuf[0] == E1000_VF_RESET) {
19724 + igb_vf_reset_msg(adapter, vf);
19728 + if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
19729 + msgbuf[0] = E1000_VT_MSGTYPE_NACK;
19730 + if (time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
19731 + e1000_write_mbx(hw, msgbuf, 1, vf);
19732 + vf_data->last_nack = jiffies;
19737 + switch ((msgbuf[0] & 0xFFFF)) {
19738 + case E1000_VF_SET_MAC_ADDR:
19739 +#ifndef IGB_DISABLE_VF_MAC_SET
19740 + retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
19742 + retval = -EINVAL;
19745 + case E1000_VF_SET_PROMISC:
19746 + retval = igb_set_vf_promisc(adapter, msgbuf, vf);
19748 + case E1000_VF_SET_MULTICAST:
19749 + retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
19751 + case E1000_VF_SET_LPE:
19752 + retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
19754 + case E1000_VF_SET_VLAN:
19755 + retval = igb_set_vf_vlan(adapter, msgbuf, vf);
19758 + printk(KERN_ERR "Unhandled Msg %8.8x\n", msgbuf[0]);
19759 + retval = -E1000_ERR_MBX;
19763 + /* notify the VF of the results of what it sent us */
19765 + msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
19767 + msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
19769 + msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
19771 + e1000_write_mbx(hw, msgbuf, 1, vf);
19774 +static void igb_msg_task(struct igb_adapter *adapter)
19776 + struct e1000_hw *hw = &adapter->hw;
19779 + for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
19780 + /* process any reset requests */
19781 + if (!e1000_check_for_rst(hw, vf))
19782 + igb_vf_reset_event(adapter, vf);
19784 + /* process any messages pending */
19785 + if (!e1000_check_for_msg(hw, vf))
19786 + igb_rcv_msg_from_vf(adapter, vf);
19788 + /* process any acks */
19789 + if (!e1000_check_for_ack(hw, vf))
19790 + igb_rcv_ack_from_vf(adapter, vf);
19795 + * igb_set_uta - Set unicast filter table address
19796 + * @adapter: board private structure
19798 + * The unicast table address is a register array of 32-bit registers.
19799 + * The table is meant to be used in a way similar to how the MTA is used
19800 + * however due to certain limitations in the hardware it is necessary to
19801 + * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
19802 + * enable bit to allow vlan tag stripping when promiscuous mode is enabled
19804 +static void igb_set_uta(struct igb_adapter *adapter)
19806 + struct e1000_hw *hw = &adapter->hw;
19809 + /* The UTA table only exists on 82576 hardware and newer */
19810 + if (hw->mac.type < e1000_82576)
19813 + /* we only need to do this if VMDq is enabled */
19814 + if (!adapter->VMDQ_queues)
19817 + for (i = 0; i < hw->mac.uta_reg_count; i++)
19818 + E1000_WRITE_REG_ARRAY(hw, E1000_UTA, i, ~0);
19822 + * igb_intr_msi - Interrupt Handler
19823 + * @irq: interrupt number
19824 + * @data: pointer to a network interface device structure
19826 +static irqreturn_t igb_intr_msi(int irq, void *data)
19828 + struct igb_adapter *adapter = data;
19829 + struct igb_q_vector *q_vector = adapter->q_vector[0];
19830 + struct e1000_hw *hw = &adapter->hw;
19831 + /* read ICR disables interrupts using IAM */
19832 + u32 icr = E1000_READ_REG(hw, E1000_ICR);
19834 + igb_write_itr(q_vector);
19836 + if (icr & E1000_ICR_DOUTSYNC) {
19837 + /* HW is reporting DMA is out of sync */
19838 + adapter->stats.doosync++;
19841 + if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
19842 + hw->mac.get_link_status = 1;
19843 + if (!test_bit(__IGB_DOWN, &adapter->state))
19844 + mod_timer(&adapter->watchdog_timer, jiffies + 1);
19847 + napi_schedule(&q_vector->napi);
19849 + return IRQ_HANDLED;
19853 + * igb_intr - Legacy Interrupt Handler
19854 + * @irq: interrupt number
19855 + * @data: pointer to a network interface device structure
19857 +static irqreturn_t igb_intr(int irq, void *data)
19859 + struct igb_adapter *adapter = data;
19860 + struct igb_q_vector *q_vector = adapter->q_vector[0];
19861 + struct e1000_hw *hw = &adapter->hw;
19862 + /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
19863 + * need for the IMC write */
19864 + u32 icr = E1000_READ_REG(hw, E1000_ICR);
19866 + return IRQ_NONE; /* Not our interrupt */
19868 + igb_write_itr(q_vector);
19870 + /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
19871 + * not set, then the adapter didn't send an interrupt */
19872 + if (!(icr & E1000_ICR_INT_ASSERTED))
19875 + if (icr & E1000_ICR_DOUTSYNC) {
19876 + /* HW is reporting DMA is out of sync */
19877 + adapter->stats.doosync++;
19880 + if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
19881 + hw->mac.get_link_status = 1;
19882 + /* guard against interrupt when we're going down */
19883 + if (!test_bit(__IGB_DOWN, &adapter->state))
19884 + mod_timer(&adapter->watchdog_timer, jiffies + 1);
19887 + napi_schedule(&q_vector->napi);
19889 + return IRQ_HANDLED;
19892 +static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector)
19894 + struct igb_adapter *adapter = q_vector->adapter;
19895 + struct e1000_hw *hw = &adapter->hw;
19897 + if (adapter->itr_setting & 3) {
19898 + if (!adapter->msix_entries)
19899 + igb_set_itr(adapter);
19901 + igb_update_ring_itr(q_vector);
19904 + if (!test_bit(__IGB_DOWN, &adapter->state)) {
19905 + if (adapter->msix_entries)
19906 + E1000_WRITE_REG(hw, E1000_EIMS, q_vector->eims_value);
19908 + igb_irq_enable(adapter);
19913 + * igb_poll - NAPI Rx polling callback
19914 + * @napi: napi polling structure
19915 + * @budget: count of how many packets we should handle
19917 +static int igb_poll(struct napi_struct *napi, int budget)
19919 + struct igb_q_vector *q_vector = container_of(napi, struct igb_q_vector, napi);
19920 + int tx_clean_complete = 1, work_done = 0;
19923 + if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
19924 + igb_update_dca(q_vector);
19926 + if (q_vector->tx_ring)
19927 + tx_clean_complete = igb_clean_tx_irq(q_vector);
19929 + if (q_vector->rx_ring)
19930 + igb_clean_rx_irq_adv(q_vector, &work_done, budget);
19932 + if (!tx_clean_complete)
19933 + work_done = budget;
19935 +#ifndef HAVE_NETDEV_NAPI_LIST
19936 + /* if netdev is disabled we need to stop polling */
19937 + if (!netif_running(q_vector->adapter->netdev))
19941 + /* If not enough Rx work done, exit the polling mode */
19942 + if (work_done < budget) {
19943 + napi_complete(napi);
19944 + igb_ring_irq_enable(q_vector);
19947 + return work_done;
19950 +#ifdef SIOCSHWTSTAMP
19952 + * igb_systim_to_hwtstamp - convert system time value to hw timestamp
19953 + * @adapter: board private structure
19954 + * @shhwtstamps: timestamp structure to update
19955 + * @regval: unsigned 64bit system time value.
19957 + * We need to convert the system time value stored in the RX/TXSTMP registers
19958 + * into a hwtstamp which can be used by the upper level timestamping functions
19960 +static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
19961 + struct skb_shared_hwtstamps *shhwtstamps,
19966 + ns = timecounter_cyc2time(&adapter->clock, regval);
19967 + timecompare_update(&adapter->compare, ns);
19968 + memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
19969 + shhwtstamps->hwtstamp = ns_to_ktime(ns);
19970 + shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
19974 + * igb_tx_hwtstamp - utility function which checks for TX time stamp
19975 + * @adapter: board private structure
19976 + * @skb: packet that was just sent
19978 + * If we were asked to do hardware stamping and such a time stamp is
19979 + * available, then it must have been for this skb here because we
19980 + * only allow one such packet into the queue.
19982 +static void igb_tx_hwtstamp(struct igb_adapter *adapter, struct sk_buff *skb)
19984 + union skb_shared_tx *shtx = skb_tx(skb);
19985 + struct e1000_hw *hw = &adapter->hw;
19986 + struct skb_shared_hwtstamps shhwtstamps;
19989 + /* if skb does not support hw timestamp or TX stamp not valid exit */
19990 + if (likely(!shtx->hardware) ||
19991 + !(E1000_READ_REG(hw, E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
19994 + regval = E1000_READ_REG(hw, E1000_TXSTMPL);
19995 + regval |= (u64)E1000_READ_REG(hw, E1000_TXSTMPH) << 32;
19997 + igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
19998 + skb_tstamp_tx(skb, &shhwtstamps);
20003 + * igb_clean_tx_irq - Reclaim resources after transmit completes
20004 + * @q_vector: pointer to q_vector containing needed info
20005 + * returns TRUE if ring is completely cleaned
20007 +static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
20009 + struct igb_adapter *adapter = q_vector->adapter;
20010 + struct igb_ring *tx_ring = q_vector->tx_ring;
20011 + struct net_device *netdev = adapter->netdev;
20012 + struct e1000_hw *hw = &adapter->hw;
20013 + struct igb_buffer *buffer_info;
20014 + struct sk_buff *skb;
20015 + union e1000_adv_tx_desc *tx_desc, *eop_desc;
20016 + unsigned int total_bytes = 0, total_packets = 0;
20017 + unsigned int i, eop, count = 0;
20018 + bool cleaned = false;
20020 + i = tx_ring->next_to_clean;
20021 + eop = tx_ring->buffer_info[i].next_to_watch;
20022 + eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
20024 + while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
20025 + (count < tx_ring->count)) {
20026 + for (cleaned = false; !cleaned; count++) {
20027 + tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
20028 + buffer_info = &tx_ring->buffer_info[i];
20029 + cleaned = (i == eop);
20030 + skb = buffer_info->skb;
20033 +#ifdef NETIF_F_TSO
20034 + unsigned int segs, bytecount;
20035 + /* gso_segs is currently only valid for tcp */
20036 + segs = skb_shinfo(skb)->gso_segs ?: 1;
20037 + /* multiply data chunks by size of headers */
20038 + bytecount = ((segs - 1) * skb_headlen(skb)) +
20040 + total_packets += segs;
20041 + total_bytes += bytecount;
20044 + total_bytes += skb->len;
20046 +#ifdef SIOCSHWTSTAMP
20047 + igb_tx_hwtstamp(adapter, skb);
20051 + igb_unmap_and_free_tx_resource(tx_ring->pdev,
20053 + tx_desc->wb.status = 0;
20056 + if (i == tx_ring->count)
20059 + eop = tx_ring->buffer_info[i].next_to_watch;
20060 + eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
20063 + tx_ring->next_to_clean = i;
20065 + if (unlikely(count &&
20066 + netif_carrier_ok(netdev) &&
20067 + IGB_DESC_UNUSED(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
20068 + /* Make sure that anybody stopping the queue after this
20069 + * sees the new next_to_clean.
20072 + if (netif_is_multiqueue(netdev)) {
20073 + if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
20074 + !(test_bit(__IGB_DOWN, &adapter->state))) {
20075 + netif_wake_subqueue(netdev, tx_ring->queue_index);
20076 + ++tx_ring->restart_queue;
20079 + if (netif_queue_stopped(netdev) &&
20080 + !(test_bit(__IGB_DOWN, &adapter->state))) {
20081 + netif_wake_queue(netdev);
20082 + ++tx_ring->restart_queue;
20087 + if (tx_ring->detect_tx_hung) {
20088 + /* Detect a transmit hang in hardware, this serializes the
20089 + * check with the clearing of time_stamp and movement of i */
20090 + tx_ring->detect_tx_hung = FALSE;
20091 + if (tx_ring->buffer_info[i].time_stamp &&
20092 + time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
20093 + (adapter->tx_timeout_factor * HZ))
20094 + && !(E1000_READ_REG(hw, E1000_STATUS) &
20095 + E1000_STATUS_TXOFF)) {
20097 + /* detected Tx unit hang */
20098 + DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
20099 + " Tx Queue <%d>\n"
20102 + " next_to_use <%x>\n"
20103 + " next_to_clean <%x>\n"
20104 + "buffer_info[next_to_clean]\n"
20105 + " time_stamp <%lx>\n"
20106 + " next_to_watch <%x>\n"
20107 + " jiffies <%lx>\n"
20108 + " desc.status <%x>\n",
20109 + tx_ring->queue_index,
20110 + readl(tx_ring->head),
20111 + readl(tx_ring->tail),
20112 + tx_ring->next_to_use,
20113 + tx_ring->next_to_clean,
20114 + tx_ring->buffer_info[eop].time_stamp,
20117 + eop_desc->wb.status);
20118 + if (netif_is_multiqueue(netdev))
20119 + netif_stop_subqueue(netdev,
20120 + tx_ring->queue_index);
20122 + netif_stop_queue(netdev);
20125 + tx_ring->total_bytes += total_bytes;
20126 + tx_ring->total_packets += total_packets;
20127 + tx_ring->stats.bytes += total_bytes;
20128 + tx_ring->stats.packets += total_packets;
20129 + adapter->net_stats.tx_bytes += total_bytes;
20130 + adapter->net_stats.tx_packets += total_packets;
20131 + return (count < tx_ring->count);
20136 + * igb_get_skb_hdr - helper function for LRO header processing
20137 + * @skb: pointer to sk_buff to be added to LRO packet
20138 + * @iphdr: pointer to ip header structure
20139 + * @tcph: pointer to tcp header structure
20140 + * @hdr_flags: pointer to header flags
20141 + * @priv: pointer to the receive descriptor for the current sk_buff
20143 +static int igb_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph,
20144 + u64 *hdr_flags, void *priv)
20146 + union e1000_adv_rx_desc *rx_desc = priv;
20147 + u16 pkt_type = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info &
20148 + (E1000_RXDADV_PKTTYPE_IPV4 | E1000_RXDADV_PKTTYPE_TCP);
20150 + /* Verify that this is a valid IPv4 TCP packet */
20151 + if (pkt_type != (E1000_RXDADV_PKTTYPE_IPV4 |
20152 + E1000_RXDADV_PKTTYPE_TCP))
20155 + /* Set network headers */
20156 + skb_reset_network_header(skb);
20157 + skb_set_transport_header(skb, ip_hdrlen(skb));
20158 + *iphdr = ip_hdr(skb);
20159 + *tcph = tcp_hdr(skb);
20160 + *hdr_flags = LRO_IPV4 | LRO_TCP;
20166 +#endif /* IGB_LRO */
20168 + * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
20169 + * @adapter: address of board private structure
20171 +int igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
20173 + struct net_device *netdev = pci_get_drvdata(rx_ring->pdev);
20174 + union e1000_adv_rx_desc *rx_desc;
20175 + struct igb_buffer *buffer_info;
20176 + struct sk_buff *skb;
20178 + int bufsz, err = 0;
20180 + i = rx_ring->next_to_use;
20181 + buffer_info = &rx_ring->buffer_info[i];
20183 +#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
20184 + if (rx_ring->rx_ps_hdr_size)
20185 + bufsz = rx_ring->rx_ps_hdr_size;
20187 + bufsz = rx_ring->rx_buffer_len;
20189 + bufsz = rx_ring->rx_buffer_len;
20190 +#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
20192 + while (cleaned_count--) {
20193 + rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
20195 +#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
20196 + if (rx_ring->rx_ps_hdr_size && !buffer_info->page_dma) {
20197 + if (!buffer_info->page) {
20198 + buffer_info->page = netdev_alloc_page(netdev);
20199 + if (!buffer_info->page) {
20203 + buffer_info->page_offset = 0;
20205 + buffer_info->page_offset ^= PAGE_SIZE / 2;
20207 + buffer_info->page_dma =
20208 + pci_map_page(rx_ring->pdev, buffer_info->page,
20209 + buffer_info->page_offset,
20210 + rx_ring->rx_buffer_len,
20211 + PCI_DMA_FROMDEVICE);
20213 +#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
20215 + if (!buffer_info->skb) {
20216 + skb = netdev_alloc_skb(netdev, bufsz + NET_IP_ALIGN);
20222 + /* Make buffer alignment 2 beyond a 16 byte boundary
20223 + * this will result in a 16 byte aligned IP header after
20224 + * the 14 byte MAC header is removed
20226 + skb_reserve(skb, NET_IP_ALIGN);
20228 + buffer_info->skb = skb;
20230 + if (!buffer_info->dma)
20231 + buffer_info->dma = pci_map_single(rx_ring->pdev,
20232 + buffer_info->skb->data,
20234 + PCI_DMA_FROMDEVICE);
20235 + /* Refresh the desc even if buffer_addrs didn't change because
20236 + * each write-back erases this info. */
20237 +#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
20238 + if (rx_ring->rx_ps_hdr_size) {
20239 + rx_desc->read.pkt_addr =
20240 + cpu_to_le64(buffer_info->page_dma);
20241 + rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
20243 + rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
20244 + rx_desc->read.hdr_addr = 0;
20247 + rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
20248 + rx_desc->read.hdr_addr = 0;
20249 +#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
20252 + if (i == rx_ring->count)
20254 + buffer_info = &rx_ring->buffer_info[i];
20258 + if (rx_ring->next_to_use != i) {
20259 + rx_ring->next_to_use = i;
20261 + i = (rx_ring->count - 1);
20265 + /* Force memory writes to complete before letting h/w
20266 + * know there are new descriptors to fetch. (Only
20267 + * applicable for weak-ordered memory model archs,
20268 + * such as IA-64). */
20270 + writel(i, rx_ring->tail);
20277 + * igb_receive_skb - helper function to handle rx indications
20278 + * @ring: pointer to receive ring receiving this packet
20279 + * @status: descriptor status field as written by hardware
20280 + * @rx_desc: receive descriptor containing vlan and type information.
20281 + * @skb: pointer to sk_buff to be indicated to stack
20283 +static void igb_receive_skb(struct igb_ring *ring, u8 status,
20284 + union e1000_adv_rx_desc *rx_desc,
20285 + struct sk_buff *skb)
20287 + struct igb_q_vector *q_vector = ring->q_vector;
20288 + struct igb_adapter *adapter = q_vector->adapter;
20289 + bool vlan_extracted = (adapter->vlgrp && (status & E1000_RXD_STAT_VP));
20292 + if (adapter->netdev->features & NETIF_F_LRO &&
20293 + skb->ip_summed == CHECKSUM_UNNECESSARY) {
20294 + if (vlan_extracted)
20295 + lro_vlan_hwaccel_receive_skb(&ring->lro_mgr, skb,
20297 + le16_to_cpu(rx_desc->wb.upper.vlan),
20300 + lro_receive_skb(&ring->lro_mgr, skb, rx_desc);
20301 + ring->lro_used = TRUE;
20304 + if (vlan_extracted)
20305 + vlan_gro_receive(&q_vector->napi, adapter->vlgrp,
20306 + le16_to_cpu(rx_desc->wb.upper.vlan),
20310 + napi_gro_receive(&q_vector->napi, skb);
20316 +static inline void igb_rx_checksum_adv(struct igb_ring *ring,
20317 + u32 status_err, struct sk_buff *skb)
20319 + struct igb_adapter *adapter = ring->q_vector->adapter;
20320 + skb->ip_summed = CHECKSUM_NONE;
20322 + /* Ignore Checksum bit is set or checksum is disabled through ethtool */
20323 + if (!ring->rx_csum || (status_err & E1000_RXD_STAT_IXSM))
20326 + /* TCP/UDP checksum error bit is set */
20328 + (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
20330 + * work around errata with sctp packets where the TCPE aka
20331 + * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
20332 + * packets, (aka let the stack check the crc32c)
20334 + if (!((adapter->hw.mac.type >= e1000_82576) &&
20335 + (skb->len == 60)))
20336 + ring->hw_csum_err++;
20338 + /* let the stack verify checksum errors */
20341 + /* It must be a TCP or UDP packet with a valid checksum */
20342 + if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
20343 + skb->ip_summed = CHECKSUM_UNNECESSARY;
20345 + ring->hw_csum_good++;
20348 +#ifdef SIOCSHWTSTAMP
20349 +static inline void igb_rx_hwtstamp(struct igb_adapter *adapter, u32 staterr,
20350 + struct sk_buff *skb)
20352 + struct e1000_hw *hw = &adapter->hw;
20356 + * If this bit is set, then the RX registers contain the time stamp. No
20357 + * other packet will be time stamped until we read these registers, so
20358 + * read the registers to make them available again. Because only one
20359 + * packet can be time stamped at a time, we know that the register
20360 + * values must belong to this one here and therefore we don't need to
20361 + * compare any of the additional attributes stored for it.
20363 + * If nothing went wrong, then it should have a skb_shared_tx that we
20364 + * can turn into a skb_shared_hwtstamps.
20366 + if (likely(!(staterr & E1000_RXDADV_STAT_TS)))
20368 + if(!(E1000_READ_REG(hw, E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
20371 + regval = E1000_READ_REG(hw, E1000_RXSTMPL);
20372 + regval |= (u64)E1000_READ_REG(hw, E1000_RXSTMPH) << 32;
20374 + igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
20377 +#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
20378 +static inline u16 igb_get_hlen(struct igb_ring *rx_ring,
20379 + union e1000_adv_rx_desc *rx_desc)
20381 + /* HW will not DMA in data larger than the given buffer, even if it
20382 + * parses the (NFS, of course) header to be larger. In that case, it
20383 + * fills the header buffer and spills the rest into the page.
20385 + u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info) &
20386 + E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
20387 + if (hlen > rx_ring->rx_ps_hdr_size)
20388 + hlen = rx_ring->rx_ps_hdr_size;
20393 +static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
20394 + int *work_done, int budget)
20396 + struct igb_adapter *adapter = q_vector->adapter;
20397 + struct net_device *netdev = adapter->netdev;
20398 + struct igb_ring *rx_ring = q_vector->rx_ring;
20399 + struct pci_dev *pdev = rx_ring->pdev;
20400 + union e1000_adv_rx_desc *rx_desc , *next_rxd;
20401 + struct igb_buffer *buffer_info , *next_buffer;
20402 + struct sk_buff *skb;
20403 + bool cleaned = FALSE;
20404 + int cleaned_count = 0;
20405 + unsigned int total_bytes = 0, total_packets = 0;
20410 + i = rx_ring->next_to_clean;
20411 + buffer_info = &rx_ring->buffer_info[i];
20412 + rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
20413 + staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
20415 + while (staterr & E1000_RXD_STAT_DD) {
20416 + if (*work_done >= budget)
20420 + skb = buffer_info->skb;
20421 + prefetch(skb->data - NET_IP_ALIGN);
20422 + buffer_info->skb = NULL;
20425 + if (i == rx_ring->count)
20428 + next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
20429 + prefetch(next_rxd);
20430 + next_buffer = &rx_ring->buffer_info[i];
20432 + length = le16_to_cpu(rx_desc->wb.upper.length);
20436 +#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
20437 + pci_unmap_single(pdev, buffer_info->dma,
20438 + rx_ring->rx_buffer_len,
20439 + PCI_DMA_FROMDEVICE);
20440 + buffer_info->dma = 0;
20441 + skb_put(skb, length);
20444 + if (!rx_ring->rx_ps_hdr_size) {
20445 + pci_unmap_single(pdev, buffer_info->dma,
20446 + rx_ring->rx_buffer_len,
20447 + PCI_DMA_FROMDEVICE);
20448 + buffer_info->dma = 0;
20449 + skb_put(skb, length);
20453 + if (buffer_info->dma) {
20454 + u16 hlen = igb_get_hlen(rx_ring, rx_desc);
20455 + pci_unmap_single(pdev, buffer_info->dma,
20456 + rx_ring->rx_ps_hdr_size,
20457 + PCI_DMA_FROMDEVICE);
20458 + buffer_info->dma = 0;
20459 + skb_put(skb, hlen);
20463 + pci_unmap_page(pdev, buffer_info->page_dma,
20464 + rx_ring->rx_buffer_len,
20465 + PCI_DMA_FROMDEVICE);
20466 + buffer_info->page_dma = 0;
20468 + skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
20469 + buffer_info->page,
20470 + buffer_info->page_offset,
20473 + if (page_count(buffer_info->page) != 1)
20474 + buffer_info->page = NULL;
20476 + get_page(buffer_info->page);
20478 + skb->len += length;
20479 + skb->data_len += length;
20480 + skb->truesize += length;
20483 + if (!(staterr & E1000_RXD_STAT_EOP)) {
20484 + buffer_info->skb = next_buffer->skb;
20485 + buffer_info->dma = next_buffer->dma;
20486 + next_buffer->skb = skb;
20487 + next_buffer->dma = 0;
20491 +#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
20492 + if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
20493 + dev_kfree_skb_irq(skb);
20497 +#ifdef SIOCSHWTSTAMP
20498 + igb_rx_hwtstamp(adapter, staterr, skb);
20500 + total_bytes += skb->len;
20503 + igb_rx_checksum_adv(rx_ring, staterr, skb);
20505 +#ifndef ETH_TYPE_TRANS_SETS_DEV
20506 + skb->dev = netdev;
20508 + skb->protocol = eth_type_trans(skb, netdev);
20510 + igb_receive_skb(rx_ring, staterr, rx_desc, skb);
20512 + netdev->last_rx = jiffies;
20515 + rx_desc->wb.upper.status_error = 0;
20517 + /* return some buffers to hardware, one at a time is too slow */
20518 + if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
20519 + if (igb_alloc_rx_buffers_adv(rx_ring, cleaned_count))
20520 + adapter->alloc_rx_buff_failed++;
20521 + cleaned_count = 0;
20524 + /* use prefetched values */
20525 + rx_desc = next_rxd;
20526 + buffer_info = next_buffer;
20527 + staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
20530 + rx_ring->next_to_clean = i;
20531 + cleaned_count = IGB_DESC_UNUSED(rx_ring);
20534 + if (rx_ring->lro_used) {
20535 + lro_flush_all(&rx_ring->lro_mgr);
20536 + rx_ring->lro_used = FALSE;
20540 + if (cleaned_count)
20541 + if (igb_alloc_rx_buffers_adv(rx_ring, cleaned_count))
20542 + adapter->alloc_rx_buff_failed++;
20544 + rx_ring->total_packets += total_packets;
20545 + rx_ring->total_bytes += total_bytes;
20546 + rx_ring->stats.packets += total_packets;
20547 + rx_ring->stats.bytes += total_bytes;
20548 + adapter->net_stats.rx_bytes += total_bytes;
20549 + adapter->net_stats.rx_packets += total_packets;
20553 +#ifdef SIOCGMIIPHY
20555 + * igb_mii_ioctl -
20560 +static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
20562 + struct igb_adapter *adapter = netdev_priv(netdev);
20563 + struct mii_ioctl_data *data = if_mii(ifr);
20565 + if (adapter->hw.phy.media_type != e1000_media_type_copper)
20566 + return -EOPNOTSUPP;
20569 + case SIOCGMIIPHY:
20570 + data->phy_id = adapter->hw.phy.addr;
20572 + case SIOCGMIIREG:
20573 + if (!capable(CAP_NET_ADMIN))
20575 + if (e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
20579 + case SIOCSMIIREG:
20581 + return -EOPNOTSUPP;
20583 + return E1000_SUCCESS;
20587 +#ifdef SIOCSHWTSTAMP
20589 + * igb_hwtstamp_ioctl - control hardware time stamping
20594 + * Outgoing time stamping can be enabled and disabled. Play nice and
20595 + * disable it when requested, although it shouldn't cause any overhead
20596 + * when no packet needs it. At most one packet in the queue may be
20597 + * marked for time stamping, otherwise it would be impossible to tell
20598 + * for sure to which packet the hardware time stamp belongs.
20600 + * Incoming time stamping has to be configured via the hardware
20601 + * filters. Not all combinations are supported, in particular event
20602 + * type has to be specified. Matching the kind of event packet is
20603 + * not supported, with the exception of "all V2 events regardless of
20607 +static int igb_hwtstamp_ioctl(struct net_device *netdev,
20608 + struct ifreq *ifr, int cmd)
20610 + struct igb_adapter *adapter = netdev_priv(netdev);
20611 + struct e1000_hw *hw = &adapter->hw;
20612 + struct hwtstamp_config config;
20613 + u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
20614 + u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
20615 + u32 tsync_rx_cfg = 0;
20616 + bool is_l4 = false;
20617 + bool is_l2 = false;
20620 + if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
20623 + /* reserved for future extensions */
20624 + if (config.flags)
20627 + switch (config.tx_type) {
20628 + case HWTSTAMP_TX_OFF:
20629 + tsync_tx_ctl = 0;
20630 + case HWTSTAMP_TX_ON:
20636 + switch (config.rx_filter) {
20637 + case HWTSTAMP_FILTER_NONE:
20638 + tsync_rx_ctl = 0;
20640 + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
20641 + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
20642 + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
20643 + case HWTSTAMP_FILTER_ALL:
20645 + * register TSYNCRXCFG must be set, therefore it is not
20646 + * possible to time stamp both Sync and Delay_Req messages
20647 + * => fall back to time stamping all packets
20649 + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
20650 + config.rx_filter = HWTSTAMP_FILTER_ALL;
20652 + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
20653 + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
20654 + tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
20657 + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
20658 + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
20659 + tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
20662 + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
20663 + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
20664 + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
20665 + tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
20668 + config.rx_filter = HWTSTAMP_FILTER_SOME;
20670 + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
20671 + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
20672 + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
20673 + tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
20676 + config.rx_filter = HWTSTAMP_FILTER_SOME;
20678 + case HWTSTAMP_FILTER_PTP_V2_EVENT:
20679 + case HWTSTAMP_FILTER_PTP_V2_SYNC:
20680 + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
20681 + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
20682 + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
20689 + if (hw->mac.type == e1000_82575) {
20690 + if (tsync_rx_ctl | tsync_tx_ctl)
20695 + /* enable/disable TX */
20696 + regval = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
20697 + regval &= ~E1000_TSYNCTXCTL_ENABLED;
20698 + regval |= tsync_tx_ctl;
20699 + E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, regval);
20701 + /* enable/disable RX */
20702 + regval = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
20703 + regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
20704 + regval |= tsync_rx_ctl;
20705 + E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, regval);
20707 + /* define which PTP packets are time stamped */
20708 + E1000_WRITE_REG(hw, E1000_TSYNCRXCFG, tsync_rx_cfg);
20710 + /* define ethertype filter for timestamped packets */
20712 + E1000_WRITE_REG(hw, E1000_ETQF(3),
20713 + (E1000_ETQF_FILTER_ENABLE | /* enable filter */
20714 + E1000_ETQF_1588 | /* enable timestamping */
20715 + ETH_P_1588)); /* 1588 eth protocol type */
20717 + E1000_WRITE_REG(hw, E1000_ETQF(3), 0);
20719 +#define PTP_PORT 319
20720 + /* L4 Queue Filter[3]: filter by destination port and protocol */
20722 + u32 ftqf = (IPPROTO_UDP /* UDP */
20723 + | E1000_FTQF_VF_BP /* VF not compared */
20724 + | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
20725 + | E1000_FTQF_MASK); /* mask all inputs */
20726 + ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */
20728 + E1000_WRITE_REG(hw, E1000_IMIR(3), htons(PTP_PORT));
20729 + E1000_WRITE_REG(hw, E1000_IMIREXT(3),
20730 + (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
20731 + if (hw->mac.type == e1000_82576) {
20732 + /* enable source port check */
20733 + E1000_WRITE_REG(hw, E1000_SPQF(3), htons(PTP_PORT));
20734 + ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
20736 + E1000_WRITE_REG(hw, E1000_FTQF(3), ftqf);
20738 + E1000_WRITE_REG(hw, E1000_FTQF(3), E1000_FTQF_MASK);
20740 + E1000_WRITE_FLUSH(hw);
20742 + adapter->hwtstamp_config = config;
20744 + /* clear TX/RX time stamp registers, just to be sure */
20745 + regval = E1000_READ_REG(hw, E1000_TXSTMPH);
20746 + regval = E1000_READ_REG(hw, E1000_RXSTMPH);
20748 + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
20759 +static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
20762 +#ifdef SIOCGMIIPHY
20763 + case SIOCGMIIPHY:
20764 + case SIOCGMIIREG:
20765 + case SIOCSMIIREG:
20766 + return igb_mii_ioctl(netdev, ifr, cmd);
20768 +#ifdef SIOCSHWTSTAMP
20769 + case SIOCSHWTSTAMP:
20770 + return igb_hwtstamp_ioctl(netdev, ifr, cmd);
20772 +#ifdef ETHTOOL_OPS_COMPAT
20773 + case SIOCETHTOOL:
20774 + return ethtool_ioctl(ifr);
20777 + return -EOPNOTSUPP;
20781 +s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
20783 + struct igb_adapter *adapter = hw->back;
20786 + cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
20788 + return -E1000_ERR_CONFIG;
20790 + pci_read_config_word(adapter->pdev, cap_offset + reg, value);
20792 + return E1000_SUCCESS;
20795 +s32 e1000_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
20797 + struct igb_adapter *adapter = hw->back;
20800 + cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
20802 + return -E1000_ERR_CONFIG;
20804 + pci_write_config_word(adapter->pdev, cap_offset + reg, *value);
20806 + return E1000_SUCCESS;
20809 +static void igb_vlan_rx_register(struct net_device *netdev,
20810 + struct vlan_group *grp)
20812 + struct igb_adapter *adapter = netdev_priv(netdev);
20813 + struct e1000_hw *hw = &adapter->hw;
20816 + igb_irq_disable(adapter);
20817 + adapter->vlgrp = grp;
20820 + /* enable VLAN tag insert/strip */
20821 + ctrl = E1000_READ_REG(hw, E1000_CTRL);
20822 + ctrl |= E1000_CTRL_VME;
20823 + E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
20825 + /* Disable CFI check */
20826 + rctl = E1000_READ_REG(hw, E1000_RCTL);
20827 + rctl &= ~E1000_RCTL_CFIEN;
20828 + E1000_WRITE_REG(hw, E1000_RCTL, rctl);
20830 + /* disable VLAN tag insert/strip */
20831 + ctrl = E1000_READ_REG(hw, E1000_CTRL);
20832 + ctrl &= ~E1000_CTRL_VME;
20833 + E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
20836 + igb_set_rlpml(adapter);
20838 + if (!test_bit(__IGB_DOWN, &adapter->state))
20839 + igb_irq_enable(adapter);
20842 +static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
20844 + struct igb_adapter *adapter = netdev_priv(netdev);
20845 + struct e1000_hw *hw = &adapter->hw;
20846 + int pf_id = adapter->vfs_allocated_count;
20847 +#ifndef HAVE_NETDEV_VLAN_FEATURES
20848 + struct net_device *v_netdev;
20851 + /* attempt to add filter to vlvf array */
20852 + igb_vlvf_set(adapter, vid, TRUE, pf_id);
20854 + /* add the filter since PF can receive vlans w/o entry in vlvf */
20855 + igb_vfta_set(hw, vid, TRUE);
20856 +#ifndef HAVE_NETDEV_VLAN_FEATURES
20857 + /* Copy feature flags from netdev to the vlan netdev for this vid.
20858 + * This allows things like TSO to bubble down to our vlan device.
20860 + v_netdev = vlan_group_get_device(adapter->vlgrp, vid);
20861 + v_netdev->features |= adapter->netdev->features;
20862 + vlan_group_set_device(adapter->vlgrp, vid, v_netdev);
20866 +static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
20868 + struct igb_adapter *adapter = netdev_priv(netdev);
20869 + struct e1000_hw *hw = &adapter->hw;
20870 + int pf_id = adapter->vfs_allocated_count;
20873 + igb_irq_disable(adapter);
20874 + vlan_group_set_device(adapter->vlgrp, vid, NULL);
20876 + if (!test_bit(__IGB_DOWN, &adapter->state))
20877 + igb_irq_enable(adapter);
20879 + /* remove vlan from VLVF table array */
20880 + err = igb_vlvf_set(adapter, vid, FALSE, pf_id);
20882 + /* if vid was not present in VLVF just remove it from table */
20884 + igb_vfta_set(hw, vid, FALSE);
20887 +static void igb_restore_vlan(struct igb_adapter *adapter)
20889 + igb_vlan_rx_register(adapter->netdev, adapter->vlgrp);
20891 + if (adapter->vlgrp) {
20893 + for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
20894 + if (!vlan_group_get_device(adapter->vlgrp, vid))
20896 + igb_vlan_rx_add_vid(adapter->netdev, vid);
20901 +int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
20903 + struct e1000_mac_info *mac = &adapter->hw.mac;
20905 + mac->autoneg = 0;
20907 + switch (spddplx) {
20908 + case SPEED_10 + DUPLEX_HALF:
20909 + mac->forced_speed_duplex = ADVERTISE_10_HALF;
20911 + case SPEED_10 + DUPLEX_FULL:
20912 + mac->forced_speed_duplex = ADVERTISE_10_FULL;
20914 + case SPEED_100 + DUPLEX_HALF:
20915 + mac->forced_speed_duplex = ADVERTISE_100_HALF;
20917 + case SPEED_100 + DUPLEX_FULL:
20918 + mac->forced_speed_duplex = ADVERTISE_100_FULL;
20920 + case SPEED_1000 + DUPLEX_FULL:
20921 + mac->autoneg = 1;
20922 + adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
20924 + case SPEED_1000 + DUPLEX_HALF: /* not supported */
20926 + DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
20932 +#ifdef USE_REBOOT_NOTIFIER
20933 +/* only want to do this for 2.4 kernels? */
20934 +static int igb_notify_reboot(struct notifier_block *nb, unsigned long event,
20937 + struct pci_dev *pdev = NULL;
20942 + case SYS_POWER_OFF:
20943 + while ((pdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
20944 + if (pci_dev_driver(pdev) == &igb_driver)
20945 + igb_suspend(pdev, PMSG_SUSPEND);
20948 + return NOTIFY_DONE;
20952 +static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
20954 + struct net_device *netdev = pci_get_drvdata(pdev);
20955 + struct igb_adapter *adapter = netdev_priv(netdev);
20956 + struct e1000_hw *hw = &adapter->hw;
20957 + u32 ctrl, rctl, status;
20958 + u32 wufc = adapter->wol;
20963 + netif_device_detach(netdev);
20965 + if (netif_running(netdev))
20966 + igb_close(netdev);
20968 + igb_clear_interrupt_scheme(adapter);
20971 + retval = pci_save_state(pdev);
20976 + status = E1000_READ_REG(hw, E1000_STATUS);
20977 + if (status & E1000_STATUS_LU)
20978 + wufc &= ~E1000_WUFC_LNKC;
20981 + igb_setup_rctl(adapter);
20982 + igb_set_rx_mode(netdev);
20984 + /* turn on all-multi mode if wake on multicast is enabled */
20985 + if (wufc & E1000_WUFC_MC) {
20986 + rctl = E1000_READ_REG(hw, E1000_RCTL);
20987 + rctl |= E1000_RCTL_MPE;
20988 + E1000_WRITE_REG(hw, E1000_RCTL, rctl);
20991 + ctrl = E1000_READ_REG(hw, E1000_CTRL);
20992 + /* phy power management enable */
20993 + #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
20994 + ctrl |= E1000_CTRL_ADVD3WUC;
20995 + E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
20997 + /* Allow time for pending master requests to run */
20998 + e1000_disable_pcie_master(hw);
21000 + E1000_WRITE_REG(hw, E1000_WUC, E1000_WUC_PME_EN);
21001 + E1000_WRITE_REG(hw, E1000_WUFC, wufc);
21003 + E1000_WRITE_REG(hw, E1000_WUC, 0);
21004 + E1000_WRITE_REG(hw, E1000_WUFC, 0);
21007 + *enable_wake = wufc || adapter->en_mng_pt;
21008 + if (!*enable_wake)
21009 + e1000_shutdown_fiber_serdes_link(hw);
21011 + /* Release control of h/w to f/w. If f/w is AMT enabled, this
21012 + * would have already happened in close and is redundant. */
21013 + igb_release_hw_control(adapter);
21015 + pci_disable_device(pdev);
21021 +static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
21026 + retval = __igb_shutdown(pdev, &wake);
21031 + pci_prepare_to_sleep(pdev);
21033 + pci_wake_from_d3(pdev, false);
21034 + pci_set_power_state(pdev, PCI_D3hot);
21040 +static int igb_resume(struct pci_dev *pdev)
21042 + struct net_device *netdev = pci_get_drvdata(pdev);
21043 + struct igb_adapter *adapter = netdev_priv(netdev);
21044 + struct e1000_hw *hw = &adapter->hw;
21047 + pci_set_power_state(pdev, PCI_D0);
21048 + pci_restore_state(pdev);
21049 + err = pci_enable_device_mem(pdev);
21051 + dev_err(&pdev->dev, "igb: Cannot enable PCI device "
21052 + "from suspend\n");
21055 + pci_set_master(pdev);
21057 + pci_enable_wake(pdev, PCI_D3hot, 0);
21058 + pci_enable_wake(pdev, PCI_D3cold, 0);
21060 + if (igb_init_interrupt_scheme(adapter)) {
21061 + DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
21065 + /* e1000_power_up_phy(adapter); */
21067 + igb_reset(adapter);
21069 + /* let the f/w know that the h/w is now under the control of the
21071 + igb_get_hw_control(adapter);
21073 + E1000_WRITE_REG(hw, E1000_WUS, ~0);
21075 + if (netif_running(netdev)) {
21076 + err = igb_open(netdev);
21081 + netif_device_attach(netdev);
21087 +#ifndef USE_REBOOT_NOTIFIER
21088 +static void igb_shutdown(struct pci_dev *pdev)
21092 + __igb_shutdown(pdev, &wake);
21094 + if (system_state == SYSTEM_POWER_OFF) {
21095 + pci_wake_from_d3(pdev, wake);
21096 + pci_set_power_state(pdev, PCI_D3hot);
21101 +#ifdef CONFIG_NET_POLL_CONTROLLER
21103 + * Polling 'interrupt' - used by things like netconsole to send skbs
21104 + * without having to re-enable interrupts. It's not called while
21105 + * the interrupt routine is executing.
21107 +static void igb_netpoll(struct net_device *netdev)
21109 + struct igb_adapter *adapter = netdev_priv(netdev);
21110 + struct e1000_hw *hw = &adapter->hw;
21113 + if (!adapter->msix_entries) {
21114 + struct igb_q_vector *q_vector = adapter->q_vector[0];
21115 + igb_irq_disable(adapter);
21116 + napi_schedule(&q_vector->napi);
21120 + for (i = 0; i < adapter->num_q_vectors; i++) {
21121 + struct igb_q_vector *q_vector = adapter->q_vector[i];
21122 + E1000_WRITE_REG(hw, E1000_EIMC, q_vector->eims_value);
21123 + napi_schedule(&q_vector->napi);
21126 +#endif /* CONFIG_NET_POLL_CONTROLLER */
21128 +#ifdef HAVE_PCI_ERS
21130 + * igb_io_error_detected - called when PCI error is detected
21131 + * @pdev: Pointer to PCI device
21132 + * @state: The current pci connection state
21134 + * This function is called after a PCI bus error affecting
21135 + * this device has been detected.
21137 +static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
21138 + pci_channel_state_t state)
21140 + struct net_device *netdev = pci_get_drvdata(pdev);
21141 + struct igb_adapter *adapter = netdev_priv(netdev);
21143 + netif_device_detach(netdev);
21145 + if (state == pci_channel_io_perm_failure)
21146 + return PCI_ERS_RESULT_DISCONNECT;
21148 + if (netif_running(netdev))
21149 + igb_down(adapter);
21150 + pci_disable_device(pdev);
21152 + /* Request a slot reset. */
21153 + return PCI_ERS_RESULT_NEED_RESET;
21157 + * igb_io_slot_reset - called after the pci bus has been reset.
21158 + * @pdev: Pointer to PCI device
21160 + * Restart the card from scratch, as if from a cold-boot. Implementation
21161 + * resembles the first-half of the igb_resume routine.
21163 +static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
21165 + struct net_device *netdev = pci_get_drvdata(pdev);
21166 + struct igb_adapter *adapter = netdev_priv(netdev);
21167 + struct e1000_hw *hw = &adapter->hw;
21168 + pci_ers_result_t result;
21170 + if (pci_enable_device_mem(pdev)) {
21171 + dev_err(&pdev->dev,
21172 + "Cannot re-enable PCI device after reset.\n");
21173 + result = PCI_ERS_RESULT_DISCONNECT;
21175 + pci_set_master(pdev);
21176 + pci_restore_state(pdev);
21178 + pci_enable_wake(pdev, PCI_D3hot, 0);
21179 + pci_enable_wake(pdev, PCI_D3cold, 0);
21181 + igb_reset(adapter);
21182 + E1000_WRITE_REG(hw, E1000_WUS, ~0);
21183 + result = PCI_ERS_RESULT_RECOVERED;
21186 + pci_cleanup_aer_uncorrect_error_status(pdev);
21192 + * igb_io_resume - called when traffic can start flowing again.
21193 + * @pdev: Pointer to PCI device
21195 + * This callback is called when the error recovery driver tells us that
21196 + * it's OK to resume normal operation. Implementation resembles the
21197 + * second-half of the igb_resume routine.
21199 +static void igb_io_resume(struct pci_dev *pdev)
21201 + struct net_device *netdev = pci_get_drvdata(pdev);
21202 + struct igb_adapter *adapter = netdev_priv(netdev);
21204 + if (netif_running(netdev)) {
21205 + if (igb_up(adapter)) {
21206 + dev_err(&pdev->dev, "igb_up failed after reset\n");
21211 + netif_device_attach(netdev);
21213 + /* let the f/w know that the h/w is now under the control of the
21215 + igb_get_hw_control(adapter);
21218 +#endif /* HAVE_PCI_ERS */
21219 +static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
21222 + u32 rar_low, rar_high;
21223 + struct e1000_hw *hw = &adapter->hw;
21225 + /* HW expects these in little endian so we reverse the byte order
21226 + * from network order (big endian) to little endian
21228 + rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
21229 + ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
21230 + rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
21232 + /* Indicate to hardware the Address is Valid. */
21233 + rar_high |= E1000_RAH_AV;
21235 + if (hw->mac.type == e1000_82575)
21236 + rar_high |= E1000_RAH_POOL_1 * qsel;
21238 + rar_high |= E1000_RAH_POOL_1 << qsel;
21240 + E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
21241 + E1000_WRITE_FLUSH(hw);
21242 + E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
21243 + E1000_WRITE_FLUSH(hw);
21246 +int igb_set_vf_mac(struct igb_adapter *adapter,
21247 + int vf, unsigned char *mac_addr)
21249 + struct e1000_hw *hw = &adapter->hw;
21250 + /* VF MAC addresses start at end of receive addresses and moves
21251 + * towards the first, as a result a collision should not be possible */
21252 + int rar_entry = hw->mac.rar_entry_count - (vf + 1);
21254 + memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, 6);
21256 + igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);
21261 +static void igb_vmm_control(struct igb_adapter *adapter)
21263 + struct e1000_hw *hw = &adapter->hw;
21265 + /* replication is not supported for 82575 */
21266 + if (hw->mac.type == e1000_82575)
21269 + if (adapter->vfs_allocated_count || adapter->VMDQ_queues) {
21270 + e1000_vmdq_set_loopback_pf(hw, true);
21271 + e1000_vmdq_set_replication_pf(hw, true);
21273 + e1000_vmdq_set_loopback_pf(hw, false);
21274 + e1000_vmdq_set_replication_pf(hw, false);
21278 +static void igb_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
21280 + unsigned char my_mac_addr[6];
21281 + unsigned char oui[OUI_LEN] = {0x02, 0xAA, 0x00};
21282 + struct net_device *netdev = pci_get_drvdata(pdev);
21283 + struct igb_adapter *adapter = netdev_priv(netdev);
21284 + unsigned int vfn = (event_mask & 7);
21286 + bool enable = ((event_mask & 0x10000000U) != 0);
21289 + random_ether_addr(my_mac_addr);
21290 + memcpy(my_mac_addr, oui, OUI_LEN);
21291 + printk(KERN_INFO "IOV1: VF %d is enabled\n", vfn);
21292 + printk(KERN_INFO "Assigned MAC: "
21293 + "%2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
21294 + my_mac_addr[0], my_mac_addr[1], my_mac_addr[2],
21295 + my_mac_addr[3], my_mac_addr[4], my_mac_addr[5]);
21296 + igb_set_vf_mac(adapter, vfn, my_mac_addr);
21298 + printk(KERN_INFO "IOV1: VF %d is disabled\n", vfn);
21303 Index: linux-2.6.22/drivers/net/igb/igb_param.c
21304 ===================================================================
21305 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
21306 +++ linux-2.6.22/drivers/net/igb/igb_param.c 2009-12-18 12:39:22.000000000 -0500
21308 +/*******************************************************************************
21310 + Intel(R) Gigabit Ethernet Linux driver
21311 + Copyright(c) 2007-2009 Intel Corporation.
21313 + This program is free software; you can redistribute it and/or modify it
21314 + under the terms and conditions of the GNU General Public License,
21315 + version 2, as published by the Free Software Foundation.
21317 + This program is distributed in the hope it will be useful, but WITHOUT
21318 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
21319 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
21322 + You should have received a copy of the GNU General Public License along with
21323 + this program; if not, write to the Free Software Foundation, Inc.,
21324 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21326 + The full GNU General Public License is included in this distribution in
21327 + the file called "COPYING".
21329 + Contact Information:
21330 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
21331 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
21333 +*******************************************************************************/
21336 +#include <linux/netdevice.h>
21340 +/* This is the only thing that needs to be changed to adjust the
21341 + * maximum number of ports that the driver can manage.
21344 +#define IGB_MAX_NIC 32
21346 +#define OPTION_UNSET -1
21347 +#define OPTION_DISABLED 0
21348 +#define OPTION_ENABLED 1
21350 +/* All parameters are treated the same, as an integer array of values.
21351 + * This macro just reduces the need to repeat the same declaration code
21352 + * over and over (plus this helps to avoid typo bugs).
21355 +#define IGB_PARAM_INIT { [0 ... IGB_MAX_NIC] = OPTION_UNSET }
21356 +#ifndef module_param_array
21357 +/* Module Parameters are always initialized to -1, so that the driver
21358 + * can tell the difference between no user specified value or the
21359 + * user asking for the default value.
21360 + * The true default values are loaded in when igb_check_options is called.
21362 + * This is a GCC extension to ANSI C.
21363 + * See the item "Labeled Elements in Initializers" in the section
21364 + * "Extensions to the C Language Family" of the GCC documentation.
21367 +#define IGB_PARAM(X, desc) \
21368 + static const int __devinitdata X[IGB_MAX_NIC+1] = IGB_PARAM_INIT; \
21369 + MODULE_PARM(X, "1-" __MODULE_STRING(IGB_MAX_NIC) "i"); \
21370 + MODULE_PARM_DESC(X, desc);
21372 +#define IGB_PARAM(X, desc) \
21373 + static int __devinitdata X[IGB_MAX_NIC+1] = IGB_PARAM_INIT; \
21374 + static unsigned int num_##X; \
21375 + module_param_array_named(X, X, int, &num_##X, 0); \
21376 + MODULE_PARM_DESC(X, desc);
21379 +/* Interrupt Throttle Rate (interrupts/sec)
21381 + * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative)
21383 +IGB_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
21384 +#define DEFAULT_ITR 3
21385 +#define MAX_ITR 100000
21386 +#define MIN_ITR 120
21387 +/* IntMode (Interrupt Mode)
21389 + * Valid Range: 0 - 2
21391 + * Default Value: 2 (MSI-X)
21393 +IGB_PARAM(IntMode, "Interrupt Mode");
21394 +#define MAX_INTMODE IGB_INT_MODE_MSIX
21395 +#define MIN_INTMODE IGB_INT_MODE_LEGACY
21397 +/* LLIPort (Low Latency Interrupt TCP Port)
21399 + * Valid Range: 0 - 65535
21401 + * Default Value: 0 (disabled)
21403 +IGB_PARAM(LLIPort, "Low Latency Interrupt TCP Port");
21405 +#define DEFAULT_LLIPORT 0
21406 +#define MAX_LLIPORT 0xFFFF
21407 +#define MIN_LLIPORT 0
21409 +/* LLIPush (Low Latency Interrupt on TCP Push flag)
21411 + * Valid Range: 0, 1
21413 + * Default Value: 0 (disabled)
21415 +IGB_PARAM(LLIPush, "Low Latency Interrupt on TCP Push flag");
21417 +#define DEFAULT_LLIPUSH 0
21418 +#define MAX_LLIPUSH 1
21419 +#define MIN_LLIPUSH 0
21421 +/* LLISize (Low Latency Interrupt on Packet Size)
21423 + * Valid Range: 0 - 1500
21425 + * Default Value: 0 (disabled)
21427 +IGB_PARAM(LLISize, "Low Latency Interrupt on Packet Size");
21429 +#define DEFAULT_LLISIZE 0
21430 +#define MAX_LLISIZE 1500
21431 +#define MIN_LLISIZE 0
21434 +/* LROAggr (Large Receive Offload)
21436 + * Valid Range: 2 - 44
21438 + * Default Value: 32
21440 +IGB_PARAM(LROAggr, "LRO - Maximum packets to aggregate");
21442 +#define DEFAULT_LRO_AGGR 32
21443 +#define MAX_LRO_AGGR 44
21444 +#define MIN_LRO_AGGR 2
21447 +/* RSS (Enable RSS multiqueue receive)
21449 + * Valid Range: 0 - 8
21451 + * Default Value: 1
21453 +IGB_PARAM(RSS, "RSS - multiqueue receive count");
21455 +#define DEFAULT_RSS 1
21456 +#define MAX_RSS ((adapter->hw.mac.type == e1000_82575) ? 4 : 8)
21459 +/* VMDQ (Enable VMDq multiqueue receive)
21461 + * Valid Range: 0 - 8
21463 + * Default Value: 0
21465 +IGB_PARAM(VMDQ, "VMDQ - VMDq multiqueue receive");
21467 +#define DEFAULT_VMDQ 0
21468 +#define MAX_VMDQ MAX_RSS
21469 +#define MIN_VMDQ 0
21471 +#ifdef CONFIG_PCI_IOV
21472 +/* max_vfs (Enable SR-IOV VF devices)
21474 + * Valid Range: 0 - 7
21476 + * Default Value: 0
21478 +IGB_PARAM(max_vfs, "max_vfs - SR-IOV VF devices");
21480 +#define DEFAULT_SRIOV 0
21481 +#define MAX_SRIOV 7
21482 +#define MIN_SRIOV 0
21484 +#endif /* CONFIG_PCI_IOV */
21486 +/* QueuePairs (Enable TX/RX queue pairs for interrupt handling)
21488 + * Valid Range: 0 - 1
21490 + * Default Value: 1
21492 +IGB_PARAM(QueuePairs, "QueuePairs - TX/RX queue pairs for interrupt handling");
21494 +#define DEFAULT_QUEUE_PAIRS 1
21495 +#define MAX_QUEUE_PAIRS 1
21496 +#define MIN_QUEUE_PAIRS 0
21498 +struct igb_option {
21499 + enum { enable_option, range_option, list_option } type;
21500 + const char *name;
21504 + struct { /* range_option info */
21508 + struct { /* list_option info */
21510 + struct igb_opt_list { int i; char *str; } *p;
21515 +static int __devinit igb_validate_option(unsigned int *value,
21516 + struct igb_option *opt,
21517 + struct igb_adapter *adapter)
21519 + if (*value == OPTION_UNSET) {
21520 + *value = opt->def;
21524 + switch (opt->type) {
21525 + case enable_option:
21526 + switch (*value) {
21527 + case OPTION_ENABLED:
21528 + DPRINTK(PROBE, INFO, "%s Enabled\n", opt->name);
21530 + case OPTION_DISABLED:
21531 + DPRINTK(PROBE, INFO, "%s Disabled\n", opt->name);
21535 + case range_option:
21536 + if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
21537 + DPRINTK(PROBE, INFO,
21538 + "%s set to %d\n", opt->name, *value);
21542 + case list_option: {
21544 + struct igb_opt_list *ent;
21546 + for (i = 0; i < opt->arg.l.nr; i++) {
21547 + ent = &opt->arg.l.p[i];
21548 + if (*value == ent->i) {
21549 + if (ent->str[0] != '\0')
21550 + DPRINTK(PROBE, INFO, "%s\n", ent->str);
21560 + DPRINTK(PROBE, INFO, "Invalid %s value specified (%d) %s\n",
21561 + opt->name, *value, opt->err);
21562 + *value = opt->def;
21567 + * igb_check_options - Range Checking for Command Line Parameters
21568 + * @adapter: board private structure
21570 + * This routine checks all command line parameters for valid user
21571 + * input. If an invalid value is given, or if no user specified
21572 + * value exists, a default value is used. The final value is stored
21573 + * in a variable in the adapter structure.
21576 +void __devinit igb_check_options(struct igb_adapter *adapter)
21578 + int bd = adapter->bd_number;
21580 + if (bd >= IGB_MAX_NIC) {
21581 + DPRINTK(PROBE, NOTICE,
21582 + "Warning: no configuration for board #%d\n", bd);
21583 + DPRINTK(PROBE, NOTICE, "Using defaults for all values\n");
21584 +#ifndef module_param_array
21585 + bd = IGB_MAX_NIC;
21589 + { /* Interrupt Throttling Rate */
21590 + struct igb_option opt = {
21591 + .type = range_option,
21592 + .name = "Interrupt Throttling Rate (ints/sec)",
21593 + .err = "using default of " __MODULE_STRING(DEFAULT_ITR),
21594 + .def = DEFAULT_ITR,
21595 + .arg = { .r = { .min = MIN_ITR,
21596 + .max = MAX_ITR } }
21599 +#ifdef module_param_array
21600 + if (num_InterruptThrottleRate > bd) {
21602 + adapter->itr = InterruptThrottleRate[bd];
21603 + switch (adapter->itr) {
21605 + DPRINTK(PROBE, INFO, "%s turned off\n",
21609 + DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
21611 + adapter->itr_setting = adapter->itr;
21612 + adapter->itr = IGB_START_ITR;
21615 + DPRINTK(PROBE, INFO,
21616 + "%s set to dynamic conservative mode\n",
21618 + adapter->itr_setting = adapter->itr;
21619 + adapter->itr = IGB_START_ITR;
21622 + igb_validate_option(&adapter->itr, &opt,
21624 + /* Save the setting, because the dynamic bits
21625 + * change itr. In case of invalid user value,
21626 + * default to conservative mode, else need to
21627 + * clear the lower two bits because they are
21628 + * used as control */
21629 + if (adapter->itr == 3) {
21630 + adapter->itr_setting = adapter->itr;
21631 + adapter->itr = IGB_START_ITR;
21633 + adapter->itr = 1000000000 / (adapter->itr * 256);
21634 + adapter->itr_setting = adapter->itr & ~3;
21638 +#ifdef module_param_array
21640 + adapter->itr_setting = opt.def;
21641 + adapter->itr = 8000;
21645 + { /* Interrupt Mode */
21646 + struct igb_option opt = {
21647 + .type = range_option,
21648 + .name = "Interrupt Mode",
21649 + .err = "defaulting to 2 (MSI-X)",
21650 + .def = IGB_INT_MODE_MSIX,
21651 + .arg = { .r = { .min = MIN_INTMODE,
21652 + .max = MAX_INTMODE } }
21655 +#ifdef module_param_array
21656 + if (num_IntMode > bd) {
21658 + unsigned int int_mode = IntMode[bd];
21659 + igb_validate_option(&int_mode, &opt, adapter);
21660 + adapter->int_mode = int_mode;
21661 +#ifdef module_param_array
21663 + adapter->int_mode = opt.def;
21667 + { /* Low Latency Interrupt TCP Port */
21668 + struct igb_option opt = {
21669 + .type = range_option,
21670 + .name = "Low Latency Interrupt TCP Port",
21671 + .err = "using default of " __MODULE_STRING(DEFAULT_LLIPORT),
21672 + .def = DEFAULT_LLIPORT,
21673 + .arg = { .r = { .min = MIN_LLIPORT,
21674 + .max = MAX_LLIPORT } }
21677 +#ifdef module_param_array
21678 + if (num_LLIPort > bd) {
21680 + adapter->lli_port = LLIPort[bd];
21681 + if (adapter->lli_port) {
21682 + igb_validate_option(&adapter->lli_port, &opt,
21685 + DPRINTK(PROBE, INFO, "%s turned off\n",
21688 +#ifdef module_param_array
21690 + adapter->lli_port = opt.def;
21694 + { /* Low Latency Interrupt on Packet Size */
21695 + struct igb_option opt = {
21696 + .type = range_option,
21697 + .name = "Low Latency Interrupt on Packet Size",
21698 + .err = "using default of " __MODULE_STRING(DEFAULT_LLISIZE),
21699 + .def = DEFAULT_LLISIZE,
21700 + .arg = { .r = { .min = MIN_LLISIZE,
21701 + .max = MAX_LLISIZE } }
21704 +#ifdef module_param_array
21705 + if (num_LLISize > bd) {
21707 + adapter->lli_size = LLISize[bd];
21708 + if (adapter->lli_size) {
21709 + igb_validate_option(&adapter->lli_size, &opt,
21712 + DPRINTK(PROBE, INFO, "%s turned off\n",
21715 +#ifdef module_param_array
21717 + adapter->lli_size = opt.def;
21721 + { /* Low Latency Interrupt on TCP Push flag */
21722 + struct igb_option opt = {
21723 + .type = enable_option,
21724 + .name = "Low Latency Interrupt on TCP Push flag",
21725 + .err = "defaulting to Disabled",
21726 + .def = OPTION_DISABLED
21729 +#ifdef module_param_array
21730 + if (num_LLIPush > bd) {
21732 + unsigned int lli_push = LLIPush[bd];
21733 + igb_validate_option(&lli_push, &opt, adapter);
21734 + adapter->flags |= lli_push ? IGB_FLAG_LLI_PUSH : 0;
21735 +#ifdef module_param_array
21737 + adapter->flags |= opt.def ? IGB_FLAG_LLI_PUSH : 0;
21742 + { /* Large Receive Offload - Maximum packets to aggregate */
21743 + struct igb_option opt = {
21744 + .type = range_option,
21745 + .name = "LRO - Maximum packets to aggregate",
21746 + .err = "using default of " __MODULE_STRING(DEFAULT_LRO_AGGR),
21747 + .def = DEFAULT_LRO_AGGR,
21748 + .arg = { .r = { .min = MIN_LRO_AGGR,
21749 + .max = MAX_LRO_AGGR } }
21752 +#ifdef module_param_array
21753 + if (num_LROAggr > bd) {
21755 + adapter->lro_max_aggr = LROAggr[bd];
21756 + igb_validate_option(&adapter->lro_max_aggr, &opt, adapter);
21758 +#ifdef module_param_array
21760 + adapter->lro_max_aggr = opt.def;
21764 +#endif /* IGB_LRO */
21765 +#ifdef CONFIG_PCI_IOV
21766 + { /* SRIOV - Enable SR-IOV VF devices */
21767 + struct igb_option opt = {
21768 + .type = range_option,
21769 + .name = "max_vfs - SR-IOV VF devices",
21770 + .err = "using default of " __MODULE_STRING(DEFAULT_SRIOV),
21771 + .def = DEFAULT_SRIOV,
21772 + .arg = { .r = { .min = MIN_SRIOV,
21773 + .max = MAX_SRIOV } }
21776 +#ifdef module_param_array
21777 + if (num_max_vfs > bd) {
21779 + adapter->vfs_allocated_count = max_vfs[bd];
21780 + igb_validate_option(&adapter->vfs_allocated_count, &opt, adapter);
21782 +#ifdef module_param_array
21784 + adapter->vfs_allocated_count = opt.def;
21787 + if (adapter->hw.mac.type != e1000_82576 && adapter->vfs_allocated_count) {
21788 + adapter->vfs_allocated_count = 0;
21789 + DPRINTK(PROBE, INFO, "SR-IOV option max_vfs only supported on 82576.\n");
21792 +#endif /* CONFIG_PCI_IOV */
21793 + { /* VMDQ - Enable VMDq multiqueue receive */
21794 + struct igb_option opt = {
21795 + .type = range_option,
21796 + .name = "VMDQ - VMDq multiqueue receive count",
21797 + .err = "using default of " __MODULE_STRING(DEFAULT_VMDQ),
21798 + .def = DEFAULT_VMDQ,
21799 + .arg = { .r = { .min = MIN_VMDQ,
21800 + .max = (MAX_VMDQ - adapter->vfs_allocated_count) } }
21802 +#ifdef module_param_array
21803 + if (num_VMDQ > bd) {
21805 + adapter->VMDQ_queues = VMDQ[bd];
21806 + if (adapter->vfs_allocated_count && !adapter->VMDQ_queues) {
21807 + DPRINTK(PROBE, INFO, "Enabling SR-IOV requires VMDq be set to at least 1\n");
21808 + adapter->VMDQ_queues = 1;
21810 + igb_validate_option(&adapter->VMDQ_queues, &opt, adapter);
21812 +#ifdef module_param_array
21814 + if (!adapter->vfs_allocated_count)
21815 + adapter->VMDQ_queues = opt.def;
21817 + adapter->VMDQ_queues = 1;
21821 + { /* RSS - Enable RSS multiqueue receives */
21822 + struct igb_option opt = {
21823 + .type = range_option,
21824 + .name = "RSS - RSS multiqueue receive count",
21825 + .err = "using default of " __MODULE_STRING(DEFAULT_RSS),
21826 + .def = DEFAULT_RSS,
21827 + .arg = { .r = { .min = MIN_RSS,
21828 + .max = MAX_RSS } }
21831 + if (adapter->VMDQ_queues) {
21832 + switch (adapter->hw.mac.type) {
21833 + case e1000_82576:
21834 + opt.arg.r.max = 2;
21836 + case e1000_82575:
21837 + if (adapter->VMDQ_queues == 2)
21838 + opt.arg.r.max = 3;
21839 + if (adapter->VMDQ_queues <= 2)
21842 + opt.arg.r.max = 1;
21847 +#ifdef module_param_array
21848 + if (num_RSS > bd) {
21850 + adapter->RSS_queues = RSS[bd];
21851 + switch (adapter->RSS_queues) {
21855 + igb_validate_option(&adapter->RSS_queues, &opt, adapter);
21856 + if (adapter->RSS_queues)
21859 + adapter->RSS_queues = min_t(u32, opt.arg.r.max, num_online_cpus());
21862 +#ifdef module_param_array
21864 + adapter->RSS_queues = opt.def;
21868 + { /* QueuePairs - Enable TX/RX queue pairs for interrupt handling */
21869 + struct igb_option opt = {
21870 + .type = enable_option,
21871 + .name = "QueuePairs - TX/RX queue pairs for interrupt handling",
21872 + .err = "defaulting to Enabled",
21873 + .def = OPTION_ENABLED
21876 +#ifdef module_param_array
21877 + if (num_QueuePairs > bd) {
21879 + unsigned int qp = QueuePairs[bd];
21881 + * we must enable queue pairs if the number of queues
21882 + * exceeds the number of available interrupts. We are
21883 + * limited to 10, or 3 per unallocated vf.
21885 + if ((adapter->RSS_queues > 4) ||
21886 + (adapter->VMDQ_queues > 4) ||
21887 + ((adapter->RSS_queues > 1) &&
21888 + ((adapter->VMDQ_queues > 3) ||
21889 + (adapter->vfs_allocated_count > 6)))) {
21890 + if (qp == OPTION_DISABLED) {
21891 + qp = OPTION_ENABLED;
21892 + DPRINTK(PROBE, INFO,
21893 + "Number of queues exceeds available interrupts, %s\n",opt.err);
21896 + igb_validate_option(&qp, &opt, adapter);
21897 + adapter->flags |= qp ? IGB_FLAG_QUEUE_PAIRS : 0;
21899 +#ifdef module_param_array
21901 + adapter->flags |= opt.def ? IGB_FLAG_QUEUE_PAIRS : 0;
21907 Index: linux-2.6.22/drivers/net/igb/igb_regtest.h
21908 ===================================================================
21909 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
21910 +++ linux-2.6.22/drivers/net/igb/igb_regtest.h 2009-12-18 12:39:22.000000000 -0500
21912 +/*******************************************************************************
21914 + Intel(R) Gigabit Ethernet Linux driver
21915 + Copyright(c) 2007-2009 Intel Corporation.
21917 + This program is free software; you can redistribute it and/or modify it
21918 + under the terms and conditions of the GNU General Public License,
21919 + version 2, as published by the Free Software Foundation.
21921 + This program is distributed in the hope it will be useful, but WITHOUT
21922 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
21923 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
21926 + You should have received a copy of the GNU General Public License along with
21927 + this program; if not, write to the Free Software Foundation, Inc.,
21928 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21930 + The full GNU General Public License is included in this distribution in
21931 + the file called "COPYING".
21933 + Contact Information:
21934 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
21935 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
21937 +*******************************************************************************/
21939 +/* ethtool register test data */
21940 +struct igb_reg_test {
21949 +/* In the hardware, registers are laid out either singly, in arrays
21950 + * spaced 0x100 bytes apart, or in contiguous tables. We assume
21951 + * most tests take place on arrays or single registers (handled
21952 + * as a single-element array) and special-case the tables.
21953 + * Table tests are always pattern tests.
21955 + * We also make provision for some required setup steps by specifying
21956 + * registers to be written without any read-back testing.
21959 +#define PATTERN_TEST 1
21960 +#define SET_READ_TEST 2
21961 +#define WRITE_NO_TEST 3
21962 +#define TABLE32_TEST 4
21963 +#define TABLE64_TEST_LO 5
21964 +#define TABLE64_TEST_HI 6
21966 +/* 82576 reg test */
21967 +static struct igb_reg_test reg_test_82576[] = {
21968 + { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
21969 + { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
21970 + { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
21971 + { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
21972 + { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
21973 + { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
21974 + { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
21975 + { E1000_RDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
21976 + { E1000_RDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
21977 + { E1000_RDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
21978 + /* Enable all queues before testing. */
21979 + { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
21980 + { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
21981 + /* RDH is read-only for 82576, only test RDT. */
21982 + { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
21983 + { E1000_RDT(4), 0x40, 12, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
21984 + { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 },
21985 + { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, 0 },
21986 + { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
21987 + { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
21988 + { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
21989 + { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
21990 + { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
21991 + { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
21992 + { E1000_TDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
21993 + { E1000_TDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
21994 + { E1000_TDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
21995 + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
21996 + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
21997 + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
21998 + { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
21999 + { E1000_RA, 0, 16, TABLE64_TEST_LO,
22000 + 0xFFFFFFFF, 0xFFFFFFFF },
22001 + { E1000_RA, 0, 16, TABLE64_TEST_HI,
22002 + 0x83FFFFFF, 0xFFFFFFFF },
22003 + { E1000_RA2, 0, 8, TABLE64_TEST_LO,
22004 + 0xFFFFFFFF, 0xFFFFFFFF },
22005 + { E1000_RA2, 0, 8, TABLE64_TEST_HI,
22006 + 0x83FFFFFF, 0xFFFFFFFF },
22007 + { E1000_MTA, 0, 128, TABLE32_TEST,
22008 + 0xFFFFFFFF, 0xFFFFFFFF },
22012 +/* 82575 register test */
22013 +static struct igb_reg_test reg_test_82575[] = {
22014 + { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
22015 + { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
22016 + { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
22017 + { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
22018 + { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
22019 + { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
22020 + { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
22021 + /* Enable all four RX queues before testing. */
22022 + { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
22023 + /* RDH is read-only for 82575, only test RDT. */
22024 + { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
22025 + { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 },
22026 + { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
22027 + { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
22028 + { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
22029 + { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
22030 + { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
22031 + { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
22032 + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
22033 + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0x003FFFFB },
22034 + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0xFFFFFFFF },
22035 + { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
22036 + { E1000_TXCW, 0x100, 1, PATTERN_TEST, 0xC000FFFF, 0x0000FFFF },
22037 + { E1000_RA, 0, 16, TABLE64_TEST_LO,
22038 + 0xFFFFFFFF, 0xFFFFFFFF },
22039 + { E1000_RA, 0, 16, TABLE64_TEST_HI,
22040 + 0x800FFFFF, 0xFFFFFFFF },
22041 + { E1000_MTA, 0, 128, TABLE32_TEST,
22042 + 0xFFFFFFFF, 0xFFFFFFFF },
22047 Index: linux-2.6.22/drivers/net/igb/kcompat.c
22048 ===================================================================
22049 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
22050 +++ linux-2.6.22/drivers/net/igb/kcompat.c 2009-12-18 12:39:22.000000000 -0500
22052 +/*******************************************************************************
22054 + Intel(R) Gigabit Ethernet Linux driver
22055 + Copyright(c) 2007-2009 Intel Corporation.
22057 + This program is free software; you can redistribute it and/or modify it
22058 + under the terms and conditions of the GNU General Public License,
22059 + version 2, as published by the Free Software Foundation.
22061 + This program is distributed in the hope it will be useful, but WITHOUT
22062 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
22063 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
22066 + You should have received a copy of the GNU General Public License along with
22067 + this program; if not, write to the Free Software Foundation, Inc.,
22068 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
22070 + The full GNU General Public License is included in this distribution in
22071 + the file called "COPYING".
22073 + Contact Information:
22074 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
22075 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
22077 +*******************************************************************************/
22080 +#include "kcompat.h"
22082 +/*****************************************************************************/
22083 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,21) )
22085 +_kc_skb_pad(struct sk_buff *skb, int pad)
22087 + struct sk_buff *nskb;
22089 + /* If the skbuff is non linear tailroom is always zero.. */
22090 + if(skb_tailroom(skb) >= pad)
22092 + memset(skb->data+skb->len, 0, pad);
22096 + nskb = skb_copy_expand(skb, skb_headroom(skb), skb_tailroom(skb) + pad, GFP_ATOMIC);
22099 + memset(nskb->data+nskb->len, 0, pad);
22102 +#endif /* < 2.4.21 */
22104 +/*****************************************************************************/
22105 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) )
22107 +/**************************************/
22108 +/* PCI DMA MAPPING */
22110 +#if defined(CONFIG_HIGHMEM)
22112 +#ifndef PCI_DRAM_OFFSET
22113 +#define PCI_DRAM_OFFSET 0
22117 +_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset,
22118 + size_t size, int direction)
22120 + return (((u64) (page - mem_map) << PAGE_SHIFT) + offset +
22121 + PCI_DRAM_OFFSET);
22124 +#else /* CONFIG_HIGHMEM */
22127 +_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset,
22128 + size_t size, int direction)
22130 + return pci_map_single(dev, (void *)page_address(page) + offset, size,
22134 +#endif /* CONFIG_HIGHMEM */
22137 +_kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size,
22140 + return pci_unmap_single(dev, dma_addr, size, direction);
22143 +#endif /* 2.4.13 => 2.4.3 */
22145 +/*****************************************************************************/
22146 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) )
22148 +/**************************************/
22149 +/* PCI DRIVER API */
22152 +_kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask)
22154 + if (!pci_dma_supported(dev, mask))
22156 + dev->dma_mask = mask;
22161 +_kc_pci_request_regions(struct pci_dev *dev, char *res_name)
22165 + for (i = 0; i < 6; i++) {
22166 + if (pci_resource_len(dev, i) == 0)
22169 + if (pci_resource_flags(dev, i) & IORESOURCE_IO) {
22170 + if (!request_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) {
22171 + pci_release_regions(dev);
22174 + } else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
22175 + if (!request_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) {
22176 + pci_release_regions(dev);
22185 +_kc_pci_release_regions(struct pci_dev *dev)
22189 + for (i = 0; i < 6; i++) {
22190 + if (pci_resource_len(dev, i) == 0)
22193 + if (pci_resource_flags(dev, i) & IORESOURCE_IO)
22194 + release_region(pci_resource_start(dev, i), pci_resource_len(dev, i));
22196 + else if (pci_resource_flags(dev, i) & IORESOURCE_MEM)
22197 + release_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i));
22201 +/**************************************/
22202 +/* NETWORK DRIVER API */
22204 +struct net_device *
22205 +_kc_alloc_etherdev(int sizeof_priv)
22207 + struct net_device *dev;
22210 + alloc_size = sizeof(*dev) + sizeof_priv + IFNAMSIZ + 31;
22211 + dev = kmalloc(alloc_size, GFP_KERNEL);
22214 + memset(dev, 0, alloc_size);
22217 + dev->priv = (void *) (((unsigned long)(dev + 1) + 31) & ~31);
22218 + dev->name[0] = '\0';
22219 + ether_setup(dev);
22225 +_kc_is_valid_ether_addr(u8 *addr)
22227 + const char zaddr[6] = { 0, };
22229 + return !(addr[0] & 1) && memcmp(addr, zaddr, 6);
22232 +#endif /* 2.4.3 => 2.4.0 */
22234 +/*****************************************************************************/
22235 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) )
22238 +_kc_pci_set_power_state(struct pci_dev *dev, int state)
22244 +_kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable)
22249 +#endif /* 2.4.6 => 2.4.3 */
22251 +/*****************************************************************************/
22252 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
22253 +void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page,
22254 + int off, int size)
22256 + skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
22257 + frag->page = page;
22258 + frag->page_offset = off;
22259 + frag->size = size;
22260 + skb_shinfo(skb)->nr_frags = i + 1;
22264 + * Original Copyright:
22265 + * find_next_bit.c: fallback find next bit implementation
22267 + * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
22268 + * Written by David Howells (dhowells@redhat.com)
22272 + * find_next_bit - find the next set bit in a memory region
22273 + * @addr: The address to base the search on
22274 + * @offset: The bitnumber to start searching at
22275 + * @size: The maximum size to search
22277 +unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
22278 + unsigned long offset)
22280 + const unsigned long *p = addr + BITOP_WORD(offset);
22281 + unsigned long result = offset & ~(BITS_PER_LONG-1);
22282 + unsigned long tmp;
22284 + if (offset >= size)
22287 + offset %= BITS_PER_LONG;
22290 + tmp &= (~0UL << offset);
22291 + if (size < BITS_PER_LONG)
22292 + goto found_first;
22294 + goto found_middle;
22295 + size -= BITS_PER_LONG;
22296 + result += BITS_PER_LONG;
22298 + while (size & ~(BITS_PER_LONG-1)) {
22299 + if ((tmp = *(p++)))
22300 + goto found_middle;
22301 + result += BITS_PER_LONG;
22302 + size -= BITS_PER_LONG;
22309 + tmp &= (~0UL >> (BITS_PER_LONG - size));
22310 + if (tmp == 0UL) /* Are any bits set? */
22311 + return result + size; /* Nope. */
22313 + return result + ffs(tmp);
22316 +#endif /* 2.6.0 => 2.4.6 */
22318 +/*****************************************************************************/
22319 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) )
22320 +void *_kc_kzalloc(size_t size, int flags)
22322 + void *ret = kmalloc(size, flags);
22324 + memset(ret, 0, size);
22327 +#endif /* <= 2.6.13 */
22329 +/*****************************************************************************/
22330 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) )
22331 +struct sk_buff *_kc_netdev_alloc_skb(struct net_device *dev,
22332 + unsigned int length)
22334 + /* 16 == NET_PAD_SKB */
22335 + struct sk_buff *skb;
22336 + skb = alloc_skb(length + 16, GFP_ATOMIC);
22337 + if (likely(skb != NULL)) {
22338 + skb_reserve(skb, 16);
22343 +#endif /* <= 2.6.17 */
22345 +/*****************************************************************************/
22346 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) )
22347 +int _kc_pci_save_state(struct pci_dev *pdev)
22349 + struct net_device *netdev = pci_get_drvdata(pdev);
22350 + struct adapter_struct *adapter = netdev_priv(netdev);
22351 + int size = PCI_CONFIG_SPACE_LEN, i;
22352 + u16 pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP);
22353 + u16 pcie_link_status;
22355 + if (pcie_cap_offset) {
22356 + if (!pci_read_config_word(pdev,
22357 + pcie_cap_offset + PCIE_LINK_STATUS,
22358 + &pcie_link_status))
22359 + size = PCIE_CONFIG_SPACE_LEN;
22361 + pci_config_space_ich8lan();
22362 +#ifdef HAVE_PCI_ERS
22363 + if (adapter->config_space == NULL)
22365 + WARN_ON(adapter->config_space != NULL);
22367 + adapter->config_space = kmalloc(size, GFP_KERNEL);
22368 + if (!adapter->config_space) {
22369 + printk(KERN_ERR "Out of memory in pci_save_state\n");
22372 + for (i = 0; i < (size / 4); i++)
22373 + pci_read_config_dword(pdev, i * 4, &adapter->config_space[i]);
22377 +void _kc_pci_restore_state(struct pci_dev * pdev)
22379 + struct net_device *netdev = pci_get_drvdata(pdev);
22380 + struct adapter_struct *adapter = netdev_priv(netdev);
22381 + int size = PCI_CONFIG_SPACE_LEN, i;
22382 + u16 pcie_cap_offset;
22383 + u16 pcie_link_status;
22385 + if (adapter->config_space != NULL) {
22386 + pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP);
22387 + if (pcie_cap_offset &&
22388 + !pci_read_config_word(pdev,
22389 + pcie_cap_offset + PCIE_LINK_STATUS,
22390 + &pcie_link_status))
22391 + size = PCIE_CONFIG_SPACE_LEN;
22393 + pci_config_space_ich8lan();
22394 + for (i = 0; i < (size / 4); i++)
22395 + pci_write_config_dword(pdev, i * 4, adapter->config_space[i]);
22396 +#ifndef HAVE_PCI_ERS
22397 + kfree(adapter->config_space);
22398 + adapter->config_space = NULL;
22403 +#ifdef HAVE_PCI_ERS
22404 +void _kc_free_netdev(struct net_device *netdev)
22406 + struct adapter_struct *adapter = netdev_priv(netdev);
22408 + if (adapter->config_space != NULL)
22409 + kfree(adapter->config_space);
22410 +#ifdef CONFIG_SYSFS
22411 + if (netdev->reg_state == NETREG_UNINITIALIZED) {
22412 + kfree((char *)netdev - netdev->padded);
22414 + BUG_ON(netdev->reg_state != NETREG_UNREGISTERED);
22415 + netdev->reg_state = NETREG_RELEASED;
22416 + class_device_put(&netdev->class_dev);
22419 + kfree((char *)netdev - netdev->padded);
22423 +#endif /* <= 2.6.18 */
22425 +/*****************************************************************************/
22426 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
22428 +struct net_device *napi_to_poll_dev(struct napi_struct *napi)
22430 + struct adapter_q_vector *q_vector = container_of(napi,
22431 + struct adapter_q_vector,
22433 + return &q_vector->poll_dev;
22436 +int __kc_adapter_clean(struct net_device *netdev, int *budget)
22439 + int work_to_do = min(*budget, netdev->quota);
22440 + /* kcompat.h netif_napi_add puts napi struct in "fake netdev->priv" */
22441 + struct napi_struct *napi = netdev->priv;
22442 + work_done = napi->poll(napi, work_to_do);
22443 + *budget -= work_done;
22444 + netdev->quota -= work_done;
22445 + return (work_done >= work_to_do) ? 1 : 0;
22448 +#endif /* <= 2.6.24 */
22450 +/*****************************************************************************/
22451 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) )
22453 +void _kc_netif_tx_stop_all_queues(struct net_device *netdev)
22455 + struct adapter_struct *adapter = netdev_priv(netdev);
22458 + netif_stop_queue(netdev);
22459 + if (netif_is_multiqueue(netdev))
22460 + for (i = 0; i < adapter->num_tx_queues; i++)
22461 + netif_stop_subqueue(netdev, i);
22463 +void _kc_netif_tx_wake_all_queues(struct net_device *netdev)
22465 + struct adapter_struct *adapter = netdev_priv(netdev);
22468 + netif_wake_queue(netdev);
22469 + if (netif_is_multiqueue(netdev))
22470 + for (i = 0; i < adapter->num_tx_queues; i++)
22471 + netif_wake_subqueue(netdev, i);
22473 +void _kc_netif_tx_start_all_queues(struct net_device *netdev)
22475 + struct adapter_struct *adapter = netdev_priv(netdev);
22478 + netif_start_queue(netdev);
22479 + if (netif_is_multiqueue(netdev))
22480 + for (i = 0; i < adapter->num_tx_queues; i++)
22481 + netif_start_subqueue(netdev, i);
22483 +#endif /* HAVE_TX_MQ */
22484 +#endif /* < 2.6.27 */
22486 +/*****************************************************************************/
22487 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) )
22490 +_kc_pci_prepare_to_sleep(struct pci_dev *dev)
22492 + pci_power_t target_state;
22495 + target_state = pci_choose_state(dev, PMSG_SUSPEND);
22497 + pci_enable_wake(dev, target_state, true);
22499 + error = pci_set_power_state(dev, target_state);
22502 + pci_enable_wake(dev, target_state, false);
22508 +_kc_pci_wake_from_d3(struct pci_dev *dev, bool enable)
22512 + err = pci_enable_wake(dev, PCI_D3cold, enable);
22516 + err = pci_enable_wake(dev, PCI_D3hot, enable);
22521 +#endif /* < 2.6.28 */
22523 +/*****************************************************************************/
22524 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) )
22525 +void _kc_pci_disable_link_state(struct pci_dev *pdev, int state)
22527 + struct pci_dev *parent = pdev->bus->self;
22534 + pos = pci_find_capability(parent, PCI_CAP_ID_EXP);
22536 + pci_read_config_word(parent, pos + PCI_EXP_LNKCTL, &link_state);
22537 + link_state &= ~state;
22538 + pci_write_config_word(parent, pos + PCI_EXP_LNKCTL, link_state);
22541 +#endif /* < 2.6.29 */
22543 +/*****************************************************************************/
22544 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) )
22545 +#ifdef HAVE_NETDEV_SELECT_QUEUE
22546 +#include <net/ip.h>
22547 +static u32 _kc_simple_tx_hashrnd;
22548 +static u32 _kc_simple_tx_hashrnd_initialized;
22550 +u16 _kc_skb_tx_hash(struct net_device *dev, struct sk_buff *skb)
22552 + u32 addr1, addr2, ports;
22556 + if (unlikely(!_kc_simple_tx_hashrnd_initialized)) {
22557 + get_random_bytes(&_kc_simple_tx_hashrnd, 4);
22558 + _kc_simple_tx_hashrnd_initialized = 1;
22561 + switch (skb->protocol) {
22562 + case htons(ETH_P_IP):
22563 + if (!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)))
22564 + ip_proto = ip_hdr(skb)->protocol;
22565 + addr1 = ip_hdr(skb)->saddr;
22566 + addr2 = ip_hdr(skb)->daddr;
22567 + ihl = ip_hdr(skb)->ihl;
22569 +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
22570 + case htons(ETH_P_IPV6):
22571 + ip_proto = ipv6_hdr(skb)->nexthdr;
22572 + addr1 = ipv6_hdr(skb)->saddr.s6_addr32[3];
22573 + addr2 = ipv6_hdr(skb)->daddr.s6_addr32[3];
22582 + switch (ip_proto) {
22583 + case IPPROTO_TCP:
22584 + case IPPROTO_UDP:
22585 + case IPPROTO_DCCP:
22586 + case IPPROTO_ESP:
22588 + case IPPROTO_SCTP:
22589 + case IPPROTO_UDPLITE:
22590 + ports = *((u32 *) (skb_network_header(skb) + (ihl * 4)));
22598 + hash = jhash_3words(addr1, addr2, ports, _kc_simple_tx_hashrnd);
22600 + return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
22602 +#endif /* HAVE_NETDEV_SELECT_QUEUE */
22603 +#endif /* < 2.6.30 */
22604 Index: linux-2.6.22/drivers/net/igb/kcompat.h
22605 ===================================================================
22606 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
22607 +++ linux-2.6.22/drivers/net/igb/kcompat.h 2009-12-18 12:39:22.000000000 -0500
22609 +/*******************************************************************************
22611 + Intel(R) Gigabit Ethernet Linux driver
22612 + Copyright(c) 2007-2009 Intel Corporation.
22614 + This program is free software; you can redistribute it and/or modify it
22615 + under the terms and conditions of the GNU General Public License,
22616 + version 2, as published by the Free Software Foundation.
22618 + This program is distributed in the hope it will be useful, but WITHOUT
22619 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
22620 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
22623 + You should have received a copy of the GNU General Public License along with
22624 + this program; if not, write to the Free Software Foundation, Inc.,
22625 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
22627 + The full GNU General Public License is included in this distribution in
22628 + the file called "COPYING".
22630 + Contact Information:
22631 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
22632 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
22634 +*******************************************************************************/
22636 +#ifndef _KCOMPAT_H_
22637 +#define _KCOMPAT_H_
22639 +#include <linux/version.h>
22640 +#include <linux/init.h>
22641 +#include <linux/types.h>
22642 +#include <linux/errno.h>
22643 +#include <linux/module.h>
22644 +#include <linux/pci.h>
22645 +#include <linux/netdevice.h>
22646 +#include <linux/etherdevice.h>
22647 +#include <linux/skbuff.h>
22648 +#include <linux/ioport.h>
22649 +#include <linux/slab.h>
22650 +#include <linux/list.h>
22651 +#include <linux/delay.h>
22652 +#include <linux/sched.h>
22653 +#include <linux/in.h>
22654 +#include <linux/ip.h>
22655 +#include <linux/udp.h>
22656 +#include <linux/mii.h>
22657 +#include <asm/io.h>
22659 +/* NAPI enable/disable flags here */
22662 +#define adapter_struct igb_adapter
22663 +#define adapter_q_vector igb_q_vector
22666 +/* and finally set defines so that the code sees the changes */
22671 +/* packet split disable/enable */
22672 +#ifdef DISABLE_PACKET_SPLIT
22673 +#undef CONFIG_E1000_DISABLE_PACKET_SPLIT
22674 +#define CONFIG_E1000_DISABLE_PACKET_SPLIT
22675 +#undef CONFIG_IGB_DISABLE_PACKET_SPLIT
22676 +#define CONFIG_IGB_DISABLE_PACKET_SPLIT
22679 +/* MSI compatibility code for all kernels and drivers */
22680 +#ifdef DISABLE_PCI_MSI
22681 +#undef CONFIG_PCI_MSI
22683 +#ifndef CONFIG_PCI_MSI
22684 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) )
22685 +struct msix_entry {
22686 + u16 vector; /* kernel uses to write allocated vector */
22687 + u16 entry; /* driver uses to specify entry, OS writes */
22690 +#define pci_enable_msi(a) -ENOTSUPP
22691 +#define pci_disable_msi(a) do {} while (0)
22692 +#define pci_enable_msix(a, b, c) -ENOTSUPP
22693 +#define pci_disable_msix(a) do {} while (0)
22694 +#define msi_remove_pci_irq_vectors(a) do {} while (0)
22695 +#endif /* CONFIG_PCI_MSI */
22700 +#ifdef DISABLE_NET_POLL_CONTROLLER
22701 +#undef CONFIG_NET_POLL_CONTROLLER
22704 +#ifndef PMSG_SUSPEND
22705 +#define PMSG_SUSPEND 3
22708 +/* generic boolean compatibility */
22712 +#define FALSE false
22713 +#ifdef GCC_VERSION
22714 +#if ( GCC_VERSION < 3000 )
22715 +#define _Bool char
22718 +#define _Bool char
22721 +#define bool _Bool
22727 +#ifndef module_param
22728 +#define module_param(v,t,p) MODULE_PARM(v, "i");
22731 +#ifndef DMA_64BIT_MASK
22732 +#define DMA_64BIT_MASK 0xffffffffffffffffULL
22735 +#ifndef DMA_32BIT_MASK
22736 +#define DMA_32BIT_MASK 0x00000000ffffffffULL
22739 +#ifndef PCI_CAP_ID_EXP
22740 +#define PCI_CAP_ID_EXP 0x10
22743 +#ifndef PCIE_LINK_STATE_L0S
22744 +#define PCIE_LINK_STATE_L0S 1
22748 +#ifdef CONFIG_IA64
22749 +#define mmiowb() asm volatile ("mf.a" ::: "memory")
22755 +#ifndef SET_NETDEV_DEV
22756 +#define SET_NETDEV_DEV(net, pdev)
22759 +#ifndef HAVE_FREE_NETDEV
22760 +#define free_netdev(x) kfree(x)
22763 +#ifdef HAVE_POLL_CONTROLLER
22764 +#define CONFIG_NET_POLL_CONTROLLER
22767 +#ifndef NETDEV_TX_OK
22768 +#define NETDEV_TX_OK 0
22771 +#ifndef NETDEV_TX_BUSY
22772 +#define NETDEV_TX_BUSY 1
22775 +#ifndef NETDEV_TX_LOCKED
22776 +#define NETDEV_TX_LOCKED -1
22779 +#ifdef CONFIG_PCI_IOV
22780 +#define VMDQ_P(p) ((p) + adapter->num_vfs)
22782 +#define VMDQ_P(p) (p)
22785 +#ifndef SKB_DATAREF_SHIFT
22786 +/* if we do not have the infrastructure to detect if skb_header is cloned
22787 + just return false in all cases */
22788 +#define skb_header_cloned(x) 0
22791 +#ifndef NETIF_F_GSO
22792 +#define gso_size tso_size
22793 +#define gso_segs tso_segs
22796 +#ifndef NETIF_F_GRO
22797 +#define vlan_gro_receive(_napi, _vlgrp, _vlan, _skb) \
22798 + vlan_hwaccel_receive_skb(_skb, _vlgrp, _vlan)
22799 +#define napi_gro_receive(_napi, _skb) netif_receive_skb(_skb)
22802 +#ifndef NETIF_F_SCTP_CSUM
22803 +#define NETIF_F_SCTP_CSUM 0
22806 +#ifndef IPPROTO_SCTP
22807 +#define IPPROTO_SCTP 132
22810 +#ifndef CHECKSUM_PARTIAL
22811 +#define CHECKSUM_PARTIAL CHECKSUM_HW
22812 +#define CHECKSUM_COMPLETE CHECKSUM_HW
22815 +#ifndef __read_mostly
22816 +#define __read_mostly
22819 +#ifndef HAVE_NETIF_MSG
22820 +#define HAVE_NETIF_MSG 1
22822 + NETIF_MSG_DRV = 0x0001,
22823 + NETIF_MSG_PROBE = 0x0002,
22824 + NETIF_MSG_LINK = 0x0004,
22825 + NETIF_MSG_TIMER = 0x0008,
22826 + NETIF_MSG_IFDOWN = 0x0010,
22827 + NETIF_MSG_IFUP = 0x0020,
22828 + NETIF_MSG_RX_ERR = 0x0040,
22829 + NETIF_MSG_TX_ERR = 0x0080,
22830 + NETIF_MSG_TX_QUEUED = 0x0100,
22831 + NETIF_MSG_INTR = 0x0200,
22832 + NETIF_MSG_TX_DONE = 0x0400,
22833 + NETIF_MSG_RX_STATUS = 0x0800,
22834 + NETIF_MSG_PKTDATA = 0x1000,
22835 + NETIF_MSG_HW = 0x2000,
22836 + NETIF_MSG_WOL = 0x4000,
22840 +#define NETIF_MSG_HW 0x2000
22841 +#define NETIF_MSG_WOL 0x4000
22842 +#endif /* HAVE_NETIF_MSG */
22845 +#define MII_RESV1 0x17 /* Reserved... */
22849 +#define unlikely(_x) _x
22850 +#define likely(_x) _x
22854 +#define WARN_ON(x)
22857 +#ifndef PCI_DEVICE
22858 +#define PCI_DEVICE(vend,dev) \
22859 + .vendor = (vend), .device = (dev), \
22860 + .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
22863 +#ifndef num_online_cpus
22864 +#define num_online_cpus() smp_num_cpus
22868 +#ifndef _LINUX_RANDOM_H
22869 +#include <linux/random.h>
22872 +#ifndef DECLARE_BITMAP
22873 +#ifndef BITS_TO_LONGS
22874 +#define BITS_TO_LONGS(bits) (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG)
22876 +#define DECLARE_BITMAP(name,bits) long name[BITS_TO_LONGS(bits)]
22880 +#define VLAN_HLEN 4
22883 +#ifndef VLAN_ETH_HLEN
22884 +#define VLAN_ETH_HLEN 18
22887 +#ifndef VLAN_ETH_FRAME_LEN
22888 +#define VLAN_ETH_FRAME_LEN 1518
22891 +#ifndef DCA_GET_TAG_TWO_ARGS
22892 +#define dca3_get_tag(a,b) dca_get_tag(b)
22895 +/*****************************************************************************/
22896 +/* Installations with ethtool version without eeprom, adapter id, or statistics
22899 +#ifndef ETH_GSTRING_LEN
22900 +#define ETH_GSTRING_LEN 32
22903 +#ifndef ETHTOOL_GSTATS
22904 +#define ETHTOOL_GSTATS 0x1d
22905 +#undef ethtool_drvinfo
22906 +#define ethtool_drvinfo k_ethtool_drvinfo
22907 +struct k_ethtool_drvinfo {
22910 + char version[32];
22911 + char fw_version[32];
22912 + char bus_info[32];
22913 + char reserved1[32];
22914 + char reserved2[16];
22916 + u32 testinfo_len;
22921 +struct ethtool_stats {
22926 +#endif /* ETHTOOL_GSTATS */
22928 +#ifndef ETHTOOL_PHYS_ID
22929 +#define ETHTOOL_PHYS_ID 0x1c
22930 +#endif /* ETHTOOL_PHYS_ID */
22932 +#ifndef ETHTOOL_GSTRINGS
22933 +#define ETHTOOL_GSTRINGS 0x1b
22934 +enum ethtool_stringset {
22938 +struct ethtool_gstrings {
22939 + u32 cmd; /* ETHTOOL_GSTRINGS */
22940 + u32 string_set; /* string set id e.c. ETH_SS_TEST, etc*/
22941 + u32 len; /* number of strings in the string set */
22944 +#endif /* ETHTOOL_GSTRINGS */
22946 +#ifndef ETHTOOL_TEST
22947 +#define ETHTOOL_TEST 0x1a
22948 +enum ethtool_test_flags {
22949 + ETH_TEST_FL_OFFLINE = (1 << 0),
22950 + ETH_TEST_FL_FAILED = (1 << 1),
22952 +struct ethtool_test {
22959 +#endif /* ETHTOOL_TEST */
22961 +#ifndef ETHTOOL_GEEPROM
22962 +#define ETHTOOL_GEEPROM 0xb
22963 +#undef ETHTOOL_GREGS
22964 +struct ethtool_eeprom {
22972 +struct ethtool_value {
22976 +#endif /* ETHTOOL_GEEPROM */
22978 +#ifndef ETHTOOL_GLINK
22979 +#define ETHTOOL_GLINK 0xa
22980 +#endif /* ETHTOOL_GLINK */
22982 +#ifndef ETHTOOL_GREGS
22983 +#define ETHTOOL_GREGS 0x00000004 /* Get NIC registers */
22984 +#define ethtool_regs _kc_ethtool_regs
22985 +/* for passing big chunks of data */
22986 +struct _kc_ethtool_regs {
22988 + u32 version; /* driver-specific, indicates different chips/revs */
22989 + u32 len; /* bytes */
22992 +#endif /* ETHTOOL_GREGS */
22994 +#ifndef ETHTOOL_GMSGLVL
22995 +#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */
22997 +#ifndef ETHTOOL_SMSGLVL
22998 +#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level, priv. */
23000 +#ifndef ETHTOOL_NWAY_RST
23001 +#define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation, priv */
23003 +#ifndef ETHTOOL_GLINK
23004 +#define ETHTOOL_GLINK 0x0000000a /* Get link status */
23006 +#ifndef ETHTOOL_GEEPROM
23007 +#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */
23009 +#ifndef ETHTOOL_SEEPROM
23010 +#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data */
23012 +#ifndef ETHTOOL_GCOALESCE
23013 +#define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */
23014 +/* for configuring coalescing parameters of chip */
23015 +#define ethtool_coalesce _kc_ethtool_coalesce
23016 +struct _kc_ethtool_coalesce {
23017 + u32 cmd; /* ETHTOOL_{G,S}COALESCE */
23019 + /* How many usecs to delay an RX interrupt after
23020 + * a packet arrives. If 0, only rx_max_coalesced_frames
23023 + u32 rx_coalesce_usecs;
23025 + /* How many packets to delay an RX interrupt after
23026 + * a packet arrives. If 0, only rx_coalesce_usecs is
23027 + * used. It is illegal to set both usecs and max frames
23028 + * to zero as this would cause RX interrupts to never be
23031 + u32 rx_max_coalesced_frames;
23033 + /* Same as above two parameters, except that these values
23034 + * apply while an IRQ is being serviced by the host. Not
23035 + * all cards support this feature and the values are ignored
23038 + u32 rx_coalesce_usecs_irq;
23039 + u32 rx_max_coalesced_frames_irq;
23041 + /* How many usecs to delay a TX interrupt after
23042 + * a packet is sent. If 0, only tx_max_coalesced_frames
23045 + u32 tx_coalesce_usecs;
23047 + /* How many packets to delay a TX interrupt after
23048 + * a packet is sent. If 0, only tx_coalesce_usecs is
23049 + * used. It is illegal to set both usecs and max frames
23050 + * to zero as this would cause TX interrupts to never be
23053 + u32 tx_max_coalesced_frames;
23055 + /* Same as above two parameters, except that these values
23056 + * apply while an IRQ is being serviced by the host. Not
23057 + * all cards support this feature and the values are ignored
23060 + u32 tx_coalesce_usecs_irq;
23061 + u32 tx_max_coalesced_frames_irq;
23063 + /* How many usecs to delay in-memory statistics
23064 + * block updates. Some drivers do not have an in-memory
23065 + * statistic block, and in such cases this value is ignored.
23066 + * This value must not be zero.
23068 + u32 stats_block_coalesce_usecs;
23070 + /* Adaptive RX/TX coalescing is an algorithm implemented by
23071 + * some drivers to improve latency under low packet rates and
23072 + * improve throughput under high packet rates. Some drivers
23073 + * only implement one of RX or TX adaptive coalescing. Anything
23074 + * not implemented by the driver causes these values to be
23075 + * silently ignored.
23077 + u32 use_adaptive_rx_coalesce;
23078 + u32 use_adaptive_tx_coalesce;
23080 + /* When the packet rate (measured in packets per second)
23081 + * is below pkt_rate_low, the {rx,tx}_*_low parameters are
23084 + u32 pkt_rate_low;
23085 + u32 rx_coalesce_usecs_low;
23086 + u32 rx_max_coalesced_frames_low;
23087 + u32 tx_coalesce_usecs_low;
23088 + u32 tx_max_coalesced_frames_low;
23090 + /* When the packet rate is below pkt_rate_high but above
23091 + * pkt_rate_low (both measured in packets per second) the
23092 + * normal {rx,tx}_* coalescing parameters are used.
23095 + /* When the packet rate is (measured in packets per second)
23096 + * is above pkt_rate_high, the {rx,tx}_*_high parameters are
23099 + u32 pkt_rate_high;
23100 + u32 rx_coalesce_usecs_high;
23101 + u32 rx_max_coalesced_frames_high;
23102 + u32 tx_coalesce_usecs_high;
23103 + u32 tx_max_coalesced_frames_high;
23105 + /* How often to do adaptive coalescing packet rate sampling,
23106 + * measured in seconds. Must not be zero.
23108 + u32 rate_sample_interval;
23110 +#endif /* ETHTOOL_GCOALESCE */
23112 +#ifndef ETHTOOL_SCOALESCE
23113 +#define ETHTOOL_SCOALESCE 0x0000000f /* Set coalesce config. */
23115 +#ifndef ETHTOOL_GRINGPARAM
23116 +#define ETHTOOL_GRINGPARAM 0x00000010 /* Get ring parameters */
23117 +/* for configuring RX/TX ring parameters */
23118 +#define ethtool_ringparam _kc_ethtool_ringparam
23119 +struct _kc_ethtool_ringparam {
23120 + u32 cmd; /* ETHTOOL_{G,S}RINGPARAM */
23122 + /* Read only attributes. These indicate the maximum number
23123 + * of pending RX/TX ring entries the driver will allow the
23126 + u32 rx_max_pending;
23127 + u32 rx_mini_max_pending;
23128 + u32 rx_jumbo_max_pending;
23129 + u32 tx_max_pending;
23131 + /* Values changeable by the user. The valid values are
23132 + * in the range 1 to the "*_max_pending" counterpart above.
23135 + u32 rx_mini_pending;
23136 + u32 rx_jumbo_pending;
23139 +#endif /* ETHTOOL_GRINGPARAM */
23141 +#ifndef ETHTOOL_SRINGPARAM
23142 +#define ETHTOOL_SRINGPARAM 0x00000011 /* Set ring parameters, priv. */
23144 +#ifndef ETHTOOL_GPAUSEPARAM
23145 +#define ETHTOOL_GPAUSEPARAM 0x00000012 /* Get pause parameters */
23146 +/* for configuring link flow control parameters */
23147 +#define ethtool_pauseparam _kc_ethtool_pauseparam
23148 +struct _kc_ethtool_pauseparam {
23149 + u32 cmd; /* ETHTOOL_{G,S}PAUSEPARAM */
23151 + /* If the link is being auto-negotiated (via ethtool_cmd.autoneg
23152 + * being true) the user may set 'autoneg' here non-zero to have the
23153 + * pause parameters be auto-negotiated too. In such a case, the
23154 + * {rx,tx}_pause values below determine what capabilities are
23157 + * If 'autoneg' is zero or the link is not being auto-negotiated,
23158 + * then {rx,tx}_pause force the driver to use/not-use pause
23165 +#endif /* ETHTOOL_GPAUSEPARAM */
23167 +#ifndef ETHTOOL_SPAUSEPARAM
23168 +#define ETHTOOL_SPAUSEPARAM 0x00000013 /* Set pause parameters. */
23170 +#ifndef ETHTOOL_GRXCSUM
23171 +#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */
23173 +#ifndef ETHTOOL_SRXCSUM
23174 +#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */
23176 +#ifndef ETHTOOL_GTXCSUM
23177 +#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */
23179 +#ifndef ETHTOOL_STXCSUM
23180 +#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */
23182 +#ifndef ETHTOOL_GSG
23183 +#define ETHTOOL_GSG 0x00000018 /* Get scatter-gather enable
23184 + * (ethtool_value) */
23186 +#ifndef ETHTOOL_SSG
23187 +#define ETHTOOL_SSG 0x00000019 /* Set scatter-gather enable
23188 + * (ethtool_value). */
23190 +#ifndef ETHTOOL_TEST
23191 +#define ETHTOOL_TEST 0x0000001a /* execute NIC self-test, priv. */
23193 +#ifndef ETHTOOL_GSTRINGS
23194 +#define ETHTOOL_GSTRINGS 0x0000001b /* get specified string set */
23196 +#ifndef ETHTOOL_PHYS_ID
23197 +#define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */
23199 +#ifndef ETHTOOL_GSTATS
23200 +#define ETHTOOL_GSTATS 0x0000001d /* get NIC-specific statistics */
23202 +#ifndef ETHTOOL_GTSO
23203 +#define ETHTOOL_GTSO 0x0000001e /* Get TSO enable (ethtool_value) */
23205 +#ifndef ETHTOOL_STSO
23206 +#define ETHTOOL_STSO 0x0000001f /* Set TSO enable (ethtool_value) */
23209 +#ifndef ETHTOOL_BUSINFO_LEN
23210 +#define ETHTOOL_BUSINFO_LEN 32
23213 +/*****************************************************************************/
23214 +/* 2.4.3 => 2.4.0 */
23215 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) )
23217 +/**************************************/
23218 +/* PCI DRIVER API */
23220 +#ifndef pci_set_dma_mask
23221 +#define pci_set_dma_mask _kc_pci_set_dma_mask
23222 +extern int _kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask);
23225 +#ifndef pci_request_regions
23226 +#define pci_request_regions _kc_pci_request_regions
23227 +extern int _kc_pci_request_regions(struct pci_dev *pdev, char *res_name);
23230 +#ifndef pci_release_regions
23231 +#define pci_release_regions _kc_pci_release_regions
23232 +extern void _kc_pci_release_regions(struct pci_dev *pdev);
23235 +/**************************************/
23236 +/* NETWORK DRIVER API */
23238 +#ifndef alloc_etherdev
23239 +#define alloc_etherdev _kc_alloc_etherdev
23240 +extern struct net_device * _kc_alloc_etherdev(int sizeof_priv);
23243 +#ifndef is_valid_ether_addr
23244 +#define is_valid_ether_addr _kc_is_valid_ether_addr
23245 +extern int _kc_is_valid_ether_addr(u8 *addr);
23248 +/**************************************/
23249 +/* MISCELLANEOUS */
23251 +#ifndef INIT_TQUEUE
23252 +#define INIT_TQUEUE(_tq, _routine, _data) \
23254 + INIT_LIST_HEAD(&(_tq)->list); \
23255 + (_tq)->sync = 0; \
23256 + (_tq)->routine = _routine; \
23257 + (_tq)->data = _data; \
23261 +#endif /* 2.4.3 => 2.4.0 */
23263 +/*****************************************************************************/
23264 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,5) )
23265 +/* Generic MII registers. */
23266 +#define MII_BMCR 0x00 /* Basic mode control register */
23267 +#define MII_BMSR 0x01 /* Basic mode status register */
23268 +#define MII_PHYSID1 0x02 /* PHYS ID 1 */
23269 +#define MII_PHYSID2 0x03 /* PHYS ID 2 */
23270 +#define MII_ADVERTISE 0x04 /* Advertisement control reg */
23271 +#define MII_LPA 0x05 /* Link partner ability reg */
23272 +#define MII_EXPANSION 0x06 /* Expansion register */
23273 +/* Basic mode control register. */
23274 +#define BMCR_FULLDPLX 0x0100 /* Full duplex */
23275 +#define BMCR_ANENABLE 0x1000 /* Enable auto negotiation */
23276 +/* Basic mode status register. */
23277 +#define BMSR_ERCAP 0x0001 /* Ext-reg capability */
23278 +#define BMSR_ANEGCAPABLE 0x0008 /* Able to do auto-negotiation */
23279 +#define BMSR_10HALF 0x0800 /* Can do 10mbps, half-duplex */
23280 +#define BMSR_10FULL 0x1000 /* Can do 10mbps, full-duplex */
23281 +#define BMSR_100HALF 0x2000 /* Can do 100mbps, half-duplex */
23282 +#define BMSR_100FULL 0x4000 /* Can do 100mbps, full-duplex */
23283 +/* Advertisement control register. */
23284 +#define ADVERTISE_CSMA 0x0001 /* Only selector supported */
23285 +#define ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */
23286 +#define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */
23287 +#define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */
23288 +#define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */
23289 +#define ADVERTISE_ALL (ADVERTISE_10HALF | ADVERTISE_10FULL | \
23290 + ADVERTISE_100HALF | ADVERTISE_100FULL)
23291 +/* Expansion register for auto-negotiation. */
23292 +#define EXPANSION_ENABLENPAGE 0x0004 /* This enables npage words */
23295 +/*****************************************************************************/
23296 +/* 2.4.6 => 2.4.3 */
23297 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) )
23299 +#ifndef pci_set_power_state
23300 +#define pci_set_power_state _kc_pci_set_power_state
23301 +extern int _kc_pci_set_power_state(struct pci_dev *dev, int state);
23304 +#ifndef pci_enable_wake
23305 +#define pci_enable_wake _kc_pci_enable_wake
23306 +extern int _kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable);
23309 +#ifndef pci_disable_device
23310 +#define pci_disable_device _kc_pci_disable_device
23311 +extern void _kc_pci_disable_device(struct pci_dev *pdev);
23314 +/* PCI PM entry point syntax changed, so don't support suspend/resume */
23317 +#endif /* 2.4.6 => 2.4.3 */
23319 +#ifndef HAVE_PCI_SET_MWI
23320 +#define pci_set_mwi(X) pci_write_config_word(X, \
23321 + PCI_COMMAND, adapter->hw.bus.pci_cmd_word | \
23322 + PCI_COMMAND_INVALIDATE);
23323 +#define pci_clear_mwi(X) pci_write_config_word(X, \
23324 + PCI_COMMAND, adapter->hw.bus.pci_cmd_word & \
23325 + ~PCI_COMMAND_INVALIDATE);
23328 +/*****************************************************************************/
23329 +/* 2.4.10 => 2.4.9 */
23330 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,10) )
23332 +/**************************************/
23335 +#ifndef MODULE_LICENSE
23336 + #define MODULE_LICENSE(X)
23339 +/**************************************/
23343 +#define min(x,y) ({ \
23344 + const typeof(x) _x = (x); \
23345 + const typeof(y) _y = (y); \
23346 + (void) (&_x == &_y); \
23347 + _x < _y ? _x : _y; })
23350 +#define max(x,y) ({ \
23351 + const typeof(x) _x = (x); \
23352 + const typeof(y) _y = (y); \
23353 + (void) (&_x == &_y); \
23354 + _x > _y ? _x : _y; })
23356 +#define min_t(type,x,y) ({ \
23359 + _x < _y ? _x : _y; })
23361 +#define max_t(type,x,y) ({ \
23364 + _x > _y ? _x : _y; })
23366 +#ifndef list_for_each_safe
23367 +#define list_for_each_safe(pos, n, head) \
23368 + for (pos = (head)->next, n = pos->next; pos != (head); \
23369 + pos = n, n = pos->next)
23372 +#endif /* 2.4.10 => 2.4.9 */
23375 +/*****************************************************************************/
23376 +/* 2.4.13 => 2.4.10 */
23377 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) )
23379 +/**************************************/
23380 +/* PCI DMA MAPPING */
23382 +#ifndef virt_to_page
23383 + #define virt_to_page(v) (mem_map + (virt_to_phys(v) >> PAGE_SHIFT))
23386 +#ifndef pci_map_page
23387 +#define pci_map_page _kc_pci_map_page
23388 +extern u64 _kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, size_t size, int direction);
23391 +#ifndef pci_unmap_page
23392 +#define pci_unmap_page _kc_pci_unmap_page
23393 +extern void _kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, int direction);
23396 +/* pci_set_dma_mask takes dma_addr_t, which is only 32-bits prior to 2.4.13 */
23398 +#undef DMA_32BIT_MASK
23399 +#define DMA_32BIT_MASK 0xffffffff
23400 +#undef DMA_64BIT_MASK
23401 +#define DMA_64BIT_MASK 0xffffffff
23403 +/**************************************/
23407 +#define cpu_relax() rep_nop()
23410 +struct vlan_ethhdr {
23411 + unsigned char h_dest[ETH_ALEN];
23412 + unsigned char h_source[ETH_ALEN];
23413 + unsigned short h_vlan_proto;
23414 + unsigned short h_vlan_TCI;
23415 + unsigned short h_vlan_encapsulated_proto;
23417 +#endif /* 2.4.13 => 2.4.10 */
23419 +/*****************************************************************************/
23420 +/* 2.4.17 => 2.4.12 */
23421 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,17) )
23423 +#ifndef __devexit_p
23424 + #define __devexit_p(x) &(x)
23427 +#endif /* 2.4.17 => 2.4.12 */
23429 +/*****************************************************************************/
23430 +/* 2.4.20 => 2.4.19 */
23431 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,20) )
23433 +/* we won't support NAPI on less than 2.4.20 */
23438 +#endif /* 2.4.20 => 2.4.19 */
23440 +/*****************************************************************************/
23442 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,21) )
23443 +#define skb_pad(x,y) _kc_skb_pad(x, y)
23444 +struct sk_buff * _kc_skb_pad(struct sk_buff *skb, int pad);
23445 +#endif /* < 2.4.21 */
23447 +/*****************************************************************************/
23448 +/* 2.4.22 => 2.4.17 */
23449 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) )
23450 +#define pci_name(x) ((x)->slot_name)
23453 +/*****************************************************************************/
23454 +/*****************************************************************************/
23455 +/* 2.4.23 => 2.4.22 */
23456 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23) )
23457 +/*****************************************************************************/
23459 +#ifndef netif_poll_disable
23460 +#define netif_poll_disable(x) _kc_netif_poll_disable(x)
23461 +static inline void _kc_netif_poll_disable(struct net_device *netdev)
23463 + while (test_and_set_bit(__LINK_STATE_RX_SCHED, &netdev->state)) {
23465 + current->state = TASK_INTERRUPTIBLE;
23466 + schedule_timeout(1);
23470 +#ifndef netif_poll_enable
23471 +#define netif_poll_enable(x) _kc_netif_poll_enable(x)
23472 +static inline void _kc_netif_poll_enable(struct net_device *netdev)
23474 + clear_bit(__LINK_STATE_RX_SCHED, &netdev->state);
23478 +#ifndef netif_tx_disable
23479 +#define netif_tx_disable(x) _kc_netif_tx_disable(x)
23480 +static inline void _kc_netif_tx_disable(struct net_device *dev)
23482 + spin_lock_bh(&dev->xmit_lock);
23483 + netif_stop_queue(dev);
23484 + spin_unlock_bh(&dev->xmit_lock);
23487 +#endif /* 2.4.23 => 2.4.22 */
23489 +/*****************************************************************************/
23490 +/* 2.6.4 => 2.6.0 */
23491 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,25) || \
23492 + ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \
23493 + LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) )
23494 +#define ETHTOOL_OPS_COMPAT
23495 +#endif /* 2.6.4 => 2.6.0 */
23497 +/*****************************************************************************/
23498 +/* 2.5.71 => 2.4.x */
23499 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,71) )
23500 +#define sk_protocol protocol
23501 +#define pci_get_device pci_find_device
23502 +#endif /* 2.5.71 => 2.4.x */
23504 +/*****************************************************************************/
23505 +/* < 2.4.27 or 2.6.0 <= 2.6.5 */
23506 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) || \
23507 + ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \
23508 + LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) )
23510 +#ifndef netif_msg_init
23511 +#define netif_msg_init _kc_netif_msg_init
23512 +static inline u32 _kc_netif_msg_init(int debug_value, int default_msg_enable_bits)
23514 + /* use default */
23515 + if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
23516 + return default_msg_enable_bits;
23517 + if (debug_value == 0) /* no output */
23519 + /* set low N bits */
23520 + return (1 << debug_value) -1;
23524 +#endif /* < 2.4.27 or 2.6.0 <= 2.6.5 */
23525 +/*****************************************************************************/
23526 +#if (( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) || \
23527 + (( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) && \
23528 + ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) )))
23529 +#define netdev_priv(x) x->priv
23532 +/*****************************************************************************/
23534 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) )
23535 +#undef pci_register_driver
23536 +#define pci_register_driver pci_module_init
23538 +#define dev_err(__unused_dev, format, arg...) \
23539 + printk(KERN_ERR "%s: " format, pci_name(pdev) , ## arg)
23540 +#define dev_warn(__unused_dev, format, arg...) \
23541 + printk(KERN_WARNING "%s: " format, pci_name(pdev) , ## arg)
23543 +/* hlist_* code - double linked lists */
23544 +struct hlist_head {
23545 + struct hlist_node *first;
23548 +struct hlist_node {
23549 + struct hlist_node *next, **pprev;
23552 +static inline void __hlist_del(struct hlist_node *n)
23554 + struct hlist_node *next = n->next;
23555 + struct hlist_node **pprev = n->pprev;
23558 + next->pprev = pprev;
23561 +static inline void hlist_del(struct hlist_node *n)
23568 +static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
23570 + struct hlist_node *first = h->first;
23573 + first->pprev = &n->next;
23575 + n->pprev = &h->first;
23578 +static inline int hlist_empty(const struct hlist_head *h)
23580 + return !h->first;
23582 +#define HLIST_HEAD_INIT { .first = NULL }
23583 +#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
23584 +#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
23585 +static inline void INIT_HLIST_NODE(struct hlist_node *h)
23590 +#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
23592 +#define hlist_for_each_entry(tpos, pos, head, member) \
23593 + for (pos = (head)->first; \
23594 + pos && ({ prefetch(pos->next); 1;}) && \
23595 + ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
23598 +#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \
23599 + for (pos = (head)->first; \
23600 + pos && ({ n = pos->next; 1; }) && \
23601 + ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
23604 +/* we ignore GFP here */
23605 +#define dma_alloc_coherent(dv, sz, dma, gfp) \
23606 + pci_alloc_consistent(pdev, (sz), (dma))
23607 +#define dma_free_coherent(dv, sz, addr, dma_addr) \
23608 + pci_free_consistent(pdev, (sz), (addr), (dma_addr))
23610 +#ifndef might_sleep
23611 +#define might_sleep()
23614 +#endif /* < 2.5.0 */
23616 +/*****************************************************************************/
23617 +/* 2.5.28 => 2.4.23 */
23618 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) )
23620 +static inline void _kc_synchronize_irq(void)
23622 + synchronize_irq();
23624 +#undef synchronize_irq
23625 +#define synchronize_irq(X) _kc_synchronize_irq()
23627 +#include <linux/tqueue.h>
23628 +#define work_struct tq_struct
23630 +#define INIT_WORK(a,b) INIT_TQUEUE(a,(void (*)(void *))b,a)
23631 +#undef container_of
23632 +#define container_of list_entry
23633 +#define schedule_work schedule_task
23634 +#define flush_scheduled_work flush_scheduled_tasks
23635 +#define cancel_work_sync(x) flush_scheduled_work()
23637 +#endif /* 2.5.28 => 2.4.23 */
23639 +/*****************************************************************************/
23640 +/* 2.6.0 => 2.5.28 */
23641 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
23642 +#define MODULE_INFO(version, _version)
23643 +#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
23644 +#define CONFIG_E1000_DISABLE_PACKET_SPLIT 1
23646 +#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
23647 +#define CONFIG_IGB_DISABLE_PACKET_SPLIT 1
23650 +#define pci_set_consistent_dma_mask(dev,mask) 1
23653 +#define dev_put(dev) __dev_put(dev)
23655 +#ifndef skb_fill_page_desc
23656 +#define skb_fill_page_desc _kc_skb_fill_page_desc
23657 +extern void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size);
23661 +#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1))
23663 +#ifndef page_count
23664 +#define page_count(p) atomic_read(&(p)->count)
23667 +/* find_first_bit and find_next bit are not defined for most
23668 + * 2.4 kernels (except for the redhat 2.4.21 kernels
23670 +#include <linux/bitops.h>
23671 +#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
23672 +#undef find_next_bit
23673 +#define find_next_bit _kc_find_next_bit
23674 +extern unsigned long _kc_find_next_bit(const unsigned long *addr,
23675 + unsigned long size,
23676 + unsigned long offset);
23677 +#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
23679 +#endif /* 2.6.0 => 2.5.28 */
23681 +/*****************************************************************************/
23682 +/* 2.6.4 => 2.6.0 */
23683 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
23684 +#define MODULE_VERSION(_version) MODULE_INFO(version, _version)
23685 +#endif /* 2.6.4 => 2.6.0 */
23687 +/*****************************************************************************/
23688 +/* 2.6.5 => 2.6.0 */
23689 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) )
23690 +#define pci_dma_sync_single_for_cpu pci_dma_sync_single
23691 +#define pci_dma_sync_single_for_device pci_dma_sync_single_for_cpu
23692 +#endif /* 2.6.5 => 2.6.0 */
23694 +/*****************************************************************************/
23695 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,6) )
23696 +/* taken from 2.6 include/linux/bitmap.h */
23697 +#undef bitmap_zero
23698 +#define bitmap_zero _kc_bitmap_zero
23699 +static inline void _kc_bitmap_zero(unsigned long *dst, int nbits)
23701 + if (nbits <= BITS_PER_LONG)
23704 + int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
23705 + memset(dst, 0, len);
23708 +#define random_ether_addr _kc_random_ether_addr
23709 +static inline void _kc_random_ether_addr(u8 *addr)
23711 + get_random_bytes(addr, ETH_ALEN);
23712 + addr[0] &= 0xfe; /* clear multicast */
23713 + addr[0] |= 0x02; /* set local assignment */
23715 +#endif /* < 2.6.6 */
23717 +/*****************************************************************************/
23718 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) )
23720 +#define if_mii _kc_if_mii
23721 +static inline struct mii_ioctl_data *_kc_if_mii(struct ifreq *rq)
23723 + return (struct mii_ioctl_data *) &rq->ifr_ifru;
23725 +#endif /* < 2.6.7 */
23727 +/*****************************************************************************/
23728 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) )
23729 +#ifndef PCI_EXP_DEVCTL
23730 +#define PCI_EXP_DEVCTL 8
23732 +#ifndef PCI_EXP_DEVCTL_CERE
23733 +#define PCI_EXP_DEVCTL_CERE 0x0001
23735 +#define msleep(x) do { set_current_state(TASK_UNINTERRUPTIBLE); \
23736 + schedule_timeout((x * HZ)/1000 + 2); \
23739 +#endif /* < 2.6.8 */
23741 +/*****************************************************************************/
23742 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9))
23743 +#include <net/dsfield.h>
23747 +#define kcalloc(n, size, flags) _kc_kzalloc(((n) * (size)), flags)
23748 +extern void *_kc_kzalloc(size_t size, int flags);
23750 +#define MSEC_PER_SEC 1000L
23751 +static inline unsigned int _kc_jiffies_to_msecs(const unsigned long j)
23753 +#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
23754 + return (MSEC_PER_SEC / HZ) * j;
23755 +#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
23756 + return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
23758 + return (j * MSEC_PER_SEC) / HZ;
23761 +static inline unsigned long _kc_msecs_to_jiffies(const unsigned int m)
23763 + if (m > _kc_jiffies_to_msecs(MAX_JIFFY_OFFSET))
23764 + return MAX_JIFFY_OFFSET;
23765 +#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
23766 + return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
23767 +#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
23768 + return m * (HZ / MSEC_PER_SEC);
23770 + return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC;
23774 +#define msleep_interruptible _kc_msleep_interruptible
23775 +static inline unsigned long _kc_msleep_interruptible(unsigned int msecs)
23777 + unsigned long timeout = _kc_msecs_to_jiffies(msecs) + 1;
23779 + while (timeout && !signal_pending(current)) {
23780 + __set_current_state(TASK_INTERRUPTIBLE);
23781 + timeout = schedule_timeout(timeout);
23783 + return _kc_jiffies_to_msecs(timeout);
23786 +/* Basic mode control register. */
23787 +#define BMCR_SPEED1000 0x0040 /* MSB of Speed (1000) */
23790 +#define __le16 u16
23793 +#define __le32 u32
23796 +#define __le64 u64
23799 +#define __be16 u16
23802 +#ifdef pci_dma_mapping_error
23803 +#undef pci_dma_mapping_error
23805 +#define pci_dma_mapping_error _kc_pci_dma_mapping_error
23806 +static inline int _kc_pci_dma_mapping_error(struct pci_dev *pdev,
23807 + dma_addr_t dma_addr)
23809 + return dma_addr == 0;
23811 +#endif /* < 2.6.9 */
23813 +/*****************************************************************************/
23814 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) )
23815 +#ifdef module_param_array_named
23816 +#undef module_param_array_named
23817 +#define module_param_array_named(name, array, type, nump, perm) \
23818 + static struct kparam_array __param_arr_##name \
23819 + = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type, \
23820 + sizeof(array[0]), array }; \
23821 + module_param_call(name, param_array_set, param_array_get, \
23822 + &__param_arr_##name, perm)
23823 +#endif /* module_param_array_named */
23824 +#endif /* < 2.6.10 */
23826 +/*****************************************************************************/
23827 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) )
23831 +#define PCI_D3hot 3
23832 +#define PCI_D3cold 4
23833 +typedef int pci_power_t;
23834 +#define pci_choose_state(pdev,state) state
23835 +#define PMSG_SUSPEND 3
23836 +#define PCI_EXP_LNKCTL 16
23838 +#undef NETIF_F_LLTX
23840 +#ifndef ARCH_HAS_PREFETCH
23841 +#define prefetch(X)
23844 +#ifndef NET_IP_ALIGN
23845 +#define NET_IP_ALIGN 2
23848 +#define KC_USEC_PER_SEC 1000000L
23849 +#define usecs_to_jiffies _kc_usecs_to_jiffies
23850 +static inline unsigned int _kc_jiffies_to_usecs(const unsigned long j)
23852 +#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ)
23853 + return (KC_USEC_PER_SEC / HZ) * j;
23854 +#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC)
23855 + return (j + (HZ / KC_USEC_PER_SEC) - 1)/(HZ / KC_USEC_PER_SEC);
23857 + return (j * KC_USEC_PER_SEC) / HZ;
23860 +static inline unsigned long _kc_usecs_to_jiffies(const unsigned int m)
23862 + if (m > _kc_jiffies_to_usecs(MAX_JIFFY_OFFSET))
23863 + return MAX_JIFFY_OFFSET;
23864 +#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ)
23865 + return (m + (KC_USEC_PER_SEC / HZ) - 1) / (KC_USEC_PER_SEC / HZ);
23866 +#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC)
23867 + return m * (HZ / KC_USEC_PER_SEC);
23869 + return (m * HZ + KC_USEC_PER_SEC - 1) / KC_USEC_PER_SEC;
23872 +#endif /* < 2.6.11 */
23874 +/*****************************************************************************/
23875 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) )
23876 +#include <linux/reboot.h>
23877 +#define USE_REBOOT_NOTIFIER
23879 +/* Generic MII registers. */
23880 +#define MII_CTRL1000 0x09 /* 1000BASE-T control */
23881 +#define MII_STAT1000 0x0a /* 1000BASE-T status */
23882 +/* Advertisement control register. */
23883 +#define ADVERTISE_PAUSE_CAP 0x0400 /* Try for pause */
23884 +#define ADVERTISE_PAUSE_ASYM 0x0800 /* Try for asymmetric pause */
23885 +/* 1000BASE-T Control register */
23886 +#define ADVERTISE_1000FULL 0x0200 /* Advertise 1000BASE-T full duplex */
23887 +#endif /* < 2.6.12 */
23889 +/*****************************************************************************/
23890 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) )
23891 +#define pm_message_t u32
23893 +#define kzalloc _kc_kzalloc
23894 +extern void *_kc_kzalloc(size_t size, int flags);
23897 +/* Generic MII registers. */
23898 +#define MII_ESTATUS 0x0f /* Extended Status */
23899 +/* Basic mode status register. */
23900 +#define BMSR_ESTATEN 0x0100 /* Extended Status in R15 */
23901 +/* Extended status register. */
23902 +#define ESTATUS_1000_TFULL 0x2000 /* Can do 1000BT Full */
23903 +#define ESTATUS_1000_THALF 0x1000 /* Can do 1000BT Half */
23904 +#endif /* < 2.6.14 */
23906 +/*****************************************************************************/
23907 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15) )
23908 +#ifndef device_can_wakeup
23909 +#define device_can_wakeup(dev) (1)
23911 +#ifndef device_set_wakeup_enable
23912 +#define device_set_wakeup_enable(dev, val) do{}while(0)
23914 +#ifndef device_init_wakeup
23915 +#define device_init_wakeup(dev,val) do {} while (0)
23917 +#endif /* < 2.6.15 */
23919 +/*****************************************************************************/
23920 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) )
23921 +#undef DEFINE_MUTEX
23922 +#define DEFINE_MUTEX(x) DECLARE_MUTEX(x)
23923 +#define mutex_lock(x) down_interruptible(x)
23924 +#define mutex_unlock(x) up(x)
23926 +#undef HAVE_PCI_ERS
23927 +#else /* 2.6.16 and above */
23928 +#undef HAVE_PCI_ERS
23929 +#define HAVE_PCI_ERS
23930 +#endif /* < 2.6.16 */
23932 +/*****************************************************************************/
23933 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) )
23935 +#ifndef IRQ_HANDLED
23936 +#define irqreturn_t void
23937 +#define IRQ_HANDLED
23941 +#ifndef IRQF_PROBE_SHARED
23942 +#ifdef SA_PROBEIRQ
23943 +#define IRQF_PROBE_SHARED SA_PROBEIRQ
23945 +#define IRQF_PROBE_SHARED 0
23949 +#ifndef IRQF_SHARED
23950 +#define IRQF_SHARED SA_SHIRQ
23953 +#ifndef ARRAY_SIZE
23954 +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
23957 +#ifndef netdev_alloc_skb
23958 +#define netdev_alloc_skb _kc_netdev_alloc_skb
23959 +extern struct sk_buff *_kc_netdev_alloc_skb(struct net_device *dev,
23960 + unsigned int length);
23963 +#ifndef skb_is_gso
23964 +#ifdef NETIF_F_TSO
23965 +#define skb_is_gso _kc_skb_is_gso
23966 +static inline int _kc_skb_is_gso(const struct sk_buff *skb)
23968 + return skb_shinfo(skb)->gso_size;
23971 +#define skb_is_gso(a) 0
23975 +#endif /* < 2.6.18 */
23977 +/*****************************************************************************/
23978 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) )
23980 +#ifndef DIV_ROUND_UP
23981 +#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
23983 +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) )
23984 +#ifndef RHEL_RELEASE_CODE
23985 +#define RHEL_RELEASE_CODE 0
23987 +#ifndef RHEL_RELEASE_VERSION
23988 +#define RHEL_RELEASE_VERSION(a,b) 0
23990 +#ifndef AX_RELEASE_CODE
23991 +#define AX_RELEASE_CODE 0
23993 +#ifndef AX_RELEASE_VERSION
23994 +#define AX_RELEASE_VERSION(a,b) 0
23996 +#if (!(( RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,4) ) && ( RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0) ) || ( RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,0) ) || (AX_RELEASE_CODE > AX_RELEASE_VERSION(3,0))))
23997 +typedef irqreturn_t (*irq_handler_t)(int, void*, struct pt_regs *);
23999 +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))
24000 +#undef CONFIG_INET_LRO
24001 +#undef CONFIG_INET_LRO_MODULE
24003 +#undef CONFIG_FCOE
24004 +#undef CONFIG_FCOE_MODULE
24005 +#endif /* IXGBE_FCOE */
24007 +typedef irqreturn_t (*new_handler_t)(int, void*);
24008 +static inline irqreturn_t _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id)
24010 +typedef void (*irq_handler_t)(int, void*, struct pt_regs *);
24011 +typedef void (*new_handler_t)(int, void*);
24012 +static inline int _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id)
24013 +#endif /* >= 2.5.x */
24015 + irq_handler_t new_handler = (irq_handler_t) handler;
24016 + return request_irq(irq, new_handler, flags, devname, dev_id);
24019 +#undef request_irq
24020 +#define request_irq(irq, handler, flags, devname, dev_id) _kc_request_irq((irq), (handler), (flags), (devname), (dev_id))
24022 +#define irq_handler_t new_handler_t
24023 +/* pci_restore_state and pci_save_state handles MSI/PCIE from 2.6.19 */
24024 +#define PCIE_CONFIG_SPACE_LEN 256
24025 +#define PCI_CONFIG_SPACE_LEN 64
24026 +#define PCIE_LINK_STATUS 0x12
24027 +#define pci_config_space_ich8lan() do {} while(0)
24028 +#undef pci_save_state
24029 +extern int _kc_pci_save_state(struct pci_dev *);
24030 +#define pci_save_state(pdev) _kc_pci_save_state(pdev)
24031 +#undef pci_restore_state
24032 +extern void _kc_pci_restore_state(struct pci_dev *);
24033 +#define pci_restore_state(pdev) _kc_pci_restore_state(pdev)
24034 +#ifdef HAVE_PCI_ERS
24035 +#undef free_netdev
24036 +extern void _kc_free_netdev(struct net_device *);
24037 +#define free_netdev(netdev) _kc_free_netdev(netdev)
24039 +static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev)
24043 +#define pci_disable_pcie_error_reporting(dev) do {} while (0)
24044 +#define pci_cleanup_aer_uncorrect_error_status(dev) do {} while (0)
24045 +#else /* 2.6.19 */
24046 +#include <linux/aer.h>
24047 +#endif /* < 2.6.19 */
24049 +/*****************************************************************************/
24050 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) )
24051 +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,28) )
24053 +#define INIT_WORK(_work, _func) \
24055 + INIT_LIST_HEAD(&(_work)->entry); \
24056 + (_work)->pending = 0; \
24057 + (_work)->func = (void (*)(void *))_func; \
24058 + (_work)->data = _work; \
24059 + init_timer(&(_work)->timer); \
24063 +#ifndef PCI_VDEVICE
24064 +#define PCI_VDEVICE(ven, dev) \
24065 + PCI_VENDOR_ID_##ven, (dev), \
24066 + PCI_ANY_ID, PCI_ANY_ID, 0, 0
24069 +#ifndef round_jiffies
24070 +#define round_jiffies(x) x
24073 +#define csum_offset csum
24075 +#endif /* < 2.6.20 */
24077 +/*****************************************************************************/
24078 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) )
24079 +#define to_net_dev(class) container_of(class, struct net_device, class_dev)
24080 +#define NETDEV_CLASS_DEV
24081 +#define vlan_group_get_device(vg, id) (vg->vlan_devices[id])
24082 +#define vlan_group_set_device(vg, id, dev) if (vg) vg->vlan_devices[id] = dev;
24083 +#define pci_channel_offline(pdev) (pdev->error_state && \
24084 + pdev->error_state != pci_channel_io_normal)
24085 +#define pci_request_selected_regions(pdev, bars, name) \
24086 + pci_request_regions(pdev, name)
24087 +#define pci_release_selected_regions(pdev, bars) pci_release_regions(pdev);
24088 +#endif /* < 2.6.21 */
24090 +/*****************************************************************************/
24091 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) )
24092 +#define tcp_hdr(skb) (skb->h.th)
24093 +#define tcp_hdrlen(skb) (skb->h.th->doff << 2)
24094 +#define skb_transport_offset(skb) (skb->h.raw - skb->data)
24095 +#define skb_transport_header(skb) (skb->h.raw)
24096 +#define ipv6_hdr(skb) (skb->nh.ipv6h)
24097 +#define ip_hdr(skb) (skb->nh.iph)
24098 +#define skb_network_offset(skb) (skb->nh.raw - skb->data)
24099 +#define skb_network_header(skb) (skb->nh.raw)
24100 +#define skb_tail_pointer(skb) skb->tail
24101 +#define skb_copy_to_linear_data_offset(skb, offset, from, len) \
24102 + memcpy(skb->data + offset, from, len)
24103 +#define skb_network_header_len(skb) (skb->h.raw - skb->nh.raw)
24104 +#define pci_register_driver pci_module_init
24105 +#define skb_mac_header(skb) skb->mac.raw
24107 +#ifdef NETIF_F_MULTI_QUEUE
24108 +#ifndef alloc_etherdev_mq
24109 +#define alloc_etherdev_mq(_a, _b) alloc_etherdev(_a)
24111 +#endif /* NETIF_F_MULTI_QUEUE */
24113 +#ifndef ETH_FCS_LEN
24114 +#define ETH_FCS_LEN 4
24116 +#define cancel_work_sync(x) flush_scheduled_work()
24118 +#define udp_hdr _udp_hdr
24119 +static inline struct udphdr *_udp_hdr(const struct sk_buff *skb)
24121 + return (struct udphdr *)skb_transport_header(skb);
24124 +#else /* 2.6.22 */
24125 +#define ETH_TYPE_TRANS_SETS_DEV
24126 +#endif /* < 2.6.22 */
24128 +/*****************************************************************************/
24129 +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22) )
24130 +#undef ETHTOOL_GPERMADDR
24131 +#undef SET_MODULE_OWNER
24132 +#define SET_MODULE_OWNER(dev) do { } while (0)
24133 +#endif /* > 2.6.22 */
24135 +/*****************************************************************************/
24136 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) )
24137 +#define netif_subqueue_stopped(_a, _b) 0
24138 +#endif /* < 2.6.23 */
24140 +/*****************************************************************************/
24141 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
24142 +/* if GRO is supported then the napi struct must already exist */
24143 +#ifndef NETIF_F_GRO
24144 +/* NAPI API changes in 2.6.24 break everything */
24145 +struct napi_struct {
24146 + /* used to look up the real NAPI polling routine */
24147 + int (*poll)(struct napi_struct *, int);
24148 + struct net_device *dev;
24154 +extern int __kc_adapter_clean(struct net_device *, int *);
24155 +extern struct net_device *napi_to_poll_dev(struct napi_struct *napi);
24156 +#define napi_enable(napi) do { \
24157 + struct napi_struct *_napi = (napi); \
24158 + /* abuse if_port as a counter */ \
24159 + if (!_napi->dev->if_port) { \
24160 + netif_poll_enable(_napi->dev); \
24162 + ++_napi->dev->if_port; \
24163 + netif_poll_enable(napi_to_poll_dev(_napi)); \
24165 +#define napi_disable(napi) do { \
24166 + struct napi_struct *_napi = (napi); \
24167 + netif_poll_disable(napi_to_poll_dev(_napi)); \
24168 + --_napi->dev->if_port; \
24169 + if (!_napi->dev->if_port) \
24170 + netif_poll_disable(_napi->dev); \
24172 +#define netif_napi_add(_netdev, _napi, _poll, _weight) \
24174 + struct napi_struct *__napi = (_napi); \
24175 + struct net_device *poll_dev = napi_to_poll_dev(__napi); \
24176 + poll_dev->poll = &(__kc_adapter_clean); \
24177 + poll_dev->priv = (_napi); \
24178 + poll_dev->weight = (_weight); \
24179 + set_bit(__LINK_STATE_RX_SCHED, &poll_dev->state); \
24180 + set_bit(__LINK_STATE_START, &poll_dev->state);\
24181 + dev_hold(poll_dev); \
24182 + _netdev->poll = &(__kc_adapter_clean); \
24183 + _netdev->weight = (_weight); \
24184 + __napi->poll = &(_poll); \
24185 + __napi->weight = (_weight); \
24186 + __napi->dev = (_netdev); \
24187 + set_bit(__LINK_STATE_RX_SCHED, &(_netdev)->state); \
24189 +#define netif_napi_del(_napi) \
24191 + struct net_device *poll_dev = napi_to_poll_dev(_napi); \
24192 + WARN_ON(!test_bit(__LINK_STATE_RX_SCHED, &poll_dev->state)); \
24193 + dev_put(poll_dev); \
24194 + memset(poll_dev, 0, sizeof(struct net_device));\
24196 +#define napi_schedule_prep(_napi) \
24197 + (netif_running((_napi)->dev) && netif_rx_schedule_prep(napi_to_poll_dev(_napi)))
24198 +#define napi_schedule(_napi) netif_rx_schedule(napi_to_poll_dev(_napi))
24199 +#define __napi_schedule(_napi) __netif_rx_schedule(napi_to_poll_dev(_napi))
24200 +#ifndef NETIF_F_GRO
24201 +#define napi_complete(_napi) netif_rx_complete(napi_to_poll_dev(_napi))
24203 +#define napi_complete(_napi) \
24205 + napi_gro_flush(_napi); \
24206 + netif_rx_complete(napi_to_poll_dev(_napi)); \
24208 +#endif /* NETIF_F_GRO */
24210 +#define netif_napi_add(_netdev, _napi, _poll, _weight) \
24212 + struct napi_struct *__napi = _napi; \
24213 + _netdev->poll = &(_poll); \
24214 + _netdev->weight = (_weight); \
24215 + __napi->poll = &(_poll); \
24216 + __napi->weight = (_weight); \
24217 + __napi->dev = (_netdev); \
24219 +#define netif_napi_del(_a) do {} while (0)
24222 +#undef dev_get_by_name
24223 +#define dev_get_by_name(_a, _b) dev_get_by_name(_b)
24224 +#define __netif_subqueue_stopped(_a, _b) netif_subqueue_stopped(_a, _b)
24225 +#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
24226 +#else /* < 2.6.24 */
24227 +#define HAVE_ETHTOOL_GET_SSET_COUNT
24228 +#define HAVE_NETDEV_NAPI_LIST
24229 +#endif /* < 2.6.24 */
24231 +/*****************************************************************************/
24232 +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,24) )
24233 +#include <linux/pm_qos_params.h>
24234 +#endif /* > 2.6.24 */
24236 +/*****************************************************************************/
24237 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) )
24238 +#define PM_QOS_CPU_DMA_LATENCY 1
24240 +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) )
24241 +#include <linux/latency.h>
24242 +#define PM_QOS_DEFAULT_VALUE INFINITE_LATENCY
24243 +#define pm_qos_add_requirement(pm_qos_class, name, value) \
24244 + set_acceptable_latency(name, value)
24245 +#define pm_qos_remove_requirement(pm_qos_class, name) \
24246 + remove_acceptable_latency(name)
24247 +#define pm_qos_update_requirement(pm_qos_class, name, value) \
24248 + modify_acceptable_latency(name, value)
24250 +#define PM_QOS_DEFAULT_VALUE -1
24251 +#define pm_qos_add_requirement(pm_qos_class, name, value)
24252 +#define pm_qos_remove_requirement(pm_qos_class, name)
24253 +#define pm_qos_update_requirement(pm_qos_class, name, value) { \
24254 + if (value != PM_QOS_DEFAULT_VALUE) { \
24255 + printk(KERN_WARNING "%s: unable to set PM QoS requirement\n", \
24256 + pci_name(adapter->pdev)); \
24259 +#endif /* > 2.6.18 */
24261 +#define pci_enable_device_mem(pdev) pci_enable_device(pdev)
24263 +#endif /* < 2.6.25 */
24265 +/*****************************************************************************/
24266 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) )
24267 +#else /* < 2.6.26 */
24268 +#include <linux/pci-aspm.h>
24269 +#define HAVE_NETDEV_VLAN_FEATURES
24270 +#endif /* < 2.6.26 */
24271 +/*****************************************************************************/
24272 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) )
24273 +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15) )
24274 +#if (((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) && defined(CONFIG_PM)) || ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)) && defined(CONFIG_PM_SLEEP)))
24275 +#undef device_set_wakeup_enable
24276 +#define device_set_wakeup_enable(dev, val) \
24279 + int pm = pci_find_capability(adapter->pdev, PCI_CAP_ID_PM); \
24281 + pci_read_config_word(adapter->pdev, pm + PCI_PM_PMC, \
24284 + (dev)->power.can_wakeup = !!(pmc >> 11); \
24285 + (dev)->power.should_wakeup = (val && (pmc >> 11)); \
24287 +#endif /* 2.6.15-2.6.22 and CONFIG_PM or 2.6.23-2.6.25 and CONFIG_PM_SLEEP */
24288 +#endif /* 2.6.15 through 2.6.27 */
24289 +#ifndef netif_napi_del
24290 +#define netif_napi_del(_a) do {} while (0)
24292 +#ifdef CONFIG_NETPOLL
24293 +#undef netif_napi_del
24294 +#define netif_napi_del(_a) list_del(&(_a)->dev_list);
24297 +#endif /* netif_napi_del */
24298 +#ifndef pci_dma_mapping_error
24299 +#define pci_dma_mapping_error(pdev, dma_addr) pci_dma_mapping_error(dma_addr)
24302 +#ifdef CONFIG_NETDEVICES_MULTIQUEUE
24303 +#define HAVE_TX_MQ
24307 +extern void _kc_netif_tx_stop_all_queues(struct net_device *);
24308 +extern void _kc_netif_tx_wake_all_queues(struct net_device *);
24309 +extern void _kc_netif_tx_start_all_queues(struct net_device *);
24310 +#define netif_tx_stop_all_queues(a) _kc_netif_tx_stop_all_queues(a)
24311 +#define netif_tx_wake_all_queues(a) _kc_netif_tx_wake_all_queues(a)
24312 +#define netif_tx_start_all_queues(a) _kc_netif_tx_start_all_queues(a)
24313 +#undef netif_stop_subqueue
24314 +#define netif_stop_subqueue(_ndev,_qi) do { \
24315 + if (netif_is_multiqueue((_ndev))) \
24316 + netif_stop_subqueue((_ndev), (_qi)); \
24318 + netif_stop_queue((_ndev)); \
24320 +#undef netif_start_subqueue
24321 +#define netif_start_subqueue(_ndev,_qi) do { \
24322 + if (netif_is_multiqueue((_ndev))) \
24323 + netif_start_subqueue((_ndev), (_qi)); \
24325 + netif_start_queue((_ndev)); \
24327 +#else /* HAVE_TX_MQ */
24328 +#define netif_tx_stop_all_queues(a) netif_stop_queue(a)
24329 +#define netif_tx_wake_all_queues(a) netif_wake_queue(a)
24330 +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) )
24331 +#define netif_tx_start_all_queues(a) netif_start_queue(a)
24333 +#define netif_tx_start_all_queues(a) do {} while (0)
24335 +#define netif_stop_subqueue(_ndev,_qi) netif_stop_queue((_ndev))
24336 +#define netif_start_subqueue(_ndev,_qi) netif_start_queue((_ndev))
24337 +#endif /* HAVE_TX_MQ */
24338 +#ifndef NETIF_F_MULTI_QUEUE
24339 +#define NETIF_F_MULTI_QUEUE 0
24340 +#define netif_is_multiqueue(a) 0
24341 +#define netif_wake_subqueue(a, b)
24342 +#endif /* NETIF_F_MULTI_QUEUE */
24343 +#else /* < 2.6.27 */
24344 +#define HAVE_TX_MQ
24345 +#define HAVE_NETDEV_SELECT_QUEUE
24346 +#endif /* < 2.6.27 */
24348 +/*****************************************************************************/
24349 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) )
24350 +#define pci_ioremap_bar(pdev, bar) ioremap(pci_resource_start(pdev, bar), \
24351 + pci_resource_len(pdev, bar))
24352 +#define pci_wake_from_d3 _kc_pci_wake_from_d3
24353 +#define pci_prepare_to_sleep _kc_pci_prepare_to_sleep
24354 +extern int _kc_pci_wake_from_d3(struct pci_dev *dev, bool enable);
24355 +extern int _kc_pci_prepare_to_sleep(struct pci_dev *dev);
24356 +#define netdev_alloc_page(a) alloc_page(GFP_ATOMIC)
24357 +#endif /* < 2.6.28 */
24359 +/*****************************************************************************/
24360 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) )
24361 +#define pci_request_selected_regions_exclusive(pdev, bars, name) \
24362 + pci_request_selected_regions(pdev, bars, name)
24363 +extern void _kc_pci_disable_link_state(struct pci_dev *dev, int state);
24364 +#define pci_disable_link_state(p, s) _kc_pci_disable_link_state(p, s)
24365 +#else /* < 2.6.29 */
24367 +#define HAVE_PFC_MODE_ENABLE
24368 +#endif /* CONFIG_DCB */
24369 +#endif /* < 2.6.29 */
24371 +/*****************************************************************************/
24372 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) )
24374 +#undef CONFIG_FCOE
24375 +#undef CONFIG_FCOE_MODULE
24376 +#endif /* IXGBE_FCOE */
24377 +extern u16 _kc_skb_tx_hash(struct net_device *dev, struct sk_buff *skb);
24378 +#define skb_tx_hash(n, s) _kc_skb_tx_hash(n, s)
24379 +#define skb_record_rx_queue(a, b) do {} while (0)
24381 +#define HAVE_ASPM_QUIRKS
24382 +#endif /* < 2.6.30 */
24384 +/*****************************************************************************/
24385 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31) )
24386 +#define ETH_P_1588 0x88F7
24388 +#ifndef HAVE_NETDEV_STORAGE_ADDRESS
24389 +#define HAVE_NETDEV_STORAGE_ADDRESS
24391 +#ifndef HAVE_NETDEV_HW_ADDR
24392 +#define HAVE_NETDEV_HW_ADDR
24394 +#endif /* < 2.6.31 */
24396 +/*****************************************************************************/
24397 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32) )
24398 +#undef netdev_tx_t
24399 +#define netdev_tx_t int
24400 +#endif /* < 2.6.32 */
24401 +#endif /* _KCOMPAT_H_ */
24402 Index: linux-2.6.22/drivers/net/igb/kcompat_ethtool.c
24403 ===================================================================
24404 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
24405 +++ linux-2.6.22/drivers/net/igb/kcompat_ethtool.c 2009-12-18 12:39:22.000000000 -0500
24407 +/*******************************************************************************
24409 + Intel(R) Gigabit Ethernet Linux driver
24410 + Copyright(c) 2007-2009 Intel Corporation.
24412 + This program is free software; you can redistribute it and/or modify it
24413 + under the terms and conditions of the GNU General Public License,
24414 + version 2, as published by the Free Software Foundation.
24416 + This program is distributed in the hope it will be useful, but WITHOUT
24417 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
24418 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
24421 + You should have received a copy of the GNU General Public License along with
24422 + this program; if not, write to the Free Software Foundation, Inc.,
24423 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
24425 + The full GNU General Public License is included in this distribution in
24426 + the file called "COPYING".
24428 + Contact Information:
24429 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24430 + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24432 +*******************************************************************************/
24435 + * net/core/ethtool.c - Ethtool ioctl handler
24436 + * Copyright (c) 2003 Matthew Wilcox <matthew@wil.cx>
24438 + * This file is where we call all the ethtool_ops commands to get
24439 + * the information ethtool needs. We fall back to calling do_ioctl()
24440 + * for drivers which haven't been converted to ethtool_ops yet.
24442 + * It's GPL, stupid.
24444 + * Modification by sfeldma@pobox.com to work as backward compat
24445 + * solution for pre-ethtool_ops kernels.
24446 + * - copied struct ethtool_ops from ethtool.h
24447 + * - defined SET_ETHTOOL_OPS
24448 + * - put in some #ifndef NETIF_F_xxx wrappers
24449 + * - changes refs to dev->ethtool_ops to ethtool_ops
24450 + * - changed dev_ethtool to ethtool_ioctl
24451 + * - remove EXPORT_SYMBOL()s
24452 + * - added _kc_ prefix in built-in ethtool_op_xxx ops.
24455 +#include <linux/module.h>
24456 +#include <linux/types.h>
24457 +#include <linux/errno.h>
24458 +#include <linux/mii.h>
24459 +#include <linux/ethtool.h>
24460 +#include <linux/netdevice.h>
24461 +#include <asm/uaccess.h>
24463 +#include "kcompat.h"
24465 +#undef SUPPORTED_10000baseT_Full
24466 +#define SUPPORTED_10000baseT_Full (1 << 12)
24467 +#undef ADVERTISED_10000baseT_Full
24468 +#define ADVERTISED_10000baseT_Full (1 << 12)
24469 +#undef SPEED_10000
24470 +#define SPEED_10000 10000
24472 +#undef ethtool_ops
24473 +#define ethtool_ops _kc_ethtool_ops
24475 +struct _kc_ethtool_ops {
24476 + int (*get_settings)(struct net_device *, struct ethtool_cmd *);
24477 + int (*set_settings)(struct net_device *, struct ethtool_cmd *);
24478 + void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *);
24479 + int (*get_regs_len)(struct net_device *);
24480 + void (*get_regs)(struct net_device *, struct ethtool_regs *, void *);
24481 + void (*get_wol)(struct net_device *, struct ethtool_wolinfo *);
24482 + int (*set_wol)(struct net_device *, struct ethtool_wolinfo *);
24483 + u32 (*get_msglevel)(struct net_device *);
24484 + void (*set_msglevel)(struct net_device *, u32);
24485 + int (*nway_reset)(struct net_device *);
24486 + u32 (*get_link)(struct net_device *);
24487 + int (*get_eeprom_len)(struct net_device *);
24488 + int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *);
24489 + int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *);
24490 + int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *);
24491 + int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *);
24492 + void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *);
24493 + int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *);
24494 + void (*get_pauseparam)(struct net_device *,
24495 + struct ethtool_pauseparam*);
24496 + int (*set_pauseparam)(struct net_device *,
24497 + struct ethtool_pauseparam*);
24498 + u32 (*get_rx_csum)(struct net_device *);
24499 + int (*set_rx_csum)(struct net_device *, u32);
24500 + u32 (*get_tx_csum)(struct net_device *);
24501 + int (*set_tx_csum)(struct net_device *, u32);
24502 + u32 (*get_sg)(struct net_device *);
24503 + int (*set_sg)(struct net_device *, u32);
24504 + u32 (*get_tso)(struct net_device *);
24505 + int (*set_tso)(struct net_device *, u32);
24506 + int (*self_test_count)(struct net_device *);
24507 + void (*self_test)(struct net_device *, struct ethtool_test *, u64 *);
24508 + void (*get_strings)(struct net_device *, u32 stringset, u8 *);
24509 + int (*phys_id)(struct net_device *, u32);
24510 + int (*get_stats_count)(struct net_device *);
24511 + void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *,
24513 +} *ethtool_ops = NULL;
24515 +#undef SET_ETHTOOL_OPS
24516 +#define SET_ETHTOOL_OPS(netdev, ops) (ethtool_ops = (ops))
24519 + * Some useful ethtool_ops methods that are device independent. If we find that
24520 + * all drivers want to do the same thing here, we can turn these into dev_()
24521 + * function calls.
24524 +#undef ethtool_op_get_link
24525 +#define ethtool_op_get_link _kc_ethtool_op_get_link
24526 +u32 _kc_ethtool_op_get_link(struct net_device *dev)
24528 + return netif_carrier_ok(dev) ? 1 : 0;
24531 +#undef ethtool_op_get_tx_csum
24532 +#define ethtool_op_get_tx_csum _kc_ethtool_op_get_tx_csum
24533 +u32 _kc_ethtool_op_get_tx_csum(struct net_device *dev)
24535 +#ifdef NETIF_F_IP_CSUM
24536 + return (dev->features & NETIF_F_IP_CSUM) != 0;
24542 +#undef ethtool_op_set_tx_csum
24543 +#define ethtool_op_set_tx_csum _kc_ethtool_op_set_tx_csum
24544 +int _kc_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
24546 +#ifdef NETIF_F_IP_CSUM
24548 +#ifdef NETIF_F_IPV6_CSUM
24549 + dev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
24551 + dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
24553 + dev->features |= NETIF_F_IP_CSUM;
24555 + dev->features &= ~NETIF_F_IP_CSUM;
24562 +#undef ethtool_op_get_sg
24563 +#define ethtool_op_get_sg _kc_ethtool_op_get_sg
24564 +u32 _kc_ethtool_op_get_sg(struct net_device *dev)
24567 + return (dev->features & NETIF_F_SG) != 0;
24573 +#undef ethtool_op_set_sg
24574 +#define ethtool_op_set_sg _kc_ethtool_op_set_sg
24575 +int _kc_ethtool_op_set_sg(struct net_device *dev, u32 data)
24579 + dev->features |= NETIF_F_SG;
24581 + dev->features &= ~NETIF_F_SG;
24587 +#undef ethtool_op_get_tso
24588 +#define ethtool_op_get_tso _kc_ethtool_op_get_tso
24589 +u32 _kc_ethtool_op_get_tso(struct net_device *dev)
24591 +#ifdef NETIF_F_TSO
24592 + return (dev->features & NETIF_F_TSO) != 0;
24598 +#undef ethtool_op_set_tso
24599 +#define ethtool_op_set_tso _kc_ethtool_op_set_tso
24600 +int _kc_ethtool_op_set_tso(struct net_device *dev, u32 data)
24602 +#ifdef NETIF_F_TSO
24604 + dev->features |= NETIF_F_TSO;
24606 + dev->features &= ~NETIF_F_TSO;
24612 +/* Handlers for each ethtool command */
24614 +static int ethtool_get_settings(struct net_device *dev, void *useraddr)
24616 + struct ethtool_cmd cmd = { ETHTOOL_GSET };
24619 + if (!ethtool_ops->get_settings)
24620 + return -EOPNOTSUPP;
24622 + err = ethtool_ops->get_settings(dev, &cmd);
24626 + if (copy_to_user(useraddr, &cmd, sizeof(cmd)))
24631 +static int ethtool_set_settings(struct net_device *dev, void *useraddr)
24633 + struct ethtool_cmd cmd;
24635 + if (!ethtool_ops->set_settings)
24636 + return -EOPNOTSUPP;
24638 + if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
24641 + return ethtool_ops->set_settings(dev, &cmd);
24644 +static int ethtool_get_drvinfo(struct net_device *dev, void *useraddr)
24646 + struct ethtool_drvinfo info;
24647 + struct ethtool_ops *ops = ethtool_ops;
24649 + if (!ops->get_drvinfo)
24650 + return -EOPNOTSUPP;
24652 + memset(&info, 0, sizeof(info));
24653 + info.cmd = ETHTOOL_GDRVINFO;
24654 + ops->get_drvinfo(dev, &info);
24656 + if (ops->self_test_count)
24657 + info.testinfo_len = ops->self_test_count(dev);
24658 + if (ops->get_stats_count)
24659 + info.n_stats = ops->get_stats_count(dev);
24660 + if (ops->get_regs_len)
24661 + info.regdump_len = ops->get_regs_len(dev);
24662 + if (ops->get_eeprom_len)
24663 + info.eedump_len = ops->get_eeprom_len(dev);
24665 + if (copy_to_user(useraddr, &info, sizeof(info)))
24670 +static int ethtool_get_regs(struct net_device *dev, char *useraddr)
24672 + struct ethtool_regs regs;
24673 + struct ethtool_ops *ops = ethtool_ops;
24677 + if (!ops->get_regs || !ops->get_regs_len)
24678 + return -EOPNOTSUPP;
24680 + if (copy_from_user(®s, useraddr, sizeof(regs)))
24683 + reglen = ops->get_regs_len(dev);
24684 + if (regs.len > reglen)
24685 + regs.len = reglen;
24687 + regbuf = kmalloc(reglen, GFP_USER);
24691 + ops->get_regs(dev, ®s, regbuf);
24694 + if (copy_to_user(useraddr, ®s, sizeof(regs)))
24696 + useraddr += offsetof(struct ethtool_regs, data);
24697 + if (copy_to_user(useraddr, regbuf, reglen))
24706 +static int ethtool_get_wol(struct net_device *dev, char *useraddr)
24708 + struct ethtool_wolinfo wol = { ETHTOOL_GWOL };
24710 + if (!ethtool_ops->get_wol)
24711 + return -EOPNOTSUPP;
24713 + ethtool_ops->get_wol(dev, &wol);
24715 + if (copy_to_user(useraddr, &wol, sizeof(wol)))
24720 +static int ethtool_set_wol(struct net_device *dev, char *useraddr)
24722 + struct ethtool_wolinfo wol;
24724 + if (!ethtool_ops->set_wol)
24725 + return -EOPNOTSUPP;
24727 + if (copy_from_user(&wol, useraddr, sizeof(wol)))
24730 + return ethtool_ops->set_wol(dev, &wol);
24733 +static int ethtool_get_msglevel(struct net_device *dev, char *useraddr)
24735 + struct ethtool_value edata = { ETHTOOL_GMSGLVL };
24737 + if (!ethtool_ops->get_msglevel)
24738 + return -EOPNOTSUPP;
24740 + edata.data = ethtool_ops->get_msglevel(dev);
24742 + if (copy_to_user(useraddr, &edata, sizeof(edata)))
24747 +static int ethtool_set_msglevel(struct net_device *dev, char *useraddr)
24749 + struct ethtool_value edata;
24751 + if (!ethtool_ops->set_msglevel)
24752 + return -EOPNOTSUPP;
24754 + if (copy_from_user(&edata, useraddr, sizeof(edata)))
24757 + ethtool_ops->set_msglevel(dev, edata.data);
24761 +static int ethtool_nway_reset(struct net_device *dev)
24763 + if (!ethtool_ops->nway_reset)
24764 + return -EOPNOTSUPP;
24766 + return ethtool_ops->nway_reset(dev);
24769 +static int ethtool_get_link(struct net_device *dev, void *useraddr)
24771 + struct ethtool_value edata = { ETHTOOL_GLINK };
24773 + if (!ethtool_ops->get_link)
24774 + return -EOPNOTSUPP;
24776 + edata.data = ethtool_ops->get_link(dev);
24778 + if (copy_to_user(useraddr, &edata, sizeof(edata)))
24783 +static int ethtool_get_eeprom(struct net_device *dev, void *useraddr)
24785 + struct ethtool_eeprom eeprom;
24786 + struct ethtool_ops *ops = ethtool_ops;
24790 + if (!ops->get_eeprom || !ops->get_eeprom_len)
24791 + return -EOPNOTSUPP;
24793 + if (copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
24796 + /* Check for wrap and zero */
24797 + if (eeprom.offset + eeprom.len <= eeprom.offset)
24800 + /* Check for exceeding total eeprom len */
24801 + if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev))
24804 + data = kmalloc(eeprom.len, GFP_USER);
24809 + if (copy_from_user(data, useraddr + sizeof(eeprom), eeprom.len))
24812 + ret = ops->get_eeprom(dev, &eeprom, data);
24817 + if (copy_to_user(useraddr, &eeprom, sizeof(eeprom)))
24819 + if (copy_to_user(useraddr + sizeof(eeprom), data, eeprom.len))
24828 +static int ethtool_set_eeprom(struct net_device *dev, void *useraddr)
24830 + struct ethtool_eeprom eeprom;
24831 + struct ethtool_ops *ops = ethtool_ops;
24835 + if (!ops->set_eeprom || !ops->get_eeprom_len)
24836 + return -EOPNOTSUPP;
24838 + if (copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
24841 + /* Check for wrap and zero */
24842 + if (eeprom.offset + eeprom.len <= eeprom.offset)
24845 + /* Check for exceeding total eeprom len */
24846 + if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev))
24849 + data = kmalloc(eeprom.len, GFP_USER);
24854 + if (copy_from_user(data, useraddr + sizeof(eeprom), eeprom.len))
24857 + ret = ops->set_eeprom(dev, &eeprom, data);
24861 + if (copy_to_user(useraddr + sizeof(eeprom), data, eeprom.len))
24869 +static int ethtool_get_coalesce(struct net_device *dev, void *useraddr)
24871 + struct ethtool_coalesce coalesce = { ETHTOOL_GCOALESCE };
24873 + if (!ethtool_ops->get_coalesce)
24874 + return -EOPNOTSUPP;
24876 + ethtool_ops->get_coalesce(dev, &coalesce);
24878 + if (copy_to_user(useraddr, &coalesce, sizeof(coalesce)))
24883 +static int ethtool_set_coalesce(struct net_device *dev, void *useraddr)
24885 + struct ethtool_coalesce coalesce;
24887 + if (!ethtool_ops->get_coalesce)
24888 + return -EOPNOTSUPP;
24890 + if (copy_from_user(&coalesce, useraddr, sizeof(coalesce)))
24893 + return ethtool_ops->set_coalesce(dev, &coalesce);
24896 +static int ethtool_get_ringparam(struct net_device *dev, void *useraddr)
24898 + struct ethtool_ringparam ringparam = { ETHTOOL_GRINGPARAM };
24900 + if (!ethtool_ops->get_ringparam)
24901 + return -EOPNOTSUPP;
24903 + ethtool_ops->get_ringparam(dev, &ringparam);
24905 + if (copy_to_user(useraddr, &ringparam, sizeof(ringparam)))
24910 +static int ethtool_set_ringparam(struct net_device *dev, void *useraddr)
24912 + struct ethtool_ringparam ringparam;
24914 + if (!ethtool_ops->get_ringparam)
24915 + return -EOPNOTSUPP;
24917 + if (copy_from_user(&ringparam, useraddr, sizeof(ringparam)))
24920 + return ethtool_ops->set_ringparam(dev, &ringparam);
24923 +static int ethtool_get_pauseparam(struct net_device *dev, void *useraddr)
24925 + struct ethtool_pauseparam pauseparam = { ETHTOOL_GPAUSEPARAM };
24927 + if (!ethtool_ops->get_pauseparam)
24928 + return -EOPNOTSUPP;
24930 + ethtool_ops->get_pauseparam(dev, &pauseparam);
24932 + if (copy_to_user(useraddr, &pauseparam, sizeof(pauseparam)))
24937 +static int ethtool_set_pauseparam(struct net_device *dev, void *useraddr)
24939 + struct ethtool_pauseparam pauseparam;
24941 + if (!ethtool_ops->get_pauseparam)
24942 + return -EOPNOTSUPP;
24944 + if (copy_from_user(&pauseparam, useraddr, sizeof(pauseparam)))
24947 + return ethtool_ops->set_pauseparam(dev, &pauseparam);
24950 +static int ethtool_get_rx_csum(struct net_device *dev, char *useraddr)
24952 + struct ethtool_value edata = { ETHTOOL_GRXCSUM };
24954 + if (!ethtool_ops->get_rx_csum)
24955 + return -EOPNOTSUPP;
24957 + edata.data = ethtool_ops->get_rx_csum(dev);
24959 + if (copy_to_user(useraddr, &edata, sizeof(edata)))
24964 +static int ethtool_set_rx_csum(struct net_device *dev, char *useraddr)
24966 + struct ethtool_value edata;
24968 + if (!ethtool_ops->set_rx_csum)
24969 + return -EOPNOTSUPP;
24971 + if (copy_from_user(&edata, useraddr, sizeof(edata)))
24974 + ethtool_ops->set_rx_csum(dev, edata.data);
24978 +static int ethtool_get_tx_csum(struct net_device *dev, char *useraddr)
24980 + struct ethtool_value edata = { ETHTOOL_GTXCSUM };
24982 + if (!ethtool_ops->get_tx_csum)
24983 + return -EOPNOTSUPP;
24985 + edata.data = ethtool_ops->get_tx_csum(dev);
24987 + if (copy_to_user(useraddr, &edata, sizeof(edata)))
24992 +static int ethtool_set_tx_csum(struct net_device *dev, char *useraddr)
24994 + struct ethtool_value edata;
24996 + if (!ethtool_ops->set_tx_csum)
24997 + return -EOPNOTSUPP;
24999 + if (copy_from_user(&edata, useraddr, sizeof(edata)))
25002 + return ethtool_ops->set_tx_csum(dev, edata.data);
25005 +static int ethtool_get_sg(struct net_device *dev, char *useraddr)
25007 + struct ethtool_value edata = { ETHTOOL_GSG };
25009 + if (!ethtool_ops->get_sg)
25010 + return -EOPNOTSUPP;
25012 + edata.data = ethtool_ops->get_sg(dev);
25014 + if (copy_to_user(useraddr, &edata, sizeof(edata)))
25019 +static int ethtool_set_sg(struct net_device *dev, char *useraddr)
25021 + struct ethtool_value edata;
25023 + if (!ethtool_ops->set_sg)
25024 + return -EOPNOTSUPP;
25026 + if (copy_from_user(&edata, useraddr, sizeof(edata)))
25029 + return ethtool_ops->set_sg(dev, edata.data);
25032 +static int ethtool_get_tso(struct net_device *dev, char *useraddr)
25034 + struct ethtool_value edata = { ETHTOOL_GTSO };
25036 + if (!ethtool_ops->get_tso)
25037 + return -EOPNOTSUPP;
25039 + edata.data = ethtool_ops->get_tso(dev);
25041 + if (copy_to_user(useraddr, &edata, sizeof(edata)))
25046 +static int ethtool_set_tso(struct net_device *dev, char *useraddr)
25048 + struct ethtool_value edata;
25050 + if (!ethtool_ops->set_tso)
25051 + return -EOPNOTSUPP;
25053 + if (copy_from_user(&edata, useraddr, sizeof(edata)))
25056 + return ethtool_ops->set_tso(dev, edata.data);
25059 +static int ethtool_self_test(struct net_device *dev, char *useraddr)
25061 + struct ethtool_test test;
25062 + struct ethtool_ops *ops = ethtool_ops;
25066 + if (!ops->self_test || !ops->self_test_count)
25067 + return -EOPNOTSUPP;
25069 + if (copy_from_user(&test, useraddr, sizeof(test)))
25072 + test.len = ops->self_test_count(dev);
25073 + data = kmalloc(test.len * sizeof(u64), GFP_USER);
25077 + ops->self_test(dev, &test, data);
25080 + if (copy_to_user(useraddr, &test, sizeof(test)))
25082 + useraddr += sizeof(test);
25083 + if (copy_to_user(useraddr, data, test.len * sizeof(u64)))
25092 +static int ethtool_get_strings(struct net_device *dev, void *useraddr)
25094 + struct ethtool_gstrings gstrings;
25095 + struct ethtool_ops *ops = ethtool_ops;
25099 + if (!ops->get_strings)
25100 + return -EOPNOTSUPP;
25102 + if (copy_from_user(&gstrings, useraddr, sizeof(gstrings)))
25105 + switch (gstrings.string_set) {
25106 + case ETH_SS_TEST:
25107 + if (!ops->self_test_count)
25108 + return -EOPNOTSUPP;
25109 + gstrings.len = ops->self_test_count(dev);
25111 + case ETH_SS_STATS:
25112 + if (!ops->get_stats_count)
25113 + return -EOPNOTSUPP;
25114 + gstrings.len = ops->get_stats_count(dev);
25120 + data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
25124 + ops->get_strings(dev, gstrings.string_set, data);
25127 + if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
25129 + useraddr += sizeof(gstrings);
25130 + if (copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN))
25139 +static int ethtool_phys_id(struct net_device *dev, void *useraddr)
25141 + struct ethtool_value id;
25143 + if (!ethtool_ops->phys_id)
25144 + return -EOPNOTSUPP;
25146 + if (copy_from_user(&id, useraddr, sizeof(id)))
25149 + return ethtool_ops->phys_id(dev, id.data);
25152 +static int ethtool_get_stats(struct net_device *dev, void *useraddr)
25154 + struct ethtool_stats stats;
25155 + struct ethtool_ops *ops = ethtool_ops;
25159 + if (!ops->get_ethtool_stats || !ops->get_stats_count)
25160 + return -EOPNOTSUPP;
25162 + if (copy_from_user(&stats, useraddr, sizeof(stats)))
25165 + stats.n_stats = ops->get_stats_count(dev);
25166 + data = kmalloc(stats.n_stats * sizeof(u64), GFP_USER);
25170 + ops->get_ethtool_stats(dev, &stats, data);
25173 + if (copy_to_user(useraddr, &stats, sizeof(stats)))
25175 + useraddr += sizeof(stats);
25176 + if (copy_to_user(useraddr, data, stats.n_stats * sizeof(u64)))
25185 +/* The main entry point in this file. Called from net/core/dev.c */
25187 +#define ETHTOOL_OPS_COMPAT
25188 +int ethtool_ioctl(struct ifreq *ifr)
25190 + struct net_device *dev = __dev_get_by_name(ifr->ifr_name);
25191 + void *useraddr = (void *) ifr->ifr_data;
25195 + * XXX: This can be pushed down into the ethtool_* handlers that
25196 + * need it. Keep existing behavior for the moment.
25198 + if (!capable(CAP_NET_ADMIN))
25201 + if (!dev || !netif_device_present(dev))
25204 + if (copy_from_user(ðcmd, useraddr, sizeof (ethcmd)))
25207 + switch (ethcmd) {
25208 + case ETHTOOL_GSET:
25209 + return ethtool_get_settings(dev, useraddr);
25210 + case ETHTOOL_SSET:
25211 + return ethtool_set_settings(dev, useraddr);
25212 + case ETHTOOL_GDRVINFO:
25213 + return ethtool_get_drvinfo(dev, useraddr);
25214 + case ETHTOOL_GREGS:
25215 + return ethtool_get_regs(dev, useraddr);
25216 + case ETHTOOL_GWOL:
25217 + return ethtool_get_wol(dev, useraddr);
25218 + case ETHTOOL_SWOL:
25219 + return ethtool_set_wol(dev, useraddr);
25220 + case ETHTOOL_GMSGLVL:
25221 + return ethtool_get_msglevel(dev, useraddr);
25222 + case ETHTOOL_SMSGLVL:
25223 + return ethtool_set_msglevel(dev, useraddr);
25224 + case ETHTOOL_NWAY_RST:
25225 + return ethtool_nway_reset(dev);
25226 + case ETHTOOL_GLINK:
25227 + return ethtool_get_link(dev, useraddr);
25228 + case ETHTOOL_GEEPROM:
25229 + return ethtool_get_eeprom(dev, useraddr);
25230 + case ETHTOOL_SEEPROM:
25231 + return ethtool_set_eeprom(dev, useraddr);
25232 + case ETHTOOL_GCOALESCE:
25233 + return ethtool_get_coalesce(dev, useraddr);
25234 + case ETHTOOL_SCOALESCE:
25235 + return ethtool_set_coalesce(dev, useraddr);
25236 + case ETHTOOL_GRINGPARAM:
25237 + return ethtool_get_ringparam(dev, useraddr);
25238 + case ETHTOOL_SRINGPARAM:
25239 + return ethtool_set_ringparam(dev, useraddr);
25240 + case ETHTOOL_GPAUSEPARAM:
25241 + return ethtool_get_pauseparam(dev, useraddr);
25242 + case ETHTOOL_SPAUSEPARAM:
25243 + return ethtool_set_pauseparam(dev, useraddr);
25244 + case ETHTOOL_GRXCSUM:
25245 + return ethtool_get_rx_csum(dev, useraddr);
25246 + case ETHTOOL_SRXCSUM:
25247 + return ethtool_set_rx_csum(dev, useraddr);
25248 + case ETHTOOL_GTXCSUM:
25249 + return ethtool_get_tx_csum(dev, useraddr);
25250 + case ETHTOOL_STXCSUM:
25251 + return ethtool_set_tx_csum(dev, useraddr);
25252 + case ETHTOOL_GSG:
25253 + return ethtool_get_sg(dev, useraddr);
25254 + case ETHTOOL_SSG:
25255 + return ethtool_set_sg(dev, useraddr);
25256 + case ETHTOOL_GTSO:
25257 + return ethtool_get_tso(dev, useraddr);
25258 + case ETHTOOL_STSO:
25259 + return ethtool_set_tso(dev, useraddr);
25260 + case ETHTOOL_TEST:
25261 + return ethtool_self_test(dev, useraddr);
25262 + case ETHTOOL_GSTRINGS:
25263 + return ethtool_get_strings(dev, useraddr);
25264 + case ETHTOOL_PHYS_ID:
25265 + return ethtool_phys_id(dev, useraddr);
25266 + case ETHTOOL_GSTATS:
25267 + return ethtool_get_stats(dev, useraddr);
25269 + return -EOPNOTSUPP;
25272 + return -EOPNOTSUPP;
25275 +#define mii_if_info _kc_mii_if_info
25276 +struct _kc_mii_if_info {
25280 + int reg_num_mask;
25282 + unsigned int full_duplex : 1; /* is full duplex? */
25283 + unsigned int force_media : 1; /* is autoneg. disabled? */
25285 + struct net_device *dev;
25286 + int (*mdio_read) (struct net_device *dev, int phy_id, int location);
25287 + void (*mdio_write) (struct net_device *dev, int phy_id, int location, int val);
25290 +struct ethtool_cmd;
25291 +struct mii_ioctl_data;
25293 +#undef mii_link_ok
25294 +#define mii_link_ok _kc_mii_link_ok
25295 +#undef mii_nway_restart
25296 +#define mii_nway_restart _kc_mii_nway_restart
25297 +#undef mii_ethtool_gset
25298 +#define mii_ethtool_gset _kc_mii_ethtool_gset
25299 +#undef mii_ethtool_sset
25300 +#define mii_ethtool_sset _kc_mii_ethtool_sset
25301 +#undef mii_check_link
25302 +#define mii_check_link _kc_mii_check_link
25303 +#undef generic_mii_ioctl
25304 +#define generic_mii_ioctl _kc_generic_mii_ioctl
25305 +extern int _kc_mii_link_ok (struct mii_if_info *mii);
25306 +extern int _kc_mii_nway_restart (struct mii_if_info *mii);
25307 +extern int _kc_mii_ethtool_gset(struct mii_if_info *mii,
25308 + struct ethtool_cmd *ecmd);
25309 +extern int _kc_mii_ethtool_sset(struct mii_if_info *mii,
25310 + struct ethtool_cmd *ecmd);
25311 +extern void _kc_mii_check_link (struct mii_if_info *mii);
25312 +extern int _kc_generic_mii_ioctl(struct mii_if_info *mii_if,
25313 + struct mii_ioctl_data *mii_data, int cmd,
25314 + unsigned int *duplex_changed);
25317 +struct _kc_pci_dev_ext {
25318 + struct pci_dev *dev;
25319 + void *pci_drvdata;
25320 + struct pci_driver *driver;
25323 +struct _kc_net_dev_ext {
25324 + struct net_device *dev;
25325 + unsigned int carrier;
25329 +/**************************************/
25332 +int _kc_mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
25334 + struct net_device *dev = mii->dev;
25335 + u32 advert, bmcr, lpa, nego;
25337 + ecmd->supported =
25338 + (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
25339 + SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
25340 + SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII);
25342 + /* only supports twisted-pair */
25343 + ecmd->port = PORT_MII;
25345 + /* only supports internal transceiver */
25346 + ecmd->transceiver = XCVR_INTERNAL;
25348 + /* this isn't fully supported at higher layers */
25349 + ecmd->phy_address = mii->phy_id;
25351 + ecmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
25352 + advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE);
25353 + if (advert & ADVERTISE_10HALF)
25354 + ecmd->advertising |= ADVERTISED_10baseT_Half;
25355 + if (advert & ADVERTISE_10FULL)
25356 + ecmd->advertising |= ADVERTISED_10baseT_Full;
25357 + if (advert & ADVERTISE_100HALF)
25358 + ecmd->advertising |= ADVERTISED_100baseT_Half;
25359 + if (advert & ADVERTISE_100FULL)
25360 + ecmd->advertising |= ADVERTISED_100baseT_Full;
25362 + bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
25363 + lpa = mii->mdio_read(dev, mii->phy_id, MII_LPA);
25364 + if (bmcr & BMCR_ANENABLE) {
25365 + ecmd->advertising |= ADVERTISED_Autoneg;
25366 + ecmd->autoneg = AUTONEG_ENABLE;
25368 + nego = mii_nway_result(advert & lpa);
25369 + if (nego == LPA_100FULL || nego == LPA_100HALF)
25370 + ecmd->speed = SPEED_100;
25372 + ecmd->speed = SPEED_10;
25373 + if (nego == LPA_100FULL || nego == LPA_10FULL) {
25374 + ecmd->duplex = DUPLEX_FULL;
25375 + mii->full_duplex = 1;
25377 + ecmd->duplex = DUPLEX_HALF;
25378 + mii->full_duplex = 0;
25381 + ecmd->autoneg = AUTONEG_DISABLE;
25383 + ecmd->speed = (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
25384 + ecmd->duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
25387 + /* ignore maxtxpkt, maxrxpkt for now */
25392 +int _kc_mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
25394 + struct net_device *dev = mii->dev;
25396 + if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
25398 + if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
25400 + if (ecmd->port != PORT_MII)
25402 + if (ecmd->transceiver != XCVR_INTERNAL)
25404 + if (ecmd->phy_address != mii->phy_id)
25406 + if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE)
25409 + /* ignore supported, maxtxpkt, maxrxpkt */
25411 + if (ecmd->autoneg == AUTONEG_ENABLE) {
25412 + u32 bmcr, advert, tmp;
25414 + if ((ecmd->advertising & (ADVERTISED_10baseT_Half |
25415 + ADVERTISED_10baseT_Full |
25416 + ADVERTISED_100baseT_Half |
25417 + ADVERTISED_100baseT_Full)) == 0)
25420 + /* advertise only what has been requested */
25421 + advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE);
25422 + tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
25423 + if (ADVERTISED_10baseT_Half)
25424 + tmp |= ADVERTISE_10HALF;
25425 + if (ADVERTISED_10baseT_Full)
25426 + tmp |= ADVERTISE_10FULL;
25427 + if (ADVERTISED_100baseT_Half)
25428 + tmp |= ADVERTISE_100HALF;
25429 + if (ADVERTISED_100baseT_Full)
25430 + tmp |= ADVERTISE_100FULL;
25431 + if (advert != tmp) {
25432 + mii->mdio_write(dev, mii->phy_id, MII_ADVERTISE, tmp);
25433 + mii->advertising = tmp;
25436 + /* turn on autonegotiation, and force a renegotiate */
25437 + bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
25438 + bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
25439 + mii->mdio_write(dev, mii->phy_id, MII_BMCR, bmcr);
25441 + mii->force_media = 0;
25445 + /* turn off auto negotiation, set speed and duplexity */
25446 + bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
25447 + tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX);
25448 + if (ecmd->speed == SPEED_100)
25449 + tmp |= BMCR_SPEED100;
25450 + if (ecmd->duplex == DUPLEX_FULL) {
25451 + tmp |= BMCR_FULLDPLX;
25452 + mii->full_duplex = 1;
25454 + mii->full_duplex = 0;
25456 + mii->mdio_write(dev, mii->phy_id, MII_BMCR, tmp);
25458 + mii->force_media = 1;
25463 +int _kc_mii_link_ok (struct mii_if_info *mii)
25465 + /* first, a dummy read, needed to latch some MII phys */
25466 + mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR);
25467 + if (mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR) & BMSR_LSTATUS)
25472 +int _kc_mii_nway_restart (struct mii_if_info *mii)
25477 + /* if autoneg is off, it's an error */
25478 + bmcr = mii->mdio_read(mii->dev, mii->phy_id, MII_BMCR);
25480 + if (bmcr & BMCR_ANENABLE) {
25481 + bmcr |= BMCR_ANRESTART;
25482 + mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR, bmcr);
25489 +void _kc_mii_check_link (struct mii_if_info *mii)
25491 + int cur_link = mii_link_ok(mii);
25492 + int prev_link = netif_carrier_ok(mii->dev);
25494 + if (cur_link && !prev_link)
25495 + netif_carrier_on(mii->dev);
25496 + else if (prev_link && !cur_link)
25497 + netif_carrier_off(mii->dev);
25500 +int _kc_generic_mii_ioctl(struct mii_if_info *mii_if,
25501 + struct mii_ioctl_data *mii_data, int cmd,
25502 + unsigned int *duplex_chg_out)
25505 + unsigned int duplex_changed = 0;
25507 + if (duplex_chg_out)
25508 + *duplex_chg_out = 0;
25510 + mii_data->phy_id &= mii_if->phy_id_mask;
25511 + mii_data->reg_num &= mii_if->reg_num_mask;
25514 + case SIOCDEVPRIVATE: /* binary compat, remove in 2.5 */
25515 + case SIOCGMIIPHY:
25516 + mii_data->phy_id = mii_if->phy_id;
25517 + /* fall through */
25519 + case SIOCDEVPRIVATE + 1:/* binary compat, remove in 2.5 */
25520 + case SIOCGMIIREG:
25521 + mii_data->val_out =
25522 + mii_if->mdio_read(mii_if->dev, mii_data->phy_id,
25523 + mii_data->reg_num);
25526 + case SIOCDEVPRIVATE + 2:/* binary compat, remove in 2.5 */
25527 + case SIOCSMIIREG: {
25528 + u16 val = mii_data->val_in;
25530 + if (!capable(CAP_NET_ADMIN))
25533 + if (mii_data->phy_id == mii_if->phy_id) {
25534 + switch(mii_data->reg_num) {
25536 + unsigned int new_duplex = 0;
25537 + if (val & (BMCR_RESET|BMCR_ANENABLE))
25538 + mii_if->force_media = 0;
25540 + mii_if->force_media = 1;
25541 + if (mii_if->force_media &&
25542 + (val & BMCR_FULLDPLX))
25544 + if (mii_if->full_duplex != new_duplex) {
25545 + duplex_changed = 1;
25546 + mii_if->full_duplex = new_duplex;
25550 + case MII_ADVERTISE:
25551 + mii_if->advertising = val;
25559 + mii_if->mdio_write(mii_if->dev, mii_data->phy_id,
25560 + mii_data->reg_num, val);
25565 + rc = -EOPNOTSUPP;
25569 + if ((rc == 0) && (duplex_chg_out) && (duplex_changed))
25570 + *duplex_chg_out = 1;