Setting tag linux-2.6-22-50
[linux-2.6.git] / linux-2.6-015-igb.patch
1 Index: linux-2.6.22/drivers/net/Kconfig
2 ===================================================================
3 --- linux-2.6.22.orig/drivers/net/Kconfig       2009-12-18 12:37:55.000000000 -0500
4 +++ linux-2.6.22/drivers/net/Kconfig    2009-12-18 12:39:22.000000000 -0500
5 @@ -2016,6 +2016,28 @@
6           <file:Documentation/networking/net-modules.txt>.  The module
7           will be called e1000e.
8  
9 +config IGB
10 +       tristate "Intel(R) 82575 Gigabit Ethernet support"
11 +       depends on PCI
12 +       ---help---
13 +         This driver supports Intel(R) 82575 gigabit ethernet adapters.
14 +         For more information on how to identify your adapter, go to the
15 +         Adapter & Driver ID Guide at:
16 +
17 +         <http://support.intel.com/support/network/adapter/pro100/21397.htm>
18 +
19 +         For general information and support, go to the Intel support
20 +         website at:
21 +
22 +         <http://support.intel.com>
23 +
24 +         More specific information on configuring the driver is in
25 +         <file:Documentation/networking/igb.txt>.
26 +
27 +         To compile this driver as a module, choose M here and read
28 +         <file:Documentation/networking/net-modules.txt>.  The module
29 +         will be called igb.
30 +
31  source "drivers/net/ixp2000/Kconfig"
32  
33  config MYRI_SBUS
34 Index: linux-2.6.22/drivers/net/Makefile
35 ===================================================================
36 --- linux-2.6.22.orig/drivers/net/Makefile      2009-12-18 12:38:07.000000000 -0500
37 +++ linux-2.6.22/drivers/net/Makefile   2009-12-18 12:39:22.000000000 -0500
38 @@ -12,6 +12,7 @@
39  obj-$(CONFIG_BONDING) += bonding/
40  obj-$(CONFIG_ATL1) += atl1/
41  obj-$(CONFIG_GIANFAR) += gianfar_driver.o
42 +obj-$(CONFIG_IGB) += igb/
43  
44  gianfar_driver-objs := gianfar.o \
45                 gianfar_ethtool.o \
46 Index: linux-2.6.22/drivers/net/igb/Makefile
47 ===================================================================
48 --- /dev/null   1970-01-01 00:00:00.000000000 +0000
49 +++ linux-2.6.22/drivers/net/igb/Makefile       2009-12-18 12:39:22.000000000 -0500
50 @@ -0,0 +1,37 @@
51 +################################################################################
52 +#
53 +# Intel 82575 PCI-Express Ethernet Linux driver
54 +# Copyright(c) 1999 - 2009 Intel Corporation.
55 +#
56 +# This program is free software; you can redistribute it and/or modify it
57 +# under the terms and conditions of the GNU General Public License,
58 +# version 2, as published by the Free Software Foundation.
59 +#
60 +# This program is distributed in the hope it will be useful, but WITHOUT
61 +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
62 +# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
63 +# more details.
64 +#
65 +# You should have received a copy of the GNU General Public License along with
66 +# this program; if not, write to the Free Software Foundation, Inc.,
67 +# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
68 +#
69 +# The full GNU General Public License is included in this distribution in
70 +# the file called "COPYING".
71 +#
72 +# Contact Information:
73 +# Linux NICS <linux.nics@intel.com>
74 +# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
75 +# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
76 +#
77 +################################################################################
78 +
79 +#
80 +# Makefile for the Intel(R) 82575 PCI-Express ethernet driver
81 +#
82 +
83 +obj-$(CONFIG_IGB) += igb.o
84 +
85 +igb-objs := igb_main.o igb_ethtool.o igb_param.o kcompat.o e1000_api.o e1000_manage.o e1000_82575.o \
86 +           e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o
87 +
88 Index: linux-2.6.22/drivers/net/igb/e1000_82575.c
89 ===================================================================
90 --- /dev/null   1970-01-01 00:00:00.000000000 +0000
91 +++ linux-2.6.22/drivers/net/igb/e1000_82575.c  2009-12-18 12:39:22.000000000 -0500
92 @@ -0,0 +1,1580 @@
93 +/*******************************************************************************
94 +
95 +  Intel(R) Gigabit Ethernet Linux driver
96 +  Copyright(c) 2007-2009 Intel Corporation.
97 +
98 +  This program is free software; you can redistribute it and/or modify it
99 +  under the terms and conditions of the GNU General Public License,
100 +  version 2, as published by the Free Software Foundation.
101 +
102 +  This program is distributed in the hope it will be useful, but WITHOUT
103 +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
104 +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
105 +  more details.
106 +
107 +  You should have received a copy of the GNU General Public License along with
108 +  this program; if not, write to the Free Software Foundation, Inc.,
109 +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
110 +
111 +  The full GNU General Public License is included in this distribution in
112 +  the file called "COPYING".
113 +
114 +  Contact Information:
115 +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
116 +  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
117 +
118 +*******************************************************************************/
119 +
120 +/*
121 + * 82575EB Gigabit Network Connection
122 + * 82575EB Gigabit Backplane Connection
123 + * 82575GB Gigabit Network Connection
124 + * 82576 Gigabit Network Connection
125 + * 82576 Quad Port Gigabit Mezzanine Adapter
126 + */
127 +
128 +#include "e1000_api.h"
129 +
130 +static s32  e1000_init_phy_params_82575(struct e1000_hw *hw);
131 +static s32  e1000_init_nvm_params_82575(struct e1000_hw *hw);
132 +static s32  e1000_init_mac_params_82575(struct e1000_hw *hw);
133 +static s32  e1000_acquire_phy_82575(struct e1000_hw *hw);
134 +static void e1000_release_phy_82575(struct e1000_hw *hw);
135 +static s32  e1000_acquire_nvm_82575(struct e1000_hw *hw);
136 +static void e1000_release_nvm_82575(struct e1000_hw *hw);
137 +static s32  e1000_check_for_link_82575(struct e1000_hw *hw);
138 +static s32  e1000_get_cfg_done_82575(struct e1000_hw *hw);
139 +static s32  e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
140 +                                         u16 *duplex);
141 +static s32  e1000_init_hw_82575(struct e1000_hw *hw);
142 +static s32  e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw);
143 +static s32  e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
144 +                                           u16 *data);
145 +static s32  e1000_reset_hw_82575(struct e1000_hw *hw);
146 +static s32  e1000_set_d0_lplu_state_82575(struct e1000_hw *hw,
147 +                                          bool active);
148 +static s32  e1000_setup_copper_link_82575(struct e1000_hw *hw);
149 +static s32  e1000_setup_serdes_link_82575(struct e1000_hw *hw);
150 +static s32  e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data);
151 +static s32  e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw,
152 +                                            u32 offset, u16 data);
153 +static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw);
154 +static s32  e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
155 +static s32  e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
156 +                                                 u16 *speed, u16 *duplex);
157 +static s32  e1000_get_phy_id_82575(struct e1000_hw *hw);
158 +static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
159 +static bool e1000_sgmii_active_82575(struct e1000_hw *hw);
160 +static s32  e1000_reset_init_script_82575(struct e1000_hw *hw);
161 +static s32  e1000_read_mac_addr_82575(struct e1000_hw *hw);
162 +static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw);
163 +static void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw);
164 +static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw);
165 +
166 +/**
167 + *  e1000_init_phy_params_82575 - Init PHY func ptrs.
168 + *  @hw: pointer to the HW structure
169 + **/
170 +static s32 e1000_init_phy_params_82575(struct e1000_hw *hw)
171 +{
172 +       struct e1000_phy_info *phy = &hw->phy;
173 +       s32 ret_val = E1000_SUCCESS;
174 +
175 +       DEBUGFUNC("e1000_init_phy_params_82575");
176 +
177 +       if (hw->phy.media_type != e1000_media_type_copper) {
178 +               phy->type = e1000_phy_none;
179 +               goto out;
180 +       }
181 +
182 +       phy->ops.power_up   = e1000_power_up_phy_copper;
183 +       phy->ops.power_down = e1000_power_down_phy_copper_82575;
184 +
185 +       phy->autoneg_mask           = AUTONEG_ADVERTISE_SPEED_DEFAULT;
186 +       phy->reset_delay_us         = 100;
187 +
188 +       phy->ops.acquire            = e1000_acquire_phy_82575;
189 +       phy->ops.check_reset_block  = e1000_check_reset_block_generic;
190 +       phy->ops.commit             = e1000_phy_sw_reset_generic;
191 +       phy->ops.get_cfg_done       = e1000_get_cfg_done_82575;
192 +       phy->ops.release            = e1000_release_phy_82575;
193 +
194 +       if (e1000_sgmii_active_82575(hw)) {
195 +               phy->ops.reset      = e1000_phy_hw_reset_sgmii_82575;
196 +               phy->ops.read_reg   = e1000_read_phy_reg_sgmii_82575;
197 +               phy->ops.write_reg  = e1000_write_phy_reg_sgmii_82575;
198 +       } else {
199 +               phy->ops.reset      = e1000_phy_hw_reset_generic;
200 +               phy->ops.read_reg   = e1000_read_phy_reg_igp;
201 +               phy->ops.write_reg  = e1000_write_phy_reg_igp;
202 +       }
203 +
204 +       /* Set phy->phy_addr and phy->id. */
205 +       ret_val = e1000_get_phy_id_82575(hw);
206 +
207 +       /* Verify phy id and set remaining function pointers */
208 +       switch (phy->id) {
209 +       case M88E1111_I_PHY_ID:
210 +               phy->type                   = e1000_phy_m88;
211 +               phy->ops.check_polarity     = e1000_check_polarity_m88;
212 +               phy->ops.get_info           = e1000_get_phy_info_m88;
213 +               phy->ops.get_cable_length   = e1000_get_cable_length_m88;
214 +               phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
215 +               break;
216 +       case IGP03E1000_E_PHY_ID:
217 +       case IGP04E1000_E_PHY_ID:
218 +               phy->type                   = e1000_phy_igp_3;
219 +               phy->ops.check_polarity     = e1000_check_polarity_igp;
220 +               phy->ops.get_info           = e1000_get_phy_info_igp;
221 +               phy->ops.get_cable_length   = e1000_get_cable_length_igp_2;
222 +               phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
223 +               phy->ops.set_d0_lplu_state  = e1000_set_d0_lplu_state_82575;
224 +               phy->ops.set_d3_lplu_state  = e1000_set_d3_lplu_state_generic;
225 +               break;
226 +       default:
227 +               ret_val = -E1000_ERR_PHY;
228 +               goto out;
229 +       }
230 +
231 +out:
232 +       return ret_val;
233 +}
234 +
235 +/**
236 + *  e1000_init_nvm_params_82575 - Init NVM func ptrs.
237 + *  @hw: pointer to the HW structure
238 + **/
239 +static s32 e1000_init_nvm_params_82575(struct e1000_hw *hw)
240 +{
241 +       struct e1000_nvm_info *nvm = &hw->nvm;
242 +       u32 eecd = E1000_READ_REG(hw, E1000_EECD);
243 +       u16 size;
244 +
245 +       DEBUGFUNC("e1000_init_nvm_params_82575");
246 +
247 +       nvm->opcode_bits        = 8;
248 +       nvm->delay_usec         = 1;
249 +       switch (nvm->override) {
250 +       case e1000_nvm_override_spi_large:
251 +               nvm->page_size    = 32;
252 +               nvm->address_bits = 16;
253 +               break;
254 +       case e1000_nvm_override_spi_small:
255 +               nvm->page_size    = 8;
256 +               nvm->address_bits = 8;
257 +               break;
258 +       default:
259 +               nvm->page_size    = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
260 +               nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
261 +               break;
262 +       }
263 +
264 +       nvm->type              = e1000_nvm_eeprom_spi;
265 +
266 +       size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
267 +                         E1000_EECD_SIZE_EX_SHIFT);
268 +
269 +       /*
270 +        * Added to a constant, "size" becomes the left-shift value
271 +        * for setting word_size.
272 +        */
273 +       size += NVM_WORD_SIZE_BASE_SHIFT;
274 +
275 +       /* EEPROM access above 16k is unsupported */
276 +       if (size > 14)
277 +               size = 14;
278 +       nvm->word_size = 1 << size;
279 +
280 +       /* Function Pointers */
281 +       nvm->ops.acquire       = e1000_acquire_nvm_82575;
282 +       nvm->ops.read          = e1000_read_nvm_eerd;
283 +       nvm->ops.release       = e1000_release_nvm_82575;
284 +       nvm->ops.update        = e1000_update_nvm_checksum_generic;
285 +       nvm->ops.valid_led_default = e1000_valid_led_default_82575;
286 +       nvm->ops.validate      = e1000_validate_nvm_checksum_generic;
287 +       nvm->ops.write         = e1000_write_nvm_spi;
288 +
289 +       return E1000_SUCCESS;
290 +}
291 +
292 +/**
293 + *  e1000_init_mac_params_82575 - Init MAC func ptrs.
294 + *  @hw: pointer to the HW structure
295 + **/
296 +static s32 e1000_init_mac_params_82575(struct e1000_hw *hw)
297 +{
298 +       struct e1000_mac_info *mac = &hw->mac;
299 +       struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
300 +       u32 ctrl_ext = 0;
301 +
302 +       DEBUGFUNC("e1000_init_mac_params_82575");
303 +
304 +       /* Set media type */
305 +        /*
306 +        * The 82575 uses bits 22:23 for link mode. The mode can be changed
307 +         * based on the EEPROM. We cannot rely upon device ID. There
308 +         * is no distinguishable difference between fiber and internal
309 +         * SerDes mode on the 82575. There can be an external PHY attached
310 +         * on the SGMII interface. For this, we'll set sgmii_active to true.
311 +         */
312 +       hw->phy.media_type = e1000_media_type_copper;
313 +       dev_spec->sgmii_active = false;
314 +
315 +       ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
316 +       switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
317 +       case E1000_CTRL_EXT_LINK_MODE_SGMII:
318 +               dev_spec->sgmii_active = true;
319 +               ctrl_ext |= E1000_CTRL_I2C_ENA;
320 +               break;
321 +       case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
322 +               hw->phy.media_type = e1000_media_type_internal_serdes;
323 +               ctrl_ext |= E1000_CTRL_I2C_ENA;
324 +               break;
325 +       default:
326 +               ctrl_ext &= ~E1000_CTRL_I2C_ENA;
327 +               break;
328 +       }
329 +
330 +       E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
331 +
332 +       /* Set mta register count */
333 +       mac->mta_reg_count = 128;
334 +       /* Set uta register count */
335 +       mac->uta_reg_count = (hw->mac.type == e1000_82575) ? 0 : 128;
336 +       /* Set rar entry count */
337 +       mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
338 +       if (mac->type == e1000_82576)
339 +               mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
340 +       /* Set if part includes ASF firmware */
341 +       mac->asf_firmware_present = true;
342 +       /* Set if manageability features are enabled. */
343 +       mac->arc_subsystem_valid =
344 +               (E1000_READ_REG(hw, E1000_FWSM) & E1000_FWSM_MODE_MASK)
345 +                       ? true : false;
346 +
347 +       /* Function pointers */
348 +
349 +       /* bus type/speed/width */
350 +       mac->ops.get_bus_info = e1000_get_bus_info_pcie_generic;
351 +       /* reset */
352 +       mac->ops.reset_hw = e1000_reset_hw_82575;
353 +       /* hw initialization */
354 +       mac->ops.init_hw = e1000_init_hw_82575;
355 +       /* link setup */
356 +       mac->ops.setup_link = e1000_setup_link_generic;
357 +       /* physical interface link setup */
358 +       mac->ops.setup_physical_interface =
359 +               (hw->phy.media_type == e1000_media_type_copper)
360 +                       ? e1000_setup_copper_link_82575
361 +                       : e1000_setup_serdes_link_82575;
362 +       /* physical interface shutdown */
363 +       mac->ops.shutdown_serdes = e1000_shutdown_serdes_link_82575;
364 +       /* check for link */
365 +       mac->ops.check_for_link = e1000_check_for_link_82575;
366 +       /* receive address register setting */
367 +       mac->ops.rar_set = e1000_rar_set_generic;
368 +       /* read mac address */
369 +       mac->ops.read_mac_addr = e1000_read_mac_addr_82575;
370 +       /* multicast address update */
371 +       mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
372 +       /* writing VFTA */
373 +       mac->ops.write_vfta = e1000_write_vfta_generic;
374 +       /* clearing VFTA */
375 +       mac->ops.clear_vfta = e1000_clear_vfta_generic;
376 +       /* setting MTA */
377 +       mac->ops.mta_set = e1000_mta_set_generic;
378 +       /* ID LED init */
379 +       mac->ops.id_led_init = e1000_id_led_init_generic;
380 +       /* blink LED */
381 +       mac->ops.blink_led = e1000_blink_led_generic;
382 +       /* setup LED */
383 +       mac->ops.setup_led = e1000_setup_led_generic;
384 +       /* cleanup LED */
385 +       mac->ops.cleanup_led = e1000_cleanup_led_generic;
386 +       /* turn on/off LED */
387 +       mac->ops.led_on = e1000_led_on_generic;
388 +       mac->ops.led_off = e1000_led_off_generic;
389 +       /* clear hardware counters */
390 +       mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82575;
391 +       /* link info */
392 +       mac->ops.get_link_up_info = e1000_get_link_up_info_82575;
393 +
394 +       /* set lan id for port to determine which phy lock to use */
395 +       hw->mac.ops.set_lan_id(hw);
396 +
397 +       return E1000_SUCCESS;
398 +}
399 +
400 +/**
401 + *  e1000_init_function_pointers_82575 - Init func ptrs.
402 + *  @hw: pointer to the HW structure
403 + *
404 + *  Called to initialize all function pointers and parameters.
405 + **/
406 +void e1000_init_function_pointers_82575(struct e1000_hw *hw)
407 +{
408 +       DEBUGFUNC("e1000_init_function_pointers_82575");
409 +
410 +       hw->mac.ops.init_params = e1000_init_mac_params_82575;
411 +       hw->nvm.ops.init_params = e1000_init_nvm_params_82575;
412 +       hw->phy.ops.init_params = e1000_init_phy_params_82575;
413 +       hw->mbx.ops.init_params = e1000_init_mbx_params_pf;
414 +}
415 +
416 +/**
417 + *  e1000_acquire_phy_82575 - Acquire rights to access PHY
418 + *  @hw: pointer to the HW structure
419 + *
420 + *  Acquire access rights to the correct PHY.
421 + **/
422 +static s32 e1000_acquire_phy_82575(struct e1000_hw *hw)
423 +{
424 +       u16 mask = E1000_SWFW_PHY0_SM;
425 +
426 +       DEBUGFUNC("e1000_acquire_phy_82575");
427 +
428 +       if (hw->bus.func == E1000_FUNC_1)
429 +               mask = E1000_SWFW_PHY1_SM;
430 +
431 +       return e1000_acquire_swfw_sync_82575(hw, mask);
432 +}
433 +
434 +/**
435 + *  e1000_release_phy_82575 - Release rights to access PHY
436 + *  @hw: pointer to the HW structure
437 + *
438 + *  A wrapper to release access rights to the correct PHY.
439 + **/
440 +static void e1000_release_phy_82575(struct e1000_hw *hw)
441 +{
442 +       u16 mask = E1000_SWFW_PHY0_SM;
443 +
444 +       DEBUGFUNC("e1000_release_phy_82575");
445 +
446 +       if (hw->bus.func == E1000_FUNC_1)
447 +               mask = E1000_SWFW_PHY1_SM;
448 +
449 +       e1000_release_swfw_sync_82575(hw, mask);
450 +}
451 +
452 +/**
453 + *  e1000_read_phy_reg_sgmii_82575 - Read PHY register using sgmii
454 + *  @hw: pointer to the HW structure
455 + *  @offset: register offset to be read
456 + *  @data: pointer to the read data
457 + *
458 + *  Reads the PHY register at offset using the serial gigabit media independent
459 + *  interface and stores the retrieved information in data.
460 + **/
461 +static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
462 +                                          u16 *data)
463 +{
464 +       s32 ret_val = -E1000_ERR_PARAM;
465 +
466 +       DEBUGFUNC("e1000_read_phy_reg_sgmii_82575");
467 +
468 +       if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
469 +               DEBUGOUT1("PHY Address %u is out of range\n", offset);
470 +               goto out;
471 +       }
472 +
473 +       ret_val = hw->phy.ops.acquire(hw);
474 +       if (ret_val)
475 +               goto out;
476 +
477 +       ret_val = e1000_read_phy_reg_i2c(hw, offset, data);
478 +
479 +       hw->phy.ops.release(hw);
480 +
481 +out:
482 +       return ret_val;
483 +}
484 +
485 +/**
486 + *  e1000_write_phy_reg_sgmii_82575 - Write PHY register using sgmii
487 + *  @hw: pointer to the HW structure
488 + *  @offset: register offset to write to
489 + *  @data: data to write at register offset
490 + *
491 + *  Writes the data to PHY register at the offset using the serial gigabit
492 + *  media independent interface.
493 + **/
494 +static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
495 +                                           u16 data)
496 +{
497 +       s32 ret_val = -E1000_ERR_PARAM;
498 +
499 +       DEBUGFUNC("e1000_write_phy_reg_sgmii_82575");
500 +
501 +       if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
502 +               DEBUGOUT1("PHY Address %u is out of range\n", offset);
503 +               goto out;
504 +       }
505 +
506 +       ret_val = hw->phy.ops.acquire(hw);
507 +       if (ret_val)
508 +               goto out;
509 +
510 +       ret_val = e1000_write_phy_reg_i2c(hw, offset, data);
511 +
512 +       hw->phy.ops.release(hw);
513 +
514 +out:
515 +       return ret_val;
516 +}
517 +
518 +/**
519 + *  e1000_get_phy_id_82575 - Retrieve PHY addr and id
520 + *  @hw: pointer to the HW structure
521 + *
522 + *  Retrieves the PHY address and ID for both PHY's which do and do not use
523 + *  sgmi interface.
524 + **/
525 +static s32 e1000_get_phy_id_82575(struct e1000_hw *hw)
526 +{
527 +       struct e1000_phy_info *phy = &hw->phy;
528 +       s32  ret_val = E1000_SUCCESS;
529 +       u16 phy_id;
530 +       u32 ctrl_ext;
531 +
532 +       DEBUGFUNC("e1000_get_phy_id_82575");
533 +
534 +       /*
535 +        * For SGMII PHYs, we try the list of possible addresses until
536 +        * we find one that works.  For non-SGMII PHYs
537 +        * (e.g. integrated copper PHYs), an address of 1 should
538 +        * work.  The result of this function should mean phy->phy_addr
539 +        * and phy->id are set correctly.
540 +        */
541 +       if (!e1000_sgmii_active_82575(hw)) {
542 +               phy->addr = 1;
543 +               ret_val = e1000_get_phy_id(hw);
544 +               goto out;
545 +       }
546 +
547 +       /* Power on sgmii phy if it is disabled */
548 +       ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
549 +       E1000_WRITE_REG(hw, E1000_CTRL_EXT,
550 +                       ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA);
551 +       E1000_WRITE_FLUSH(hw);
552 +       msec_delay(300);
553 +
554 +       /*
555 +        * The address field in the I2CCMD register is 3 bits and 0 is invalid.
556 +        * Therefore, we need to test 1-7
557 +        */
558 +       for (phy->addr = 1; phy->addr < 8; phy->addr++) {
559 +               ret_val = e1000_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id);
560 +               if (ret_val == E1000_SUCCESS) {
561 +                       DEBUGOUT2("Vendor ID 0x%08X read at address %u\n",
562 +                                 phy_id,
563 +                                 phy->addr);
564 +                       /*
565 +                        * At the time of this writing, The M88 part is
566 +                        * the only supported SGMII PHY product.
567 +                        */
568 +                       if (phy_id == M88_VENDOR)
569 +                               break;
570 +               } else {
571 +                       DEBUGOUT1("PHY address %u was unreadable\n",
572 +                                 phy->addr);
573 +               }
574 +       }
575 +
576 +       /* A valid PHY type couldn't be found. */
577 +       if (phy->addr == 8) {
578 +               phy->addr = 0;
579 +               ret_val = -E1000_ERR_PHY;
580 +       } else {
581 +               ret_val = e1000_get_phy_id(hw);
582 +       }
583 +
584 +       /* restore previous sfp cage power state */
585 +       E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
586 +
587 +out:
588 +       return ret_val;
589 +}
590 +
591 +/**
592 + *  e1000_phy_hw_reset_sgmii_82575 - Performs a PHY reset
593 + *  @hw: pointer to the HW structure
594 + *
595 + *  Resets the PHY using the serial gigabit media independent interface.
596 + **/
597 +static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
598 +{
599 +       s32 ret_val = E1000_SUCCESS;
600 +
601 +       DEBUGFUNC("e1000_phy_hw_reset_sgmii_82575");
602 +
603 +       /*
604 +        * This isn't a true "hard" reset, but is the only reset
605 +        * available to us at this time.
606 +        */
607 +
608 +       DEBUGOUT("Soft resetting SGMII attached PHY...\n");
609 +
610 +       if (!(hw->phy.ops.write_reg))
611 +               goto out;
612 +
613 +       /*
614 +        * SFP documentation requires the following to configure the SPF module
615 +        * to work on SGMII.  No further documentation is given.
616 +        */
617 +       ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084);
618 +       if (ret_val)
619 +               goto out;
620 +
621 +       ret_val = hw->phy.ops.commit(hw);
622 +
623 +out:
624 +       return ret_val;
625 +}
626 +
627 +/**
628 + *  e1000_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state
629 + *  @hw: pointer to the HW structure
630 + *  @active: true to enable LPLU, false to disable
631 + *
632 + *  Sets the LPLU D0 state according to the active flag.  When
633 + *  activating LPLU this function also disables smart speed
634 + *  and vice versa.  LPLU will not be activated unless the
635 + *  device autonegotiation advertisement meets standards of
636 + *  either 10 or 10/100 or 10/100/1000 at all duplexes.
637 + *  This is a function pointer entry point only called by
638 + *  PHY setup routines.
639 + **/
640 +static s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
641 +{
642 +       struct e1000_phy_info *phy = &hw->phy;
643 +       s32 ret_val = E1000_SUCCESS;
644 +       u16 data;
645 +
646 +       DEBUGFUNC("e1000_set_d0_lplu_state_82575");
647 +
648 +       if (!(hw->phy.ops.read_reg))
649 +               goto out;
650 +
651 +       ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
652 +       if (ret_val)
653 +               goto out;
654 +
655 +       if (active) {
656 +               data |= IGP02E1000_PM_D0_LPLU;
657 +               ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
658 +                                            data);
659 +               if (ret_val)
660 +                       goto out;
661 +
662 +               /* When LPLU is enabled, we should disable SmartSpeed */
663 +               ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
664 +                                           &data);
665 +               data &= ~IGP01E1000_PSCFR_SMART_SPEED;
666 +               ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
667 +                                            data);
668 +               if (ret_val)
669 +                       goto out;
670 +       } else {
671 +               data &= ~IGP02E1000_PM_D0_LPLU;
672 +               ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
673 +                                            data);
674 +               /*
675 +                * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
676 +                * during Dx states where the power conservation is most
677 +                * important.  During driver activity we should enable
678 +                * SmartSpeed, so performance is maintained.
679 +                */
680 +               if (phy->smart_speed == e1000_smart_speed_on) {
681 +                       ret_val = phy->ops.read_reg(hw,
682 +                                                   IGP01E1000_PHY_PORT_CONFIG,
683 +                                                   &data);
684 +                       if (ret_val)
685 +                               goto out;
686 +
687 +                       data |= IGP01E1000_PSCFR_SMART_SPEED;
688 +                       ret_val = phy->ops.write_reg(hw,
689 +                                                    IGP01E1000_PHY_PORT_CONFIG,
690 +                                                    data);
691 +                       if (ret_val)
692 +                               goto out;
693 +               } else if (phy->smart_speed == e1000_smart_speed_off) {
694 +                       ret_val = phy->ops.read_reg(hw,
695 +                                                   IGP01E1000_PHY_PORT_CONFIG,
696 +                                                   &data);
697 +                       if (ret_val)
698 +                               goto out;
699 +
700 +                       data &= ~IGP01E1000_PSCFR_SMART_SPEED;
701 +                       ret_val = phy->ops.write_reg(hw,
702 +                                                    IGP01E1000_PHY_PORT_CONFIG,
703 +                                                    data);
704 +                       if (ret_val)
705 +                               goto out;
706 +               }
707 +       }
708 +
709 +out:
710 +       return ret_val;
711 +}
712 +
713 +/**
714 + *  e1000_acquire_nvm_82575 - Request for access to EEPROM
715 + *  @hw: pointer to the HW structure
716 + *
717 + *  Acquire the necessary semaphores for exclusive access to the EEPROM.
718 + *  Set the EEPROM access request bit and wait for EEPROM access grant bit.
719 + *  Return successful if access grant bit set, else clear the request for
720 + *  EEPROM access and return -E1000_ERR_NVM (-1).
721 + **/
722 +static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw)
723 +{
724 +       s32 ret_val;
725 +
726 +       DEBUGFUNC("e1000_acquire_nvm_82575");
727 +
728 +       ret_val = e1000_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
729 +       if (ret_val)
730 +               goto out;
731 +
732 +       ret_val = e1000_acquire_nvm_generic(hw);
733 +
734 +       if (ret_val)
735 +               e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
736 +
737 +out:
738 +       return ret_val;
739 +}
740 +
741 +/**
742 + *  e1000_release_nvm_82575 - Release exclusive access to EEPROM
743 + *  @hw: pointer to the HW structure
744 + *
745 + *  Stop any current commands to the EEPROM and clear the EEPROM request bit,
746 + *  then release the semaphores acquired.
747 + **/
748 +static void e1000_release_nvm_82575(struct e1000_hw *hw)
749 +{
750 +       DEBUGFUNC("e1000_release_nvm_82575");
751 +
752 +       e1000_release_nvm_generic(hw);
753 +       e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
754 +}
755 +
756 +/**
757 + *  e1000_acquire_swfw_sync_82575 - Acquire SW/FW semaphore
758 + *  @hw: pointer to the HW structure
759 + *  @mask: specifies which semaphore to acquire
760 + *
761 + *  Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
762 + *  will also specify which port we're acquiring the lock for.
763 + **/
764 +static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
765 +{
766 +       u32 swfw_sync;
767 +       u32 swmask = mask;
768 +       u32 fwmask = mask << 16;
769 +       s32 ret_val = E1000_SUCCESS;
770 +       s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
771 +
772 +       DEBUGFUNC("e1000_acquire_swfw_sync_82575");
773 +
774 +       while (i < timeout) {
775 +               if (e1000_get_hw_semaphore_generic(hw)) {
776 +                       ret_val = -E1000_ERR_SWFW_SYNC;
777 +                       goto out;
778 +               }
779 +
780 +               swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
781 +               if (!(swfw_sync & (fwmask | swmask)))
782 +                       break;
783 +
784 +               /*
785 +                * Firmware currently using resource (fwmask)
786 +                * or other software thread using resource (swmask)
787 +                */
788 +               e1000_put_hw_semaphore_generic(hw);
789 +               msec_delay_irq(5);
790 +               i++;
791 +       }
792 +
793 +       if (i == timeout) {
794 +               DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
795 +               ret_val = -E1000_ERR_SWFW_SYNC;
796 +               goto out;
797 +       }
798 +
799 +       swfw_sync |= swmask;
800 +       E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
801 +
802 +       e1000_put_hw_semaphore_generic(hw);
803 +
804 +out:
805 +       return ret_val;
806 +}
807 +
808 +/**
809 + *  e1000_release_swfw_sync_82575 - Release SW/FW semaphore
810 + *  @hw: pointer to the HW structure
811 + *  @mask: specifies which semaphore to acquire
812 + *
813 + *  Release the SW/FW semaphore used to access the PHY or NVM.  The mask
814 + *  will also specify which port we're releasing the lock for.
815 + **/
816 +static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
817 +{
818 +       u32 swfw_sync;
819 +
820 +       DEBUGFUNC("e1000_release_swfw_sync_82575");
821 +
822 +       while (e1000_get_hw_semaphore_generic(hw) != E1000_SUCCESS);
823 +       /* Empty */
824 +
825 +       swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
826 +       swfw_sync &= ~mask;
827 +       E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
828 +
829 +       e1000_put_hw_semaphore_generic(hw);
830 +}
831 +
832 +/**
833 + *  e1000_get_cfg_done_82575 - Read config done bit
834 + *  @hw: pointer to the HW structure
835 + *
836 + *  Read the management control register for the config done bit for
837 + *  completion status.  NOTE: silicon which is EEPROM-less will fail trying
838 + *  to read the config done bit, so an error is *ONLY* logged and returns
839 + *  E1000_SUCCESS.  If we were to return with error, EEPROM-less silicon
840 + *  would not be able to be reset or change link.
841 + **/
842 +static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw)
843 +{
844 +       s32 timeout = PHY_CFG_TIMEOUT;
845 +       s32 ret_val = E1000_SUCCESS;
846 +       u32 mask = E1000_NVM_CFG_DONE_PORT_0;
847 +
848 +       DEBUGFUNC("e1000_get_cfg_done_82575");
849 +
850 +       if (hw->bus.func == E1000_FUNC_1)
851 +               mask = E1000_NVM_CFG_DONE_PORT_1;
852 +       while (timeout) {
853 +               if (E1000_READ_REG(hw, E1000_EEMNGCTL) & mask)
854 +                       break;
855 +               msec_delay(1);
856 +               timeout--;
857 +       }
858 +       if (!timeout)
859 +               DEBUGOUT("MNG configuration cycle has not completed.\n");
860 +
861 +       /* If EEPROM is not marked present, init the PHY manually */
862 +       if (((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0) &&
863 +           (hw->phy.type == e1000_phy_igp_3))
864 +               e1000_phy_init_script_igp3(hw);
865 +
866 +       return ret_val;
867 +}
868 +
869 +/**
870 + *  e1000_get_link_up_info_82575 - Get link speed/duplex info
871 + *  @hw: pointer to the HW structure
872 + *  @speed: stores the current speed
873 + *  @duplex: stores the current duplex
874 + *
875 + *  This is a wrapper function, if using the serial gigabit media independent
876 + *  interface, use PCS to retrieve the link speed and duplex information.
877 + *  Otherwise, use the generic function to get the link speed and duplex info.
878 + **/
879 +static s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
880 +                                        u16 *duplex)
881 +{
882 +       s32 ret_val;
883 +
884 +       DEBUGFUNC("e1000_get_link_up_info_82575");
885 +
886 +       if (hw->phy.media_type != e1000_media_type_copper)
887 +               ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, speed,
888 +                                                              duplex);
889 +       else
890 +               ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed,
891 +                                                                   duplex);
892 +
893 +       return ret_val;
894 +}
895 +
896 +/**
897 + *  e1000_check_for_link_82575 - Check for link
898 + *  @hw: pointer to the HW structure
899 + *
900 + *  If sgmii is enabled, then use the pcs register to determine link, otherwise
901 + *  use the generic interface for determining link.
902 + **/
903 +static s32 e1000_check_for_link_82575(struct e1000_hw *hw)
904 +{
905 +       s32 ret_val;
906 +       u16 speed, duplex;
907 +
908 +       DEBUGFUNC("e1000_check_for_link_82575");
909 +
910 +       if (hw->phy.media_type != e1000_media_type_copper) {
911 +               ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, &speed,
912 +                                                              &duplex);
913 +               /*
914 +                * Use this flag to determine if link needs to be checked or
915 +                * not.  If we have link clear the flag so that we do not
916 +                * continue to check for link.
917 +                */
918 +               hw->mac.get_link_status = !hw->mac.serdes_has_link;
919 +       } else {
920 +               ret_val = e1000_check_for_copper_link_generic(hw);
921 +       }
922 +
923 +       return ret_val;
924 +}
925 +
926 +/**
927 + *  e1000_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
928 + *  @hw: pointer to the HW structure
929 + *  @speed: stores the current speed
930 + *  @duplex: stores the current duplex
931 + *
932 + *  Using the physical coding sub-layer (PCS), retrieve the current speed and
933 + *  duplex, then store the values in the pointers provided.
934 + **/
935 +static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
936 +                                                u16 *speed, u16 *duplex)
937 +{
938 +       struct e1000_mac_info *mac = &hw->mac;
939 +       u32 pcs;
940 +
941 +       DEBUGFUNC("e1000_get_pcs_speed_and_duplex_82575");
942 +
943 +       /* Set up defaults for the return values of this function */
944 +       mac->serdes_has_link = false;
945 +       *speed = 0;
946 +       *duplex = 0;
947 +
948 +       /*
949 +        * Read the PCS Status register for link state. For non-copper mode,
950 +        * the status register is not accurate. The PCS status register is
951 +        * used instead.
952 +        */
953 +       pcs = E1000_READ_REG(hw, E1000_PCS_LSTAT);
954 +
955 +       /*
956 +        * The link up bit determines when link is up on autoneg. The sync ok
957 +        * gets set once both sides sync up and agree upon link. Stable link
958 +        * can be determined by checking for both link up and link sync ok
959 +        */
960 +       if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) {
961 +               mac->serdes_has_link = true;
962 +
963 +               /* Detect and store PCS speed */
964 +               if (pcs & E1000_PCS_LSTS_SPEED_1000) {
965 +                       *speed = SPEED_1000;
966 +               } else if (pcs & E1000_PCS_LSTS_SPEED_100) {
967 +                       *speed = SPEED_100;
968 +               } else {
969 +                       *speed = SPEED_10;
970 +               }
971 +
972 +               /* Detect and store PCS duplex */
973 +               if (pcs & E1000_PCS_LSTS_DUPLEX_FULL) {
974 +                       *duplex = FULL_DUPLEX;
975 +               } else {
976 +                       *duplex = HALF_DUPLEX;
977 +               }
978 +       }
979 +
980 +       return E1000_SUCCESS;
981 +}
982 +
983 +/**
984 + *  e1000_shutdown_serdes_link_82575 - Remove link during power down
985 + *  @hw: pointer to the HW structure
986 + *
987 + *  In the case of serdes shut down sfp and PCS on driver unload
988 + *  when management pass thru is not enabled.
989 + **/
990 +void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw)
991 +{
992 +       u32 reg;
993 +       u16 eeprom_data = 0;
994 +
995 +       if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
996 +           !e1000_sgmii_active_82575(hw))
997 +               return;
998 +
999 +       if (hw->bus.func == E1000_FUNC_0)
1000 +               hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
1001 +       else if (hw->bus.func == E1000_FUNC_1)
1002 +               hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
1003 +
1004 +       /*
1005 +        * If APM is not enabled in the EEPROM and management interface is
1006 +        * not enabled, then power down.
1007 +        */
1008 +       if (!(eeprom_data & E1000_NVM_APME_82575) &&
1009 +           !e1000_enable_mng_pass_thru(hw)) {
1010 +               /* Disable PCS to turn off link */
1011 +               reg = E1000_READ_REG(hw, E1000_PCS_CFG0);
1012 +               reg &= ~E1000_PCS_CFG_PCS_EN;
1013 +               E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg);
1014 +
1015 +               /* shutdown the laser */
1016 +               reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1017 +               reg |= E1000_CTRL_EXT_SDP3_DATA;
1018 +               E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
1019 +
1020 +               /* flush the write to verify completion */
1021 +               E1000_WRITE_FLUSH(hw);
1022 +               msec_delay(1);
1023 +       }
1024 +
1025 +       return;
1026 +}
1027 +
1028 +/**
1029 + *  e1000_reset_hw_82575 - Reset hardware
1030 + *  @hw: pointer to the HW structure
1031 + *
1032 + *  This resets the hardware into a known state.
1033 + **/
1034 +static s32 e1000_reset_hw_82575(struct e1000_hw *hw)
1035 +{
1036 +       u32 ctrl, icr;
1037 +       s32 ret_val;
1038 +
1039 +       DEBUGFUNC("e1000_reset_hw_82575");
1040 +
1041 +       /*
1042 +        * Prevent the PCI-E bus from sticking if there is no TLP connection
1043 +        * on the last TLP read/write transaction when MAC is reset.
1044 +        */
1045 +       ret_val = e1000_disable_pcie_master_generic(hw);
1046 +       if (ret_val) {
1047 +               DEBUGOUT("PCI-E Master disable polling has failed.\n");
1048 +       }
1049 +
1050 +       /* set the completion timeout for interface */
1051 +       ret_val = e1000_set_pcie_completion_timeout(hw);
1052 +       if (ret_val) {
1053 +               DEBUGOUT("PCI-E Set completion timeout has failed.\n");
1054 +       }
1055 +
1056 +       DEBUGOUT("Masking off all interrupts\n");
1057 +       E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
1058 +
1059 +       E1000_WRITE_REG(hw, E1000_RCTL, 0);
1060 +       E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
1061 +       E1000_WRITE_FLUSH(hw);
1062 +
1063 +       msec_delay(10);
1064 +
1065 +       ctrl = E1000_READ_REG(hw, E1000_CTRL);
1066 +
1067 +       DEBUGOUT("Issuing a global reset to MAC\n");
1068 +       E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
1069 +
1070 +       ret_val = e1000_get_auto_rd_done_generic(hw);
1071 +       if (ret_val) {
1072 +               /*
1073 +                * When auto config read does not complete, do not
1074 +                * return with an error. This can happen in situations
1075 +                * where there is no eeprom and prevents getting link.
1076 +                */
1077 +               DEBUGOUT("Auto Read Done did not complete\n");
1078 +       }
1079 +
1080 +       /* If EEPROM is not present, run manual init scripts */
1081 +       if ((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0)
1082 +               e1000_reset_init_script_82575(hw);
1083 +
1084 +       /* Clear any pending interrupt events. */
1085 +       E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
1086 +       icr = E1000_READ_REG(hw, E1000_ICR);
1087 +
1088 +       /* Install any alternate MAC address into RAR0 */
1089 +       ret_val = e1000_check_alt_mac_addr_generic(hw);
1090 +
1091 +       return ret_val;
1092 +}
1093 +
1094 +/**
1095 + *  e1000_init_hw_82575 - Initialize hardware
1096 + *  @hw: pointer to the HW structure
1097 + *
1098 + *  This inits the hardware readying it for operation.
1099 + **/
1100 +static s32 e1000_init_hw_82575(struct e1000_hw *hw)
1101 +{
1102 +       struct e1000_mac_info *mac = &hw->mac;
1103 +       s32 ret_val;
1104 +       u16 i, rar_count = mac->rar_entry_count;
1105 +
1106 +       DEBUGFUNC("e1000_init_hw_82575");
1107 +
1108 +       /* Initialize identification LED */
1109 +       ret_val = mac->ops.id_led_init(hw);
1110 +       if (ret_val) {
1111 +               DEBUGOUT("Error initializing identification LED\n");
1112 +               /* This is not fatal and we should not stop init due to this */
1113 +       }
1114 +
1115 +       /* Disabling VLAN filtering */
1116 +       DEBUGOUT("Initializing the IEEE VLAN\n");
1117 +       mac->ops.clear_vfta(hw);
1118 +
1119 +       /* Setup the receive address */
1120 +       e1000_init_rx_addrs_generic(hw, rar_count);
1121 +
1122 +       /* Zero out the Multicast HASH table */
1123 +       DEBUGOUT("Zeroing the MTA\n");
1124 +       for (i = 0; i < mac->mta_reg_count; i++)
1125 +               E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
1126 +
1127 +       /* Zero out the Unicast HASH table */
1128 +       DEBUGOUT("Zeroing the UTA\n");
1129 +       for (i = 0; i < mac->uta_reg_count; i++)
1130 +               E1000_WRITE_REG_ARRAY(hw, E1000_UTA, i, 0);
1131 +
1132 +       /* Setup link and flow control */
1133 +       ret_val = mac->ops.setup_link(hw);
1134 +
1135 +       /*
1136 +        * Clear all of the statistics registers (clear on read).  It is
1137 +        * important that we do this after we have tried to establish link
1138 +        * because the symbol error count will increment wildly if there
1139 +        * is no link.
1140 +        */
1141 +       e1000_clear_hw_cntrs_82575(hw);
1142 +
1143 +       return ret_val;
1144 +}
1145 +
1146 +/**
1147 + *  e1000_setup_copper_link_82575 - Configure copper link settings
1148 + *  @hw: pointer to the HW structure
1149 + *
1150 + *  Configures the link for auto-neg or forced speed and duplex.  Then we check
1151 + *  for link, once link is established calls to configure collision distance
1152 + *  and flow control are called.
1153 + **/
1154 +static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw)
1155 +{
1156 +       u32 ctrl;
1157 +       s32  ret_val;
1158 +
1159 +       DEBUGFUNC("e1000_setup_copper_link_82575");
1160 +
1161 +       ctrl = E1000_READ_REG(hw, E1000_CTRL);
1162 +       ctrl |= E1000_CTRL_SLU;
1163 +       ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
1164 +       E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
1165 +
1166 +       ret_val = e1000_setup_serdes_link_82575(hw);
1167 +       if (ret_val)
1168 +               goto out;
1169 +
1170 +       if (e1000_sgmii_active_82575(hw) && !hw->phy.reset_disable) {
1171 +               ret_val = hw->phy.ops.reset(hw);
1172 +               if (ret_val) {
1173 +                       DEBUGOUT("Error resetting the PHY.\n");
1174 +                       goto out;
1175 +               }
1176 +       }
1177 +       switch (hw->phy.type) {
1178 +       case e1000_phy_m88:
1179 +               ret_val = e1000_copper_link_setup_m88(hw);
1180 +               break;
1181 +       case e1000_phy_igp_3:
1182 +               ret_val = e1000_copper_link_setup_igp(hw);
1183 +               break;
1184 +       default:
1185 +               ret_val = -E1000_ERR_PHY;
1186 +               break;
1187 +       }
1188 +
1189 +       if (ret_val)
1190 +               goto out;
1191 +
1192 +       ret_val = e1000_setup_copper_link_generic(hw);
1193 +out:
1194 +       return ret_val;
1195 +}
1196 +
1197 +/**
1198 + *  e1000_setup_serdes_link_82575 - Setup link for serdes
1199 + *  @hw: pointer to the HW structure
1200 + *
1201 + *  Configure the physical coding sub-layer (PCS) link.  The PCS link is
1202 + *  used on copper connections where the serialized gigabit media independent
1203 + *  interface (sgmii), or serdes fiber is being used.  Configures the link
1204 + *  for auto-negotiation or forces speed/duplex.
1205 + **/
1206 +static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw)
1207 +{
1208 +       u32 ctrl_reg, reg;
1209 +
1210 +       DEBUGFUNC("e1000_setup_serdes_link_82575");
1211 +
1212 +       if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
1213 +           !e1000_sgmii_active_82575(hw))
1214 +               return E1000_SUCCESS;
1215 +
1216 +       /*
1217 +        * On the 82575, SerDes loopback mode persists until it is
1218 +        * explicitly turned off or a power cycle is performed.  A read to
1219 +        * the register does not indicate its status.  Therefore, we ensure
1220 +        * loopback mode is disabled during initialization.
1221 +        */
1222 +       E1000_WRITE_REG(hw, E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
1223 +
1224 +       /* power on the sfp cage if present */
1225 +       reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1226 +       reg &= ~E1000_CTRL_EXT_SDP3_DATA;
1227 +       E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
1228 +
1229 +       ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
1230 +       ctrl_reg |= E1000_CTRL_SLU;
1231 +
1232 +       if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) {
1233 +               /* set both sw defined pins */
1234 +               ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1;
1235 +
1236 +               /* Set switch control to serdes energy detect */
1237 +               reg = E1000_READ_REG(hw, E1000_CONNSW);
1238 +               reg |= E1000_CONNSW_ENRGSRC;
1239 +               E1000_WRITE_REG(hw, E1000_CONNSW, reg);
1240 +       }
1241 +
1242 +       reg = E1000_READ_REG(hw, E1000_PCS_LCTL);
1243 +
1244 +       if (e1000_sgmii_active_82575(hw)) {
1245 +               /* allow time for SFP cage to power up phy */
1246 +               msec_delay(300);
1247 +
1248 +               /* AN time out should be disabled for SGMII mode */
1249 +               reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT);
1250 +       } else {
1251 +               ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
1252 +                           E1000_CTRL_FD | E1000_CTRL_FRCDPX;
1253 +       }
1254 +
1255 +       E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
1256 +
1257 +       /*
1258 +        * New SerDes mode allows for forcing speed or autonegotiating speed
1259 +        * at 1gb. Autoneg should be default set by most drivers. This is the
1260 +        * mode that will be compatible with older link partners and switches.
1261 +        * However, both are supported by the hardware and some drivers/tools.
1262 +        */
1263 +
1264 +       reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
1265 +                E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);
1266 +
1267 +       /*
1268 +        * We force flow control to prevent the CTRL register values from being
1269 +        * overwritten by the autonegotiated flow control values
1270 +        */
1271 +       reg |= E1000_PCS_LCTL_FORCE_FCTRL;
1272 +
1273 +       /*
1274 +        * we always set sgmii to autoneg since it is the phy that will be
1275 +        * forcing the link and the serdes is just a go-between
1276 +        */
1277 +       if (hw->mac.autoneg || e1000_sgmii_active_82575(hw)) {
1278 +               /* Set PCS register for autoneg */
1279 +               reg |= E1000_PCS_LCTL_FSV_1000 |  /* Force 1000 */
1280 +                      E1000_PCS_LCTL_FDV_FULL |  /* SerDes Full dplx */
1281 +                      E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */
1282 +                      E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */
1283 +               DEBUGOUT1("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
1284 +       } else {
1285 +               /* Check for duplex first */
1286 +               if (hw->mac.forced_speed_duplex & E1000_ALL_FULL_DUPLEX)
1287 +                       reg |= E1000_PCS_LCTL_FDV_FULL;
1288 +
1289 +               /* No need to check for 1000/full since the spec states that
1290 +                * it requires autoneg to be enabled */
1291 +               /* Now set speed */
1292 +               if (hw->mac.forced_speed_duplex & E1000_ALL_100_SPEED)
1293 +                       reg |= E1000_PCS_LCTL_FSV_100;
1294 +
1295 +               /* Force speed and force link */
1296 +               reg |= E1000_PCS_LCTL_FSD |
1297 +                      E1000_PCS_LCTL_FORCE_LINK |
1298 +                      E1000_PCS_LCTL_FLV_LINK_UP;
1299 +
1300 +               DEBUGOUT1("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
1301 +       }
1302 +
1303 +       E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg);
1304 +
1305 +       if (!e1000_sgmii_active_82575(hw))
1306 +               e1000_force_mac_fc_generic(hw);
1307 +
1308 +       return E1000_SUCCESS;
1309 +}
1310 +
1311 +/**
1312 + *  e1000_valid_led_default_82575 - Verify a valid default LED config
1313 + *  @hw: pointer to the HW structure
1314 + *  @data: pointer to the NVM (EEPROM)
1315 + *
1316 + *  Read the EEPROM for the current default LED configuration.  If the
1317 + *  LED configuration is not valid, set to a valid LED configuration.
1318 + **/
1319 +static s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data)
1320 +{
1321 +       s32 ret_val;
1322 +
1323 +       DEBUGFUNC("e1000_valid_led_default_82575");
1324 +
1325 +       ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
1326 +       if (ret_val) {
1327 +               DEBUGOUT("NVM Read Error\n");
1328 +               goto out;
1329 +       }
1330 +
1331 +       if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
1332 +               switch(hw->phy.media_type) {
1333 +               case e1000_media_type_internal_serdes:
1334 +                       *data = ID_LED_DEFAULT_82575_SERDES;
1335 +                       break;
1336 +               case e1000_media_type_copper:
1337 +               default:
1338 +                       *data = ID_LED_DEFAULT;
1339 +                       break;
1340 +               }
1341 +       }
1342 +out:
1343 +       return ret_val;
1344 +}
1345 +
1346 +/**
1347 + *  e1000_sgmii_active_82575 - Return sgmii state
1348 + *  @hw: pointer to the HW structure
1349 + *
1350 + *  82575 silicon has a serialized gigabit media independent interface (sgmii)
1351 + *  which can be enabled for use in the embedded applications.  Simply
1352 + *  return the current state of the sgmii interface.
1353 + **/
1354 +static bool e1000_sgmii_active_82575(struct e1000_hw *hw)
1355 +{
1356 +       struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
1357 +       return dev_spec->sgmii_active;
1358 +}
1359 +
1360 +/**
1361 + *  e1000_reset_init_script_82575 - Inits HW defaults after reset
1362 + *  @hw: pointer to the HW structure
1363 + *
1364 + *  Inits recommended HW defaults after a reset when there is no EEPROM
1365 + *  detected. This is only for the 82575.
1366 + **/
1367 +static s32 e1000_reset_init_script_82575(struct e1000_hw* hw)
1368 +{
1369 +       DEBUGFUNC("e1000_reset_init_script_82575");
1370 +
1371 +       if (hw->mac.type == e1000_82575) {
1372 +               DEBUGOUT("Running reset init script for 82575\n");
1373 +               /* SerDes configuration via SERDESCTRL */
1374 +               e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x00, 0x0C);
1375 +               e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x01, 0x78);
1376 +               e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x1B, 0x23);
1377 +               e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x23, 0x15);
1378 +
1379 +               /* CCM configuration via CCMCTL register */
1380 +               e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x14, 0x00);
1381 +               e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x10, 0x00);
1382 +
1383 +               /* PCIe lanes configuration */
1384 +               e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x00, 0xEC);
1385 +               e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x61, 0xDF);
1386 +               e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x34, 0x05);
1387 +               e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x2F, 0x81);
1388 +
1389 +               /* PCIe PLL Configuration */
1390 +               e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x02, 0x47);
1391 +               e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x14, 0x00);
1392 +               e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x10, 0x00);
1393 +       }
1394 +
1395 +       return E1000_SUCCESS;
1396 +}
1397 +
1398 +/**
1399 + *  e1000_read_mac_addr_82575 - Read device MAC address
1400 + *  @hw: pointer to the HW structure
1401 + **/
1402 +static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw)
1403 +{
1404 +       s32 ret_val = E1000_SUCCESS;
1405 +
1406 +       DEBUGFUNC("e1000_read_mac_addr_82575");
1407 +
1408 +       /*
1409 +        * If there's an alternate MAC address place it in RAR0
1410 +        * so that it will override the Si installed default perm
1411 +        * address.
1412 +        */
1413 +       ret_val = e1000_check_alt_mac_addr_generic(hw);
1414 +       if (ret_val)
1415 +               goto out;
1416 +
1417 +       ret_val = e1000_read_mac_addr_generic(hw);
1418 +
1419 +out:
1420 +       return ret_val;
1421 +}
1422 +
1423 +/**
1424 + * e1000_power_down_phy_copper_82575 - Remove link during PHY power down
1425 + * @hw: pointer to the HW structure
1426 + *
1427 + * In the case of a PHY power down to save power, or to turn off link during a
1428 + * driver unload, or wake on lan is not enabled, remove the link.
1429 + **/
1430 +static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw)
1431 +{
1432 +       struct e1000_phy_info *phy = &hw->phy;
1433 +       struct e1000_mac_info *mac = &hw->mac;
1434 +
1435 +       if (!(phy->ops.check_reset_block))
1436 +               return;
1437 +
1438 +       /* If the management interface is not enabled, then power down */
1439 +       if (!(mac->ops.check_mng_mode(hw) || phy->ops.check_reset_block(hw)))
1440 +               e1000_power_down_phy_copper(hw);
1441 +
1442 +       return;
1443 +}
1444 +
1445 +/**
1446 + *  e1000_clear_hw_cntrs_82575 - Clear device specific hardware counters
1447 + *  @hw: pointer to the HW structure
1448 + *
1449 + *  Clears the hardware counters by reading the counter registers.
1450 + **/
1451 +static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw)
1452 +{
1453 +       DEBUGFUNC("e1000_clear_hw_cntrs_82575");
1454 +
1455 +       e1000_clear_hw_cntrs_base_generic(hw);
1456 +
1457 +       E1000_READ_REG(hw, E1000_PRC64);
1458 +       E1000_READ_REG(hw, E1000_PRC127);
1459 +       E1000_READ_REG(hw, E1000_PRC255);
1460 +       E1000_READ_REG(hw, E1000_PRC511);
1461 +       E1000_READ_REG(hw, E1000_PRC1023);
1462 +       E1000_READ_REG(hw, E1000_PRC1522);
1463 +       E1000_READ_REG(hw, E1000_PTC64);
1464 +       E1000_READ_REG(hw, E1000_PTC127);
1465 +       E1000_READ_REG(hw, E1000_PTC255);
1466 +       E1000_READ_REG(hw, E1000_PTC511);
1467 +       E1000_READ_REG(hw, E1000_PTC1023);
1468 +       E1000_READ_REG(hw, E1000_PTC1522);
1469 +
1470 +       E1000_READ_REG(hw, E1000_ALGNERRC);
1471 +       E1000_READ_REG(hw, E1000_RXERRC);
1472 +       E1000_READ_REG(hw, E1000_TNCRS);
1473 +       E1000_READ_REG(hw, E1000_CEXTERR);
1474 +       E1000_READ_REG(hw, E1000_TSCTC);
1475 +       E1000_READ_REG(hw, E1000_TSCTFC);
1476 +
1477 +       E1000_READ_REG(hw, E1000_MGTPRC);
1478 +       E1000_READ_REG(hw, E1000_MGTPDC);
1479 +       E1000_READ_REG(hw, E1000_MGTPTC);
1480 +
1481 +       E1000_READ_REG(hw, E1000_IAC);
1482 +       E1000_READ_REG(hw, E1000_ICRXOC);
1483 +
1484 +       E1000_READ_REG(hw, E1000_ICRXPTC);
1485 +       E1000_READ_REG(hw, E1000_ICRXATC);
1486 +       E1000_READ_REG(hw, E1000_ICTXPTC);
1487 +       E1000_READ_REG(hw, E1000_ICTXATC);
1488 +       E1000_READ_REG(hw, E1000_ICTXQEC);
1489 +       E1000_READ_REG(hw, E1000_ICTXQMTC);
1490 +       E1000_READ_REG(hw, E1000_ICRXDMTC);
1491 +
1492 +       E1000_READ_REG(hw, E1000_CBTMPC);
1493 +       E1000_READ_REG(hw, E1000_HTDPMC);
1494 +       E1000_READ_REG(hw, E1000_CBRMPC);
1495 +       E1000_READ_REG(hw, E1000_RPTHC);
1496 +       E1000_READ_REG(hw, E1000_HGPTC);
1497 +       E1000_READ_REG(hw, E1000_HTCBDPC);
1498 +       E1000_READ_REG(hw, E1000_HGORCL);
1499 +       E1000_READ_REG(hw, E1000_HGORCH);
1500 +       E1000_READ_REG(hw, E1000_HGOTCL);
1501 +       E1000_READ_REG(hw, E1000_HGOTCH);
1502 +       E1000_READ_REG(hw, E1000_LENERRS);
1503 +
1504 +       /* This register should not be read in copper configurations */
1505 +       if ((hw->phy.media_type == e1000_media_type_internal_serdes) ||
1506 +           e1000_sgmii_active_82575(hw))
1507 +               E1000_READ_REG(hw, E1000_SCVPC);
1508 +}
1509 +
1510 +/**
1511 + *  e1000_rx_fifo_flush_82575 - Clean rx fifo after RX enable
1512 + *  @hw: pointer to the HW structure
1513 + *
1514 + *  After rx enable if manageability is enabled then there is likely some
1515 + *  bad data at the start of the fifo and possibly in the DMA fifo.  This
1516 + *  function clears the fifos and flushes any packets that came in as rx was
1517 + *  being enabled.
1518 + **/
1519 +void e1000_rx_fifo_flush_82575(struct e1000_hw *hw)
1520 +{
1521 +       u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
1522 +       int i, ms_wait;
1523 +
1524 +       DEBUGFUNC("e1000_rx_fifo_workaround_82575");
1525 +       if (hw->mac.type != e1000_82575 ||
1526 +           !(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_RCV_TCO_EN))
1527 +               return;
1528 +
1529 +       /* Disable all RX queues */
1530 +       for (i = 0; i < 4; i++) {
1531 +               rxdctl[i] = E1000_READ_REG(hw, E1000_RXDCTL(i));
1532 +               E1000_WRITE_REG(hw, E1000_RXDCTL(i),
1533 +                               rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
1534 +       }
1535 +       /* Poll all queues to verify they have shut down */
1536 +       for (ms_wait = 0; ms_wait < 10; ms_wait++) {
1537 +               msec_delay(1);
1538 +               rx_enabled = 0;
1539 +               for (i = 0; i < 4; i++)
1540 +                       rx_enabled |= E1000_READ_REG(hw, E1000_RXDCTL(i));
1541 +               if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE))
1542 +                       break;
1543 +       }
1544 +
1545 +       if (ms_wait == 10)
1546 +               DEBUGOUT("Queue disable timed out after 10ms\n");
1547 +
1548 +       /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
1549 +        * incoming packets are rejected.  Set enable and wait 2ms so that
1550 +        * any packet that was coming in as RCTL.EN was set is flushed
1551 +        */
1552 +       rfctl = E1000_READ_REG(hw, E1000_RFCTL);
1553 +       E1000_WRITE_REG(hw, E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);
1554 +
1555 +       rlpml = E1000_READ_REG(hw, E1000_RLPML);
1556 +       E1000_WRITE_REG(hw, E1000_RLPML, 0);
1557 +
1558 +       rctl = E1000_READ_REG(hw, E1000_RCTL);
1559 +       temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP);
1560 +       temp_rctl |= E1000_RCTL_LPE;
1561 +
1562 +       E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl);
1563 +       E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl | E1000_RCTL_EN);
1564 +       E1000_WRITE_FLUSH(hw);
1565 +       msec_delay(2);
1566 +
1567 +       /* Enable RX queues that were previously enabled and restore our
1568 +        * previous state
1569 +        */
1570 +       for (i = 0; i < 4; i++)
1571 +               E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl[i]);
1572 +       E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1573 +       E1000_WRITE_FLUSH(hw);
1574 +
1575 +       E1000_WRITE_REG(hw, E1000_RLPML, rlpml);
1576 +       E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
1577 +
1578 +       /* Flush receive errors generated by workaround */
1579 +       E1000_READ_REG(hw, E1000_ROC);
1580 +       E1000_READ_REG(hw, E1000_RNBC);
1581 +       E1000_READ_REG(hw, E1000_MPC);
1582 +}
1583 +
1584 +/**
1585 + *  e1000_set_pcie_completion_timeout - set pci-e completion timeout
1586 + *  @hw: pointer to the HW structure
1587 + *
1588 + *  The defaults for 82575 and 82576 should be in the range of 50us to 50ms,
1589 + *  however the hardware default for these parts is 500us to 1ms which is less
1590 + *  than the 10ms recommended by the pci-e spec.  To address this we need to
1591 + *  increase the value to either 10ms to 200ms for capability version 1 config,
1592 + *  or 16ms to 55ms for version 2.
1593 + **/
1594 +static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw)
1595 +{
1596 +       u32 gcr = E1000_READ_REG(hw, E1000_GCR);
1597 +       s32 ret_val = E1000_SUCCESS;
1598 +       u16 pcie_devctl2;
1599 +
1600 +       /* only take action if timeout value is defaulted to 0 */
1601 +       if (gcr & E1000_GCR_CMPL_TMOUT_MASK)
1602 +               goto out;
1603 +
1604 +       /*
1605 +        * if capabilities version is type 1 we can write the
1606 +        * timeout of 10ms to 200ms through the GCR register
1607 +        */
1608 +       if (!(gcr & E1000_GCR_CAP_VER2)) {
1609 +               gcr |= E1000_GCR_CMPL_TMOUT_10ms;
1610 +               goto out;
1611 +       }
1612 +
1613 +       /*
1614 +        * for version 2 capabilities we need to write the config space
1615 +        * directly in order to set the completion timeout value for
1616 +        * 16ms to 55ms
1617 +        */
1618 +       ret_val = e1000_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
1619 +                                         &pcie_devctl2);
1620 +       if (ret_val)
1621 +               goto out;
1622 +
1623 +       pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;
1624 +
1625 +       ret_val = e1000_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
1626 +                                          &pcie_devctl2);
1627 +out:
1628 +       /* disable completion timeout resend */
1629 +       gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;
1630 +
1631 +       E1000_WRITE_REG(hw, E1000_GCR, gcr);
1632 +       return ret_val;
1633 +}
1634 +
1635 +/**
1636 + *  e1000_vmdq_set_loopback_pf - enable or disable vmdq loopback
1637 + *  @hw: pointer to the hardware struct
1638 + *  @enable: state to enter, either enabled or disabled
1639 + *
1640 + *  enables/disables L2 switch loopback functionality.
1641 + **/
1642 +void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
1643 +{
1644 +       u32 dtxswc = E1000_READ_REG(hw, E1000_DTXSWC);
1645 +
1646 +       if (enable)
1647 +               dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
1648 +       else
1649 +               dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
1650 +
1651 +       E1000_WRITE_REG(hw, E1000_DTXSWC, dtxswc);
1652 +}
1653 +
1654 +/**
1655 + *  e1000_vmdq_set_replication_pf - enable or disable vmdq replication
1656 + *  @hw: pointer to the hardware struct
1657 + *  @enable: state to enter, either enabled or disabled
1658 + *
1659 + *  enables/disables replication of packets across multiple pools.
1660 + **/
1661 +void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
1662 +{
1663 +       u32 vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
1664 +
1665 +       if (enable)
1666 +               vt_ctl |= E1000_VT_CTL_VM_REPL_EN;
1667 +       else
1668 +               vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN;
1669 +
1670 +       E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
1671 +}
1672 +
1673 Index: linux-2.6.22/drivers/net/igb/e1000_82575.h
1674 ===================================================================
1675 --- /dev/null   1970-01-01 00:00:00.000000000 +0000
1676 +++ linux-2.6.22/drivers/net/igb/e1000_82575.h  2009-12-18 12:39:22.000000000 -0500
1677 @@ -0,0 +1,439 @@
1678 +/*******************************************************************************
1679 +
1680 +  Intel(R) Gigabit Ethernet Linux driver
1681 +  Copyright(c) 2007-2009 Intel Corporation.
1682 +
1683 +  This program is free software; you can redistribute it and/or modify it
1684 +  under the terms and conditions of the GNU General Public License,
1685 +  version 2, as published by the Free Software Foundation.
1686 +
1687 +  This program is distributed in the hope it will be useful, but WITHOUT
1688 +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
1689 +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
1690 +  more details.
1691 +
1692 +  You should have received a copy of the GNU General Public License along with
1693 +  this program; if not, write to the Free Software Foundation, Inc.,
1694 +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
1695 +
1696 +  The full GNU General Public License is included in this distribution in
1697 +  the file called "COPYING".
1698 +
1699 +  Contact Information:
1700 +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
1701 +  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
1702 +
1703 +*******************************************************************************/
1704 +
1705 +#ifndef _E1000_82575_H_
1706 +#define _E1000_82575_H_
1707 +
1708 +#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \
1709 +                                     (ID_LED_DEF1_DEF2 <<  8) | \
1710 +                                     (ID_LED_DEF1_DEF2 <<  4) | \
1711 +                                     (ID_LED_OFF1_ON2))
1712 +/*
1713 + * Receive Address Register Count
1714 + * Number of high/low register pairs in the RAR.  The RAR (Receive Address
1715 + * Registers) holds the directed and multicast addresses that we monitor.
1716 + * These entries are also used for MAC-based filtering.
1717 + */
1718 +/*
1719 + * For 82576, there are an additional set of RARs that begin at an offset
1720 + * separate from the first set of RARs.
1721 + */
1722 +#define E1000_RAR_ENTRIES_82575   16
1723 +#define E1000_RAR_ENTRIES_82576   24
1724 +
1725 +struct e1000_adv_data_desc {
1726 +       __le64 buffer_addr;    /* Address of the descriptor's data buffer */
1727 +       union {
1728 +               u32 data;
1729 +               struct {
1730 +                       u32 datalen :16; /* Data buffer length */
1731 +                       u32 rsvd    :4;
1732 +                       u32 dtyp    :4;  /* Descriptor type */
1733 +                       u32 dcmd    :8;  /* Descriptor command */
1734 +               } config;
1735 +       } lower;
1736 +       union {
1737 +               u32 data;
1738 +               struct {
1739 +                       u32 status  :4;  /* Descriptor status */
1740 +                       u32 idx     :4;
1741 +                       u32 popts   :6;  /* Packet Options */
1742 +                       u32 paylen  :18; /* Payload length */
1743 +               } options;
1744 +       } upper;
1745 +};
1746 +
1747 +#define E1000_TXD_DTYP_ADV_C    0x2  /* Advanced Context Descriptor */
1748 +#define E1000_TXD_DTYP_ADV_D    0x3  /* Advanced Data Descriptor */
1749 +#define E1000_ADV_TXD_CMD_DEXT  0x20 /* Descriptor extension (0 = legacy) */
1750 +#define E1000_ADV_TUCMD_IPV4    0x2  /* IP Packet Type: 1=IPv4 */
1751 +#define E1000_ADV_TUCMD_IPV6    0x0  /* IP Packet Type: 0=IPv6 */
1752 +#define E1000_ADV_TUCMD_L4T_UDP 0x0  /* L4 Packet TYPE of UDP */
1753 +#define E1000_ADV_TUCMD_L4T_TCP 0x4  /* L4 Packet TYPE of TCP */
1754 +#define E1000_ADV_TUCMD_MKRREQ  0x10 /* Indicates markers are required */
1755 +#define E1000_ADV_DCMD_EOP      0x1  /* End of Packet */
1756 +#define E1000_ADV_DCMD_IFCS     0x2  /* Insert FCS (Ethernet CRC) */
1757 +#define E1000_ADV_DCMD_RS       0x8  /* Report Status */
1758 +#define E1000_ADV_DCMD_VLE      0x40 /* Add VLAN tag */
1759 +#define E1000_ADV_DCMD_TSE      0x80 /* TCP Seg enable */
1760 +/* Extended Device Control */
1761 +#define E1000_CTRL_EXT_NSICR    0x00000001 /* Disable Intr Clear all on read */
1762 +
1763 +struct e1000_adv_context_desc {
1764 +       union {
1765 +               u32 ip_config;
1766 +               struct {
1767 +                       u32 iplen    :9;
1768 +                       u32 maclen   :7;
1769 +                       u32 vlan_tag :16;
1770 +               } fields;
1771 +       } ip_setup;
1772 +       u32 seq_num;
1773 +       union {
1774 +               u64 l4_config;
1775 +               struct {
1776 +                       u32 mkrloc :9;
1777 +                       u32 tucmd  :11;
1778 +                       u32 dtyp   :4;
1779 +                       u32 adv    :8;
1780 +                       u32 rsvd   :4;
1781 +                       u32 idx    :4;
1782 +                       u32 l4len  :8;
1783 +                       u32 mss    :16;
1784 +               } fields;
1785 +       } l4_setup;
1786 +};
1787 +
1788 +/* SRRCTL bit definitions */
1789 +#define E1000_SRRCTL_BSIZEPKT_SHIFT                     10 /* Shift _right_ */
1790 +#define E1000_SRRCTL_BSIZEHDRSIZE_MASK                  0x00000F00
1791 +#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT                 2  /* Shift _left_ */
1792 +#define E1000_SRRCTL_DESCTYPE_LEGACY                    0x00000000
1793 +#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF                0x02000000
1794 +#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT                 0x04000000
1795 +#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS          0x0A000000
1796 +#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION           0x06000000
1797 +#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
1798 +#define E1000_SRRCTL_DESCTYPE_MASK                      0x0E000000
1799 +#define E1000_SRRCTL_DROP_EN                            0x80000000
1800 +
1801 +#define E1000_SRRCTL_BSIZEPKT_MASK      0x0000007F
1802 +#define E1000_SRRCTL_BSIZEHDR_MASK      0x00003F00
1803 +
1804 +#define E1000_TX_HEAD_WB_ENABLE   0x1
1805 +#define E1000_TX_SEQNUM_WB_ENABLE 0x2
1806 +
1807 +#define E1000_MRQC_ENABLE_RSS_4Q            0x00000002
1808 +#define E1000_MRQC_ENABLE_VMDQ              0x00000003
1809 +#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q       0x00000005
1810 +#define E1000_MRQC_RSS_FIELD_IPV4_UDP       0x00400000
1811 +#define E1000_MRQC_RSS_FIELD_IPV6_UDP       0x00800000
1812 +#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX    0x01000000
1813 +
1814 +#define E1000_VMRCTL_MIRROR_PORT_SHIFT      8
1815 +#define E1000_VMRCTL_MIRROR_DSTPORT_MASK    (7 << E1000_VMRCTL_MIRROR_PORT_SHIFT)
1816 +#define E1000_VMRCTL_POOL_MIRROR_ENABLE     (1 << 0)
1817 +#define E1000_VMRCTL_UPLINK_MIRROR_ENABLE   (1 << 1)
1818 +#define E1000_VMRCTL_DOWNLINK_MIRROR_ENABLE (1 << 2)
1819 +
1820 +#define E1000_EICR_TX_QUEUE ( \
1821 +    E1000_EICR_TX_QUEUE0 |    \
1822 +    E1000_EICR_TX_QUEUE1 |    \
1823 +    E1000_EICR_TX_QUEUE2 |    \
1824 +    E1000_EICR_TX_QUEUE3)
1825 +
1826 +#define E1000_EICR_RX_QUEUE ( \
1827 +    E1000_EICR_RX_QUEUE0 |    \
1828 +    E1000_EICR_RX_QUEUE1 |    \
1829 +    E1000_EICR_RX_QUEUE2 |    \
1830 +    E1000_EICR_RX_QUEUE3)
1831 +
1832 +#define E1000_EIMS_RX_QUEUE E1000_EICR_RX_QUEUE
1833 +#define E1000_EIMS_TX_QUEUE E1000_EICR_TX_QUEUE
1834 +
1835 +#define EIMS_ENABLE_MASK ( \
1836 +    E1000_EIMS_RX_QUEUE  | \
1837 +    E1000_EIMS_TX_QUEUE  | \
1838 +    E1000_EIMS_TCP_TIMER | \
1839 +    E1000_EIMS_OTHER)
1840 +
1841 +/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
1842 +#define E1000_IMIR_PORT_IM_EN     0x00010000  /* TCP port enable */
1843 +#define E1000_IMIR_PORT_BP        0x00020000  /* TCP port check bypass */
1844 +#define E1000_IMIREXT_SIZE_BP     0x00001000  /* Packet size bypass */
1845 +#define E1000_IMIREXT_CTRL_URG    0x00002000  /* Check URG bit in header */
1846 +#define E1000_IMIREXT_CTRL_ACK    0x00004000  /* Check ACK bit in header */
1847 +#define E1000_IMIREXT_CTRL_PSH    0x00008000  /* Check PSH bit in header */
1848 +#define E1000_IMIREXT_CTRL_RST    0x00010000  /* Check RST bit in header */
1849 +#define E1000_IMIREXT_CTRL_SYN    0x00020000  /* Check SYN bit in header */
1850 +#define E1000_IMIREXT_CTRL_FIN    0x00040000  /* Check FIN bit in header */
1851 +#define E1000_IMIREXT_CTRL_BP     0x00080000  /* Bypass check of ctrl bits */
1852 +
1853 +/* Receive Descriptor - Advanced */
1854 +union e1000_adv_rx_desc {
1855 +       struct {
1856 +               __le64 pkt_addr;             /* Packet buffer address */
1857 +               __le64 hdr_addr;             /* Header buffer address */
1858 +       } read;
1859 +       struct {
1860 +               struct {
1861 +                       union {
1862 +                               __le32 data;
1863 +                               struct {
1864 +                                       __le16 pkt_info; /*RSS type, Pkt type*/
1865 +                                       __le16 hdr_info; /* Split Header,
1866 +                                                         * header buffer len*/
1867 +                               } hs_rss;
1868 +                       } lo_dword;
1869 +                       union {
1870 +                               __le32 rss;          /* RSS Hash */
1871 +                               struct {
1872 +                                       __le16 ip_id;    /* IP id */
1873 +                                       __le16 csum;     /* Packet Checksum */
1874 +                               } csum_ip;
1875 +                       } hi_dword;
1876 +               } lower;
1877 +               struct {
1878 +                       __le32 status_error;     /* ext status/error */
1879 +                       __le16 length;           /* Packet length */
1880 +                       __le16 vlan;             /* VLAN tag */
1881 +               } upper;
1882 +       } wb;  /* writeback */
1883 +};
1884 +
1885 +#define E1000_RXDADV_RSSTYPE_MASK        0x0000000F
1886 +#define E1000_RXDADV_RSSTYPE_SHIFT       12
1887 +#define E1000_RXDADV_HDRBUFLEN_MASK      0x7FE0
1888 +#define E1000_RXDADV_HDRBUFLEN_SHIFT     5
1889 +#define E1000_RXDADV_SPLITHEADER_EN      0x00001000
1890 +#define E1000_RXDADV_SPH                 0x8000
1891 +#define E1000_RXDADV_STAT_TS             0x10000 /* Pkt was time stamped */
1892 +#define E1000_RXDADV_ERR_HBO             0x00800000
1893 +
1894 +/* RSS Hash results */
1895 +#define E1000_RXDADV_RSSTYPE_NONE        0x00000000
1896 +#define E1000_RXDADV_RSSTYPE_IPV4_TCP    0x00000001
1897 +#define E1000_RXDADV_RSSTYPE_IPV4        0x00000002
1898 +#define E1000_RXDADV_RSSTYPE_IPV6_TCP    0x00000003
1899 +#define E1000_RXDADV_RSSTYPE_IPV6_EX     0x00000004
1900 +#define E1000_RXDADV_RSSTYPE_IPV6        0x00000005
1901 +#define E1000_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006
1902 +#define E1000_RXDADV_RSSTYPE_IPV4_UDP    0x00000007
1903 +#define E1000_RXDADV_RSSTYPE_IPV6_UDP    0x00000008
1904 +#define E1000_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009
1905 +
1906 +/* RSS Packet Types as indicated in the receive descriptor */
1907 +#define E1000_RXDADV_PKTTYPE_NONE        0x00000000
1908 +#define E1000_RXDADV_PKTTYPE_IPV4        0x00000010 /* IPV4 hdr present */
1909 +#define E1000_RXDADV_PKTTYPE_IPV4_EX     0x00000020 /* IPV4 hdr + extensions */
1910 +#define E1000_RXDADV_PKTTYPE_IPV6        0x00000040 /* IPV6 hdr present */
1911 +#define E1000_RXDADV_PKTTYPE_IPV6_EX     0x00000080 /* IPV6 hdr + extensions */
1912 +#define E1000_RXDADV_PKTTYPE_TCP         0x00000100 /* TCP hdr present */
1913 +#define E1000_RXDADV_PKTTYPE_UDP         0x00000200 /* UDP hdr present */
1914 +#define E1000_RXDADV_PKTTYPE_SCTP        0x00000400 /* SCTP hdr present */
1915 +#define E1000_RXDADV_PKTTYPE_NFS         0x00000800 /* NFS hdr present */
1916 +
1917 +#define E1000_RXDADV_PKTTYPE_IPSEC_ESP   0x00001000 /* IPSec ESP */
1918 +#define E1000_RXDADV_PKTTYPE_IPSEC_AH    0x00002000 /* IPSec AH */
1919 +#define E1000_RXDADV_PKTTYPE_LINKSEC     0x00004000 /* LinkSec Encap */
1920 +#define E1000_RXDADV_PKTTYPE_ETQF        0x00008000 /* PKTTYPE is ETQF index */
1921 +#define E1000_RXDADV_PKTTYPE_ETQF_MASK   0x00000070 /* ETQF has 8 indices */
1922 +#define E1000_RXDADV_PKTTYPE_ETQF_SHIFT  4          /* Right-shift 4 bits */
1923 +
1924 +/* LinkSec results */
1925 +/* Security Processing bit Indication */
1926 +#define E1000_RXDADV_LNKSEC_STATUS_SECP         0x00020000
1927 +#define E1000_RXDADV_LNKSEC_ERROR_BIT_MASK      0x18000000
1928 +#define E1000_RXDADV_LNKSEC_ERROR_NO_SA_MATCH   0x08000000
1929 +#define E1000_RXDADV_LNKSEC_ERROR_REPLAY_ERROR  0x10000000
1930 +#define E1000_RXDADV_LNKSEC_ERROR_BAD_SIG       0x18000000
1931 +
1932 +#define E1000_RXDADV_IPSEC_STATUS_SECP          0x00020000
1933 +#define E1000_RXDADV_IPSEC_ERROR_BIT_MASK       0x18000000
1934 +#define E1000_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL       0x08000000
1935 +#define E1000_RXDADV_IPSEC_ERROR_INVALID_LENGTH         0x10000000
1936 +#define E1000_RXDADV_IPSEC_ERROR_AUTHENTICATION_FAILED  0x18000000
1937 +
1938 +/* Transmit Descriptor - Advanced */
1939 +union e1000_adv_tx_desc {
1940 +       struct {
1941 +               __le64 buffer_addr;    /* Address of descriptor's data buf */
1942 +               __le32 cmd_type_len;
1943 +               __le32 olinfo_status;
1944 +       } read;
1945 +       struct {
1946 +               __le64 rsvd;       /* Reserved */
1947 +               __le32 nxtseq_seed;
1948 +               __le32 status;
1949 +       } wb;
1950 +};
1951 +
1952 +/* Adv Transmit Descriptor Config Masks */
1953 +#define E1000_ADVTXD_DTYP_CTXT    0x00200000 /* Advanced Context Descriptor */
1954 +#define E1000_ADVTXD_DTYP_DATA    0x00300000 /* Advanced Data Descriptor */
1955 +#define E1000_ADVTXD_DCMD_EOP     0x01000000 /* End of Packet */
1956 +#define E1000_ADVTXD_DCMD_IFCS    0x02000000 /* Insert FCS (Ethernet CRC) */
1957 +#define E1000_ADVTXD_DCMD_RS      0x08000000 /* Report Status */
1958 +#define E1000_ADVTXD_DCMD_DDTYP_ISCSI  0x10000000 /* DDP hdr type or iSCSI */
1959 +#define E1000_ADVTXD_DCMD_DEXT    0x20000000 /* Descriptor extension (1=Adv) */
1960 +#define E1000_ADVTXD_DCMD_VLE     0x40000000 /* VLAN pkt enable */
1961 +#define E1000_ADVTXD_DCMD_TSE     0x80000000 /* TCP Seg enable */
1962 +#define E1000_ADVTXD_MAC_LINKSEC  0x00040000 /* Apply LinkSec on packet */
1963 +#define E1000_ADVTXD_MAC_TSTAMP   0x00080000 /* IEEE1588 Timestamp packet */
1964 +#define E1000_ADVTXD_STAT_SN_CRC  0x00000002 /* NXTSEQ/SEED present in WB */
1965 +#define E1000_ADVTXD_IDX_SHIFT    4  /* Adv desc Index shift */
1966 +#define E1000_ADVTXD_POPTS_ISCO_1ST  0x00000000 /* 1st TSO of iSCSI PDU */
1967 +#define E1000_ADVTXD_POPTS_ISCO_MDL  0x00000800 /* Middle TSO of iSCSI PDU */
1968 +#define E1000_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */
1969 +#define E1000_ADVTXD_POPTS_ISCO_FULL 0x00001800 /* 1st&Last TSO-full iSCSI PDU*/
1970 +#define E1000_ADVTXD_POPTS_IPSEC     0x00000400 /* IPSec offload request */
1971 +#define E1000_ADVTXD_PAYLEN_SHIFT    14 /* Adv desc PAYLEN shift */
1972 +
1973 +/* Context descriptors */
1974 +struct e1000_adv_tx_context_desc {
1975 +       __le32 vlan_macip_lens;
1976 +       __le32 seqnum_seed;
1977 +       __le32 type_tucmd_mlhl;
1978 +       __le32 mss_l4len_idx;
1979 +};
1980 +
1981 +#define E1000_ADVTXD_MACLEN_SHIFT    9  /* Adv ctxt desc mac len shift */
1982 +#define E1000_ADVTXD_VLAN_SHIFT     16  /* Adv ctxt vlan tag shift */
1983 +#define E1000_ADVTXD_TUCMD_IPV4    0x00000400  /* IP Packet Type: 1=IPv4 */
1984 +#define E1000_ADVTXD_TUCMD_IPV6    0x00000000  /* IP Packet Type: 0=IPv6 */
1985 +#define E1000_ADVTXD_TUCMD_L4T_UDP 0x00000000  /* L4 Packet TYPE of UDP */
1986 +#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800  /* L4 Packet TYPE of TCP */
1987 +#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000  /* L4 Packet TYPE of SCTP */
1988 +#define E1000_ADVTXD_TUCMD_IPSEC_TYPE_ESP    0x00002000 /* IPSec Type ESP */
1989 +/* IPSec Encrypt Enable for ESP */
1990 +#define E1000_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN  0x00004000
1991 +#define E1000_ADVTXD_TUCMD_MKRREQ  0x00002000 /* Req requires Markers and CRC */
1992 +#define E1000_ADVTXD_L4LEN_SHIFT     8  /* Adv ctxt L4LEN shift */
1993 +#define E1000_ADVTXD_MSS_SHIFT      16  /* Adv ctxt MSS shift */
1994 +/* Adv ctxt IPSec SA IDX mask */
1995 +#define E1000_ADVTXD_IPSEC_SA_INDEX_MASK     0x000000FF
1996 +/* Adv ctxt IPSec ESP len mask */
1997 +#define E1000_ADVTXD_IPSEC_ESP_LEN_MASK      0x000000FF
1998 +
1999 +/* Additional Transmit Descriptor Control definitions */
2000 +#define E1000_TXDCTL_QUEUE_ENABLE  0x02000000 /* Enable specific Tx Queue */
2001 +#define E1000_TXDCTL_SWFLSH        0x04000000 /* Tx Desc. write-back flushing */
2002 +/* Tx Queue Arbitration Priority 0=low, 1=high */
2003 +#define E1000_TXDCTL_PRIORITY      0x08000000
2004 +
2005 +/* Additional Receive Descriptor Control definitions */
2006 +#define E1000_RXDCTL_QUEUE_ENABLE  0x02000000 /* Enable specific Rx Queue */
2007 +#define E1000_RXDCTL_SWFLSH        0x04000000 /* Rx Desc. write-back flushing */
2008 +
2009 +/* Direct Cache Access (DCA) definitions */
2010 +#define E1000_DCA_CTRL_DCA_ENABLE  0x00000000 /* DCA Enable */
2011 +#define E1000_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */
2012 +
2013 +#define E1000_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */
2014 +#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */
2015 +
2016 +#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
2017 +#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
2018 +#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
2019 +#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
2020 +
2021 +#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
2022 +#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
2023 +#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
2024 +
2025 +#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */
2026 +#define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */
2027 +#define E1000_DCA_TXCTRL_CPUID_SHIFT_82576 24 /* Tx CPUID */
2028 +#define E1000_DCA_RXCTRL_CPUID_SHIFT_82576 24 /* Rx CPUID */
2029 +
2030 +/* Additional interrupt register bit definitions */
2031 +#define E1000_ICR_LSECPNS       0x00000020          /* PN threshold - server */
2032 +#define E1000_IMS_LSECPNS       E1000_ICR_LSECPNS   /* PN threshold - server */
2033 +#define E1000_ICS_LSECPNS       E1000_ICR_LSECPNS   /* PN threshold - server */
2034 +
2035 +/* ETQF register bit definitions */
2036 +#define E1000_ETQF_FILTER_ENABLE   (1 << 26)
2037 +#define E1000_ETQF_IMM_INT         (1 << 29)
2038 +#define E1000_ETQF_1588            (1 << 30)
2039 +#define E1000_ETQF_QUEUE_ENABLE    (1 << 31)
2040 +/*
2041 + * ETQF filter list: one static filter per filter consumer. This is
2042 + *                   to avoid filter collisions later. Add new filters
2043 + *                   here!!
2044 + *
2045 + * Current filters:
2046 + *    EAPOL 802.1x (0x888e): Filter 0
2047 + */
2048 +#define E1000_ETQF_FILTER_EAPOL          0
2049 +
2050 +#define E1000_FTQF_VF_BP               0x00008000
2051 +#define E1000_FTQF_1588_TIME_STAMP     0x08000000
2052 +#define E1000_FTQF_MASK                0xF0000000
2053 +#define E1000_FTQF_MASK_PROTO_BP       0x10000000
2054 +#define E1000_FTQF_MASK_SOURCE_ADDR_BP 0x20000000
2055 +#define E1000_FTQF_MASK_DEST_ADDR_BP   0x40000000
2056 +#define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000
2057 +
2058 +#define E1000_NVM_APME_82575          0x0400
2059 +#define MAX_NUM_VFS                   8
2060 +
2061 +#define E1000_DTXSWC_MAC_SPOOF_MASK   0x000000FF /* Per VF MAC spoof control */
2062 +#define E1000_DTXSWC_VLAN_SPOOF_MASK  0x0000FF00 /* Per VF VLAN spoof control */
2063 +#define E1000_DTXSWC_LLE_MASK         0x00FF0000 /* Per VF Local LB enables */
2064 +#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8
2065 +#define E1000_DTXSWC_LLE_SHIFT        16
2066 +#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31)  /* global VF LB enable */
2067 +
2068 +/* Easy defines for setting default pool, would normally be left a zero */
2069 +#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7
2070 +#define E1000_VT_CTL_DEFAULT_POOL_MASK  (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT)
2071 +
2072 +/* Other useful VMD_CTL register defines */
2073 +#define E1000_VT_CTL_IGNORE_MAC         (1 << 28)
2074 +#define E1000_VT_CTL_DISABLE_DEF_POOL   (1 << 29)
2075 +#define E1000_VT_CTL_VM_REPL_EN         (1 << 30)
2076 +
2077 +/* Per VM Offload register setup */
2078 +#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */
2079 +#define E1000_VMOLR_LPE        0x00010000 /* Accept Long packet */
2080 +#define E1000_VMOLR_RSSE       0x00020000 /* Enable RSS */
2081 +#define E1000_VMOLR_AUPE       0x01000000 /* Accept untagged packets */
2082 +#define E1000_VMOLR_ROMPE      0x02000000 /* Accept overflow multicast */
2083 +#define E1000_VMOLR_ROPE       0x04000000 /* Accept overflow unicast */
2084 +#define E1000_VMOLR_BAM        0x08000000 /* Accept Broadcast packets */
2085 +#define E1000_VMOLR_MPME       0x10000000 /* Multicast promiscuous mode */
2086 +#define E1000_VMOLR_STRVLAN    0x40000000 /* Vlan stripping enable */
2087 +#define E1000_VMOLR_STRCRC     0x80000000 /* CRC stripping enable */
2088 +
2089 +#define E1000_VLVF_ARRAY_SIZE     32
2090 +#define E1000_VLVF_VLANID_MASK    0x00000FFF
2091 +#define E1000_VLVF_POOLSEL_SHIFT  12
2092 +#define E1000_VLVF_POOLSEL_MASK   (0xFF << E1000_VLVF_POOLSEL_SHIFT)
2093 +#define E1000_VLVF_LVLAN          0x00100000
2094 +#define E1000_VLVF_VLANID_ENABLE  0x80000000
2095 +
2096 +#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
2097 +
2098 +#define E1000_IOVCTL 0x05BBC
2099 +#define E1000_IOVCTL_REUSE_VFQ 0x00000001
2100 +
2101 +#define E1000_RPLOLR_STRVLAN   0x40000000
2102 +#define E1000_RPLOLR_STRCRC    0x80000000
2103 +
2104 +#define E1000_DTXCTL_8023LL     0x0004
2105 +#define E1000_DTXCTL_VLAN_ADDED 0x0008
2106 +#define E1000_DTXCTL_OOS_ENABLE 0x0010
2107 +#define E1000_DTXCTL_MDP_EN     0x0020
2108 +#define E1000_DTXCTL_SPOOF_INT  0x0040
2109 +
2110 +#define ALL_QUEUES   0xFFFF
2111 +
2112 +/* RX packet buffer size defines */
2113 +#define E1000_RXPBS_SIZE_MASK_82576  0x0000007F
2114 +void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable);
2115 +void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable);
2116 +#endif /* _E1000_82575_H_ */
2117 Index: linux-2.6.22/drivers/net/igb/e1000_api.c
2118 ===================================================================
2119 --- /dev/null   1970-01-01 00:00:00.000000000 +0000
2120 +++ linux-2.6.22/drivers/net/igb/e1000_api.c    2009-12-18 12:39:22.000000000 -0500
2121 @@ -0,0 +1,1096 @@
2122 +/*******************************************************************************
2123 +
2124 +  Intel(R) Gigabit Ethernet Linux driver
2125 +  Copyright(c) 2007-2009 Intel Corporation.
2126 +
2127 +  This program is free software; you can redistribute it and/or modify it
2128 +  under the terms and conditions of the GNU General Public License,
2129 +  version 2, as published by the Free Software Foundation.
2130 +
2131 +  This program is distributed in the hope it will be useful, but WITHOUT
2132 +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
2133 +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
2134 +  more details.
2135 +
2136 +  You should have received a copy of the GNU General Public License along with
2137 +  this program; if not, write to the Free Software Foundation, Inc.,
2138 +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
2139 +
2140 +  The full GNU General Public License is included in this distribution in
2141 +  the file called "COPYING".
2142 +
2143 +  Contact Information:
2144 +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
2145 +  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
2146 +
2147 +*******************************************************************************/
2148 +
2149 +#include "e1000_api.h"
2150 +
2151 +/**
2152 + *  e1000_init_mac_params - Initialize MAC function pointers
2153 + *  @hw: pointer to the HW structure
2154 + *
2155 + *  This function initializes the function pointers for the MAC
2156 + *  set of functions.  Called by drivers or by e1000_setup_init_funcs.
2157 + **/
2158 +s32 e1000_init_mac_params(struct e1000_hw *hw)
2159 +{
2160 +       s32 ret_val = E1000_SUCCESS;
2161 +
2162 +       if (hw->mac.ops.init_params) {
2163 +               ret_val = hw->mac.ops.init_params(hw);
2164 +               if (ret_val) {
2165 +                       DEBUGOUT("MAC Initialization Error\n");
2166 +                       goto out;
2167 +               }
2168 +       } else {
2169 +               DEBUGOUT("mac.init_mac_params was NULL\n");
2170 +               ret_val = -E1000_ERR_CONFIG;
2171 +       }
2172 +
2173 +out:
2174 +       return ret_val;
2175 +}
2176 +
2177 +/**
2178 + *  e1000_init_nvm_params - Initialize NVM function pointers
2179 + *  @hw: pointer to the HW structure
2180 + *
2181 + *  This function initializes the function pointers for the NVM
2182 + *  set of functions.  Called by drivers or by e1000_setup_init_funcs.
2183 + **/
2184 +s32 e1000_init_nvm_params(struct e1000_hw *hw)
2185 +{
2186 +       s32 ret_val = E1000_SUCCESS;
2187 +
2188 +       if (hw->nvm.ops.init_params) {
2189 +               ret_val = hw->nvm.ops.init_params(hw);
2190 +               if (ret_val) {
2191 +                       DEBUGOUT("NVM Initialization Error\n");
2192 +                       goto out;
2193 +               }
2194 +       } else {
2195 +               DEBUGOUT("nvm.init_nvm_params was NULL\n");
2196 +               ret_val = -E1000_ERR_CONFIG;
2197 +       }
2198 +
2199 +out:
2200 +       return ret_val;
2201 +}
2202 +
2203 +/**
2204 + *  e1000_init_phy_params - Initialize PHY function pointers
2205 + *  @hw: pointer to the HW structure
2206 + *
2207 + *  This function initializes the function pointers for the PHY
2208 + *  set of functions.  Called by drivers or by e1000_setup_init_funcs.
2209 + **/
2210 +s32 e1000_init_phy_params(struct e1000_hw *hw)
2211 +{
2212 +       s32 ret_val = E1000_SUCCESS;
2213 +
2214 +       if (hw->phy.ops.init_params) {
2215 +               ret_val = hw->phy.ops.init_params(hw);
2216 +               if (ret_val) {
2217 +                       DEBUGOUT("PHY Initialization Error\n");
2218 +                       goto out;
2219 +               }
2220 +       } else {
2221 +               DEBUGOUT("phy.init_phy_params was NULL\n");
2222 +               ret_val =  -E1000_ERR_CONFIG;
2223 +       }
2224 +
2225 +out:
2226 +       return ret_val;
2227 +}
2228 +
2229 +/**
2230 + *  e1000_init_mbx_params - Initialize mailbox function pointers
2231 + *  @hw: pointer to the HW structure
2232 + *
2233 + *  This function initializes the function pointers for the mailbox
2234 + *  set of functions.  Called by drivers or by e1000_setup_init_funcs.
2235 + **/
2236 +s32 e1000_init_mbx_params(struct e1000_hw *hw)
2237 +{
2238 +       s32 ret_val = E1000_SUCCESS;
2239 +
2240 +       if (hw->mbx.ops.init_params) {
2241 +               ret_val = hw->mbx.ops.init_params(hw);
2242 +               if (ret_val) {
2243 +                       DEBUGOUT("Mailbox Initialization Error\n");
2244 +                       goto out;
2245 +               }
2246 +       } else {
2247 +               DEBUGOUT("mbx.init_mbx_params was NULL\n");
2248 +               ret_val =  -E1000_ERR_CONFIG;
2249 +       }
2250 +
2251 +out:
2252 +       return ret_val;
2253 +}
2254 +
2255 +/**
2256 + *  e1000_set_mac_type - Sets MAC type
2257 + *  @hw: pointer to the HW structure
2258 + *
2259 + *  This function sets the mac type of the adapter based on the
2260 + *  device ID stored in the hw structure.
2261 + *  MUST BE FIRST FUNCTION CALLED (explicitly or through
2262 + *  e1000_setup_init_funcs()).
2263 + **/
2264 +s32 e1000_set_mac_type(struct e1000_hw *hw)
2265 +{
2266 +       struct e1000_mac_info *mac = &hw->mac;
2267 +       s32 ret_val = E1000_SUCCESS;
2268 +
2269 +       DEBUGFUNC("e1000_set_mac_type");
2270 +
2271 +       switch (hw->device_id) {
2272 +       case E1000_DEV_ID_82575EB_COPPER:
2273 +       case E1000_DEV_ID_82575EB_FIBER_SERDES:
2274 +       case E1000_DEV_ID_82575GB_QUAD_COPPER:
2275 +               mac->type = e1000_82575;
2276 +               break;
2277 +       case E1000_DEV_ID_82576:
2278 +       case E1000_DEV_ID_82576_FIBER:
2279 +       case E1000_DEV_ID_82576_SERDES:
2280 +       case E1000_DEV_ID_82576_QUAD_COPPER:
2281 +       case E1000_DEV_ID_82576_NS:
2282 +       case E1000_DEV_ID_82576_NS_SERDES:
2283 +       case E1000_DEV_ID_82576_SERDES_QUAD:
2284 +               mac->type = e1000_82576;
2285 +               break;
2286 +       default:
2287 +               /* Should never have loaded on this device */
2288 +               ret_val = -E1000_ERR_MAC_INIT;
2289 +               break;
2290 +       }
2291 +
2292 +       return ret_val;
2293 +}
2294 +
2295 +/**
2296 + *  e1000_setup_init_funcs - Initializes function pointers
2297 + *  @hw: pointer to the HW structure
2298 + *  @init_device: true will initialize the rest of the function pointers
2299 + *                 getting the device ready for use.  false will only set
2300 + *                 MAC type and the function pointers for the other init
2301 + *                 functions.  Passing false will not generate any hardware
2302 + *                 reads or writes.
2303 + *
2304 + *  This function must be called by a driver in order to use the rest
2305 + *  of the 'shared' code files. Called by drivers only.
2306 + **/
2307 +s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device)
2308 +{
2309 +       s32 ret_val;
2310 +
2311 +       /* Can't do much good without knowing the MAC type. */
2312 +       ret_val = e1000_set_mac_type(hw);
2313 +       if (ret_val) {
2314 +               DEBUGOUT("ERROR: MAC type could not be set properly.\n");
2315 +               goto out;
2316 +       }
2317 +
2318 +       if (!hw->hw_addr) {
2319 +               DEBUGOUT("ERROR: Registers not mapped\n");
2320 +               ret_val = -E1000_ERR_CONFIG;
2321 +               goto out;
2322 +       }
2323 +
2324 +       /*
2325 +        * Init function pointers to generic implementations. We do this first
2326 +        * allowing a driver module to override it afterward.
2327 +        */
2328 +       e1000_init_mac_ops_generic(hw);
2329 +       e1000_init_nvm_ops_generic(hw);
2330 +       e1000_init_mbx_ops_generic(hw);
2331 +
2332 +       /*
2333 +        * Set up the init function pointers. These are functions within the
2334 +        * adapter family file that sets up function pointers for the rest of
2335 +        * the functions in that family.
2336 +        */
2337 +       switch (hw->mac.type) {
2338 +       case e1000_82575:
2339 +       case e1000_82576:
2340 +               e1000_init_function_pointers_82575(hw);
2341 +               break;
2342 +       default:
2343 +               DEBUGOUT("Hardware not supported\n");
2344 +               ret_val = -E1000_ERR_CONFIG;
2345 +               break;
2346 +       }
2347 +
2348 +       /*
2349 +        * Initialize the rest of the function pointers. These require some
2350 +        * register reads/writes in some cases.
2351 +        */
2352 +       if (!(ret_val) && init_device) {
2353 +               ret_val = e1000_init_mac_params(hw);
2354 +               if (ret_val)
2355 +                       goto out;
2356 +
2357 +               ret_val = e1000_init_nvm_params(hw);
2358 +               if (ret_val)
2359 +                       goto out;
2360 +
2361 +               ret_val = e1000_init_phy_params(hw);
2362 +               if (ret_val)
2363 +                       goto out;
2364 +
2365 +               ret_val = e1000_init_mbx_params(hw);
2366 +               if (ret_val)
2367 +                       goto out;
2368 +       }
2369 +
2370 +out:
2371 +       return ret_val;
2372 +}
2373 +
2374 +/**
2375 + *  e1000_get_bus_info - Obtain bus information for adapter
2376 + *  @hw: pointer to the HW structure
2377 + *
2378 + *  This will obtain information about the HW bus for which the
2379 + *  adapter is attached and stores it in the hw structure. This is a
2380 + *  function pointer entry point called by drivers.
2381 + **/
2382 +s32 e1000_get_bus_info(struct e1000_hw *hw)
2383 +{
2384 +       if (hw->mac.ops.get_bus_info)
2385 +               return hw->mac.ops.get_bus_info(hw);
2386 +
2387 +       return E1000_SUCCESS;
2388 +}
2389 +
2390 +/**
2391 + *  e1000_clear_vfta - Clear VLAN filter table
2392 + *  @hw: pointer to the HW structure
2393 + *
2394 + *  This clears the VLAN filter table on the adapter. This is a function
2395 + *  pointer entry point called by drivers.
2396 + **/
2397 +void e1000_clear_vfta(struct e1000_hw *hw)
2398 +{
2399 +       if (hw->mac.ops.clear_vfta)
2400 +               hw->mac.ops.clear_vfta(hw);
2401 +}
2402 +
2403 +/**
2404 + *  e1000_write_vfta - Write value to VLAN filter table
2405 + *  @hw: pointer to the HW structure
2406 + *  @offset: the 32-bit offset in which to write the value to.
2407 + *  @value: the 32-bit value to write at location offset.
2408 + *
2409 + *  This writes a 32-bit value to a 32-bit offset in the VLAN filter
2410 + *  table. This is a function pointer entry point called by drivers.
2411 + **/
2412 +void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
2413 +{
2414 +       if (hw->mac.ops.write_vfta)
2415 +               hw->mac.ops.write_vfta(hw, offset, value);
2416 +}
2417 +
2418 +/**
2419 + *  e1000_update_mc_addr_list - Update Multicast addresses
2420 + *  @hw: pointer to the HW structure
2421 + *  @mc_addr_list: array of multicast addresses to program
2422 + *  @mc_addr_count: number of multicast addresses to program
2423 + *
2424 + *  Updates the Multicast Table Array.
2425 + *  The caller must have a packed mc_addr_list of multicast addresses.
2426 + **/
2427 +void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
2428 +                               u32 mc_addr_count)
2429 +{
2430 +       if (hw->mac.ops.update_mc_addr_list)
2431 +               hw->mac.ops.update_mc_addr_list(hw, mc_addr_list,
2432 +                                               mc_addr_count);
2433 +}
2434 +
2435 +/**
2436 + *  e1000_force_mac_fc - Force MAC flow control
2437 + *  @hw: pointer to the HW structure
2438 + *
2439 + *  Force the MAC's flow control settings. Currently no func pointer exists
2440 + *  and all implementations are handled in the generic version of this
2441 + *  function.
2442 + **/
2443 +s32 e1000_force_mac_fc(struct e1000_hw *hw)
2444 +{
2445 +       return e1000_force_mac_fc_generic(hw);
2446 +}
2447 +
2448 +/**
2449 + *  e1000_check_for_link - Check/Store link connection
2450 + *  @hw: pointer to the HW structure
2451 + *
2452 + *  This checks the link condition of the adapter and stores the
2453 + *  results in the hw->mac structure. This is a function pointer entry
2454 + *  point called by drivers.
2455 + **/
2456 +s32 e1000_check_for_link(struct e1000_hw *hw)
2457 +{
2458 +       if (hw->mac.ops.check_for_link)
2459 +               return hw->mac.ops.check_for_link(hw);
2460 +
2461 +       return -E1000_ERR_CONFIG;
2462 +}
2463 +
2464 +/**
2465 + *  e1000_check_mng_mode - Check management mode
2466 + *  @hw: pointer to the HW structure
2467 + *
2468 + *  This checks if the adapter has manageability enabled.
2469 + *  This is a function pointer entry point called by drivers.
2470 + **/
2471 +bool e1000_check_mng_mode(struct e1000_hw *hw)
2472 +{
2473 +       if (hw->mac.ops.check_mng_mode)
2474 +               return hw->mac.ops.check_mng_mode(hw);
2475 +
2476 +       return false;
2477 +}
2478 +
2479 +/**
2480 + *  e1000_mng_write_dhcp_info - Writes DHCP info to host interface
2481 + *  @hw: pointer to the HW structure
2482 + *  @buffer: pointer to the host interface
2483 + *  @length: size of the buffer
2484 + *
2485 + *  Writes the DHCP information to the host interface.
2486 + **/
2487 +s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length)
2488 +{
2489 +       return e1000_mng_write_dhcp_info_generic(hw, buffer, length);
2490 +}
2491 +
2492 +/**
2493 + *  e1000_reset_hw - Reset hardware
2494 + *  @hw: pointer to the HW structure
2495 + *
2496 + *  This resets the hardware into a known state. This is a function pointer
2497 + *  entry point called by drivers.
2498 + **/
2499 +s32 e1000_reset_hw(struct e1000_hw *hw)
2500 +{
2501 +       if (hw->mac.ops.reset_hw)
2502 +               return hw->mac.ops.reset_hw(hw);
2503 +
2504 +       return -E1000_ERR_CONFIG;
2505 +}
2506 +
2507 +/**
2508 + *  e1000_init_hw - Initialize hardware
2509 + *  @hw: pointer to the HW structure
2510 + *
2511 + *  This inits the hardware readying it for operation. This is a function
2512 + *  pointer entry point called by drivers.
2513 + **/
2514 +s32 e1000_init_hw(struct e1000_hw *hw)
2515 +{
2516 +       if (hw->mac.ops.init_hw)
2517 +               return hw->mac.ops.init_hw(hw);
2518 +
2519 +       return -E1000_ERR_CONFIG;
2520 +}
2521 +
2522 +/**
2523 + *  e1000_setup_link - Configures link and flow control
2524 + *  @hw: pointer to the HW structure
2525 + *
2526 + *  This configures link and flow control settings for the adapter. This
2527 + *  is a function pointer entry point called by drivers. While modules can
2528 + *  also call this, they probably call their own version of this function.
2529 + **/
2530 +s32 e1000_setup_link(struct e1000_hw *hw)
2531 +{
2532 +       if (hw->mac.ops.setup_link)
2533 +               return hw->mac.ops.setup_link(hw);
2534 +
2535 +       return -E1000_ERR_CONFIG;
2536 +}
2537 +
2538 +/**
2539 + *  e1000_get_speed_and_duplex - Returns current speed and duplex
2540 + *  @hw: pointer to the HW structure
2541 + *  @speed: pointer to a 16-bit value to store the speed
2542 + *  @duplex: pointer to a 16-bit value to store the duplex.
2543 + *
2544 + *  This returns the speed and duplex of the adapter in the two 'out'
2545 + *  variables passed in. This is a function pointer entry point called
2546 + *  by drivers.
2547 + **/
2548 +s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
2549 +{
2550 +       if (hw->mac.ops.get_link_up_info)
2551 +               return hw->mac.ops.get_link_up_info(hw, speed, duplex);
2552 +
2553 +       return -E1000_ERR_CONFIG;
2554 +}
2555 +
2556 +/**
2557 + *  e1000_setup_led - Configures SW controllable LED
2558 + *  @hw: pointer to the HW structure
2559 + *
2560 + *  This prepares the SW controllable LED for use and saves the current state
2561 + *  of the LED so it can be later restored. This is a function pointer entry
2562 + *  point called by drivers.
2563 + **/
2564 +s32 e1000_setup_led(struct e1000_hw *hw)
2565 +{
2566 +       if (hw->mac.ops.setup_led)
2567 +               return hw->mac.ops.setup_led(hw);
2568 +
2569 +       return E1000_SUCCESS;
2570 +}
2571 +
2572 +/**
2573 + *  e1000_cleanup_led - Restores SW controllable LED
2574 + *  @hw: pointer to the HW structure
2575 + *
2576 + *  This restores the SW controllable LED to the value saved off by
2577 + *  e1000_setup_led. This is a function pointer entry point called by drivers.
2578 + **/
2579 +s32 e1000_cleanup_led(struct e1000_hw *hw)
2580 +{
2581 +       if (hw->mac.ops.cleanup_led)
2582 +               return hw->mac.ops.cleanup_led(hw);
2583 +
2584 +       return E1000_SUCCESS;
2585 +}
2586 +
2587 +/**
2588 + *  e1000_blink_led - Blink SW controllable LED
2589 + *  @hw: pointer to the HW structure
2590 + *
2591 + *  This starts the adapter LED blinking. Request the LED to be setup first
2592 + *  and cleaned up after. This is a function pointer entry point called by
2593 + *  drivers.
2594 + **/
2595 +s32 e1000_blink_led(struct e1000_hw *hw)
2596 +{
2597 +       if (hw->mac.ops.blink_led)
2598 +               return hw->mac.ops.blink_led(hw);
2599 +
2600 +       return E1000_SUCCESS;
2601 +}
2602 +
2603 +/**
2604 + *  e1000_id_led_init - store LED configurations in SW
2605 + *  @hw: pointer to the HW structure
2606 + *
2607 + *  Initializes the LED config in SW. This is a function pointer entry point
2608 + *  called by drivers.
2609 + **/
2610 +s32 e1000_id_led_init(struct e1000_hw *hw)
2611 +{
2612 +       if (hw->mac.ops.id_led_init)
2613 +               return hw->mac.ops.id_led_init(hw);
2614 +
2615 +       return E1000_SUCCESS;
2616 +}
2617 +
2618 +/**
2619 + *  e1000_led_on - Turn on SW controllable LED
2620 + *  @hw: pointer to the HW structure
2621 + *
2622 + *  Turns the SW defined LED on. This is a function pointer entry point
2623 + *  called by drivers.
2624 + **/
2625 +s32 e1000_led_on(struct e1000_hw *hw)
2626 +{
2627 +       if (hw->mac.ops.led_on)
2628 +               return hw->mac.ops.led_on(hw);
2629 +
2630 +       return E1000_SUCCESS;
2631 +}
2632 +
2633 +/**
2634 + *  e1000_led_off - Turn off SW controllable LED
2635 + *  @hw: pointer to the HW structure
2636 + *
2637 + *  Turns the SW defined LED off. This is a function pointer entry point
2638 + *  called by drivers.
2639 + **/
2640 +s32 e1000_led_off(struct e1000_hw *hw)
2641 +{
2642 +       if (hw->mac.ops.led_off)
2643 +               return hw->mac.ops.led_off(hw);
2644 +
2645 +       return E1000_SUCCESS;
2646 +}
2647 +
2648 +/**
2649 + *  e1000_reset_adaptive - Reset adaptive IFS
2650 + *  @hw: pointer to the HW structure
2651 + *
2652 + *  Resets the adaptive IFS. Currently no func pointer exists and all
2653 + *  implementations are handled in the generic version of this function.
2654 + **/
2655 +void e1000_reset_adaptive(struct e1000_hw *hw)
2656 +{
2657 +       e1000_reset_adaptive_generic(hw);
2658 +}
2659 +
2660 +/**
2661 + *  e1000_update_adaptive - Update adaptive IFS
2662 + *  @hw: pointer to the HW structure
2663 + *
2664 + *  Updates adapter IFS. Currently no func pointer exists and all
2665 + *  implementations are handled in the generic version of this function.
2666 + **/
2667 +void e1000_update_adaptive(struct e1000_hw *hw)
2668 +{
2669 +       e1000_update_adaptive_generic(hw);
2670 +}
2671 +
2672 +/**
2673 + *  e1000_disable_pcie_master - Disable PCI-Express master access
2674 + *  @hw: pointer to the HW structure
2675 + *
2676 + *  Disables PCI-Express master access and verifies there are no pending
2677 + *  requests. Currently no func pointer exists and all implementations are
2678 + *  handled in the generic version of this function.
2679 + **/
2680 +s32 e1000_disable_pcie_master(struct e1000_hw *hw)
2681 +{
2682 +       return e1000_disable_pcie_master_generic(hw);
2683 +}
2684 +
2685 +/**
2686 + *  e1000_config_collision_dist - Configure collision distance
2687 + *  @hw: pointer to the HW structure
2688 + *
2689 + *  Configures the collision distance to the default value and is used
2690 + *  during link setup.
2691 + **/
2692 +void e1000_config_collision_dist(struct e1000_hw *hw)
2693 +{
2694 +       if (hw->mac.ops.config_collision_dist)
2695 +               hw->mac.ops.config_collision_dist(hw);
2696 +}
2697 +
2698 +/**
2699 + *  e1000_rar_set - Sets a receive address register
2700 + *  @hw: pointer to the HW structure
2701 + *  @addr: address to set the RAR to
2702 + *  @index: the RAR to set
2703 + *
2704 + *  Sets a Receive Address Register (RAR) to the specified address.
2705 + **/
2706 +void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
2707 +{
2708 +       if (hw->mac.ops.rar_set)
2709 +               hw->mac.ops.rar_set(hw, addr, index);
2710 +}
2711 +
2712 +/**
2713 + *  e1000_validate_mdi_setting - Ensures valid MDI/MDIX SW state
2714 + *  @hw: pointer to the HW structure
2715 + *
2716 + *  Ensures that the MDI/MDIX SW state is valid.
2717 + **/
2718 +s32 e1000_validate_mdi_setting(struct e1000_hw *hw)
2719 +{
2720 +       if (hw->mac.ops.validate_mdi_setting)
2721 +               return hw->mac.ops.validate_mdi_setting(hw);
2722 +
2723 +       return E1000_SUCCESS;
2724 +}
2725 +
2726 +/**
2727 + *  e1000_mta_set - Sets multicast table bit
2728 + *  @hw: pointer to the HW structure
2729 + *  @hash_value: Multicast hash value.
2730 + *
2731 + *  This sets the bit in the multicast table corresponding to the
2732 + *  hash value.  This is a function pointer entry point called by drivers.
2733 + **/
2734 +void e1000_mta_set(struct e1000_hw *hw, u32 hash_value)
2735 +{
2736 +       if (hw->mac.ops.mta_set)
2737 +               hw->mac.ops.mta_set(hw, hash_value);
2738 +}
2739 +
2740 +/**
2741 + *  e1000_hash_mc_addr - Determines address location in multicast table
2742 + *  @hw: pointer to the HW structure
2743 + *  @mc_addr: Multicast address to hash.
2744 + *
2745 + *  This hashes an address to determine its location in the multicast
2746 + *  table. Currently no func pointer exists and all implementations
2747 + *  are handled in the generic version of this function.
2748 + **/
2749 +u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
2750 +{
2751 +       return e1000_hash_mc_addr_generic(hw, mc_addr);
2752 +}
2753 +
2754 +/**
2755 + *  e1000_enable_tx_pkt_filtering - Enable packet filtering on TX
2756 + *  @hw: pointer to the HW structure
2757 + *
2758 + *  Enables packet filtering on transmit packets if manageability is enabled
2759 + *  and host interface is enabled.
2760 + *  Currently no func pointer exists and all implementations are handled in the
2761 + *  generic version of this function.
2762 + **/
2763 +bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw)
2764 +{
2765 +       return e1000_enable_tx_pkt_filtering_generic(hw);
2766 +}
2767 +
2768 +/**
2769 + *  e1000_mng_host_if_write - Writes to the manageability host interface
2770 + *  @hw: pointer to the HW structure
2771 + *  @buffer: pointer to the host interface buffer
2772 + *  @length: size of the buffer
2773 + *  @offset: location in the buffer to write to
2774 + *  @sum: sum of the data (not checksum)
2775 + *
2776 + *  This function writes the buffer content at the offset given on the host if.
2777 + *  It also does alignment considerations to do the writes in most efficient
2778 + *  way.  Also fills up the sum of the buffer in *buffer parameter.
2779 + **/
2780 +s32 e1000_mng_host_if_write(struct e1000_hw * hw, u8 *buffer, u16 length,
2781 +                            u16 offset, u8 *sum)
2782 +{
2783 +       if (hw->mac.ops.mng_host_if_write)
2784 +               return hw->mac.ops.mng_host_if_write(hw, buffer, length,
2785 +                                                    offset, sum);
2786 +
2787 +       return E1000_NOT_IMPLEMENTED;
2788 +}
2789 +
2790 +/**
2791 + *  e1000_mng_write_cmd_header - Writes manageability command header
2792 + *  @hw: pointer to the HW structure
2793 + *  @hdr: pointer to the host interface command header
2794 + *
2795 + *  Writes the command header after does the checksum calculation.
2796 + **/
2797 +s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
2798 +                               struct e1000_host_mng_command_header *hdr)
2799 +{
2800 +       if (hw->mac.ops.mng_write_cmd_header)
2801 +               return hw->mac.ops.mng_write_cmd_header(hw, hdr);
2802 +
2803 +       return E1000_NOT_IMPLEMENTED;
2804 +}
2805 +
2806 +/**
2807 + *  e1000_mng_enable_host_if - Checks host interface is enabled
2808 + *  @hw: pointer to the HW structure
2809 + *
2810 + *  Returns E1000_success upon success, else E1000_ERR_HOST_INTERFACE_COMMAND
2811 + *
2812 + *  This function checks whether the HOST IF is enabled for command operation
2813 + *  and also checks whether the previous command is completed.  It busy waits
2814 + *  in case of previous command is not completed.
2815 + **/
2816 +s32 e1000_mng_enable_host_if(struct e1000_hw * hw)
2817 +{
2818 +       if (hw->mac.ops.mng_enable_host_if)
2819 +               return hw->mac.ops.mng_enable_host_if(hw);
2820 +
2821 +       return E1000_NOT_IMPLEMENTED;
2822 +}
2823 +
2824 +/**
2825 + *  e1000_wait_autoneg - Waits for autonegotiation completion
2826 + *  @hw: pointer to the HW structure
2827 + *
2828 + *  Waits for autoneg to complete. Currently no func pointer exists and all
2829 + *  implementations are handled in the generic version of this function.
2830 + **/
2831 +s32 e1000_wait_autoneg(struct e1000_hw *hw)
2832 +{
2833 +       if (hw->mac.ops.wait_autoneg)
2834 +               return hw->mac.ops.wait_autoneg(hw);
2835 +
2836 +       return E1000_SUCCESS;
2837 +}
2838 +
2839 +/**
2840 + *  e1000_check_reset_block - Verifies PHY can be reset
2841 + *  @hw: pointer to the HW structure
2842 + *
2843 + *  Checks if the PHY is in a state that can be reset or if manageability
2844 + *  has it tied up. This is a function pointer entry point called by drivers.
2845 + **/
2846 +s32 e1000_check_reset_block(struct e1000_hw *hw)
2847 +{
2848 +       if (hw->phy.ops.check_reset_block)
2849 +               return hw->phy.ops.check_reset_block(hw);
2850 +
2851 +       return E1000_SUCCESS;
2852 +}
2853 +
2854 +/**
2855 + *  e1000_read_phy_reg - Reads PHY register
2856 + *  @hw: pointer to the HW structure
2857 + *  @offset: the register to read
2858 + *  @data: the buffer to store the 16-bit read.
2859 + *
2860 + *  Reads the PHY register and returns the value in data.
2861 + *  This is a function pointer entry point called by drivers.
2862 + **/
2863 +s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data)
2864 +{
2865 +       if (hw->phy.ops.read_reg)
2866 +               return hw->phy.ops.read_reg(hw, offset, data);
2867 +
2868 +       return E1000_SUCCESS;
2869 +}
2870 +
2871 +/**
2872 + *  e1000_write_phy_reg - Writes PHY register
2873 + *  @hw: pointer to the HW structure
2874 + *  @offset: the register to write
2875 + *  @data: the value to write.
2876 + *
2877 + *  Writes the PHY register at offset with the value in data.
2878 + *  This is a function pointer entry point called by drivers.
2879 + **/
2880 +s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data)
2881 +{
2882 +       if (hw->phy.ops.write_reg)
2883 +               return hw->phy.ops.write_reg(hw, offset, data);
2884 +
2885 +       return E1000_SUCCESS;
2886 +}
2887 +
2888 +/**
2889 + *  e1000_release_phy - Generic release PHY
2890 + *  @hw: pointer to the HW structure
2891 + *
2892 + *  Return if silicon family does not require a semaphore when accessing the
2893 + *  PHY.
2894 + **/
2895 +void e1000_release_phy(struct e1000_hw *hw)
2896 +{
2897 +       if (hw->phy.ops.release)
2898 +               hw->phy.ops.release(hw);
2899 +}
2900 +
2901 +/**
2902 + *  e1000_acquire_phy - Generic acquire PHY
2903 + *  @hw: pointer to the HW structure
2904 + *
2905 + *  Return success if silicon family does not require a semaphore when
2906 + *  accessing the PHY.
2907 + **/
2908 +s32 e1000_acquire_phy(struct e1000_hw *hw)
2909 +{
2910 +       if (hw->phy.ops.acquire)
2911 +               return hw->phy.ops.acquire(hw);
2912 +
2913 +       return E1000_SUCCESS;
2914 +}
2915 +
2916 +/**
2917 + *  e1000_read_kmrn_reg - Reads register using Kumeran interface
2918 + *  @hw: pointer to the HW structure
2919 + *  @offset: the register to read
2920 + *  @data: the location to store the 16-bit value read.
2921 + *
2922 + *  Reads a register out of the Kumeran interface. Currently no func pointer
2923 + *  exists and all implementations are handled in the generic version of
2924 + *  this function.
2925 + **/
2926 +s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data)
2927 +{
2928 +       return e1000_read_kmrn_reg_generic(hw, offset, data);
2929 +}
2930 +
2931 +/**
2932 + *  e1000_write_kmrn_reg - Writes register using Kumeran interface
2933 + *  @hw: pointer to the HW structure
2934 + *  @offset: the register to write
2935 + *  @data: the value to write.
2936 + *
2937 + *  Writes a register to the Kumeran interface. Currently no func pointer
2938 + *  exists and all implementations are handled in the generic version of
2939 + *  this function.
2940 + **/
2941 +s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data)
2942 +{
2943 +       return e1000_write_kmrn_reg_generic(hw, offset, data);
2944 +}
2945 +
2946 +/**
2947 + *  e1000_get_cable_length - Retrieves cable length estimation
2948 + *  @hw: pointer to the HW structure
2949 + *
2950 + *  This function estimates the cable length and stores them in
2951 + *  hw->phy.min_length and hw->phy.max_length. This is a function pointer
2952 + *  entry point called by drivers.
2953 + **/
2954 +s32 e1000_get_cable_length(struct e1000_hw *hw)
2955 +{
2956 +       if (hw->phy.ops.get_cable_length)
2957 +               return hw->phy.ops.get_cable_length(hw);
2958 +
2959 +       return E1000_SUCCESS;
2960 +}
2961 +
2962 +/**
2963 + *  e1000_get_phy_info - Retrieves PHY information from registers
2964 + *  @hw: pointer to the HW structure
2965 + *
2966 + *  This function gets some information from various PHY registers and
2967 + *  populates hw->phy values with it. This is a function pointer entry
2968 + *  point called by drivers.
2969 + **/
2970 +s32 e1000_get_phy_info(struct e1000_hw *hw)
2971 +{
2972 +       if (hw->phy.ops.get_info)
2973 +               return hw->phy.ops.get_info(hw);
2974 +
2975 +       return E1000_SUCCESS;
2976 +}
2977 +
2978 +/**
2979 + *  e1000_phy_hw_reset - Hard PHY reset
2980 + *  @hw: pointer to the HW structure
2981 + *
2982 + *  Performs a hard PHY reset. This is a function pointer entry point called
2983 + *  by drivers.
2984 + **/
2985 +s32 e1000_phy_hw_reset(struct e1000_hw *hw)
2986 +{
2987 +       if (hw->phy.ops.reset)
2988 +               return hw->phy.ops.reset(hw);
2989 +
2990 +       return E1000_SUCCESS;
2991 +}
2992 +
2993 +/**
2994 + *  e1000_phy_commit - Soft PHY reset
2995 + *  @hw: pointer to the HW structure
2996 + *
2997 + *  Performs a soft PHY reset on those that apply. This is a function pointer
2998 + *  entry point called by drivers.
2999 + **/
3000 +s32 e1000_phy_commit(struct e1000_hw *hw)
3001 +{
3002 +       if (hw->phy.ops.commit)
3003 +               return hw->phy.ops.commit(hw);
3004 +
3005 +       return E1000_SUCCESS;
3006 +}
3007 +
3008 +/**
3009 + *  e1000_set_d0_lplu_state - Sets low power link up state for D0
3010 + *  @hw: pointer to the HW structure
3011 + *  @active: boolean used to enable/disable lplu
3012 + *
3013 + *  Success returns 0, Failure returns 1
3014 + *
3015 + *  The low power link up (lplu) state is set to the power management level D0
3016 + *  and SmartSpeed is disabled when active is true, else clear lplu for D0
3017 + *  and enable Smartspeed.  LPLU and Smartspeed are mutually exclusive.  LPLU
3018 + *  is used during Dx states where the power conservation is most important.
3019 + *  During driver activity, SmartSpeed should be enabled so performance is
3020 + *  maintained.  This is a function pointer entry point called by drivers.
3021 + **/
3022 +s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
3023 +{
3024 +       if (hw->phy.ops.set_d0_lplu_state)
3025 +               return hw->phy.ops.set_d0_lplu_state(hw, active);
3026 +
3027 +       return E1000_SUCCESS;
3028 +}
3029 +
3030 +/**
3031 + *  e1000_set_d3_lplu_state - Sets low power link up state for D3
3032 + *  @hw: pointer to the HW structure
3033 + *  @active: boolean used to enable/disable lplu
3034 + *
3035 + *  Success returns 0, Failure returns 1
3036 + *
3037 + *  The low power link up (lplu) state is set to the power management level D3
3038 + *  and SmartSpeed is disabled when active is true, else clear lplu for D3
3039 + *  and enable Smartspeed.  LPLU and Smartspeed are mutually exclusive.  LPLU
3040 + *  is used during Dx states where the power conservation is most important.
3041 + *  During driver activity, SmartSpeed should be enabled so performance is
3042 + *  maintained.  This is a function pointer entry point called by drivers.
3043 + **/
3044 +s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
3045 +{
3046 +       if (hw->phy.ops.set_d3_lplu_state)
3047 +               return hw->phy.ops.set_d3_lplu_state(hw, active);
3048 +
3049 +       return E1000_SUCCESS;
3050 +}
3051 +
3052 +/**
3053 + *  e1000_read_mac_addr - Reads MAC address
3054 + *  @hw: pointer to the HW structure
3055 + *
3056 + *  Reads the MAC address out of the adapter and stores it in the HW structure.
3057 + *  Currently no func pointer exists and all implementations are handled in the
3058 + *  generic version of this function.
3059 + **/
3060 +s32 e1000_read_mac_addr(struct e1000_hw *hw)
3061 +{
3062 +       if (hw->mac.ops.read_mac_addr)
3063 +               return hw->mac.ops.read_mac_addr(hw);
3064 +
3065 +       return e1000_read_mac_addr_generic(hw);
3066 +}
3067 +
3068 +/**
3069 + *  e1000_read_pba_num - Read device part number
3070 + *  @hw: pointer to the HW structure
3071 + *  @pba_num: pointer to device part number
3072 + *
3073 + *  Reads the product board assembly (PBA) number from the EEPROM and stores
3074 + *  the value in pba_num.
3075 + *  Currently no func pointer exists and all implementations are handled in the
3076 + *  generic version of this function.
3077 + **/
3078 +s32 e1000_read_pba_num(struct e1000_hw *hw, u32 *pba_num)
3079 +{
3080 +       return e1000_read_pba_num_generic(hw, pba_num);
3081 +}
3082 +
3083 +/**
3084 + *  e1000_validate_nvm_checksum - Verifies NVM (EEPROM) checksum
3085 + *  @hw: pointer to the HW structure
3086 + *
3087 + *  Validates the NVM checksum is correct. This is a function pointer entry
3088 + *  point called by drivers.
3089 + **/
3090 +s32 e1000_validate_nvm_checksum(struct e1000_hw *hw)
3091 +{
3092 +       if (hw->nvm.ops.validate)
3093 +               return hw->nvm.ops.validate(hw);
3094 +
3095 +       return -E1000_ERR_CONFIG;
3096 +}
3097 +
3098 +/**
3099 + *  e1000_update_nvm_checksum - Updates NVM (EEPROM) checksum
3100 + *  @hw: pointer to the HW structure
3101 + *
3102 + *  Updates the NVM checksum. Currently no func pointer exists and all
3103 + *  implementations are handled in the generic version of this function.
3104 + **/
3105 +s32 e1000_update_nvm_checksum(struct e1000_hw *hw)
3106 +{
3107 +       if (hw->nvm.ops.update)
3108 +               return hw->nvm.ops.update(hw);
3109 +
3110 +       return -E1000_ERR_CONFIG;
3111 +}
3112 +
3113 +/**
3114 + *  e1000_reload_nvm - Reloads EEPROM
3115 + *  @hw: pointer to the HW structure
3116 + *
3117 + *  Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
3118 + *  extended control register.
3119 + **/
3120 +void e1000_reload_nvm(struct e1000_hw *hw)
3121 +{
3122 +       if (hw->nvm.ops.reload)
3123 +               hw->nvm.ops.reload(hw);
3124 +}
3125 +
3126 +/**
3127 + *  e1000_read_nvm - Reads NVM (EEPROM)
3128 + *  @hw: pointer to the HW structure
3129 + *  @offset: the word offset to read
3130 + *  @words: number of 16-bit words to read
3131 + *  @data: pointer to the properly sized buffer for the data.
3132 + *
3133 + *  Reads 16-bit chunks of data from the NVM (EEPROM). This is a function
3134 + *  pointer entry point called by drivers.
3135 + **/
3136 +s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
3137 +{
3138 +       if (hw->nvm.ops.read)
3139 +               return hw->nvm.ops.read(hw, offset, words, data);
3140 +
3141 +       return -E1000_ERR_CONFIG;
3142 +}
3143 +
3144 +/**
3145 + *  e1000_write_nvm - Writes to NVM (EEPROM)
3146 + *  @hw: pointer to the HW structure
3147 + *  @offset: the word offset to read
3148 + *  @words: number of 16-bit words to write
3149 + *  @data: pointer to the properly sized buffer for the data.
3150 + *
3151 + *  Writes 16-bit chunks of data to the NVM (EEPROM). This is a function
3152 + *  pointer entry point called by drivers.
3153 + **/
3154 +s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
3155 +{
3156 +       if (hw->nvm.ops.write)
3157 +               return hw->nvm.ops.write(hw, offset, words, data);
3158 +
3159 +       return E1000_SUCCESS;
3160 +}
3161 +
3162 +/**
3163 + *  e1000_write_8bit_ctrl_reg - Writes 8bit Control register
3164 + *  @hw: pointer to the HW structure
3165 + *  @reg: 32bit register offset
3166 + *  @offset: the register to write
3167 + *  @data: the value to write.
3168 + *
3169 + *  Writes the PHY register at offset with the value in data.
3170 + *  This is a function pointer entry point called by drivers.
3171 + **/
3172 +s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, u32 offset,
3173 +                              u8 data)
3174 +{
3175 +       return e1000_write_8bit_ctrl_reg_generic(hw, reg, offset, data);
3176 +}
3177 +
3178 +/**
3179 + * e1000_power_up_phy - Restores link in case of PHY power down
3180 + * @hw: pointer to the HW structure
3181 + *
3182 + * The phy may be powered down to save power, to turn off link when the
3183 + * driver is unloaded, or wake on lan is not enabled (among others).
3184 + **/
3185 +void e1000_power_up_phy(struct e1000_hw *hw)
3186 +{
3187 +       if (hw->phy.ops.power_up)
3188 +               hw->phy.ops.power_up(hw);
3189 +
3190 +       e1000_setup_link(hw);
3191 +}
3192 +
3193 +/**
3194 + * e1000_power_down_phy - Power down PHY
3195 + * @hw: pointer to the HW structure
3196 + *
3197 + * The phy may be powered down to save power, to turn off link when the
3198 + * driver is unloaded, or wake on lan is not enabled (among others).
3199 + **/
3200 +void e1000_power_down_phy(struct e1000_hw *hw)
3201 +{
3202 +       if (hw->phy.ops.power_down)
3203 +               hw->phy.ops.power_down(hw);
3204 +}
3205 +
3206 +/**
3207 + *  e1000_shutdown_fiber_serdes_link - Remove link during power down
3208 + *  @hw: pointer to the HW structure
3209 + *
3210 + *  Shutdown the optics and PCS on driver unload.
3211 + **/
3212 +void e1000_shutdown_fiber_serdes_link(struct e1000_hw *hw)
3213 +{
3214 +       if (hw->mac.ops.shutdown_serdes)
3215 +               hw->mac.ops.shutdown_serdes(hw);
3216 +}
3217 +
3218 Index: linux-2.6.22/drivers/net/igb/e1000_api.h
3219 ===================================================================
3220 --- /dev/null   1970-01-01 00:00:00.000000000 +0000
3221 +++ linux-2.6.22/drivers/net/igb/e1000_api.h    2009-12-18 12:39:22.000000000 -0500
3222 @@ -0,0 +1,147 @@
3223 +/*******************************************************************************
3224 +
3225 +  Intel(R) Gigabit Ethernet Linux driver
3226 +  Copyright(c) 2007-2009 Intel Corporation.
3227 +
3228 +  This program is free software; you can redistribute it and/or modify it
3229 +  under the terms and conditions of the GNU General Public License,
3230 +  version 2, as published by the Free Software Foundation.
3231 +
3232 +  This program is distributed in the hope it will be useful, but WITHOUT
3233 +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
3234 +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
3235 +  more details.
3236 +
3237 +  You should have received a copy of the GNU General Public License along with
3238 +  this program; if not, write to the Free Software Foundation, Inc.,
3239 +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
3240 +
3241 +  The full GNU General Public License is included in this distribution in
3242 +  the file called "COPYING".
3243 +
3244 +  Contact Information:
3245 +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
3246 +  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
3247 +
3248 +*******************************************************************************/
3249 +
3250 +#ifndef _E1000_API_H_
3251 +#define _E1000_API_H_
3252 +
3253 +#include "e1000_hw.h"
3254 +
3255 +extern void    e1000_init_function_pointers_82575(struct e1000_hw *hw);
3256 +extern void    e1000_rx_fifo_flush_82575(struct e1000_hw *hw);
3257 +extern void    e1000_init_function_pointers_vf(struct e1000_hw *hw);
3258 +extern void    e1000_shutdown_fiber_serdes_link(struct e1000_hw *hw);
3259 +
3260 +s32  e1000_set_mac_type(struct e1000_hw *hw);
3261 +s32  e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device);
3262 +s32  e1000_init_mac_params(struct e1000_hw *hw);
3263 +s32  e1000_init_nvm_params(struct e1000_hw *hw);
3264 +s32  e1000_init_phy_params(struct e1000_hw *hw);
3265 +s32  e1000_init_mbx_params(struct e1000_hw *hw);
3266 +s32  e1000_get_bus_info(struct e1000_hw *hw);
3267 +void e1000_clear_vfta(struct e1000_hw *hw);
3268 +void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value);
3269 +s32  e1000_force_mac_fc(struct e1000_hw *hw);
3270 +s32  e1000_check_for_link(struct e1000_hw *hw);
3271 +s32  e1000_reset_hw(struct e1000_hw *hw);
3272 +s32  e1000_init_hw(struct e1000_hw *hw);
3273 +s32  e1000_setup_link(struct e1000_hw *hw);
3274 +s32  e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed,
3275 +                                u16 *duplex);
3276 +s32  e1000_disable_pcie_master(struct e1000_hw *hw);
3277 +void e1000_config_collision_dist(struct e1000_hw *hw);
3278 +void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
3279 +void e1000_mta_set(struct e1000_hw *hw, u32 hash_value);
3280 +u32  e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr);
3281 +void e1000_update_mc_addr_list(struct e1000_hw *hw,
3282 +                               u8 *mc_addr_list, u32 mc_addr_count);
3283 +s32  e1000_setup_led(struct e1000_hw *hw);
3284 +s32  e1000_cleanup_led(struct e1000_hw *hw);
3285 +s32  e1000_check_reset_block(struct e1000_hw *hw);
3286 +s32  e1000_blink_led(struct e1000_hw *hw);
3287 +s32  e1000_led_on(struct e1000_hw *hw);
3288 +s32  e1000_led_off(struct e1000_hw *hw);
3289 +s32 e1000_id_led_init(struct e1000_hw *hw);
3290 +void e1000_reset_adaptive(struct e1000_hw *hw);
3291 +void e1000_update_adaptive(struct e1000_hw *hw);
3292 +s32  e1000_get_cable_length(struct e1000_hw *hw);
3293 +s32  e1000_validate_mdi_setting(struct e1000_hw *hw);
3294 +s32  e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data);
3295 +s32  e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data);
3296 +s32  e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
3297 +                               u32 offset, u8 data);
3298 +s32  e1000_get_phy_info(struct e1000_hw *hw);
3299 +void e1000_release_phy(struct e1000_hw *hw);
3300 +s32  e1000_acquire_phy(struct e1000_hw *hw);
3301 +s32  e1000_phy_hw_reset(struct e1000_hw *hw);
3302 +s32  e1000_phy_commit(struct e1000_hw *hw);
3303 +void e1000_power_up_phy(struct e1000_hw *hw);
3304 +void e1000_power_down_phy(struct e1000_hw *hw);
3305 +s32  e1000_read_mac_addr(struct e1000_hw *hw);
3306 +s32  e1000_read_pba_num(struct e1000_hw *hw, u32 *part_num);
3307 +void e1000_reload_nvm(struct e1000_hw *hw);
3308 +s32  e1000_update_nvm_checksum(struct e1000_hw *hw);
3309 +s32  e1000_validate_nvm_checksum(struct e1000_hw *hw);
3310 +s32  e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
3311 +s32  e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
3312 +s32  e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
3313 +s32  e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words,
3314 +                     u16 *data);
3315 +s32  e1000_wait_autoneg(struct e1000_hw *hw);
3316 +s32  e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active);
3317 +s32  e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active);
3318 +bool e1000_check_mng_mode(struct e1000_hw *hw);
3319 +bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw);
3320 +s32  e1000_mng_enable_host_if(struct e1000_hw *hw);
3321 +s32  e1000_mng_host_if_write(struct e1000_hw *hw,
3322 +                             u8 *buffer, u16 length, u16 offset, u8 *sum);
3323 +s32  e1000_mng_write_cmd_header(struct e1000_hw *hw,
3324 +                                struct e1000_host_mng_command_header *hdr);
3325 +s32  e1000_mng_write_dhcp_info(struct e1000_hw * hw,
3326 +                                    u8 *buffer, u16 length);
3327 +
3328 +/*
3329 + * TBI_ACCEPT macro definition:
3330 + *
3331 + * This macro requires:
3332 + *      adapter = a pointer to struct e1000_hw
3333 + *      status = the 8 bit status field of the Rx descriptor with EOP set
3334 + *      error = the 8 bit error field of the Rx descriptor with EOP set
3335 + *      length = the sum of all the length fields of the Rx descriptors that
3336 + *               make up the current frame
3337 + *      last_byte = the last byte of the frame DMAed by the hardware
3338 + *      max_frame_length = the maximum frame length we want to accept.
3339 + *      min_frame_length = the minimum frame length we want to accept.
3340 + *
3341 + * This macro is a conditional that should be used in the interrupt
3342 + * handler's Rx processing routine when RxErrors have been detected.
3343 + *
3344 + * Typical use:
3345 + *  ...
3346 + *  if (TBI_ACCEPT) {
3347 + *      accept_frame = true;
3348 + *      e1000_tbi_adjust_stats(adapter, MacAddress);
3349 + *      frame_length--;
3350 + *  } else {
3351 + *      accept_frame = false;
3352 + *  }
3353 + *  ...
3354 + */
3355 +
3356 +/* The carrier extension symbol, as received by the NIC. */
3357 +#define CARRIER_EXTENSION   0x0F
3358 +
3359 +#define TBI_ACCEPT(a, status, errors, length, last_byte, min_frame_size, max_frame_size) \
3360 +    (e1000_tbi_sbp_enabled_82543(a) && \
3361 +     (((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \
3362 +     ((last_byte) == CARRIER_EXTENSION) && \
3363 +     (((status) & E1000_RXD_STAT_VP) ? \
3364 +          (((length) > (min_frame_size - VLAN_TAG_SIZE)) && \
3365 +           ((length) <= (max_frame_size + 1))) : \
3366 +          (((length) > min_frame_size) && \
3367 +           ((length) <= (max_frame_size + VLAN_TAG_SIZE + 1)))))
3368 +
3369 +#endif /* _E1000_API_H_ */
3370 Index: linux-2.6.22/drivers/net/igb/e1000_defines.h
3371 ===================================================================
3372 --- /dev/null   1970-01-01 00:00:00.000000000 +0000
3373 +++ linux-2.6.22/drivers/net/igb/e1000_defines.h        2009-12-18 12:39:22.000000000 -0500
3374 @@ -0,0 +1,1513 @@
3375 +/*******************************************************************************
3376 +
3377 +  Intel(R) Gigabit Ethernet Linux driver
3378 +  Copyright(c) 2007-2009 Intel Corporation.
3379 +
3380 +  This program is free software; you can redistribute it and/or modify it
3381 +  under the terms and conditions of the GNU General Public License,
3382 +  version 2, as published by the Free Software Foundation.
3383 +
3384 +  This program is distributed in the hope it will be useful, but WITHOUT
3385 +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
3386 +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
3387 +  more details.
3388 +
3389 +  You should have received a copy of the GNU General Public License along with
3390 +  this program; if not, write to the Free Software Foundation, Inc.,
3391 +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
3392 +
3393 +  The full GNU General Public License is included in this distribution in
3394 +  the file called "COPYING".
3395 +
3396 +  Contact Information:
3397 +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
3398 +  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
3399 +
3400 +*******************************************************************************/
3401 +
3402 +#ifndef _E1000_DEFINES_H_
3403 +#define _E1000_DEFINES_H_
3404 +
3405 +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
3406 +#define REQ_TX_DESCRIPTOR_MULTIPLE  8
3407 +#define REQ_RX_DESCRIPTOR_MULTIPLE  8
3408 +
3409 +/* Definitions for power management and wakeup registers */
3410 +/* Wake Up Control */
3411 +#define E1000_WUC_APME       0x00000001 /* APM Enable */
3412 +#define E1000_WUC_PME_EN     0x00000002 /* PME Enable */
3413 +#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */
3414 +#define E1000_WUC_APMPME     0x00000008 /* Assert PME on APM Wakeup */
3415 +#define E1000_WUC_LSCWE      0x00000010 /* Link Status wake up enable */
3416 +#define E1000_WUC_LSCWO      0x00000020 /* Link Status wake up override */
3417 +#define E1000_WUC_SPM        0x80000000 /* Enable SPM */
3418 +#define E1000_WUC_PHY_WAKE   0x00000100 /* if PHY supports wakeup */
3419 +
3420 +/* Wake Up Filter Control */
3421 +#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
3422 +#define E1000_WUFC_MAG  0x00000002 /* Magic Packet Wakeup Enable */
3423 +#define E1000_WUFC_EX   0x00000004 /* Directed Exact Wakeup Enable */
3424 +#define E1000_WUFC_MC   0x00000008 /* Directed Multicast Wakeup Enable */
3425 +#define E1000_WUFC_BC   0x00000010 /* Broadcast Wakeup Enable */
3426 +#define E1000_WUFC_ARP  0x00000020 /* ARP Request Packet Wakeup Enable */
3427 +#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
3428 +#define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */
3429 +#define E1000_WUFC_IGNORE_TCO   0x00008000 /* Ignore WakeOn TCO packets */
3430 +#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
3431 +#define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */
3432 +#define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */
3433 +#define E1000_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */
3434 +#define E1000_WUFC_FLX4 0x00100000 /* Flexible Filter 4 Enable */
3435 +#define E1000_WUFC_FLX5 0x00200000 /* Flexible Filter 5 Enable */
3436 +#define E1000_WUFC_ALL_FILTERS  0x000F00FF /* Mask for all wakeup filters */
3437 +#define E1000_WUFC_FLX_OFFSET   16 /* Offset to the Flexible Filters bits */
3438 +#define E1000_WUFC_FLX_FILTERS  0x000F0000 /* Mask for the 4 flexible filters */
3439 +/*
3440 + * For 82576 to utilize Extended filter masks in addition to
3441 + * existing (filter) masks
3442 + */
3443 +#define E1000_WUFC_EXT_FLX_FILTERS      0x00300000 /* Ext. FLX filter mask */
3444 +
3445 +/* Wake Up Status */
3446 +#define E1000_WUS_LNKC         E1000_WUFC_LNKC
3447 +#define E1000_WUS_MAG          E1000_WUFC_MAG
3448 +#define E1000_WUS_EX           E1000_WUFC_EX
3449 +#define E1000_WUS_MC           E1000_WUFC_MC
3450 +#define E1000_WUS_BC           E1000_WUFC_BC
3451 +#define E1000_WUS_ARP          E1000_WUFC_ARP
3452 +#define E1000_WUS_IPV4         E1000_WUFC_IPV4
3453 +#define E1000_WUS_IPV6         E1000_WUFC_IPV6
3454 +#define E1000_WUS_FLX0         E1000_WUFC_FLX0
3455 +#define E1000_WUS_FLX1         E1000_WUFC_FLX1
3456 +#define E1000_WUS_FLX2         E1000_WUFC_FLX2
3457 +#define E1000_WUS_FLX3         E1000_WUFC_FLX3
3458 +#define E1000_WUS_FLX_FILTERS  E1000_WUFC_FLX_FILTERS
3459 +
3460 +/* Wake Up Packet Length */
3461 +#define E1000_WUPL_LENGTH_MASK 0x0FFF   /* Only the lower 12 bits are valid */
3462 +
3463 +/* Four Flexible Filters are supported */
3464 +#define E1000_FLEXIBLE_FILTER_COUNT_MAX 4
3465 +/* Two Extended Flexible Filters are supported (82576) */
3466 +#define E1000_EXT_FLEXIBLE_FILTER_COUNT_MAX     2
3467 +#define E1000_FHFT_LENGTH_OFFSET        0xFC /* Length byte in FHFT */
3468 +#define E1000_FHFT_LENGTH_MASK          0x0FF /* Length in lower byte */
3469 +
3470 +/* Each Flexible Filter is at most 128 (0x80) bytes in length */
3471 +#define E1000_FLEXIBLE_FILTER_SIZE_MAX  128
3472 +
3473 +#define E1000_FFLT_SIZE E1000_FLEXIBLE_FILTER_COUNT_MAX
3474 +#define E1000_FFMT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX
3475 +#define E1000_FFVT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX
3476 +
3477 +/* Extended Device Control */
3478 +#define E1000_CTRL_EXT_GPI0_EN   0x00000001 /* Maps SDP4 to GPI0 */
3479 +#define E1000_CTRL_EXT_GPI1_EN   0x00000002 /* Maps SDP5 to GPI1 */
3480 +#define E1000_CTRL_EXT_PHYINT_EN E1000_CTRL_EXT_GPI1_EN
3481 +#define E1000_CTRL_EXT_GPI2_EN   0x00000004 /* Maps SDP6 to GPI2 */
3482 +#define E1000_CTRL_EXT_GPI3_EN   0x00000008 /* Maps SDP7 to GPI3 */
3483 +/* Reserved (bits 4,5) in >= 82575 */
3484 +#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* Value of SW Definable Pin 4 */
3485 +#define E1000_CTRL_EXT_SDP5_DATA 0x00000020 /* Value of SW Definable Pin 5 */
3486 +#define E1000_CTRL_EXT_PHY_INT   E1000_CTRL_EXT_SDP5_DATA
3487 +#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* Value of SW Definable Pin 6 */
3488 +#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Definable Pin 3 */
3489 +/* SDP 4/5 (bits 8,9) are reserved in >= 82575 */
3490 +#define E1000_CTRL_EXT_SDP4_DIR  0x00000100 /* Direction of SDP4 0=in 1=out */
3491 +#define E1000_CTRL_EXT_SDP5_DIR  0x00000200 /* Direction of SDP5 0=in 1=out */
3492 +#define E1000_CTRL_EXT_SDP6_DIR  0x00000400 /* Direction of SDP6 0=in 1=out */
3493 +#define E1000_CTRL_EXT_SDP3_DIR  0x00000800 /* Direction of SDP3 0=in 1=out */
3494 +#define E1000_CTRL_EXT_ASDCHK    0x00001000 /* Initiate an ASD sequence */
3495 +#define E1000_CTRL_EXT_EE_RST    0x00002000 /* Reinitialize from EEPROM */
3496 +#define E1000_CTRL_EXT_IPS       0x00004000 /* Invert Power State */
3497 +/* Physical Func Reset Done Indication */
3498 +#define E1000_CTRL_EXT_PFRSTD    0x00004000
3499 +#define E1000_CTRL_EXT_SPD_BYPS  0x00008000 /* Speed Select Bypass */
3500 +#define E1000_CTRL_EXT_RO_DIS    0x00020000 /* Relaxed Ordering disable */
3501 +#define E1000_CTRL_EXT_DMA_DYN_CLK_EN 0x00080000 /* DMA Dynamic Clock Gating */
3502 +#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
3503 +#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
3504 +#define E1000_CTRL_EXT_LINK_MODE_TBI  0x00C00000
3505 +#define E1000_CTRL_EXT_LINK_MODE_KMRN    0x00000000
3506 +#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES  0x00C00000
3507 +#define E1000_CTRL_EXT_LINK_MODE_PCIX_SERDES  0x00800000
3508 +#define E1000_CTRL_EXT_LINK_MODE_SGMII   0x00800000
3509 +#define E1000_CTRL_EXT_EIAME          0x01000000
3510 +#define E1000_CTRL_EXT_IRCA           0x00000001
3511 +#define E1000_CTRL_EXT_WR_WMARK_MASK  0x03000000
3512 +#define E1000_CTRL_EXT_WR_WMARK_256   0x00000000
3513 +#define E1000_CTRL_EXT_WR_WMARK_320   0x01000000
3514 +#define E1000_CTRL_EXT_WR_WMARK_384   0x02000000
3515 +#define E1000_CTRL_EXT_WR_WMARK_448   0x03000000
3516 +#define E1000_CTRL_EXT_CANC           0x04000000 /* Int delay cancellation */
3517 +#define E1000_CTRL_EXT_DRV_LOAD       0x10000000 /* Driver loaded bit for FW */
3518 +/* IAME enable bit (27) was removed in >= 82575 */
3519 +#define E1000_CTRL_EXT_IAME          0x08000000 /* Int acknowledge Auto-mask */
3520 +#define E1000_CRTL_EXT_PB_PAREN       0x01000000 /* packet buffer parity error
3521 +                                                  * detection enabled */
3522 +#define E1000_CTRL_EXT_DF_PAREN       0x02000000 /* descriptor FIFO parity
3523 +                                                  * error detection enable */
3524 +#define E1000_CTRL_EXT_GHOST_PAREN    0x40000000
3525 +#define E1000_CTRL_EXT_PBA_CLR        0x80000000 /* PBA Clear */
3526 +#define E1000_I2CCMD_REG_ADDR_SHIFT   16
3527 +#define E1000_I2CCMD_REG_ADDR         0x00FF0000
3528 +#define E1000_I2CCMD_PHY_ADDR_SHIFT   24
3529 +#define E1000_I2CCMD_PHY_ADDR         0x07000000
3530 +#define E1000_I2CCMD_OPCODE_READ      0x08000000
3531 +#define E1000_I2CCMD_OPCODE_WRITE     0x00000000
3532 +#define E1000_I2CCMD_RESET            0x10000000
3533 +#define E1000_I2CCMD_READY            0x20000000
3534 +#define E1000_I2CCMD_INTERRUPT_ENA    0x40000000
3535 +#define E1000_I2CCMD_ERROR            0x80000000
3536 +#define E1000_MAX_SGMII_PHY_REG_ADDR  255
3537 +#define E1000_I2CCMD_PHY_TIMEOUT      200
3538 +#define E1000_IVAR_VALID        0x80
3539 +#define E1000_GPIE_NSICR        0x00000001
3540 +#define E1000_GPIE_MSIX_MODE    0x00000010
3541 +#define E1000_GPIE_EIAME        0x40000000
3542 +#define E1000_GPIE_PBA          0x80000000
3543 +
3544 +/* Receive Descriptor bit definitions */
3545 +#define E1000_RXD_STAT_DD       0x01    /* Descriptor Done */
3546 +#define E1000_RXD_STAT_EOP      0x02    /* End of Packet */
3547 +#define E1000_RXD_STAT_IXSM     0x04    /* Ignore checksum */
3548 +#define E1000_RXD_STAT_VP       0x08    /* IEEE VLAN Packet */
3549 +#define E1000_RXD_STAT_UDPCS    0x10    /* UDP xsum calculated */
3550 +#define E1000_RXD_STAT_TCPCS    0x20    /* TCP xsum calculated */
3551 +#define E1000_RXD_STAT_IPCS     0x40    /* IP xsum calculated */
3552 +#define E1000_RXD_STAT_PIF      0x80    /* passed in-exact filter */
3553 +#define E1000_RXD_STAT_CRCV     0x100   /* Speculative CRC Valid */
3554 +#define E1000_RXD_STAT_IPIDV    0x200   /* IP identification valid */
3555 +#define E1000_RXD_STAT_UDPV     0x400   /* Valid UDP checksum */
3556 +#define E1000_RXD_STAT_DYNINT   0x800   /* Pkt caused INT via DYNINT */
3557 +#define E1000_RXD_STAT_ACK      0x8000  /* ACK Packet indication */
3558 +#define E1000_RXD_ERR_CE        0x01    /* CRC Error */
3559 +#define E1000_RXD_ERR_SE        0x02    /* Symbol Error */
3560 +#define E1000_RXD_ERR_SEQ       0x04    /* Sequence Error */
3561 +#define E1000_RXD_ERR_CXE       0x10    /* Carrier Extension Error */
3562 +#define E1000_RXD_ERR_TCPE      0x20    /* TCP/UDP Checksum Error */
3563 +#define E1000_RXD_ERR_IPE       0x40    /* IP Checksum Error */
3564 +#define E1000_RXD_ERR_RXE       0x80    /* Rx Data Error */
3565 +#define E1000_RXD_SPC_VLAN_MASK 0x0FFF  /* VLAN ID is in lower 12 bits */
3566 +#define E1000_RXD_SPC_PRI_MASK  0xE000  /* Priority is in upper 3 bits */
3567 +#define E1000_RXD_SPC_PRI_SHIFT 13
3568 +#define E1000_RXD_SPC_CFI_MASK  0x1000  /* CFI is bit 12 */
3569 +#define E1000_RXD_SPC_CFI_SHIFT 12
3570 +
3571 +#define E1000_RXDEXT_STATERR_CE    0x01000000
3572 +#define E1000_RXDEXT_STATERR_SE    0x02000000
3573 +#define E1000_RXDEXT_STATERR_SEQ   0x04000000
3574 +#define E1000_RXDEXT_STATERR_CXE   0x10000000
3575 +#define E1000_RXDEXT_STATERR_TCPE  0x20000000
3576 +#define E1000_RXDEXT_STATERR_IPE   0x40000000
3577 +#define E1000_RXDEXT_STATERR_RXE   0x80000000
3578 +
3579 +/* mask to determine if packets should be dropped due to frame errors */
3580 +#define E1000_RXD_ERR_FRAME_ERR_MASK ( \
3581 +    E1000_RXD_ERR_CE  |                \
3582 +    E1000_RXD_ERR_SE  |                \
3583 +    E1000_RXD_ERR_SEQ |                \
3584 +    E1000_RXD_ERR_CXE |                \
3585 +    E1000_RXD_ERR_RXE)
3586 +
3587 +/* Same mask, but for extended and packet split descriptors */
3588 +#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
3589 +    E1000_RXDEXT_STATERR_CE  |            \
3590 +    E1000_RXDEXT_STATERR_SE  |            \
3591 +    E1000_RXDEXT_STATERR_SEQ |            \
3592 +    E1000_RXDEXT_STATERR_CXE |            \
3593 +    E1000_RXDEXT_STATERR_RXE)
3594 +
3595 +#define E1000_MRQC_ENABLE_MASK                 0x00000007
3596 +#define E1000_MRQC_ENABLE_RSS_2Q               0x00000001
3597 +#define E1000_MRQC_ENABLE_RSS_INT              0x00000004
3598 +#define E1000_MRQC_RSS_FIELD_MASK              0xFFFF0000
3599 +#define E1000_MRQC_RSS_FIELD_IPV4_TCP          0x00010000
3600 +#define E1000_MRQC_RSS_FIELD_IPV4              0x00020000
3601 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX       0x00040000
3602 +#define E1000_MRQC_RSS_FIELD_IPV6_EX           0x00080000
3603 +#define E1000_MRQC_RSS_FIELD_IPV6              0x00100000
3604 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP          0x00200000
3605 +
3606 +#define E1000_RXDPS_HDRSTAT_HDRSP              0x00008000
3607 +#define E1000_RXDPS_HDRSTAT_HDRLEN_MASK        0x000003FF
3608 +
3609 +/* Management Control */
3610 +#define E1000_MANC_SMBUS_EN      0x00000001 /* SMBus Enabled - RO */
3611 +#define E1000_MANC_ASF_EN        0x00000002 /* ASF Enabled - RO */
3612 +#define E1000_MANC_R_ON_FORCE    0x00000004 /* Reset on Force TCO - RO */
3613 +#define E1000_MANC_RMCP_EN       0x00000100 /* Enable RCMP 026Fh Filtering */
3614 +#define E1000_MANC_0298_EN       0x00000200 /* Enable RCMP 0298h Filtering */
3615 +#define E1000_MANC_IPV4_EN       0x00000400 /* Enable IPv4 */
3616 +#define E1000_MANC_IPV6_EN       0x00000800 /* Enable IPv6 */
3617 +#define E1000_MANC_SNAP_EN       0x00001000 /* Accept LLC/SNAP */
3618 +#define E1000_MANC_ARP_EN        0x00002000 /* Enable ARP Request Filtering */
3619 +/* Enable Neighbor Discovery Filtering */
3620 +#define E1000_MANC_NEIGHBOR_EN   0x00004000
3621 +#define E1000_MANC_ARP_RES_EN    0x00008000 /* Enable ARP response Filtering */
3622 +#define E1000_MANC_TCO_RESET     0x00010000 /* TCO Reset Occurred */
3623 +#define E1000_MANC_RCV_TCO_EN    0x00020000 /* Receive TCO Packets Enabled */
3624 +#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */
3625 +#define E1000_MANC_RCV_ALL       0x00080000 /* Receive All Enabled */
3626 +#define E1000_MANC_BLK_PHY_RST_ON_IDE   0x00040000 /* Block phy resets */
3627 +/* Enable MAC address filtering */
3628 +#define E1000_MANC_EN_MAC_ADDR_FILTER   0x00100000
3629 +/* Enable MNG packets to host memory */
3630 +#define E1000_MANC_EN_MNG2HOST   0x00200000
3631 +/* Enable IP address filtering */
3632 +#define E1000_MANC_EN_IP_ADDR_FILTER    0x00400000
3633 +#define E1000_MANC_EN_XSUM_FILTER   0x00800000 /* Enable checksum filtering */
3634 +#define E1000_MANC_BR_EN            0x01000000 /* Enable broadcast filtering */
3635 +#define E1000_MANC_SMB_REQ       0x01000000 /* SMBus Request */
3636 +#define E1000_MANC_SMB_GNT       0x02000000 /* SMBus Grant */
3637 +#define E1000_MANC_SMB_CLK_IN    0x04000000 /* SMBus Clock In */
3638 +#define E1000_MANC_SMB_DATA_IN   0x08000000 /* SMBus Data In */
3639 +#define E1000_MANC_SMB_DATA_OUT  0x10000000 /* SMBus Data Out */
3640 +#define E1000_MANC_SMB_CLK_OUT   0x20000000 /* SMBus Clock Out */
3641 +
3642 +#define E1000_MANC_SMB_DATA_OUT_SHIFT  28 /* SMBus Data Out Shift */
3643 +#define E1000_MANC_SMB_CLK_OUT_SHIFT   29 /* SMBus Clock Out Shift */
3644 +
3645 +/* Receive Control */
3646 +#define E1000_RCTL_RST            0x00000001    /* Software reset */
3647 +#define E1000_RCTL_EN             0x00000002    /* enable */
3648 +#define E1000_RCTL_SBP            0x00000004    /* store bad packet */
3649 +#define E1000_RCTL_UPE            0x00000008    /* unicast promisc enable */
3650 +#define E1000_RCTL_MPE            0x00000010    /* multicast promisc enable */
3651 +#define E1000_RCTL_LPE            0x00000020    /* long packet enable */
3652 +#define E1000_RCTL_LBM_NO         0x00000000    /* no loopback mode */
3653 +#define E1000_RCTL_LBM_MAC        0x00000040    /* MAC loopback mode */
3654 +#define E1000_RCTL_LBM_SLP        0x00000080    /* serial link loopback mode */
3655 +#define E1000_RCTL_LBM_TCVR       0x000000C0    /* tcvr loopback mode */
3656 +#define E1000_RCTL_DTYP_MASK      0x00000C00    /* Descriptor type mask */
3657 +#define E1000_RCTL_DTYP_PS        0x00000400    /* Packet Split descriptor */
3658 +#define E1000_RCTL_RDMTS_HALF     0x00000000    /* rx desc min thresh size */
3659 +#define E1000_RCTL_RDMTS_QUAT     0x00000100    /* rx desc min thresh size */
3660 +#define E1000_RCTL_RDMTS_EIGTH    0x00000200    /* rx desc min thresh size */
3661 +#define E1000_RCTL_MO_SHIFT       12            /* multicast offset shift */
3662 +#define E1000_RCTL_MO_0           0x00000000    /* multicast offset 11:0 */
3663 +#define E1000_RCTL_MO_1           0x00001000    /* multicast offset 12:1 */
3664 +#define E1000_RCTL_MO_2           0x00002000    /* multicast offset 13:2 */
3665 +#define E1000_RCTL_MO_3           0x00003000    /* multicast offset 15:4 */
3666 +#define E1000_RCTL_MDR            0x00004000    /* multicast desc ring 0 */
3667 +#define E1000_RCTL_BAM            0x00008000    /* broadcast enable */
3668 +/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */
3669 +#define E1000_RCTL_SZ_2048        0x00000000    /* rx buffer size 2048 */
3670 +#define E1000_RCTL_SZ_1024        0x00010000    /* rx buffer size 1024 */
3671 +#define E1000_RCTL_SZ_512         0x00020000    /* rx buffer size 512 */
3672 +#define E1000_RCTL_SZ_256         0x00030000    /* rx buffer size 256 */
3673 +/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */
3674 +#define E1000_RCTL_SZ_16384       0x00010000    /* rx buffer size 16384 */
3675 +#define E1000_RCTL_SZ_8192        0x00020000    /* rx buffer size 8192 */
3676 +#define E1000_RCTL_SZ_4096        0x00030000    /* rx buffer size 4096 */
3677 +#define E1000_RCTL_VFE            0x00040000    /* vlan filter enable */
3678 +#define E1000_RCTL_CFIEN          0x00080000    /* canonical form enable */
3679 +#define E1000_RCTL_CFI            0x00100000    /* canonical form indicator */
3680 +#define E1000_RCTL_DPF            0x00400000    /* discard pause frames */
3681 +#define E1000_RCTL_PMCF           0x00800000    /* pass MAC control frames */
3682 +#define E1000_RCTL_BSEX           0x02000000    /* Buffer size extension */
3683 +#define E1000_RCTL_SECRC          0x04000000    /* Strip Ethernet CRC */
3684 +#define E1000_RCTL_FLXBUF_MASK    0x78000000    /* Flexible buffer size */
3685 +#define E1000_RCTL_FLXBUF_SHIFT   27            /* Flexible buffer shift */
3686 +
3687 +/*
3688 + * Use byte values for the following shift parameters
3689 + * Usage:
3690 + *     psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
3691 + *                  E1000_PSRCTL_BSIZE0_MASK) |
3692 + *                ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
3693 + *                  E1000_PSRCTL_BSIZE1_MASK) |
3694 + *                ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
3695 + *                  E1000_PSRCTL_BSIZE2_MASK) |
3696 + *                ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) |;
3697 + *                  E1000_PSRCTL_BSIZE3_MASK))
3698 + * where value0 = [128..16256],  default=256
3699 + *       value1 = [1024..64512], default=4096
3700 + *       value2 = [0..64512],    default=4096
3701 + *       value3 = [0..64512],    default=0
3702 + */
3703 +
3704 +#define E1000_PSRCTL_BSIZE0_MASK   0x0000007F
3705 +#define E1000_PSRCTL_BSIZE1_MASK   0x00003F00
3706 +#define E1000_PSRCTL_BSIZE2_MASK   0x003F0000
3707 +#define E1000_PSRCTL_BSIZE3_MASK   0x3F000000
3708 +
3709 +#define E1000_PSRCTL_BSIZE0_SHIFT  7            /* Shift _right_ 7 */
3710 +#define E1000_PSRCTL_BSIZE1_SHIFT  2            /* Shift _right_ 2 */
3711 +#define E1000_PSRCTL_BSIZE2_SHIFT  6            /* Shift _left_ 6 */
3712 +#define E1000_PSRCTL_BSIZE3_SHIFT 14            /* Shift _left_ 14 */
3713 +
3714 +/* SWFW_SYNC Definitions */
3715 +#define E1000_SWFW_EEP_SM   0x01
3716 +#define E1000_SWFW_PHY0_SM  0x02
3717 +#define E1000_SWFW_PHY1_SM  0x04
3718 +#define E1000_SWFW_CSR_SM   0x08
3719 +
3720 +/* FACTPS Definitions */
3721 +#define E1000_FACTPS_LFS    0x40000000  /* LAN Function Select */
3722 +/* Device Control */
3723 +#define E1000_CTRL_FD       0x00000001  /* Full duplex.0=half; 1=full */
3724 +#define E1000_CTRL_BEM      0x00000002  /* Endian Mode.0=little,1=big */
3725 +#define E1000_CTRL_PRIOR    0x00000004  /* Priority on PCI. 0=rx,1=fair */
3726 +#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master reqs */
3727 +#define E1000_CTRL_LRST     0x00000008  /* Link reset. 0=normal,1=reset */
3728 +#define E1000_CTRL_TME      0x00000010  /* Test mode. 0=normal,1=test */
3729 +#define E1000_CTRL_SLE      0x00000020  /* Serial Link on 0=dis,1=en */
3730 +#define E1000_CTRL_ASDE     0x00000020  /* Auto-speed detect enable */
3731 +#define E1000_CTRL_SLU      0x00000040  /* Set link up (Force Link) */
3732 +#define E1000_CTRL_ILOS     0x00000080  /* Invert Loss-Of Signal */
3733 +#define E1000_CTRL_SPD_SEL  0x00000300  /* Speed Select Mask */
3734 +#define E1000_CTRL_SPD_10   0x00000000  /* Force 10Mb */
3735 +#define E1000_CTRL_SPD_100  0x00000100  /* Force 100Mb */
3736 +#define E1000_CTRL_SPD_1000 0x00000200  /* Force 1Gb */
3737 +#define E1000_CTRL_BEM32    0x00000400  /* Big Endian 32 mode */
3738 +#define E1000_CTRL_FRCSPD   0x00000800  /* Force Speed */
3739 +#define E1000_CTRL_FRCDPX   0x00001000  /* Force Duplex */
3740 +#define E1000_CTRL_D_UD_EN  0x00002000  /* Dock/Undock enable */
3741 +#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock
3742 +                                             * indication in SDP[0] */
3743 +#define E1000_CTRL_FORCE_PHY_RESET 0x00008000 /* Reset both PHY ports, through
3744 +                                               * PHYRST_N pin */
3745 +#define E1000_CTRL_EXT_LINK_EN 0x00010000 /* enable link status from external
3746 +                                           * LINK_0 and LINK_1 pins */
3747 +#define E1000_CTRL_SWDPIN0  0x00040000  /* SWDPIN 0 value */
3748 +#define E1000_CTRL_SWDPIN1  0x00080000  /* SWDPIN 1 value */
3749 +#define E1000_CTRL_SWDPIN2  0x00100000  /* SWDPIN 2 value */
3750 +#define E1000_CTRL_ADVD3WUC 0x00100000  /* D3 WUC */
3751 +#define E1000_CTRL_SWDPIN3  0x00200000  /* SWDPIN 3 value */
3752 +#define E1000_CTRL_SWDPIO0  0x00400000  /* SWDPIN 0 Input or output */
3753 +#define E1000_CTRL_SWDPIO1  0x00800000  /* SWDPIN 1 input or output */
3754 +#define E1000_CTRL_SWDPIO2  0x01000000  /* SWDPIN 2 input or output */
3755 +#define E1000_CTRL_SWDPIO3  0x02000000  /* SWDPIN 3 input or output */
3756 +#define E1000_CTRL_RST      0x04000000  /* Global reset */
3757 +#define E1000_CTRL_RFCE     0x08000000  /* Receive Flow Control enable */
3758 +#define E1000_CTRL_TFCE     0x10000000  /* Transmit flow control enable */
3759 +#define E1000_CTRL_RTE      0x20000000  /* Routing tag enable */
3760 +#define E1000_CTRL_VME      0x40000000  /* IEEE VLAN mode enable */
3761 +#define E1000_CTRL_PHY_RST  0x80000000  /* PHY Reset */
3762 +#define E1000_CTRL_SW2FW_INT 0x02000000 /* Initiate an interrupt to ME */
3763 +#define E1000_CTRL_I2C_ENA  0x02000000  /* I2C enable */
3764 +
3765 +/*
3766 + * Bit definitions for the Management Data IO (MDIO) and Management Data
3767 + * Clock (MDC) pins in the Device Control Register.
3768 + */
3769 +#define E1000_CTRL_PHY_RESET_DIR  E1000_CTRL_SWDPIO0
3770 +#define E1000_CTRL_PHY_RESET      E1000_CTRL_SWDPIN0
3771 +#define E1000_CTRL_MDIO_DIR       E1000_CTRL_SWDPIO2
3772 +#define E1000_CTRL_MDIO           E1000_CTRL_SWDPIN2
3773 +#define E1000_CTRL_MDC_DIR        E1000_CTRL_SWDPIO3
3774 +#define E1000_CTRL_MDC            E1000_CTRL_SWDPIN3
3775 +#define E1000_CTRL_PHY_RESET_DIR4 E1000_CTRL_EXT_SDP4_DIR
3776 +#define E1000_CTRL_PHY_RESET4     E1000_CTRL_EXT_SDP4_DATA
3777 +
3778 +#define E1000_CONNSW_ENRGSRC             0x4
3779 +#define E1000_PCS_CFG_PCS_EN             8
3780 +#define E1000_PCS_LCTL_FLV_LINK_UP       1
3781 +#define E1000_PCS_LCTL_FSV_10            0
3782 +#define E1000_PCS_LCTL_FSV_100           2
3783 +#define E1000_PCS_LCTL_FSV_1000          4
3784 +#define E1000_PCS_LCTL_FDV_FULL          8
3785 +#define E1000_PCS_LCTL_FSD               0x10
3786 +#define E1000_PCS_LCTL_FORCE_LINK        0x20
3787 +#define E1000_PCS_LCTL_LOW_LINK_LATCH    0x40
3788 +#define E1000_PCS_LCTL_FORCE_FCTRL       0x80
3789 +#define E1000_PCS_LCTL_AN_ENABLE         0x10000
3790 +#define E1000_PCS_LCTL_AN_RESTART        0x20000
3791 +#define E1000_PCS_LCTL_AN_TIMEOUT        0x40000
3792 +#define E1000_PCS_LCTL_AN_SGMII_BYPASS   0x80000
3793 +#define E1000_PCS_LCTL_AN_SGMII_TRIGGER  0x100000
3794 +#define E1000_PCS_LCTL_FAST_LINK_TIMER   0x1000000
3795 +#define E1000_PCS_LCTL_LINK_OK_FIX       0x2000000
3796 +#define E1000_PCS_LCTL_CRS_ON_NI         0x4000000
3797 +#define E1000_ENABLE_SERDES_LOOPBACK     0x0410
3798 +
3799 +#define E1000_PCS_LSTS_LINK_OK           1
3800 +#define E1000_PCS_LSTS_SPEED_10          0
3801 +#define E1000_PCS_LSTS_SPEED_100         2
3802 +#define E1000_PCS_LSTS_SPEED_1000        4
3803 +#define E1000_PCS_LSTS_DUPLEX_FULL       8
3804 +#define E1000_PCS_LSTS_SYNK_OK           0x10
3805 +#define E1000_PCS_LSTS_AN_COMPLETE       0x10000
3806 +#define E1000_PCS_LSTS_AN_PAGE_RX        0x20000
3807 +#define E1000_PCS_LSTS_AN_TIMED_OUT      0x40000
3808 +#define E1000_PCS_LSTS_AN_REMOTE_FAULT   0x80000
3809 +#define E1000_PCS_LSTS_AN_ERROR_RWS      0x100000
3810 +
3811 +/* Device Status */
3812 +#define E1000_STATUS_FD         0x00000001      /* Full duplex.0=half,1=full */
3813 +#define E1000_STATUS_LU         0x00000002      /* Link up.0=no,1=link */
3814 +#define E1000_STATUS_FUNC_MASK  0x0000000C      /* PCI Function Mask */
3815 +#define E1000_STATUS_FUNC_SHIFT 2
3816 +#define E1000_STATUS_FUNC_0     0x00000000      /* Function 0 */
3817 +#define E1000_STATUS_FUNC_1     0x00000004      /* Function 1 */
3818 +#define E1000_STATUS_TXOFF      0x00000010      /* transmission paused */
3819 +#define E1000_STATUS_TBIMODE    0x00000020      /* TBI mode */
3820 +#define E1000_STATUS_SPEED_MASK 0x000000C0
3821 +#define E1000_STATUS_SPEED_10   0x00000000      /* Speed 10Mb/s */
3822 +#define E1000_STATUS_SPEED_100  0x00000040      /* Speed 100Mb/s */
3823 +#define E1000_STATUS_SPEED_1000 0x00000080      /* Speed 1000Mb/s */
3824 +#define E1000_STATUS_LAN_INIT_DONE 0x00000200  /* Lan Init Completion by NVM */
3825 +#define E1000_STATUS_ASDV       0x00000300      /* Auto speed detect value */
3826 +#define E1000_STATUS_PHYRA      0x00000400      /* PHY Reset Asserted */
3827 +#define E1000_STATUS_DOCK_CI    0x00000800      /* Change in Dock/Undock state.
3828 +                                                 * Clear on write '0'. */
3829 +#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Master request status */
3830 +#define E1000_STATUS_MTXCKOK    0x00000400      /* MTX clock running OK */
3831 +#define E1000_STATUS_PCI66      0x00000800      /* In 66Mhz slot */
3832 +#define E1000_STATUS_BUS64      0x00001000      /* In 64 bit slot */
3833 +#define E1000_STATUS_PCIX_MODE  0x00002000      /* PCI-X mode */
3834 +#define E1000_STATUS_PCIX_SPEED 0x0000C000      /* PCI-X bus speed */
3835 +#define E1000_STATUS_BMC_SKU_0  0x00100000 /* BMC USB redirect disabled */
3836 +#define E1000_STATUS_BMC_SKU_1  0x00200000 /* BMC SRAM disabled */
3837 +#define E1000_STATUS_BMC_SKU_2  0x00400000 /* BMC SDRAM disabled */
3838 +#define E1000_STATUS_BMC_CRYPTO 0x00800000 /* BMC crypto disabled */
3839 +#define E1000_STATUS_BMC_LITE   0x01000000 /* BMC external code execution
3840 +                                            * disabled */
3841 +#define E1000_STATUS_RGMII_ENABLE 0x02000000 /* RGMII disabled */
3842 +#define E1000_STATUS_FUSE_8       0x04000000
3843 +#define E1000_STATUS_FUSE_9       0x08000000
3844 +#define E1000_STATUS_SERDES0_DIS  0x10000000 /* SERDES disabled on port 0 */
3845 +#define E1000_STATUS_SERDES1_DIS  0x20000000 /* SERDES disabled on port 1 */
3846 +
3847 +/* Constants used to interpret the masked PCI-X bus speed. */
3848 +#define E1000_STATUS_PCIX_SPEED_66  0x00000000 /* PCI-X bus speed 50-66 MHz */
3849 +#define E1000_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus speed 66-100 MHz */
3850 +#define E1000_STATUS_PCIX_SPEED_133 0x00008000 /* PCI-X speed 100-133 MHz */
3851 +
3852 +#define SPEED_10    10
3853 +#define SPEED_100   100
3854 +#define SPEED_1000  1000
3855 +#define HALF_DUPLEX 1
3856 +#define FULL_DUPLEX 2
3857 +
3858 +#define PHY_FORCE_TIME   20
3859 +
3860 +#define ADVERTISE_10_HALF                 0x0001
3861 +#define ADVERTISE_10_FULL                 0x0002
3862 +#define ADVERTISE_100_HALF                0x0004
3863 +#define ADVERTISE_100_FULL                0x0008
3864 +#define ADVERTISE_1000_HALF               0x0010 /* Not used, just FYI */
3865 +#define ADVERTISE_1000_FULL               0x0020
3866 +
3867 +/* 1000/H is not supported, nor spec-compliant. */
3868 +#define E1000_ALL_SPEED_DUPLEX  (ADVERTISE_10_HALF |   ADVERTISE_10_FULL | \
3869 +                                ADVERTISE_100_HALF |  ADVERTISE_100_FULL | \
3870 +                                                     ADVERTISE_1000_FULL)
3871 +#define E1000_ALL_NOT_GIG       (ADVERTISE_10_HALF |   ADVERTISE_10_FULL | \
3872 +                                ADVERTISE_100_HALF |  ADVERTISE_100_FULL)
3873 +#define E1000_ALL_100_SPEED    (ADVERTISE_100_HALF |  ADVERTISE_100_FULL)
3874 +#define E1000_ALL_10_SPEED      (ADVERTISE_10_HALF |   ADVERTISE_10_FULL)
3875 +#define E1000_ALL_FULL_DUPLEX   (ADVERTISE_10_FULL |  ADVERTISE_100_FULL | \
3876 +                                                     ADVERTISE_1000_FULL)
3877 +#define E1000_ALL_HALF_DUPLEX   (ADVERTISE_10_HALF |  ADVERTISE_100_HALF)
3878 +
3879 +#define AUTONEG_ADVERTISE_SPEED_DEFAULT   E1000_ALL_SPEED_DUPLEX
3880 +
3881 +/* LED Control */
3882 +#define E1000_LEDCTL_LED0_MODE_MASK       0x0000000F
3883 +#define E1000_LEDCTL_LED0_MODE_SHIFT      0
3884 +#define E1000_LEDCTL_LED0_BLINK_RATE      0x00000020
3885 +#define E1000_LEDCTL_LED0_IVRT            0x00000040
3886 +#define E1000_LEDCTL_LED0_BLINK           0x00000080
3887 +#define E1000_LEDCTL_LED1_MODE_MASK       0x00000F00
3888 +#define E1000_LEDCTL_LED1_MODE_SHIFT      8
3889 +#define E1000_LEDCTL_LED1_BLINK_RATE      0x00002000
3890 +#define E1000_LEDCTL_LED1_IVRT            0x00004000
3891 +#define E1000_LEDCTL_LED1_BLINK           0x00008000
3892 +#define E1000_LEDCTL_LED2_MODE_MASK       0x000F0000
3893 +#define E1000_LEDCTL_LED2_MODE_SHIFT      16
3894 +#define E1000_LEDCTL_LED2_BLINK_RATE      0x00200000
3895 +#define E1000_LEDCTL_LED2_IVRT            0x00400000
3896 +#define E1000_LEDCTL_LED2_BLINK           0x00800000
3897 +#define E1000_LEDCTL_LED3_MODE_MASK       0x0F000000
3898 +#define E1000_LEDCTL_LED3_MODE_SHIFT      24
3899 +#define E1000_LEDCTL_LED3_BLINK_RATE      0x20000000
3900 +#define E1000_LEDCTL_LED3_IVRT            0x40000000
3901 +#define E1000_LEDCTL_LED3_BLINK           0x80000000
3902 +
3903 +#define E1000_LEDCTL_MODE_LINK_10_1000  0x0
3904 +#define E1000_LEDCTL_MODE_LINK_100_1000 0x1
3905 +#define E1000_LEDCTL_MODE_LINK_UP       0x2
3906 +#define E1000_LEDCTL_MODE_ACTIVITY      0x3
3907 +#define E1000_LEDCTL_MODE_LINK_ACTIVITY 0x4
3908 +#define E1000_LEDCTL_MODE_LINK_10       0x5
3909 +#define E1000_LEDCTL_MODE_LINK_100      0x6
3910 +#define E1000_LEDCTL_MODE_LINK_1000     0x7
3911 +#define E1000_LEDCTL_MODE_PCIX_MODE     0x8
3912 +#define E1000_LEDCTL_MODE_FULL_DUPLEX   0x9
3913 +#define E1000_LEDCTL_MODE_COLLISION     0xA
3914 +#define E1000_LEDCTL_MODE_BUS_SPEED     0xB
3915 +#define E1000_LEDCTL_MODE_BUS_SIZE      0xC
3916 +#define E1000_LEDCTL_MODE_PAUSED        0xD
3917 +#define E1000_LEDCTL_MODE_LED_ON        0xE
3918 +#define E1000_LEDCTL_MODE_LED_OFF       0xF
3919 +
3920 +/* Transmit Descriptor bit definitions */
3921 +#define E1000_TXD_DTYP_D     0x00100000 /* Data Descriptor */
3922 +#define E1000_TXD_DTYP_C     0x00000000 /* Context Descriptor */
3923 +#define E1000_TXD_POPTS_SHIFT 8         /* POPTS shift */
3924 +#define E1000_TXD_POPTS_IXSM 0x01       /* Insert IP checksum */
3925 +#define E1000_TXD_POPTS_TXSM 0x02       /* Insert TCP/UDP checksum */
3926 +#define E1000_TXD_CMD_EOP    0x01000000 /* End of Packet */
3927 +#define E1000_TXD_CMD_IFCS   0x02000000 /* Insert FCS (Ethernet CRC) */
3928 +#define E1000_TXD_CMD_IC     0x04000000 /* Insert Checksum */
3929 +#define E1000_TXD_CMD_RS     0x08000000 /* Report Status */
3930 +#define E1000_TXD_CMD_RPS    0x10000000 /* Report Packet Sent */
3931 +#define E1000_TXD_CMD_DEXT   0x20000000 /* Descriptor extension (0 = legacy) */
3932 +#define E1000_TXD_CMD_VLE    0x40000000 /* Add VLAN tag */
3933 +#define E1000_TXD_CMD_IDE    0x80000000 /* Enable Tidv register */
3934 +#define E1000_TXD_STAT_DD    0x00000001 /* Descriptor Done */
3935 +#define E1000_TXD_STAT_EC    0x00000002 /* Excess Collisions */
3936 +#define E1000_TXD_STAT_LC    0x00000004 /* Late Collisions */
3937 +#define E1000_TXD_STAT_TU    0x00000008 /* Transmit underrun */
3938 +#define E1000_TXD_CMD_TCP    0x01000000 /* TCP packet */
3939 +#define E1000_TXD_CMD_IP     0x02000000 /* IP packet */
3940 +#define E1000_TXD_CMD_TSE    0x04000000 /* TCP Seg enable */
3941 +#define E1000_TXD_STAT_TC    0x00000004 /* Tx Underrun */
3942 +/* Extended desc bits for Linksec and timesync */
3943 +
3944 +/* Transmit Control */
3945 +#define E1000_TCTL_RST    0x00000001    /* software reset */
3946 +#define E1000_TCTL_EN     0x00000002    /* enable tx */
3947 +#define E1000_TCTL_BCE    0x00000004    /* busy check enable */
3948 +#define E1000_TCTL_PSP    0x00000008    /* pad short packets */
3949 +#define E1000_TCTL_CT     0x00000ff0    /* collision threshold */
3950 +#define E1000_TCTL_COLD   0x003ff000    /* collision distance */
3951 +#define E1000_TCTL_SWXOFF 0x00400000    /* SW Xoff transmission */
3952 +#define E1000_TCTL_PBE    0x00800000    /* Packet Burst Enable */
3953 +#define E1000_TCTL_RTLC   0x01000000    /* Re-transmit on late collision */
3954 +#define E1000_TCTL_NRTU   0x02000000    /* No Re-transmit on underrun */
3955 +#define E1000_TCTL_MULR   0x10000000    /* Multiple request support */
3956 +
3957 +/* Transmit Arbitration Count */
3958 +#define E1000_TARC0_ENABLE     0x00000400   /* Enable Tx Queue 0 */
3959 +
3960 +/* SerDes Control */
3961 +#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
3962 +
3963 +/* Receive Checksum Control */
3964 +#define E1000_RXCSUM_PCSS_MASK 0x000000FF   /* Packet Checksum Start */
3965 +#define E1000_RXCSUM_IPOFL     0x00000100   /* IPv4 checksum offload */
3966 +#define E1000_RXCSUM_TUOFL     0x00000200   /* TCP / UDP checksum offload */
3967 +#define E1000_RXCSUM_IPV6OFL   0x00000400   /* IPv6 checksum offload */
3968 +#define E1000_RXCSUM_CRCOFL    0x00000800   /* CRC32 offload enable */
3969 +#define E1000_RXCSUM_IPPCSE    0x00001000   /* IP payload checksum enable */
3970 +#define E1000_RXCSUM_PCSD      0x00002000   /* packet checksum disabled */
3971 +
3972 +/* Header split receive */
3973 +#define E1000_RFCTL_ISCSI_DIS           0x00000001
3974 +#define E1000_RFCTL_ISCSI_DWC_MASK      0x0000003E
3975 +#define E1000_RFCTL_ISCSI_DWC_SHIFT     1
3976 +#define E1000_RFCTL_NFSW_DIS            0x00000040
3977 +#define E1000_RFCTL_NFSR_DIS            0x00000080
3978 +#define E1000_RFCTL_NFS_VER_MASK        0x00000300
3979 +#define E1000_RFCTL_NFS_VER_SHIFT       8
3980 +#define E1000_RFCTL_IPV6_DIS            0x00000400
3981 +#define E1000_RFCTL_IPV6_XSUM_DIS       0x00000800
3982 +#define E1000_RFCTL_ACK_DIS             0x00001000
3983 +#define E1000_RFCTL_ACKD_DIS            0x00002000
3984 +#define E1000_RFCTL_IPFRSP_DIS          0x00004000
3985 +#define E1000_RFCTL_EXTEN               0x00008000
3986 +#define E1000_RFCTL_IPV6_EX_DIS         0x00010000
3987 +#define E1000_RFCTL_NEW_IPV6_EXT_DIS    0x00020000
3988 +#define E1000_RFCTL_LEF                 0x00040000
3989 +
3990 +/* Collision related configuration parameters */
3991 +#define E1000_COLLISION_THRESHOLD       15
3992 +#define E1000_CT_SHIFT                  4
3993 +#define E1000_COLLISION_DISTANCE        63
3994 +#define E1000_COLD_SHIFT                12
3995 +
3996 +/* Default values for the transmit IPG register */
3997 +#define DEFAULT_82543_TIPG_IPGT_FIBER  9
3998 +#define DEFAULT_82543_TIPG_IPGT_COPPER 8
3999 +
4000 +#define E1000_TIPG_IPGT_MASK  0x000003FF
4001 +#define E1000_TIPG_IPGR1_MASK 0x000FFC00
4002 +#define E1000_TIPG_IPGR2_MASK 0x3FF00000
4003 +
4004 +#define DEFAULT_82543_TIPG_IPGR1 8
4005 +#define E1000_TIPG_IPGR1_SHIFT  10
4006 +
4007 +#define DEFAULT_82543_TIPG_IPGR2 6
4008 +#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7
4009 +#define E1000_TIPG_IPGR2_SHIFT  20
4010 +
4011 +/* Ethertype field values */
4012 +#define ETHERNET_IEEE_VLAN_TYPE 0x8100  /* 802.3ac packet */
4013 +
4014 +#define ETHERNET_FCS_SIZE       4
4015 +#define MAX_JUMBO_FRAME_SIZE    0x3F00
4016 +
4017 +/* Extended Configuration Control and Size */
4018 +#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP      0x00000020
4019 +#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE       0x00000001
4020 +#define E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE       0x00000008
4021 +#define E1000_EXTCNF_CTRL_SWFLAG                 0x00000020
4022 +#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK   0x00FF0000
4023 +#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT          16
4024 +#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK   0x0FFF0000
4025 +#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT          16
4026 +
4027 +#define E1000_PHY_CTRL_SPD_EN             0x00000001
4028 +#define E1000_PHY_CTRL_D0A_LPLU           0x00000002
4029 +#define E1000_PHY_CTRL_NOND0A_LPLU        0x00000004
4030 +#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008
4031 +#define E1000_PHY_CTRL_GBE_DISABLE        0x00000040
4032 +
4033 +#define E1000_KABGTXD_BGSQLBIAS           0x00050000
4034 +
4035 +/* PBA constants */
4036 +#define E1000_PBA_6K  0x0006    /* 6KB */
4037 +#define E1000_PBA_8K  0x0008    /* 8KB */
4038 +#define E1000_PBA_10K 0x000A    /* 10KB */
4039 +#define E1000_PBA_12K 0x000C    /* 12KB */
4040 +#define E1000_PBA_14K 0x000E    /* 14KB */
4041 +#define E1000_PBA_16K 0x0010    /* 16KB */
4042 +#define E1000_PBA_18K 0x0012
4043 +#define E1000_PBA_20K 0x0014
4044 +#define E1000_PBA_22K 0x0016
4045 +#define E1000_PBA_24K 0x0018
4046 +#define E1000_PBA_26K 0x001A
4047 +#define E1000_PBA_30K 0x001E
4048 +#define E1000_PBA_32K 0x0020
4049 +#define E1000_PBA_34K 0x0022
4050 +#define E1000_PBA_35K 0x0023
4051 +#define E1000_PBA_38K 0x0026
4052 +#define E1000_PBA_40K 0x0028
4053 +#define E1000_PBA_48K 0x0030    /* 48KB */
4054 +#define E1000_PBA_64K 0x0040    /* 64KB */
4055 +
4056 +#define E1000_PBS_16K E1000_PBA_16K
4057 +#define E1000_PBS_24K E1000_PBA_24K
4058 +
4059 +#define IFS_MAX       80
4060 +#define IFS_MIN       40
4061 +#define IFS_RATIO     4
4062 +#define IFS_STEP      10
4063 +#define MIN_NUM_XMITS 1000
4064 +
4065 +/* SW Semaphore Register */
4066 +#define E1000_SWSM_SMBI         0x00000001 /* Driver Semaphore bit */
4067 +#define E1000_SWSM_SWESMBI      0x00000002 /* FW Semaphore bit */
4068 +#define E1000_SWSM_WMNG         0x00000004 /* Wake MNG Clock */
4069 +#define E1000_SWSM_DRV_LOAD     0x00000008 /* Driver Loaded Bit */
4070 +
4071 +#define E1000_SWSM2_LOCK        0x00000002 /* Secondary driver semaphore bit */
4072 +
4073 +/* Interrupt Cause Read */
4074 +#define E1000_ICR_TXDW          0x00000001 /* Transmit desc written back */
4075 +#define E1000_ICR_TXQE          0x00000002 /* Transmit Queue empty */
4076 +#define E1000_ICR_LSC           0x00000004 /* Link Status Change */
4077 +#define E1000_ICR_RXSEQ         0x00000008 /* rx sequence error */
4078 +#define E1000_ICR_RXDMT0        0x00000010 /* rx desc min. threshold (0) */
4079 +#define E1000_ICR_RXO           0x00000040 /* rx overrun */
4080 +#define E1000_ICR_RXT0          0x00000080 /* rx timer intr (ring 0) */
4081 +#define E1000_ICR_VMMB          0x00000100 /* VM MB event */
4082 +#define E1000_ICR_MDAC          0x00000200 /* MDIO access complete */
4083 +#define E1000_ICR_RXCFG         0x00000400 /* Rx /c/ ordered set */
4084 +#define E1000_ICR_GPI_EN0       0x00000800 /* GP Int 0 */
4085 +#define E1000_ICR_GPI_EN1       0x00001000 /* GP Int 1 */
4086 +#define E1000_ICR_GPI_EN2       0x00002000 /* GP Int 2 */
4087 +#define E1000_ICR_GPI_EN3       0x00004000 /* GP Int 3 */
4088 +#define E1000_ICR_TXD_LOW       0x00008000
4089 +#define E1000_ICR_SRPD          0x00010000
4090 +#define E1000_ICR_ACK           0x00020000 /* Receive Ack frame */
4091 +#define E1000_ICR_MNG           0x00040000 /* Manageability event */
4092 +#define E1000_ICR_DOCK          0x00080000 /* Dock/Undock */
4093 +#define E1000_ICR_INT_ASSERTED  0x80000000 /* If this bit asserted, the driver
4094 +                                            * should claim the interrupt */
4095 +#define E1000_ICR_RXD_FIFO_PAR0 0x00100000 /* Q0 Rx desc FIFO parity error */
4096 +#define E1000_ICR_TXD_FIFO_PAR0 0x00200000 /* Q0 Tx desc FIFO parity error */
4097 +#define E1000_ICR_HOST_ARB_PAR 0x00400000 /* host arb read buffer parity err */
4098 +#define E1000_ICR_PB_PAR        0x00800000 /* packet buffer parity error */
4099 +#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* Q1 Rx desc FIFO parity error */
4100 +#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* Q1 Tx desc FIFO parity error */
4101 +#define E1000_ICR_ALL_PARITY    0x03F00000 /* all parity error bits */
4102 +#define E1000_ICR_DSW           0x00000020 /* FW changed the status of DISSW
4103 +                                            * bit in the FWSM */
4104 +#define E1000_ICR_PHYINT        0x00001000 /* LAN connected device generates
4105 +                                            * an interrupt */
4106 +#define E1000_ICR_DOUTSYNC      0x10000000 /* NIC DMA out of sync */
4107 +#define E1000_ICR_EPRST         0x00100000 /* ME hardware reset occurs */
4108 +
4109 +
4110 +/* Extended Interrupt Cause Read */
4111 +#define E1000_EICR_RX_QUEUE0    0x00000001 /* Rx Queue 0 Interrupt */
4112 +#define E1000_EICR_RX_QUEUE1    0x00000002 /* Rx Queue 1 Interrupt */
4113 +#define E1000_EICR_RX_QUEUE2    0x00000004 /* Rx Queue 2 Interrupt */
4114 +#define E1000_EICR_RX_QUEUE3    0x00000008 /* Rx Queue 3 Interrupt */
4115 +#define E1000_EICR_TX_QUEUE0    0x00000100 /* Tx Queue 0 Interrupt */
4116 +#define E1000_EICR_TX_QUEUE1    0x00000200 /* Tx Queue 1 Interrupt */
4117 +#define E1000_EICR_TX_QUEUE2    0x00000400 /* Tx Queue 2 Interrupt */
4118 +#define E1000_EICR_TX_QUEUE3    0x00000800 /* Tx Queue 3 Interrupt */
4119 +#define E1000_EICR_TCP_TIMER    0x40000000 /* TCP Timer */
4120 +#define E1000_EICR_OTHER        0x80000000 /* Interrupt Cause Active */
4121 +/* TCP Timer */
4122 +#define E1000_TCPTIMER_KS       0x00000100 /* KickStart */
4123 +#define E1000_TCPTIMER_COUNT_ENABLE       0x00000200 /* Count Enable */
4124 +#define E1000_TCPTIMER_COUNT_FINISH       0x00000400 /* Count finish */
4125 +#define E1000_TCPTIMER_LOOP     0x00000800 /* Loop */
4126 +
4127 +/*
4128 + * This defines the bits that are set in the Interrupt Mask
4129 + * Set/Read Register.  Each bit is documented below:
4130 + *   o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
4131 + *   o RXSEQ  = Receive Sequence Error
4132 + */
4133 +#define POLL_IMS_ENABLE_MASK ( \
4134 +    E1000_IMS_RXDMT0 |    \
4135 +    E1000_IMS_RXSEQ)
4136 +
4137 +/*
4138 + * This defines the bits that are set in the Interrupt Mask
4139 + * Set/Read Register.  Each bit is documented below:
4140 + *   o RXT0   = Receiver Timer Interrupt (ring 0)
4141 + *   o TXDW   = Transmit Descriptor Written Back
4142 + *   o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
4143 + *   o RXSEQ  = Receive Sequence Error
4144 + *   o LSC    = Link Status Change
4145 + */
4146 +#define IMS_ENABLE_MASK ( \
4147 +    E1000_IMS_RXT0   |    \
4148 +    E1000_IMS_TXDW   |    \
4149 +    E1000_IMS_RXDMT0 |    \
4150 +    E1000_IMS_RXSEQ  |    \
4151 +    E1000_IMS_LSC)
4152 +
4153 +/* Interrupt Mask Set */
4154 +#define E1000_IMS_TXDW      E1000_ICR_TXDW      /* Tx desc written back */
4155 +#define E1000_IMS_TXQE      E1000_ICR_TXQE      /* Transmit Queue empty */
4156 +#define E1000_IMS_LSC       E1000_ICR_LSC       /* Link Status Change */
4157 +#define E1000_IMS_VMMB      E1000_ICR_VMMB      /* Mail box activity */
4158 +#define E1000_IMS_RXSEQ     E1000_ICR_RXSEQ     /* rx sequence error */
4159 +#define E1000_IMS_RXDMT0    E1000_ICR_RXDMT0    /* rx desc min. threshold */
4160 +#define E1000_IMS_RXO       E1000_ICR_RXO       /* rx overrun */
4161 +#define E1000_IMS_RXT0      E1000_ICR_RXT0      /* rx timer intr */
4162 +#define E1000_IMS_MDAC      E1000_ICR_MDAC      /* MDIO access complete */
4163 +#define E1000_IMS_RXCFG     E1000_ICR_RXCFG     /* Rx /c/ ordered set */
4164 +#define E1000_IMS_GPI_EN0   E1000_ICR_GPI_EN0   /* GP Int 0 */
4165 +#define E1000_IMS_GPI_EN1   E1000_ICR_GPI_EN1   /* GP Int 1 */
4166 +#define E1000_IMS_GPI_EN2   E1000_ICR_GPI_EN2   /* GP Int 2 */
4167 +#define E1000_IMS_GPI_EN3   E1000_ICR_GPI_EN3   /* GP Int 3 */
4168 +#define E1000_IMS_TXD_LOW   E1000_ICR_TXD_LOW
4169 +#define E1000_IMS_SRPD      E1000_ICR_SRPD
4170 +#define E1000_IMS_ACK       E1000_ICR_ACK       /* Receive Ack frame */
4171 +#define E1000_IMS_MNG       E1000_ICR_MNG       /* Manageability event */
4172 +#define E1000_IMS_DOCK      E1000_ICR_DOCK      /* Dock/Undock */
4173 +#define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* Q0 Rx desc FIFO
4174 +                                                         * parity error */
4175 +#define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* Q0 Tx desc FIFO
4176 +                                                         * parity error */
4177 +#define E1000_IMS_HOST_ARB_PAR  E1000_ICR_HOST_ARB_PAR  /* host arb read buffer
4178 +                                                         * parity error */
4179 +#define E1000_IMS_PB_PAR        E1000_ICR_PB_PAR        /* packet buffer parity
4180 +                                                         * error */
4181 +#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* Q1 Rx desc FIFO
4182 +                                                         * parity error */
4183 +#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* Q1 Tx desc FIFO
4184 +                                                         * parity error */
4185 +#define E1000_IMS_DSW       E1000_ICR_DSW
4186 +#define E1000_IMS_PHYINT    E1000_ICR_PHYINT
4187 +#define E1000_IMS_DOUTSYNC  E1000_ICR_DOUTSYNC /* NIC DMA out of sync */
4188 +#define E1000_IMS_EPRST     E1000_ICR_EPRST
4189 +
4190 +/* Extended Interrupt Mask Set */
4191 +#define E1000_EIMS_RX_QUEUE0    E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */
4192 +#define E1000_EIMS_RX_QUEUE1    E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */
4193 +#define E1000_EIMS_RX_QUEUE2    E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */
4194 +#define E1000_EIMS_RX_QUEUE3    E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */
4195 +#define E1000_EIMS_TX_QUEUE0    E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */
4196 +#define E1000_EIMS_TX_QUEUE1    E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */
4197 +#define E1000_EIMS_TX_QUEUE2    E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */
4198 +#define E1000_EIMS_TX_QUEUE3    E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */
4199 +#define E1000_EIMS_TCP_TIMER    E1000_EICR_TCP_TIMER /* TCP Timer */
4200 +#define E1000_EIMS_OTHER        E1000_EICR_OTHER   /* Interrupt Cause Active */
4201 +
4202 +/* Interrupt Cause Set */
4203 +#define E1000_ICS_TXDW      E1000_ICR_TXDW      /* Tx desc written back */
4204 +#define E1000_ICS_TXQE      E1000_ICR_TXQE      /* Transmit Queue empty */
4205 +#define E1000_ICS_LSC       E1000_ICR_LSC       /* Link Status Change */
4206 +#define E1000_ICS_RXSEQ     E1000_ICR_RXSEQ     /* rx sequence error */
4207 +#define E1000_ICS_RXDMT0    E1000_ICR_RXDMT0    /* rx desc min. threshold */
4208 +#define E1000_ICS_RXO       E1000_ICR_RXO       /* rx overrun */
4209 +#define E1000_ICS_RXT0      E1000_ICR_RXT0      /* rx timer intr */
4210 +#define E1000_ICS_MDAC      E1000_ICR_MDAC      /* MDIO access complete */
4211 +#define E1000_ICS_RXCFG     E1000_ICR_RXCFG     /* Rx /c/ ordered set */
4212 +#define E1000_ICS_GPI_EN0   E1000_ICR_GPI_EN0   /* GP Int 0 */
4213 +#define E1000_ICS_GPI_EN1   E1000_ICR_GPI_EN1   /* GP Int 1 */
4214 +#define E1000_ICS_GPI_EN2   E1000_ICR_GPI_EN2   /* GP Int 2 */
4215 +#define E1000_ICS_GPI_EN3   E1000_ICR_GPI_EN3   /* GP Int 3 */
4216 +#define E1000_ICS_TXD_LOW   E1000_ICR_TXD_LOW
4217 +#define E1000_ICS_SRPD      E1000_ICR_SRPD
4218 +#define E1000_ICS_ACK       E1000_ICR_ACK       /* Receive Ack frame */
4219 +#define E1000_ICS_MNG       E1000_ICR_MNG       /* Manageability event */
4220 +#define E1000_ICS_DOCK      E1000_ICR_DOCK      /* Dock/Undock */
4221 +#define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* Q0 Rx desc FIFO
4222 +                                                         * parity error */
4223 +#define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* Q0 Tx desc FIFO
4224 +                                                         * parity error */
4225 +#define E1000_ICS_HOST_ARB_PAR  E1000_ICR_HOST_ARB_PAR  /* host arb read buffer
4226 +                                                         * parity error */
4227 +#define E1000_ICS_PB_PAR        E1000_ICR_PB_PAR        /* packet buffer parity
4228 +                                                         * error */
4229 +#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* Q1 Rx desc FIFO
4230 +                                                         * parity error */
4231 +#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* Q1 Tx desc FIFO
4232 +                                                         * parity error */
4233 +#define E1000_ICS_DSW       E1000_ICR_DSW
4234 +#define E1000_ICS_DOUTSYNC  E1000_ICR_DOUTSYNC /* NIC DMA out of sync */
4235 +#define E1000_ICS_PHYINT    E1000_ICR_PHYINT
4236 +#define E1000_ICS_EPRST     E1000_ICR_EPRST
4237 +
4238 +/* Extended Interrupt Cause Set */
4239 +#define E1000_EICS_RX_QUEUE0    E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */
4240 +#define E1000_EICS_RX_QUEUE1    E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */
4241 +#define E1000_EICS_RX_QUEUE2    E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */
4242 +#define E1000_EICS_RX_QUEUE3    E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */
4243 +#define E1000_EICS_TX_QUEUE0    E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */
4244 +#define E1000_EICS_TX_QUEUE1    E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */
4245 +#define E1000_EICS_TX_QUEUE2    E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */
4246 +#define E1000_EICS_TX_QUEUE3    E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */
4247 +#define E1000_EICS_TCP_TIMER    E1000_EICR_TCP_TIMER /* TCP Timer */
4248 +#define E1000_EICS_OTHER        E1000_EICR_OTHER   /* Interrupt Cause Active */
4249 +
4250 +#define E1000_EITR_ITR_INT_MASK 0x0000FFFF
4251 +
4252 +/* Transmit Descriptor Control */
4253 +#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */
4254 +#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */
4255 +#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */
4256 +#define E1000_TXDCTL_GRAN    0x01000000 /* TXDCTL Granularity */
4257 +#define E1000_TXDCTL_LWTHRESH 0xFE000000 /* TXDCTL Low Threshold */
4258 +#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
4259 +#define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */
4260 +/* Enable the counting of descriptors still to be processed. */
4261 +#define E1000_TXDCTL_COUNT_DESC 0x00400000
4262 +
4263 +/* Flow Control Constants */
4264 +#define FLOW_CONTROL_ADDRESS_LOW  0x00C28001
4265 +#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
4266 +#define FLOW_CONTROL_TYPE         0x8808
4267 +
4268 +/* 802.1q VLAN Packet Size */
4269 +#define VLAN_TAG_SIZE              4    /* 802.3ac tag (not DMA'd) */
4270 +#define E1000_VLAN_FILTER_TBL_SIZE 128  /* VLAN Filter Table (4096 bits) */
4271 +
4272 +/* Receive Address */
4273 +/*
4274 + * Number of high/low register pairs in the RAR. The RAR (Receive Address
4275 + * Registers) holds the directed and multicast addresses that we monitor.
4276 + * Technically, we have 16 spots.  However, we reserve one of these spots
4277 + * (RAR[15]) for our directed address used by controllers with
4278 + * manageability enabled, allowing us room for 15 multicast addresses.
4279 + */
4280 +#define E1000_RAR_ENTRIES     15
4281 +#define E1000_RAH_AV  0x80000000        /* Receive descriptor valid */
4282 +#define E1000_RAL_MAC_ADDR_LEN 4
4283 +#define E1000_RAH_MAC_ADDR_LEN 2
4284 +#define E1000_RAH_POOL_MASK 0x03FC0000
4285 +#define E1000_RAH_POOL_1 0x00040000
4286 +
4287 +/* Error Codes */
4288 +#define E1000_SUCCESS      0
4289 +#define E1000_ERR_NVM      1
4290 +#define E1000_ERR_PHY      2
4291 +#define E1000_ERR_CONFIG   3
4292 +#define E1000_ERR_PARAM    4
4293 +#define E1000_ERR_MAC_INIT 5
4294 +#define E1000_ERR_PHY_TYPE 6
4295 +#define E1000_ERR_RESET   9
4296 +#define E1000_ERR_MASTER_REQUESTS_PENDING 10
4297 +#define E1000_ERR_HOST_INTERFACE_COMMAND 11
4298 +#define E1000_BLK_PHY_RESET   12
4299 +#define E1000_ERR_SWFW_SYNC 13
4300 +#define E1000_NOT_IMPLEMENTED 14
4301 +#define E1000_ERR_MBX      15
4302 +
4303 +/* Loop limit on how long we wait for auto-negotiation to complete */
4304 +#define FIBER_LINK_UP_LIMIT               50
4305 +#define COPPER_LINK_UP_LIMIT              10
4306 +#define PHY_AUTO_NEG_LIMIT                45
4307 +#define PHY_FORCE_LIMIT                   20
4308 +/* Number of 100-microsecond intervals we wait for PCI Express master disable */
4309 +#define MASTER_DISABLE_TIMEOUT      800
4310 +/* Number of milliseconds we wait for PHY configuration done after MAC reset */
4311 +#define PHY_CFG_TIMEOUT             100
4312 +/* Number of 2-millisecond intervals we wait for acquiring MDIO ownership. */
4313 +#define MDIO_OWNERSHIP_TIMEOUT      10
4314 +/* Number of milliseconds for NVM auto read done after MAC reset. */
4315 +#define AUTO_READ_DONE_TIMEOUT      10
4316 +
4317 +/* Flow Control */
4318 +#define E1000_FCRTH_RTH  0x0000FFF8     /* Mask Bits[15:3] for RTH */
4319 +#define E1000_FCRTH_XFCE 0x80000000     /* External Flow Control Enable */
4320 +#define E1000_FCRTL_RTL  0x0000FFF8     /* Mask Bits[15:3] for RTL */
4321 +#define E1000_FCRTL_XONE 0x80000000     /* Enable XON frame transmission */
4322 +
4323 +/* Transmit Configuration Word */
4324 +#define E1000_TXCW_FD         0x00000020        /* TXCW full duplex */
4325 +#define E1000_TXCW_HD         0x00000040        /* TXCW half duplex */
4326 +#define E1000_TXCW_PAUSE      0x00000080        /* TXCW sym pause request */
4327 +#define E1000_TXCW_ASM_DIR    0x00000100        /* TXCW astm pause direction */
4328 +#define E1000_TXCW_PAUSE_MASK 0x00000180        /* TXCW pause request mask */
4329 +#define E1000_TXCW_RF         0x00003000        /* TXCW remote fault */
4330 +#define E1000_TXCW_NP         0x00008000        /* TXCW next page */
4331 +#define E1000_TXCW_CW         0x0000ffff        /* TxConfigWord mask */
4332 +#define E1000_TXCW_TXC        0x40000000        /* Transmit Config control */
4333 +#define E1000_TXCW_ANE        0x80000000        /* Auto-neg enable */
4334 +
4335 +/* Receive Configuration Word */
4336 +#define E1000_RXCW_CW         0x0000ffff        /* RxConfigWord mask */
4337 +#define E1000_RXCW_NC         0x04000000        /* Receive config no carrier */
4338 +#define E1000_RXCW_IV         0x08000000        /* Receive config invalid */
4339 +#define E1000_RXCW_CC         0x10000000        /* Receive config change */
4340 +#define E1000_RXCW_C          0x20000000        /* Receive config */
4341 +#define E1000_RXCW_SYNCH      0x40000000        /* Receive config synch */
4342 +#define E1000_RXCW_ANC        0x80000000        /* Auto-neg complete */
4343 +
4344 +#define E1000_TSYNCTXCTL_VALID    0x00000001 /* tx timestamp valid */
4345 +#define E1000_TSYNCTXCTL_ENABLED  0x00000010 /* enable tx timestamping */
4346 +
4347 +#define E1000_TSYNCRXCTL_VALID      0x00000001 /* rx timestamp valid */
4348 +#define E1000_TSYNCRXCTL_TYPE_MASK  0x0000000E /* rx type mask */
4349 +#define E1000_TSYNCRXCTL_TYPE_L2_V2       0x00
4350 +#define E1000_TSYNCRXCTL_TYPE_L4_V1       0x02
4351 +#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2    0x04
4352 +#define E1000_TSYNCRXCTL_TYPE_ALL         0x08
4353 +#define E1000_TSYNCRXCTL_TYPE_EVENT_V2    0x0A
4354 +#define E1000_TSYNCRXCTL_ENABLED    0x00000010 /* enable rx timestamping */
4355 +
4356 +#define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK   0x000000FF
4357 +#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE       0x00
4358 +#define E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE  0x01
4359 +#define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE   0x02
4360 +#define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03
4361 +#define E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04
4362 +
4363 +#define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK               0x00000F00
4364 +#define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE                 0x0000
4365 +#define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE            0x0100
4366 +#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE       0x0200
4367 +#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE      0x0300
4368 +#define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE             0x0800
4369 +#define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE           0x0900
4370 +#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE  0x0A00
4371 +#define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE             0x0B00
4372 +#define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE           0x0C00
4373 +#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE           0x0D00
4374 +
4375 +#define E1000_TIMINCA_16NS_SHIFT 24
4376 +
4377 +/* PCI Express Control */
4378 +#define E1000_GCR_RXD_NO_SNOOP          0x00000001
4379 +#define E1000_GCR_RXDSCW_NO_SNOOP       0x00000002
4380 +#define E1000_GCR_RXDSCR_NO_SNOOP       0x00000004
4381 +#define E1000_GCR_TXD_NO_SNOOP          0x00000008
4382 +#define E1000_GCR_TXDSCW_NO_SNOOP       0x00000010
4383 +#define E1000_GCR_TXDSCR_NO_SNOOP       0x00000020
4384 +#define E1000_GCR_CMPL_TMOUT_MASK       0x0000F000
4385 +#define E1000_GCR_CMPL_TMOUT_10ms       0x00001000
4386 +#define E1000_GCR_CMPL_TMOUT_RESEND     0x00010000
4387 +#define E1000_GCR_CAP_VER2              0x00040000
4388 +
4389 +#define PCIE_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP         | \
4390 +                           E1000_GCR_RXDSCW_NO_SNOOP      | \
4391 +                           E1000_GCR_RXDSCR_NO_SNOOP      | \
4392 +                           E1000_GCR_TXD_NO_SNOOP         | \
4393 +                           E1000_GCR_TXDSCW_NO_SNOOP      | \
4394 +                           E1000_GCR_TXDSCR_NO_SNOOP)
4395 +
4396 +/* PHY Control Register */
4397 +#define MII_CR_SPEED_SELECT_MSB 0x0040  /* bits 6,13: 10=1000, 01=100, 00=10 */
4398 +#define MII_CR_COLL_TEST_ENABLE 0x0080  /* Collision test enable */
4399 +#define MII_CR_FULL_DUPLEX      0x0100  /* FDX =1, half duplex =0 */
4400 +#define MII_CR_RESTART_AUTO_NEG 0x0200  /* Restart auto negotiation */
4401 +#define MII_CR_ISOLATE          0x0400  /* Isolate PHY from MII */
4402 +#define MII_CR_POWER_DOWN       0x0800  /* Power down */
4403 +#define MII_CR_AUTO_NEG_EN      0x1000  /* Auto Neg Enable */
4404 +#define MII_CR_SPEED_SELECT_LSB 0x2000  /* bits 6,13: 10=1000, 01=100, 00=10 */
4405 +#define MII_CR_LOOPBACK         0x4000  /* 0 = normal, 1 = loopback */
4406 +#define MII_CR_RESET            0x8000  /* 0 = normal, 1 = PHY reset */
4407 +#define MII_CR_SPEED_1000       0x0040
4408 +#define MII_CR_SPEED_100        0x2000
4409 +#define MII_CR_SPEED_10         0x0000
4410 +
4411 +/* PHY Status Register */
4412 +#define MII_SR_EXTENDED_CAPS     0x0001 /* Extended register capabilities */
4413 +#define MII_SR_JABBER_DETECT     0x0002 /* Jabber Detected */
4414 +#define MII_SR_LINK_STATUS       0x0004 /* Link Status 1 = link */
4415 +#define MII_SR_AUTONEG_CAPS      0x0008 /* Auto Neg Capable */
4416 +#define MII_SR_REMOTE_FAULT      0x0010 /* Remote Fault Detect */
4417 +#define MII_SR_AUTONEG_COMPLETE  0x0020 /* Auto Neg Complete */
4418 +#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
4419 +#define MII_SR_EXTENDED_STATUS   0x0100 /* Ext. status info in Reg 0x0F */
4420 +#define MII_SR_100T2_HD_CAPS     0x0200 /* 100T2 Half Duplex Capable */
4421 +#define MII_SR_100T2_FD_CAPS     0x0400 /* 100T2 Full Duplex Capable */
4422 +#define MII_SR_10T_HD_CAPS       0x0800 /* 10T   Half Duplex Capable */
4423 +#define MII_SR_10T_FD_CAPS       0x1000 /* 10T   Full Duplex Capable */
4424 +#define MII_SR_100X_HD_CAPS      0x2000 /* 100X  Half Duplex Capable */
4425 +#define MII_SR_100X_FD_CAPS      0x4000 /* 100X  Full Duplex Capable */
4426 +#define MII_SR_100T4_CAPS        0x8000 /* 100T4 Capable */
4427 +
4428 +/* Autoneg Advertisement Register */
4429 +#define NWAY_AR_SELECTOR_FIELD   0x0001   /* indicates IEEE 802.3 CSMA/CD */
4430 +#define NWAY_AR_10T_HD_CAPS      0x0020   /* 10T   Half Duplex Capable */
4431 +#define NWAY_AR_10T_FD_CAPS      0x0040   /* 10T   Full Duplex Capable */
4432 +#define NWAY_AR_100TX_HD_CAPS    0x0080   /* 100TX Half Duplex Capable */
4433 +#define NWAY_AR_100TX_FD_CAPS    0x0100   /* 100TX Full Duplex Capable */
4434 +#define NWAY_AR_100T4_CAPS       0x0200   /* 100T4 Capable */
4435 +#define NWAY_AR_PAUSE            0x0400   /* Pause operation desired */
4436 +#define NWAY_AR_ASM_DIR          0x0800   /* Asymmetric Pause Direction bit */
4437 +#define NWAY_AR_REMOTE_FAULT     0x2000   /* Remote Fault detected */
4438 +#define NWAY_AR_NEXT_PAGE        0x8000   /* Next Page ability supported */
4439 +
4440 +/* Link Partner Ability Register (Base Page) */
4441 +#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */
4442 +#define NWAY_LPAR_10T_HD_CAPS    0x0020 /* LP is 10T   Half Duplex Capable */
4443 +#define NWAY_LPAR_10T_FD_CAPS    0x0040 /* LP is 10T   Full Duplex Capable */
4444 +#define NWAY_LPAR_100TX_HD_CAPS  0x0080 /* LP is 100TX Half Duplex Capable */
4445 +#define NWAY_LPAR_100TX_FD_CAPS  0x0100 /* LP is 100TX Full Duplex Capable */
4446 +#define NWAY_LPAR_100T4_CAPS     0x0200 /* LP is 100T4 Capable */
4447 +#define NWAY_LPAR_PAUSE          0x0400 /* LP Pause operation desired */
4448 +#define NWAY_LPAR_ASM_DIR        0x0800 /* LP Asymmetric Pause Direction bit */
4449 +#define NWAY_LPAR_REMOTE_FAULT   0x2000 /* LP has detected Remote Fault */
4450 +#define NWAY_LPAR_ACKNOWLEDGE    0x4000 /* LP has rx'd link code word */
4451 +#define NWAY_LPAR_NEXT_PAGE      0x8000 /* Next Page ability supported */
4452 +
4453 +/* Autoneg Expansion Register */
4454 +#define NWAY_ER_LP_NWAY_CAPS      0x0001 /* LP has Auto Neg Capability */
4455 +#define NWAY_ER_PAGE_RXD          0x0002 /* LP has sent a new page */
4456 +#define NWAY_ER_NEXT_PAGE_CAPS    0x0004 /* Local device is Next Page able */
4457 +#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP is Next Page able */
4458 +#define NWAY_ER_PAR_DETECT_FAULT  0x0010 /* Parallel Detection Fault */
4459 +
4460 +/* 1000BASE-T Control Register */
4461 +#define CR_1000T_ASYM_PAUSE      0x0080 /* Advertise asymmetric pause bit */
4462 +#define CR_1000T_HD_CAPS         0x0100 /* Advertise 1000T HD capability */
4463 +#define CR_1000T_FD_CAPS         0x0200 /* Advertise 1000T FD capability  */
4464 +#define CR_1000T_REPEATER_DTE    0x0400 /* 1=Repeater/switch device port */
4465 +                                        /* 0=DTE device */
4466 +#define CR_1000T_MS_VALUE        0x0800 /* 1=Configure PHY as Master */
4467 +                                        /* 0=Configure PHY as Slave */
4468 +#define CR_1000T_MS_ENABLE      0x1000 /* 1=Master/Slave manual config value */
4469 +                                        /* 0=Automatic Master/Slave config */
4470 +#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */
4471 +#define CR_1000T_TEST_MODE_1     0x2000 /* Transmit Waveform test */
4472 +#define CR_1000T_TEST_MODE_2     0x4000 /* Master Transmit Jitter test */
4473 +#define CR_1000T_TEST_MODE_3     0x6000 /* Slave Transmit Jitter test */
4474 +#define CR_1000T_TEST_MODE_4     0x8000 /* Transmitter Distortion test */
4475 +
4476 +/* 1000BASE-T Status Register */
4477 +#define SR_1000T_IDLE_ERROR_CNT   0x00FF /* Num idle errors since last read */
4478 +#define SR_1000T_ASYM_PAUSE_DIR  0x0100 /* LP asymmetric pause direction bit */
4479 +#define SR_1000T_LP_HD_CAPS       0x0400 /* LP is 1000T HD capable */
4480 +#define SR_1000T_LP_FD_CAPS       0x0800 /* LP is 1000T FD capable */
4481 +#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
4482 +#define SR_1000T_LOCAL_RX_STATUS  0x2000 /* Local receiver OK */
4483 +#define SR_1000T_MS_CONFIG_RES    0x4000 /* 1=Local Tx is Master, 0=Slave */
4484 +#define SR_1000T_MS_CONFIG_FAULT  0x8000 /* Master/Slave config fault */
4485 +
4486 +#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5
4487 +
4488 +/* PHY 1000 MII Register/Bit Definitions */
4489 +/* PHY Registers defined by IEEE */
4490 +#define PHY_CONTROL      0x00 /* Control Register */
4491 +#define PHY_STATUS       0x01 /* Status Register */
4492 +#define PHY_ID1          0x02 /* Phy Id Reg (word 1) */
4493 +#define PHY_ID2          0x03 /* Phy Id Reg (word 2) */
4494 +#define PHY_AUTONEG_ADV  0x04 /* Autoneg Advertisement */
4495 +#define PHY_LP_ABILITY   0x05 /* Link Partner Ability (Base Page) */
4496 +#define PHY_AUTONEG_EXP  0x06 /* Autoneg Expansion Reg */
4497 +#define PHY_NEXT_PAGE_TX 0x07 /* Next Page Tx */
4498 +#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */
4499 +#define PHY_1000T_CTRL   0x09 /* 1000Base-T Control Reg */
4500 +#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
4501 +#define PHY_EXT_STATUS   0x0F /* Extended Status Reg */
4502 +
4503 +#define PHY_CONTROL_LB   0x4000 /* PHY Loopback bit */
4504 +
4505 +/* NVM Control */
4506 +#define E1000_EECD_SK        0x00000001 /* NVM Clock */
4507 +#define E1000_EECD_CS        0x00000002 /* NVM Chip Select */
4508 +#define E1000_EECD_DI        0x00000004 /* NVM Data In */
4509 +#define E1000_EECD_DO        0x00000008 /* NVM Data Out */
4510 +#define E1000_EECD_FWE_MASK  0x00000030
4511 +#define E1000_EECD_FWE_DIS   0x00000010 /* Disable FLASH writes */
4512 +#define E1000_EECD_FWE_EN    0x00000020 /* Enable FLASH writes */
4513 +#define E1000_EECD_FWE_SHIFT 4
4514 +#define E1000_EECD_REQ       0x00000040 /* NVM Access Request */
4515 +#define E1000_EECD_GNT       0x00000080 /* NVM Access Grant */
4516 +#define E1000_EECD_PRES      0x00000100 /* NVM Present */
4517 +#define E1000_EECD_SIZE      0x00000200 /* NVM Size (0=64 word 1=256 word) */
4518 +/* NVM Addressing bits based on type 0=small, 1=large */
4519 +#define E1000_EECD_ADDR_BITS 0x00000400
4520 +#define E1000_EECD_TYPE      0x00002000 /* NVM Type (1-SPI, 0-Microwire) */
4521 +#define E1000_NVM_GRANT_ATTEMPTS   1000 /* NVM # attempts to gain grant */
4522 +#define E1000_EECD_AUTO_RD          0x00000200  /* NVM Auto Read done */
4523 +#define E1000_EECD_SIZE_EX_MASK     0x00007800  /* NVM Size */
4524 +#define E1000_EECD_SIZE_EX_SHIFT     11
4525 +#define E1000_EECD_NVADDS    0x00018000 /* NVM Address Size */
4526 +#define E1000_EECD_SELSHAD   0x00020000 /* Select Shadow RAM */
4527 +#define E1000_EECD_INITSRAM  0x00040000 /* Initialize Shadow RAM */
4528 +#define E1000_EECD_FLUPD     0x00080000 /* Update FLASH */
4529 +#define E1000_EECD_AUPDEN    0x00100000 /* Enable Autonomous FLASH update */
4530 +#define E1000_EECD_SHADV     0x00200000 /* Shadow RAM Data Valid */
4531 +#define E1000_EECD_SEC1VAL   0x00400000 /* Sector One Valid */
4532 +#define E1000_EECD_SECVAL_SHIFT      22
4533 +#define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES)
4534 +
4535 +#define E1000_NVM_SWDPIN0   0x0001   /* SWDPIN 0 NVM Value */
4536 +#define E1000_NVM_LED_LOGIC 0x0020   /* Led Logic Word */
4537 +#define E1000_NVM_RW_REG_DATA   16  /* Offset to data in NVM read/write regs */
4538 +#define E1000_NVM_RW_REG_DONE   2    /* Offset to READ/WRITE done bit */
4539 +#define E1000_NVM_RW_REG_START  1    /* Start operation */
4540 +#define E1000_NVM_RW_ADDR_SHIFT 2    /* Shift to the address bits */
4541 +#define E1000_NVM_POLL_WRITE    1    /* Flag for polling for write complete */
4542 +#define E1000_NVM_POLL_READ     0    /* Flag for polling for read complete */
4543 +#define E1000_FLASH_UPDATES  2000
4544 +
4545 +/* NVM Word Offsets */
4546 +#define NVM_COMPAT                 0x0003
4547 +#define NVM_ID_LED_SETTINGS        0x0004
4548 +#define NVM_VERSION                0x0005
4549 +#define NVM_SERDES_AMPLITUDE       0x0006 /* SERDES output amplitude */
4550 +#define NVM_PHY_CLASS_WORD         0x0007
4551 +#define NVM_INIT_CONTROL1_REG      0x000A
4552 +#define NVM_INIT_CONTROL2_REG      0x000F
4553 +#define NVM_SWDEF_PINS_CTRL_PORT_1 0x0010
4554 +#define NVM_INIT_CONTROL3_PORT_B   0x0014
4555 +#define NVM_INIT_3GIO_3            0x001A
4556 +#define NVM_SWDEF_PINS_CTRL_PORT_0 0x0020
4557 +#define NVM_INIT_CONTROL3_PORT_A   0x0024
4558 +#define NVM_CFG                    0x0012
4559 +#define NVM_FLASH_VERSION          0x0032
4560 +#define NVM_ALT_MAC_ADDR_PTR       0x0037
4561 +#define NVM_CHECKSUM_REG           0x003F
4562 +
4563 +#define E1000_NVM_CFG_DONE_PORT_0  0x040000 /* MNG config cycle done */
4564 +#define E1000_NVM_CFG_DONE_PORT_1  0x080000 /* ...for second port */
4565 +
4566 +/* Mask bits for fields in Word 0x0f of the NVM */
4567 +#define NVM_WORD0F_PAUSE_MASK       0x3000
4568 +#define NVM_WORD0F_PAUSE            0x1000
4569 +#define NVM_WORD0F_ASM_DIR          0x2000
4570 +#define NVM_WORD0F_ANE              0x0800
4571 +#define NVM_WORD0F_SWPDIO_EXT_MASK  0x00F0
4572 +#define NVM_WORD0F_LPLU             0x0001
4573 +
4574 +/* Mask bits for fields in Word 0x1a of the NVM */
4575 +#define NVM_WORD1A_ASPM_MASK  0x000C
4576 +
4577 +/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
4578 +#define NVM_SUM                    0xBABA
4579 +
4580 +#define NVM_MAC_ADDR_OFFSET        0
4581 +#define NVM_PBA_OFFSET_0           8
4582 +#define NVM_PBA_OFFSET_1           9
4583 +#define NVM_RESERVED_WORD          0xFFFF
4584 +#define NVM_PHY_CLASS_A            0x8000
4585 +#define NVM_SERDES_AMPLITUDE_MASK  0x000F
4586 +#define NVM_SIZE_MASK              0x1C00
4587 +#define NVM_SIZE_SHIFT             10
4588 +#define NVM_WORD_SIZE_BASE_SHIFT   6
4589 +#define NVM_SWDPIO_EXT_SHIFT       4
4590 +
4591 +/* NVM Commands - SPI */
4592 +#define NVM_MAX_RETRY_SPI          5000 /* Max wait of 5ms, for RDY signal */
4593 +#define NVM_READ_OPCODE_SPI        0x03 /* NVM read opcode */
4594 +#define NVM_WRITE_OPCODE_SPI       0x02 /* NVM write opcode */
4595 +#define NVM_A8_OPCODE_SPI          0x08 /* opcode bit-3 = address bit-8 */
4596 +#define NVM_WREN_OPCODE_SPI        0x06 /* NVM set Write Enable latch */
4597 +#define NVM_WRDI_OPCODE_SPI        0x04 /* NVM reset Write Enable latch */
4598 +#define NVM_RDSR_OPCODE_SPI        0x05 /* NVM read Status register */
4599 +#define NVM_WRSR_OPCODE_SPI        0x01 /* NVM write Status register */
4600 +
4601 +/* SPI NVM Status Register */
4602 +#define NVM_STATUS_RDY_SPI         0x01
4603 +#define NVM_STATUS_WEN_SPI         0x02
4604 +#define NVM_STATUS_BP0_SPI         0x04
4605 +#define NVM_STATUS_BP1_SPI         0x08
4606 +#define NVM_STATUS_WPEN_SPI        0x80
4607 +
4608 +/* Word definitions for ID LED Settings */
4609 +#define ID_LED_RESERVED_0000 0x0000
4610 +#define ID_LED_RESERVED_FFFF 0xFFFF
4611 +#define ID_LED_DEFAULT       ((ID_LED_OFF1_ON2  << 12) | \
4612 +                              (ID_LED_OFF1_OFF2 <<  8) | \
4613 +                              (ID_LED_DEF1_DEF2 <<  4) | \
4614 +                              (ID_LED_DEF1_DEF2))
4615 +#define ID_LED_DEF1_DEF2     0x1
4616 +#define ID_LED_DEF1_ON2      0x2
4617 +#define ID_LED_DEF1_OFF2     0x3
4618 +#define ID_LED_ON1_DEF2      0x4
4619 +#define ID_LED_ON1_ON2       0x5
4620 +#define ID_LED_ON1_OFF2      0x6
4621 +#define ID_LED_OFF1_DEF2     0x7
4622 +#define ID_LED_OFF1_ON2      0x8
4623 +#define ID_LED_OFF1_OFF2     0x9
4624 +
4625 +#define IGP_ACTIVITY_LED_MASK   0xFFFFF0FF
4626 +#define IGP_ACTIVITY_LED_ENABLE 0x0300
4627 +#define IGP_LED3_MODE           0x07000000
4628 +
4629 +/* PCI/PCI-X/PCI-EX Config space */
4630 +#define PCI_HEADER_TYPE_REGISTER     0x0E
4631 +#define PCIE_LINK_STATUS             0x12
4632 +#define PCIE_DEVICE_CONTROL2         0x28
4633 +
4634 +#define PCI_HEADER_TYPE_MULTIFUNC    0x80
4635 +#define PCIE_LINK_WIDTH_MASK         0x3F0
4636 +#define PCIE_LINK_WIDTH_SHIFT        4
4637 +#define PCIE_DEVICE_CONTROL2_16ms    0x0005
4638 +
4639 +#ifndef ETH_ADDR_LEN
4640 +#define ETH_ADDR_LEN                 6
4641 +#endif
4642 +
4643 +#define PHY_REVISION_MASK      0xFFFFFFF0
4644 +#define MAX_PHY_REG_ADDRESS    0x1F  /* 5 bit address bus (0-0x1F) */
4645 +#define MAX_PHY_MULTI_PAGE_REG 0xF
4646 +
4647 +/* Bit definitions for valid PHY IDs. */
4648 +/*
4649 + * I = Integrated
4650 + * E = External
4651 + */
4652 +#define M88E1000_E_PHY_ID    0x01410C50
4653 +#define M88E1000_I_PHY_ID    0x01410C30
4654 +#define M88E1011_I_PHY_ID    0x01410C20
4655 +#define IGP01E1000_I_PHY_ID  0x02A80380
4656 +#define M88E1011_I_REV_4     0x04
4657 +#define M88E1111_I_PHY_ID    0x01410CC0
4658 +#define GG82563_E_PHY_ID     0x01410CA0
4659 +#define IGP03E1000_E_PHY_ID  0x02A80390
4660 +#define IFE_E_PHY_ID         0x02A80330
4661 +#define IFE_PLUS_E_PHY_ID    0x02A80320
4662 +#define IFE_C_E_PHY_ID       0x02A80310
4663 +#define IGP04E1000_E_PHY_ID  0x02A80391
4664 +#define M88_VENDOR           0x0141
4665 +
4666 +/* M88E1000 Specific Registers */
4667 +#define M88E1000_PHY_SPEC_CTRL     0x10  /* PHY Specific Control Register */
4668 +#define M88E1000_PHY_SPEC_STATUS   0x11  /* PHY Specific Status Register */
4669 +#define M88E1000_INT_ENABLE        0x12  /* Interrupt Enable Register */
4670 +#define M88E1000_INT_STATUS        0x13  /* Interrupt Status Register */
4671 +#define M88E1000_EXT_PHY_SPEC_CTRL 0x14  /* Extended PHY Specific Control */
4672 +#define M88E1000_RX_ERR_CNTR       0x15  /* Receive Error Counter */
4673 +
4674 +#define M88E1000_PHY_EXT_CTRL      0x1A  /* PHY extend control register */
4675 +#define M88E1000_PHY_PAGE_SELECT   0x1D  /* Reg 29 for page number setting */
4676 +#define M88E1000_PHY_GEN_CONTROL   0x1E  /* Its meaning depends on reg 29 */
4677 +#define M88E1000_PHY_VCO_REG_BIT8  0x100 /* Bits 8 & 11 are adjusted for */
4678 +#define M88E1000_PHY_VCO_REG_BIT11 0x800    /* improved BER performance */
4679 +
4680 +/* M88E1000 PHY Specific Control Register */
4681 +#define M88E1000_PSCR_JABBER_DISABLE    0x0001 /* 1=Jabber Function disabled */
4682 +#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reverse enabled */
4683 +#define M88E1000_PSCR_SQE_TEST          0x0004 /* 1=SQE Test enabled */
4684 +/* 1=CLK125 low, 0=CLK125 toggling */
4685 +#define M88E1000_PSCR_CLK125_DISABLE    0x0010
4686 +#define M88E1000_PSCR_MDI_MANUAL_MODE  0x0000 /* MDI Crossover Mode bits 6:5 */
4687 +                                               /* Manual MDI configuration */
4688 +#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020  /* Manual MDIX configuration */
4689 +/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */
4690 +#define M88E1000_PSCR_AUTO_X_1000T     0x0040
4691 +/* Auto crossover enabled all speeds */
4692 +#define M88E1000_PSCR_AUTO_X_MODE      0x0060
4693 +/*
4694 + * 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold)
4695 + * 0=Normal 10BASE-T Rx Threshold
4696 + */
4697 +#define M88E1000_PSCR_EN_10BT_EXT_DIST 0x0080
4698 +/* 1=5-bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */
4699 +#define M88E1000_PSCR_MII_5BIT_ENABLE      0x0100
4700 +#define M88E1000_PSCR_SCRAMBLER_DISABLE    0x0200 /* 1=Scrambler disable */
4701 +#define M88E1000_PSCR_FORCE_LINK_GOOD      0x0400 /* 1=Force link good */
4702 +#define M88E1000_PSCR_ASSERT_CRS_ON_TX     0x0800 /* 1=Assert CRS on Tx */
4703 +
4704 +/* M88E1000 PHY Specific Status Register */
4705 +#define M88E1000_PSSR_JABBER             0x0001 /* 1=Jabber */
4706 +#define M88E1000_PSSR_REV_POLARITY       0x0002 /* 1=Polarity reversed */
4707 +#define M88E1000_PSSR_DOWNSHIFT          0x0020 /* 1=Downshifted */
4708 +#define M88E1000_PSSR_MDIX               0x0040 /* 1=MDIX; 0=MDI */
4709 +/*
4710 + * 0 = <50M
4711 + * 1 = 50-80M
4712 + * 2 = 80-110M
4713 + * 3 = 110-140M
4714 + * 4 = >140M
4715 + */
4716 +#define M88E1000_PSSR_CABLE_LENGTH       0x0380
4717 +#define M88E1000_PSSR_LINK               0x0400 /* 1=Link up, 0=Link down */
4718 +#define M88E1000_PSSR_SPD_DPLX_RESOLVED  0x0800 /* 1=Speed & Duplex resolved */
4719 +#define M88E1000_PSSR_PAGE_RCVD          0x1000 /* 1=Page received */
4720 +#define M88E1000_PSSR_DPLX               0x2000 /* 1=Duplex 0=Half Duplex */
4721 +#define M88E1000_PSSR_SPEED              0xC000 /* Speed, bits 14:15 */
4722 +#define M88E1000_PSSR_10MBS              0x0000 /* 00=10Mbs */
4723 +#define M88E1000_PSSR_100MBS             0x4000 /* 01=100Mbs */
4724 +#define M88E1000_PSSR_1000MBS            0x8000 /* 10=1000Mbs */
4725 +
4726 +#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
4727 +
4728 +/* M88E1000 Extended PHY Specific Control Register */
4729 +#define M88E1000_EPSCR_FIBER_LOOPBACK 0x4000 /* 1=Fiber loopback */
4730 +/*
4731 + * 1 = Lost lock detect enabled.
4732 + * Will assert lost lock and bring
4733 + * link down if idle not seen
4734 + * within 1ms in 1000BASE-T
4735 + */
4736 +#define M88E1000_EPSCR_DOWN_NO_IDLE   0x8000
4737 +/*
4738 + * Number of times we will attempt to autonegotiate before downshifting if we
4739 + * are the master
4740 + */
4741 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
4742 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X   0x0000
4743 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_2X   0x0400
4744 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_3X   0x0800
4745 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_4X   0x0C00
4746 +/*
4747 + * Number of times we will attempt to autonegotiate before downshifting if we
4748 + * are the slave
4749 + */
4750 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK  0x0300
4751 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_DIS   0x0000
4752 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X    0x0100
4753 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_2X    0x0200
4754 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_3X    0x0300
4755 +#define M88E1000_EPSCR_TX_CLK_2_5     0x0060 /* 2.5 MHz TX_CLK */
4756 +#define M88E1000_EPSCR_TX_CLK_25      0x0070 /* 25  MHz TX_CLK */
4757 +#define M88E1000_EPSCR_TX_CLK_0       0x0000 /* NO  TX_CLK */
4758 +
4759 +/* M88EC018 Rev 2 specific DownShift settings */
4760 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK  0x0E00
4761 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_1X    0x0000
4762 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_2X    0x0200
4763 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_3X    0x0400
4764 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_4X    0x0600
4765 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X    0x0800
4766 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_6X    0x0A00
4767 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_7X    0x0C00
4768 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_8X    0x0E00
4769 +
4770 +/*
4771 + * Bits...
4772 + * 15-5: page
4773 + * 4-0: register offset
4774 + */
4775 +#define GG82563_PAGE_SHIFT        5
4776 +#define GG82563_REG(page, reg)    \
4777 +        (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))
4778 +#define GG82563_MIN_ALT_REG       30
4779 +
4780 +/* GG82563 Specific Registers */
4781 +#define GG82563_PHY_SPEC_CTRL           \
4782 +        GG82563_REG(0, 16) /* PHY Specific Control */
4783 +#define GG82563_PHY_SPEC_STATUS         \
4784 +        GG82563_REG(0, 17) /* PHY Specific Status */
4785 +#define GG82563_PHY_INT_ENABLE          \
4786 +        GG82563_REG(0, 18) /* Interrupt Enable */
4787 +#define GG82563_PHY_SPEC_STATUS_2       \
4788 +        GG82563_REG(0, 19) /* PHY Specific Status 2 */
4789 +#define GG82563_PHY_RX_ERR_CNTR         \
4790 +        GG82563_REG(0, 21) /* Receive Error Counter */
4791 +#define GG82563_PHY_PAGE_SELECT         \
4792 +        GG82563_REG(0, 22) /* Page Select */
4793 +#define GG82563_PHY_SPEC_CTRL_2         \
4794 +        GG82563_REG(0, 26) /* PHY Specific Control 2 */
4795 +#define GG82563_PHY_PAGE_SELECT_ALT     \
4796 +        GG82563_REG(0, 29) /* Alternate Page Select */
4797 +#define GG82563_PHY_TEST_CLK_CTRL       \
4798 +        GG82563_REG(0, 30) /* Test Clock Control (use reg. 29 to select) */
4799 +
4800 +#define GG82563_PHY_MAC_SPEC_CTRL       \
4801 +        GG82563_REG(2, 21) /* MAC Specific Control Register */
4802 +#define GG82563_PHY_MAC_SPEC_CTRL_2     \
4803 +        GG82563_REG(2, 26) /* MAC Specific Control 2 */
4804 +
4805 +#define GG82563_PHY_DSP_DISTANCE    \
4806 +        GG82563_REG(5, 26) /* DSP Distance */
4807 +
4808 +/* Page 193 - Port Control Registers */
4809 +#define GG82563_PHY_KMRN_MODE_CTRL   \
4810 +        GG82563_REG(193, 16) /* Kumeran Mode Control */
4811 +#define GG82563_PHY_PORT_RESET          \
4812 +        GG82563_REG(193, 17) /* Port Reset */
4813 +#define GG82563_PHY_REVISION_ID         \
4814 +        GG82563_REG(193, 18) /* Revision ID */
4815 +#define GG82563_PHY_DEVICE_ID           \
4816 +        GG82563_REG(193, 19) /* Device ID */
4817 +#define GG82563_PHY_PWR_MGMT_CTRL       \
4818 +        GG82563_REG(193, 20) /* Power Management Control */
4819 +#define GG82563_PHY_RATE_ADAPT_CTRL     \
4820 +        GG82563_REG(193, 25) /* Rate Adaptation Control */
4821 +
4822 +/* Page 194 - KMRN Registers */
4823 +#define GG82563_PHY_KMRN_FIFO_CTRL_STAT \
4824 +        GG82563_REG(194, 16) /* FIFO's Control/Status */
4825 +#define GG82563_PHY_KMRN_CTRL           \
4826 +        GG82563_REG(194, 17) /* Control */
4827 +#define GG82563_PHY_INBAND_CTRL         \
4828 +        GG82563_REG(194, 18) /* Inband Control */
4829 +#define GG82563_PHY_KMRN_DIAGNOSTIC     \
4830 +        GG82563_REG(194, 19) /* Diagnostic */
4831 +#define GG82563_PHY_ACK_TIMEOUTS        \
4832 +        GG82563_REG(194, 20) /* Acknowledge Timeouts */
4833 +#define GG82563_PHY_ADV_ABILITY         \
4834 +        GG82563_REG(194, 21) /* Advertised Ability */
4835 +#define GG82563_PHY_LINK_PARTNER_ADV_ABILITY \
4836 +        GG82563_REG(194, 23) /* Link Partner Advertised Ability */
4837 +#define GG82563_PHY_ADV_NEXT_PAGE       \
4838 +        GG82563_REG(194, 24) /* Advertised Next Page */
4839 +#define GG82563_PHY_LINK_PARTNER_ADV_NEXT_PAGE \
4840 +        GG82563_REG(194, 25) /* Link Partner Advertised Next page */
4841 +#define GG82563_PHY_KMRN_MISC           \
4842 +        GG82563_REG(194, 26) /* Misc. */
4843 +
4844 +/* MDI Control */
4845 +#define E1000_MDIC_DATA_MASK 0x0000FFFF
4846 +#define E1000_MDIC_REG_MASK  0x001F0000
4847 +#define E1000_MDIC_REG_SHIFT 16
4848 +#define E1000_MDIC_PHY_MASK  0x03E00000
4849 +#define E1000_MDIC_PHY_SHIFT 21
4850 +#define E1000_MDIC_OP_WRITE  0x04000000
4851 +#define E1000_MDIC_OP_READ   0x08000000
4852 +#define E1000_MDIC_READY     0x10000000
4853 +#define E1000_MDIC_INT_EN    0x20000000
4854 +#define E1000_MDIC_ERROR     0x40000000
4855 +
4856 +/* SerDes Control */
4857 +#define E1000_GEN_CTL_READY             0x80000000
4858 +#define E1000_GEN_CTL_ADDRESS_SHIFT     8
4859 +#define E1000_GEN_POLL_TIMEOUT          640
4860 +
4861 +/* LinkSec register fields */
4862 +#define E1000_LSECTXCAP_SUM_MASK        0x00FF0000
4863 +#define E1000_LSECTXCAP_SUM_SHIFT       16
4864 +#define E1000_LSECRXCAP_SUM_MASK        0x00FF0000
4865 +#define E1000_LSECRXCAP_SUM_SHIFT       16
4866 +
4867 +#define E1000_LSECTXCTRL_EN_MASK        0x00000003
4868 +#define E1000_LSECTXCTRL_DISABLE        0x0
4869 +#define E1000_LSECTXCTRL_AUTH           0x1
4870 +#define E1000_LSECTXCTRL_AUTH_ENCRYPT   0x2
4871 +#define E1000_LSECTXCTRL_AISCI          0x00000020
4872 +#define E1000_LSECTXCTRL_PNTHRSH_MASK   0xFFFFFF00
4873 +#define E1000_LSECTXCTRL_RSV_MASK       0x000000D8
4874 +
4875 +#define E1000_LSECRXCTRL_EN_MASK        0x0000000C
4876 +#define E1000_LSECRXCTRL_EN_SHIFT       2
4877 +#define E1000_LSECRXCTRL_DISABLE        0x0
4878 +#define E1000_LSECRXCTRL_CHECK          0x1
4879 +#define E1000_LSECRXCTRL_STRICT         0x2
4880 +#define E1000_LSECRXCTRL_DROP           0x3
4881 +#define E1000_LSECRXCTRL_PLSH           0x00000040
4882 +#define E1000_LSECRXCTRL_RP             0x00000080
4883 +#define E1000_LSECRXCTRL_RSV_MASK       0xFFFFFF33
4884 +
4885 +
4886 +
4887 +#endif /* _E1000_DEFINES_H_ */
4888 Index: linux-2.6.22/drivers/net/igb/e1000_hw.h
4889 ===================================================================
4890 --- /dev/null   1970-01-01 00:00:00.000000000 +0000
4891 +++ linux-2.6.22/drivers/net/igb/e1000_hw.h     2009-12-18 12:39:22.000000000 -0500
4892 @@ -0,0 +1,692 @@
4893 +/*******************************************************************************
4894 +
4895 +  Intel(R) Gigabit Ethernet Linux driver
4896 +  Copyright(c) 2007-2009 Intel Corporation.
4897 +
4898 +  This program is free software; you can redistribute it and/or modify it
4899 +  under the terms and conditions of the GNU General Public License,
4900 +  version 2, as published by the Free Software Foundation.
4901 +
4902 +  This program is distributed in the hope it will be useful, but WITHOUT
4903 +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
4904 +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
4905 +  more details.
4906 +
4907 +  You should have received a copy of the GNU General Public License along with
4908 +  this program; if not, write to the Free Software Foundation, Inc.,
4909 +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
4910 +
4911 +  The full GNU General Public License is included in this distribution in
4912 +  the file called "COPYING".
4913 +
4914 +  Contact Information:
4915 +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
4916 +  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
4917 +
4918 +*******************************************************************************/
4919 +
4920 +#ifndef _E1000_HW_H_
4921 +#define _E1000_HW_H_
4922 +
4923 +#include "e1000_osdep.h"
4924 +#include "e1000_regs.h"
4925 +#include "e1000_defines.h"
4926 +
4927 +struct e1000_hw;
4928 +
4929 +#define E1000_DEV_ID_82576                    0x10C9
4930 +#define E1000_DEV_ID_82576_FIBER              0x10E6
4931 +#define E1000_DEV_ID_82576_SERDES             0x10E7
4932 +#define E1000_DEV_ID_82576_QUAD_COPPER        0x10E8
4933 +#define E1000_DEV_ID_82576_NS                 0x150A
4934 +#define E1000_DEV_ID_82576_NS_SERDES          0x1518
4935 +#define E1000_DEV_ID_82576_SERDES_QUAD        0x150D
4936 +#define E1000_DEV_ID_82575EB_COPPER           0x10A7
4937 +#define E1000_DEV_ID_82575EB_FIBER_SERDES     0x10A9
4938 +#define E1000_DEV_ID_82575GB_QUAD_COPPER      0x10D6
4939 +#define E1000_REVISION_0 0
4940 +#define E1000_REVISION_1 1
4941 +#define E1000_REVISION_2 2
4942 +#define E1000_REVISION_3 3
4943 +#define E1000_REVISION_4 4
4944 +
4945 +#define E1000_FUNC_0     0
4946 +#define E1000_FUNC_1     1
4947 +
4948 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0   0
4949 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1   3
4950 +
4951 +enum e1000_mac_type {
4952 +       e1000_undefined = 0,
4953 +       e1000_82575,
4954 +       e1000_82576,
4955 +       e1000_num_macs  /* List is 1-based, so subtract 1 for true count. */
4956 +};
4957 +
4958 +enum e1000_media_type {
4959 +       e1000_media_type_unknown = 0,
4960 +       e1000_media_type_copper = 1,
4961 +       e1000_media_type_fiber = 2,
4962 +       e1000_media_type_internal_serdes = 3,
4963 +       e1000_num_media_types
4964 +};
4965 +
4966 +enum e1000_nvm_type {
4967 +       e1000_nvm_unknown = 0,
4968 +       e1000_nvm_none,
4969 +       e1000_nvm_eeprom_spi,
4970 +       e1000_nvm_flash_hw,
4971 +       e1000_nvm_flash_sw
4972 +};
4973 +
4974 +enum e1000_nvm_override {
4975 +       e1000_nvm_override_none = 0,
4976 +       e1000_nvm_override_spi_small,
4977 +       e1000_nvm_override_spi_large,
4978 +};
4979 +
4980 +enum e1000_phy_type {
4981 +       e1000_phy_unknown = 0,
4982 +       e1000_phy_none,
4983 +       e1000_phy_m88,
4984 +       e1000_phy_igp,
4985 +       e1000_phy_igp_2,
4986 +       e1000_phy_gg82563,
4987 +       e1000_phy_igp_3,
4988 +       e1000_phy_ife,
4989 +       e1000_phy_vf,
4990 +};
4991 +
4992 +enum e1000_bus_type {
4993 +       e1000_bus_type_unknown = 0,
4994 +       e1000_bus_type_pci,
4995 +       e1000_bus_type_pcix,
4996 +       e1000_bus_type_pci_express,
4997 +       e1000_bus_type_reserved
4998 +};
4999 +
5000 +enum e1000_bus_speed {
5001 +       e1000_bus_speed_unknown = 0,
5002 +       e1000_bus_speed_33,
5003 +       e1000_bus_speed_66,
5004 +       e1000_bus_speed_100,
5005 +       e1000_bus_speed_120,
5006 +       e1000_bus_speed_133,
5007 +       e1000_bus_speed_2500,
5008 +       e1000_bus_speed_5000,
5009 +       e1000_bus_speed_reserved
5010 +};
5011 +
5012 +enum e1000_bus_width {
5013 +       e1000_bus_width_unknown = 0,
5014 +       e1000_bus_width_pcie_x1,
5015 +       e1000_bus_width_pcie_x2,
5016 +       e1000_bus_width_pcie_x4 = 4,
5017 +       e1000_bus_width_pcie_x8 = 8,
5018 +       e1000_bus_width_32,
5019 +       e1000_bus_width_64,
5020 +       e1000_bus_width_reserved
5021 +};
5022 +
5023 +enum e1000_1000t_rx_status {
5024 +       e1000_1000t_rx_status_not_ok = 0,
5025 +       e1000_1000t_rx_status_ok,
5026 +       e1000_1000t_rx_status_undefined = 0xFF
5027 +};
5028 +
5029 +enum e1000_rev_polarity {
5030 +       e1000_rev_polarity_normal = 0,
5031 +       e1000_rev_polarity_reversed,
5032 +       e1000_rev_polarity_undefined = 0xFF
5033 +};
5034 +
5035 +enum e1000_fc_mode {
5036 +       e1000_fc_none = 0,
5037 +       e1000_fc_rx_pause,
5038 +       e1000_fc_tx_pause,
5039 +       e1000_fc_full,
5040 +       e1000_fc_default = 0xFF
5041 +};
5042 +
5043 +enum e1000_ms_type {
5044 +       e1000_ms_hw_default = 0,
5045 +       e1000_ms_force_master,
5046 +       e1000_ms_force_slave,
5047 +       e1000_ms_auto
5048 +};
5049 +
5050 +enum e1000_smart_speed {
5051 +       e1000_smart_speed_default = 0,
5052 +       e1000_smart_speed_on,
5053 +       e1000_smart_speed_off
5054 +};
5055 +
5056 +enum e1000_serdes_link_state {
5057 +       e1000_serdes_link_down = 0,
5058 +       e1000_serdes_link_autoneg_progress,
5059 +       e1000_serdes_link_autoneg_complete,
5060 +       e1000_serdes_link_forced_up
5061 +};
5062 +
5063 +/* Receive Descriptor */
5064 +struct e1000_rx_desc {
5065 +       __le64 buffer_addr; /* Address of the descriptor's data buffer */
5066 +       __le16 length;      /* Length of data DMAed into data buffer */
5067 +       __le16 csum;        /* Packet checksum */
5068 +       u8  status;         /* Descriptor status */
5069 +       u8  errors;         /* Descriptor Errors */
5070 +       __le16 special;
5071 +};
5072 +
5073 +/* Receive Descriptor - Extended */
5074 +union e1000_rx_desc_extended {
5075 +       struct {
5076 +               __le64 buffer_addr;
5077 +               __le64 reserved;
5078 +       } read;
5079 +       struct {
5080 +               struct {
5081 +                       __le32 mrq;           /* Multiple Rx Queues */
5082 +                       union {
5083 +                               __le32 rss;         /* RSS Hash */
5084 +                               struct {
5085 +                                       __le16 ip_id;  /* IP id */
5086 +                                       __le16 csum;   /* Packet Checksum */
5087 +                               } csum_ip;
5088 +                       } hi_dword;
5089 +               } lower;
5090 +               struct {
5091 +                       __le32 status_error;  /* ext status/error */
5092 +                       __le16 length;
5093 +                       __le16 vlan;          /* VLAN tag */
5094 +               } upper;
5095 +       } wb;  /* writeback */
5096 +};
5097 +
5098 +#define MAX_PS_BUFFERS 4
5099 +/* Receive Descriptor - Packet Split */
5100 +union e1000_rx_desc_packet_split {
5101 +       struct {
5102 +               /* one buffer for protocol header(s), three data buffers */
5103 +               __le64 buffer_addr[MAX_PS_BUFFERS];
5104 +       } read;
5105 +       struct {
5106 +               struct {
5107 +                       __le32 mrq;           /* Multiple Rx Queues */
5108 +                       union {
5109 +                               __le32 rss;           /* RSS Hash */
5110 +                               struct {
5111 +                                       __le16 ip_id;    /* IP id */
5112 +                                       __le16 csum;     /* Packet Checksum */
5113 +                               } csum_ip;
5114 +                       } hi_dword;
5115 +               } lower;
5116 +               struct {
5117 +                       __le32 status_error;  /* ext status/error */
5118 +                       __le16 length0;       /* length of buffer 0 */
5119 +                       __le16 vlan;          /* VLAN tag */
5120 +               } middle;
5121 +               struct {
5122 +                       __le16 header_status;
5123 +                       __le16 length[3];     /* length of buffers 1-3 */
5124 +               } upper;
5125 +               __le64 reserved;
5126 +       } wb; /* writeback */
5127 +};
5128 +
5129 +/* Transmit Descriptor */
5130 +struct e1000_tx_desc {
5131 +       __le64 buffer_addr;   /* Address of the descriptor's data buffer */
5132 +       union {
5133 +               __le32 data;
5134 +               struct {
5135 +                       __le16 length;    /* Data buffer length */
5136 +                       u8 cso;           /* Checksum offset */
5137 +                       u8 cmd;           /* Descriptor control */
5138 +               } flags;
5139 +       } lower;
5140 +       union {
5141 +               __le32 data;
5142 +               struct {
5143 +                       u8 status;        /* Descriptor status */
5144 +                       u8 css;           /* Checksum start */
5145 +                       __le16 special;
5146 +               } fields;
5147 +       } upper;
5148 +};
5149 +
5150 +/* Offload Context Descriptor */
5151 +struct e1000_context_desc {
5152 +       union {
5153 +               __le32 ip_config;
5154 +               struct {
5155 +                       u8 ipcss;         /* IP checksum start */
5156 +                       u8 ipcso;         /* IP checksum offset */
5157 +                       __le16 ipcse;     /* IP checksum end */
5158 +               } ip_fields;
5159 +       } lower_setup;
5160 +       union {
5161 +               __le32 tcp_config;
5162 +               struct {
5163 +                       u8 tucss;         /* TCP checksum start */
5164 +                       u8 tucso;         /* TCP checksum offset */
5165 +                       __le16 tucse;     /* TCP checksum end */
5166 +               } tcp_fields;
5167 +       } upper_setup;
5168 +       __le32 cmd_and_length;
5169 +       union {
5170 +               __le32 data;
5171 +               struct {
5172 +                       u8 status;        /* Descriptor status */
5173 +                       u8 hdr_len;       /* Header length */
5174 +                       __le16 mss;       /* Maximum segment size */
5175 +               } fields;
5176 +       } tcp_seg_setup;
5177 +};
5178 +
5179 +/* Offload data descriptor */
5180 +struct e1000_data_desc {
5181 +       __le64 buffer_addr;   /* Address of the descriptor's buffer address */
5182 +       union {
5183 +               __le32 data;
5184 +               struct {
5185 +                       __le16 length;    /* Data buffer length */
5186 +                       u8 typ_len_ext;
5187 +                       u8 cmd;
5188 +               } flags;
5189 +       } lower;
5190 +       union {
5191 +               __le32 data;
5192 +               struct {
5193 +                       u8 status;        /* Descriptor status */
5194 +                       u8 popts;         /* Packet Options */
5195 +                       __le16 special;
5196 +               } fields;
5197 +       } upper;
5198 +};
5199 +
5200 +/* Statistics counters collected by the MAC */
5201 +struct e1000_hw_stats {
5202 +       u64 crcerrs;
5203 +       u64 algnerrc;
5204 +       u64 symerrs;
5205 +       u64 rxerrc;
5206 +       u64 mpc;
5207 +       u64 scc;
5208 +       u64 ecol;
5209 +       u64 mcc;
5210 +       u64 latecol;
5211 +       u64 colc;
5212 +       u64 dc;
5213 +       u64 tncrs;
5214 +       u64 sec;
5215 +       u64 cexterr;
5216 +       u64 rlec;
5217 +       u64 xonrxc;
5218 +       u64 xontxc;
5219 +       u64 xoffrxc;
5220 +       u64 xofftxc;
5221 +       u64 fcruc;
5222 +       u64 prc64;
5223 +       u64 prc127;
5224 +       u64 prc255;
5225 +       u64 prc511;
5226 +       u64 prc1023;
5227 +       u64 prc1522;
5228 +       u64 gprc;
5229 +       u64 bprc;
5230 +       u64 mprc;
5231 +       u64 gptc;
5232 +       u64 gorc;
5233 +       u64 gotc;
5234 +       u64 rnbc;
5235 +       u64 ruc;
5236 +       u64 rfc;
5237 +       u64 roc;
5238 +       u64 rjc;
5239 +       u64 mgprc;
5240 +       u64 mgpdc;
5241 +       u64 mgptc;
5242 +       u64 tor;
5243 +       u64 tot;
5244 +       u64 tpr;
5245 +       u64 tpt;
5246 +       u64 ptc64;
5247 +       u64 ptc127;
5248 +       u64 ptc255;
5249 +       u64 ptc511;
5250 +       u64 ptc1023;
5251 +       u64 ptc1522;
5252 +       u64 mptc;
5253 +       u64 bptc;
5254 +       u64 tsctc;
5255 +       u64 tsctfc;
5256 +       u64 iac;
5257 +       u64 icrxptc;
5258 +       u64 icrxatc;
5259 +       u64 ictxptc;
5260 +       u64 ictxatc;
5261 +       u64 ictxqec;
5262 +       u64 ictxqmtc;
5263 +       u64 icrxdmtc;
5264 +       u64 icrxoc;
5265 +       u64 cbtmpc;
5266 +       u64 htdpmc;
5267 +       u64 cbrdpc;
5268 +       u64 cbrmpc;
5269 +       u64 rpthc;
5270 +       u64 hgptc;
5271 +       u64 htcbdpc;
5272 +       u64 hgorc;
5273 +       u64 hgotc;
5274 +       u64 lenerrs;
5275 +       u64 scvpc;
5276 +       u64 hrmpc;
5277 +       u64 doosync;
5278 +};
5279 +
5280 +
5281 +struct e1000_phy_stats {
5282 +       u32 idle_errors;
5283 +       u32 receive_errors;
5284 +};
5285 +
5286 +struct e1000_host_mng_dhcp_cookie {
5287 +       u32 signature;
5288 +       u8  status;
5289 +       u8  reserved0;
5290 +       u16 vlan_id;
5291 +       u32 reserved1;
5292 +       u16 reserved2;
5293 +       u8  reserved3;
5294 +       u8  checksum;
5295 +};
5296 +
5297 +/* Host Interface "Rev 1" */
5298 +struct e1000_host_command_header {
5299 +       u8 command_id;
5300 +       u8 command_length;
5301 +       u8 command_options;
5302 +       u8 checksum;
5303 +};
5304 +
5305 +#define E1000_HI_MAX_DATA_LENGTH     252
5306 +struct e1000_host_command_info {
5307 +       struct e1000_host_command_header command_header;
5308 +       u8 command_data[E1000_HI_MAX_DATA_LENGTH];
5309 +};
5310 +
5311 +/* Host Interface "Rev 2" */
5312 +struct e1000_host_mng_command_header {
5313 +       u8  command_id;
5314 +       u8  checksum;
5315 +       u16 reserved1;
5316 +       u16 reserved2;
5317 +       u16 command_length;
5318 +};
5319 +
5320 +#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8
5321 +struct e1000_host_mng_command_info {
5322 +       struct e1000_host_mng_command_header command_header;
5323 +       u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH];
5324 +};
5325 +
5326 +#include "e1000_mac.h"
5327 +#include "e1000_phy.h"
5328 +#include "e1000_nvm.h"
5329 +#include "e1000_manage.h"
5330 +#include "e1000_mbx.h"
5331 +
5332 +struct e1000_mac_operations {
5333 +       /* Function pointers for the MAC. */
5334 +       s32  (*init_params)(struct e1000_hw *);
5335 +       s32  (*id_led_init)(struct e1000_hw *);
5336 +       s32  (*blink_led)(struct e1000_hw *);
5337 +       s32  (*check_for_link)(struct e1000_hw *);
5338 +       bool (*check_mng_mode)(struct e1000_hw *hw);
5339 +       s32  (*cleanup_led)(struct e1000_hw *);
5340 +       void (*clear_hw_cntrs)(struct e1000_hw *);
5341 +       void (*clear_vfta)(struct e1000_hw *);
5342 +       s32  (*get_bus_info)(struct e1000_hw *);
5343 +       void (*set_lan_id)(struct e1000_hw *);
5344 +       s32  (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *);
5345 +       s32  (*led_on)(struct e1000_hw *);
5346 +       s32  (*led_off)(struct e1000_hw *);
5347 +       void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32);
5348 +       s32  (*reset_hw)(struct e1000_hw *);
5349 +       s32  (*init_hw)(struct e1000_hw *);
5350 +       void (*shutdown_serdes)(struct e1000_hw *);
5351 +       s32  (*setup_link)(struct e1000_hw *);
5352 +       s32  (*setup_physical_interface)(struct e1000_hw *);
5353 +       s32  (*setup_led)(struct e1000_hw *);
5354 +       void (*write_vfta)(struct e1000_hw *, u32, u32);
5355 +       void (*mta_set)(struct e1000_hw *, u32);
5356 +       void (*config_collision_dist)(struct e1000_hw *);
5357 +       void (*rar_set)(struct e1000_hw *, u8*, u32);
5358 +       s32  (*read_mac_addr)(struct e1000_hw *);
5359 +       s32  (*validate_mdi_setting)(struct e1000_hw *);
5360 +       s32  (*mng_host_if_write)(struct e1000_hw *, u8*, u16, u16, u8*);
5361 +       s32  (*mng_write_cmd_header)(struct e1000_hw *hw,
5362 +                      struct e1000_host_mng_command_header*);
5363 +       s32  (*mng_enable_host_if)(struct e1000_hw *);
5364 +       s32  (*wait_autoneg)(struct e1000_hw *);
5365 +};
5366 +
5367 +struct e1000_phy_operations {
5368 +       s32  (*init_params)(struct e1000_hw *);
5369 +       s32  (*acquire)(struct e1000_hw *);
5370 +       s32  (*check_polarity)(struct e1000_hw *);
5371 +       s32  (*check_reset_block)(struct e1000_hw *);
5372 +       s32  (*commit)(struct e1000_hw *);
5373 +       s32  (*force_speed_duplex)(struct e1000_hw *);
5374 +       s32  (*get_cfg_done)(struct e1000_hw *hw);
5375 +       s32  (*get_cable_length)(struct e1000_hw *);
5376 +       s32  (*get_info)(struct e1000_hw *);
5377 +       s32  (*read_reg)(struct e1000_hw *, u32, u16 *);
5378 +       s32  (*read_reg_locked)(struct e1000_hw *, u32, u16 *);
5379 +       void (*release)(struct e1000_hw *);
5380 +       s32  (*reset)(struct e1000_hw *);
5381 +       s32  (*set_d0_lplu_state)(struct e1000_hw *, bool);
5382 +       s32  (*set_d3_lplu_state)(struct e1000_hw *, bool);
5383 +       s32  (*write_reg)(struct e1000_hw *, u32, u16);
5384 +       s32  (*write_reg_locked)(struct e1000_hw *, u32, u16);
5385 +       void (*power_up)(struct e1000_hw *);
5386 +       void (*power_down)(struct e1000_hw *);
5387 +};
5388 +
5389 +struct e1000_nvm_operations {
5390 +       s32  (*init_params)(struct e1000_hw *);
5391 +       s32  (*acquire)(struct e1000_hw *);
5392 +       s32  (*read)(struct e1000_hw *, u16, u16, u16 *);
5393 +       void (*release)(struct e1000_hw *);
5394 +       void (*reload)(struct e1000_hw *);
5395 +       s32  (*update)(struct e1000_hw *);
5396 +       s32  (*valid_led_default)(struct e1000_hw *, u16 *);
5397 +       s32  (*validate)(struct e1000_hw *);
5398 +       s32  (*write)(struct e1000_hw *, u16, u16, u16 *);
5399 +};
5400 +
5401 +struct e1000_mac_info {
5402 +       struct e1000_mac_operations ops;
5403 +       u8 addr[6];
5404 +       u8 perm_addr[6];
5405 +
5406 +       enum e1000_mac_type type;
5407 +
5408 +       u32 collision_delta;
5409 +       u32 ledctl_default;
5410 +       u32 ledctl_mode1;
5411 +       u32 ledctl_mode2;
5412 +       u32 mc_filter_type;
5413 +       u32 tx_packet_delta;
5414 +       u32 txcw;
5415 +
5416 +       u16 current_ifs_val;
5417 +       u16 ifs_max_val;
5418 +       u16 ifs_min_val;
5419 +       u16 ifs_ratio;
5420 +       u16 ifs_step_size;
5421 +       u16 mta_reg_count;
5422 +       u16 uta_reg_count;
5423 +
5424 +       /* Maximum size of the MTA register table in all supported adapters */
5425 +       #define MAX_MTA_REG 128
5426 +       u32 mta_shadow[MAX_MTA_REG];
5427 +       u16 rar_entry_count;
5428 +
5429 +       u8  forced_speed_duplex;
5430 +
5431 +       bool adaptive_ifs;
5432 +       bool arc_subsystem_valid;
5433 +       bool asf_firmware_present;
5434 +       bool autoneg;
5435 +       bool autoneg_failed;
5436 +       bool get_link_status;
5437 +       bool in_ifs_mode;
5438 +       enum e1000_serdes_link_state serdes_link_state;
5439 +       bool serdes_has_link;
5440 +       bool tx_pkt_filtering;
5441 +};
5442 +
5443 +struct e1000_phy_info {
5444 +       struct e1000_phy_operations ops;
5445 +       enum e1000_phy_type type;
5446 +
5447 +       enum e1000_1000t_rx_status local_rx;
5448 +       enum e1000_1000t_rx_status remote_rx;
5449 +       enum e1000_ms_type ms_type;
5450 +       enum e1000_ms_type original_ms_type;
5451 +       enum e1000_rev_polarity cable_polarity;
5452 +       enum e1000_smart_speed smart_speed;
5453 +
5454 +       u32 addr;
5455 +       u32 id;
5456 +       u32 reset_delay_us; /* in usec */
5457 +       u32 revision;
5458 +
5459 +       enum e1000_media_type media_type;
5460 +
5461 +       u16 autoneg_advertised;
5462 +       u16 autoneg_mask;
5463 +       u16 cable_length;
5464 +       u16 max_cable_length;
5465 +       u16 min_cable_length;
5466 +
5467 +       u8 mdix;
5468 +
5469 +       bool disable_polarity_correction;
5470 +       bool is_mdix;
5471 +       bool polarity_correction;
5472 +       bool reset_disable;
5473 +       bool speed_downgraded;
5474 +       bool autoneg_wait_to_complete;
5475 +};
5476 +
5477 +struct e1000_nvm_info {
5478 +       struct e1000_nvm_operations ops;
5479 +       enum e1000_nvm_type type;
5480 +       enum e1000_nvm_override override;
5481 +
5482 +       u32 flash_bank_size;
5483 +       u32 flash_base_addr;
5484 +
5485 +       u16 word_size;
5486 +       u16 delay_usec;
5487 +       u16 address_bits;
5488 +       u16 opcode_bits;
5489 +       u16 page_size;
5490 +};
5491 +
5492 +struct e1000_bus_info {
5493 +       enum e1000_bus_type type;
5494 +       enum e1000_bus_speed speed;
5495 +       enum e1000_bus_width width;
5496 +
5497 +       u16 func;
5498 +       u16 pci_cmd_word;
5499 +};
5500 +
5501 +struct e1000_fc_info {
5502 +       u32 high_water;          /* Flow control high-water mark */
5503 +       u32 low_water;           /* Flow control low-water mark */
5504 +       u16 pause_time;          /* Flow control pause timer */
5505 +       bool send_xon;           /* Flow control send XON */
5506 +       bool strict_ieee;        /* Strict IEEE mode */
5507 +       enum e1000_fc_mode current_mode; /* FC mode in effect */
5508 +       enum e1000_fc_mode requested_mode; /* FC mode requested by caller */
5509 +};
5510 +
5511 +struct e1000_mbx_operations {
5512 +       s32 (*init_params)(struct e1000_hw *hw);
5513 +       s32 (*read)(struct e1000_hw *, u32 *, u16,  u16);
5514 +       s32 (*write)(struct e1000_hw *, u32 *, u16, u16);
5515 +       s32 (*read_posted)(struct e1000_hw *, u32 *, u16,  u16);
5516 +       s32 (*write_posted)(struct e1000_hw *, u32 *, u16, u16);
5517 +       s32 (*check_for_msg)(struct e1000_hw *, u16);
5518 +       s32 (*check_for_ack)(struct e1000_hw *, u16);
5519 +       s32 (*check_for_rst)(struct e1000_hw *, u16);
5520 +};
5521 +
5522 +struct e1000_mbx_stats {
5523 +       u32 msgs_tx;
5524 +       u32 msgs_rx;
5525 +
5526 +       u32 acks;
5527 +       u32 reqs;
5528 +       u32 rsts;
5529 +};
5530 +
5531 +struct e1000_mbx_info {
5532 +       struct e1000_mbx_operations ops;
5533 +       struct e1000_mbx_stats stats;
5534 +       u32 timeout;
5535 +       u32 usec_delay;
5536 +       u16 size;
5537 +};
5538 +
5539 +struct e1000_dev_spec_82575 {
5540 +       bool sgmii_active;
5541 +       bool global_device_reset;
5542 +};
5543 +
5544 +struct e1000_dev_spec_vf {
5545 +       u32     vf_number;
5546 +       u32     v2p_mailbox;
5547 +};
5548 +
5549 +
5550 +struct e1000_hw {
5551 +       void *back;
5552 +
5553 +       u8 __iomem *hw_addr;
5554 +       u8 __iomem *flash_address;
5555 +       unsigned long io_base;
5556 +
5557 +       struct e1000_mac_info  mac;
5558 +       struct e1000_fc_info   fc;
5559 +       struct e1000_phy_info  phy;
5560 +       struct e1000_nvm_info  nvm;
5561 +       struct e1000_bus_info  bus;
5562 +       struct e1000_mbx_info mbx;
5563 +       struct e1000_host_mng_dhcp_cookie mng_cookie;
5564 +
5565 +       union {
5566 +               struct e1000_dev_spec_82575     _82575;
5567 +               struct e1000_dev_spec_vf        vf;
5568 +       } dev_spec;
5569 +
5570 +       u16 device_id;
5571 +       u16 subsystem_vendor_id;
5572 +       u16 subsystem_device_id;
5573 +       u16 vendor_id;
5574 +
5575 +       u8  revision_id;
5576 +};
5577 +
5578 +#include "e1000_82575.h"
5579 +
5580 +/* These functions must be implemented by drivers */
5581 +s32  e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
5582 +s32  e1000_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
5583 +
5584 +#endif
5585 Index: linux-2.6.22/drivers/net/igb/e1000_mac.c
5586 ===================================================================
5587 --- /dev/null   1970-01-01 00:00:00.000000000 +0000
5588 +++ linux-2.6.22/drivers/net/igb/e1000_mac.c    2009-12-18 12:39:22.000000000 -0500
5589 @@ -0,0 +1,1985 @@
5590 +/*******************************************************************************
5591 +
5592 +  Intel(R) Gigabit Ethernet Linux driver
5593 +  Copyright(c) 2007-2009 Intel Corporation.
5594 +
5595 +  This program is free software; you can redistribute it and/or modify it
5596 +  under the terms and conditions of the GNU General Public License,
5597 +  version 2, as published by the Free Software Foundation.
5598 +
5599 +  This program is distributed in the hope it will be useful, but WITHOUT
5600 +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
5601 +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
5602 +  more details.
5603 +
5604 +  You should have received a copy of the GNU General Public License along with
5605 +  this program; if not, write to the Free Software Foundation, Inc.,
5606 +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
5607 +
5608 +  The full GNU General Public License is included in this distribution in
5609 +  the file called "COPYING".
5610 +
5611 +  Contact Information:
5612 +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
5613 +  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
5614 +
5615 +*******************************************************************************/
5616 +
5617 +#include "e1000_api.h"
5618 +
5619 +static s32 e1000_set_default_fc_generic(struct e1000_hw *hw);
5620 +static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw);
5621 +static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw);
5622 +static s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw);
5623 +static void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
5624 +
5625 +/**
5626 + *  e1000_init_mac_ops_generic - Initialize MAC function pointers
5627 + *  @hw: pointer to the HW structure
5628 + *
5629 + *  Setups up the function pointers to no-op functions
5630 + **/
5631 +void e1000_init_mac_ops_generic(struct e1000_hw *hw)
5632 +{
5633 +       struct e1000_mac_info *mac = &hw->mac;
5634 +       DEBUGFUNC("e1000_init_mac_ops_generic");
5635 +
5636 +       /* General Setup */
5637 +       mac->ops.set_lan_id = e1000_set_lan_id_multi_port_pcie;
5638 +       mac->ops.read_mac_addr = e1000_read_mac_addr_generic;
5639 +       mac->ops.config_collision_dist = e1000_config_collision_dist_generic;
5640 +       /* LINK */
5641 +       mac->ops.wait_autoneg = e1000_wait_autoneg_generic;
5642 +       /* Management */
5643 +       mac->ops.mng_host_if_write = e1000_mng_host_if_write_generic;
5644 +       mac->ops.mng_write_cmd_header = e1000_mng_write_cmd_header_generic;
5645 +       mac->ops.mng_enable_host_if = e1000_mng_enable_host_if_generic;
5646 +       /* VLAN, MC, etc. */
5647 +       mac->ops.rar_set = e1000_rar_set_generic;
5648 +       mac->ops.validate_mdi_setting = e1000_validate_mdi_setting_generic;
5649 +}
5650 +
5651 +/**
5652 + *  e1000_get_bus_info_pcie_generic - Get PCIe bus information
5653 + *  @hw: pointer to the HW structure
5654 + *
5655 + *  Determines and stores the system bus information for a particular
5656 + *  network interface.  The following bus information is determined and stored:
5657 + *  bus speed, bus width, type (PCIe), and PCIe function.
5658 + **/
5659 +s32 e1000_get_bus_info_pcie_generic(struct e1000_hw *hw)
5660 +{
5661 +       struct e1000_mac_info *mac = &hw->mac;
5662 +       struct e1000_bus_info *bus = &hw->bus;
5663 +
5664 +       s32 ret_val;
5665 +       u16 pcie_link_status;
5666 +
5667 +       DEBUGFUNC("e1000_get_bus_info_pcie_generic");
5668 +
5669 +       bus->type = e1000_bus_type_pci_express;
5670 +       bus->speed = e1000_bus_speed_2500;
5671 +
5672 +       ret_val = e1000_read_pcie_cap_reg(hw,
5673 +                                         PCIE_LINK_STATUS,
5674 +                                         &pcie_link_status);
5675 +       if (ret_val)
5676 +               bus->width = e1000_bus_width_unknown;
5677 +       else
5678 +               bus->width = (enum e1000_bus_width)((pcie_link_status &
5679 +                                               PCIE_LINK_WIDTH_MASK) >>
5680 +                                              PCIE_LINK_WIDTH_SHIFT);
5681 +
5682 +       mac->ops.set_lan_id(hw);
5683 +
5684 +       return E1000_SUCCESS;
5685 +}
5686 +
5687 +/**
5688 + *  e1000_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
5689 + *
5690 + *  @hw: pointer to the HW structure
5691 + *
5692 + *  Determines the LAN function id by reading memory-mapped registers
5693 + *  and swaps the port value if requested.
5694 + **/
5695 +static void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw)
5696 +{
5697 +       struct e1000_bus_info *bus = &hw->bus;
5698 +       u32 reg;
5699 +
5700 +       /*
5701 +        * The status register reports the correct function number
5702 +        * for the device regardless of function swap state.
5703 +        */
5704 +       reg = E1000_READ_REG(hw, E1000_STATUS);
5705 +       bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
5706 +}
5707 +
5708 +/**
5709 + *  e1000_set_lan_id_single_port - Set LAN id for a single port device
5710 + *  @hw: pointer to the HW structure
5711 + *
5712 + *  Sets the LAN function id to zero for a single port device.
5713 + **/
5714 +void e1000_set_lan_id_single_port(struct e1000_hw *hw)
5715 +{
5716 +       struct e1000_bus_info *bus = &hw->bus;
5717 +
5718 +       bus->func = 0;
5719 +}
5720 +
5721 +/**
5722 + *  e1000_clear_vfta_generic - Clear VLAN filter table
5723 + *  @hw: pointer to the HW structure
5724 + *
5725 + *  Clears the register array which contains the VLAN filter table by
5726 + *  setting all the values to 0.
5727 + **/
5728 +void e1000_clear_vfta_generic(struct e1000_hw *hw)
5729 +{
5730 +       u32 offset;
5731 +
5732 +       DEBUGFUNC("e1000_clear_vfta_generic");
5733 +
5734 +       for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
5735 +               E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0);
5736 +               E1000_WRITE_FLUSH(hw);
5737 +       }
5738 +}
5739 +
5740 +/**
5741 + *  e1000_write_vfta_generic - Write value to VLAN filter table
5742 + *  @hw: pointer to the HW structure
5743 + *  @offset: register offset in VLAN filter table
5744 + *  @value: register value written to VLAN filter table
5745 + *
5746 + *  Writes value at the given offset in the register array which stores
5747 + *  the VLAN filter table.
5748 + **/
5749 +void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value)
5750 +{
5751 +       DEBUGFUNC("e1000_write_vfta_generic");
5752 +
5753 +       E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
5754 +       E1000_WRITE_FLUSH(hw);
5755 +}
5756 +
5757 +/**
5758 + *  e1000_init_rx_addrs_generic - Initialize receive address's
5759 + *  @hw: pointer to the HW structure
5760 + *  @rar_count: receive address registers
5761 + *
5762 + *  Setups the receive address registers by setting the base receive address
5763 + *  register to the devices MAC address and clearing all the other receive
5764 + *  address registers to 0.
5765 + **/
5766 +void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count)
5767 +{
5768 +       u32 i;
5769 +       u8 mac_addr[ETH_ADDR_LEN] = {0};
5770 +
5771 +       DEBUGFUNC("e1000_init_rx_addrs_generic");
5772 +
5773 +       /* Setup the receive address */
5774 +       DEBUGOUT("Programming MAC Address into RAR[0]\n");
5775 +
5776 +       hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
5777 +
5778 +       /* Zero out the other (rar_entry_count - 1) receive addresses */
5779 +       DEBUGOUT1("Clearing RAR[1-%u]\n", rar_count-1);
5780 +       for (i = 1; i < rar_count; i++)
5781 +               hw->mac.ops.rar_set(hw, mac_addr, i);
5782 +}
5783 +
5784 +/**
5785 + *  e1000_check_alt_mac_addr_generic - Check for alternate MAC addr
5786 + *  @hw: pointer to the HW structure
5787 + *
5788 + *  Checks the nvm for an alternate MAC address.  An alternate MAC address
5789 + *  can be setup by pre-boot software and must be treated like a permanent
5790 + *  address and must override the actual permanent MAC address. If an
5791 + *  alternate MAC address is found it is programmed into RAR0, replacing
5792 + *  the permanent address that was installed into RAR0 by the Si on reset.
5793 + *  This function will return SUCCESS unless it encounters an error while
5794 + *  reading the EEPROM.
5795 + **/
5796 +s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
5797 +{
5798 +       u32 i;
5799 +       s32 ret_val = E1000_SUCCESS;
5800 +       u16 offset, nvm_alt_mac_addr_offset, nvm_data;
5801 +       u8 alt_mac_addr[ETH_ADDR_LEN];
5802 +
5803 +       DEBUGFUNC("e1000_check_alt_mac_addr_generic");
5804 +
5805 +       ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1,
5806 +                                &nvm_alt_mac_addr_offset);
5807 +       if (ret_val) {
5808 +               DEBUGOUT("NVM Read Error\n");
5809 +               goto out;
5810 +       }
5811 +
5812 +       if (nvm_alt_mac_addr_offset == 0xFFFF) {
5813 +               /* There is no Alternate MAC Address */
5814 +               goto out;
5815 +       }
5816 +
5817 +       if (hw->bus.func == E1000_FUNC_1)
5818 +               nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
5819 +       for (i = 0; i < ETH_ADDR_LEN; i += 2) {
5820 +               offset = nvm_alt_mac_addr_offset + (i >> 1);
5821 +               ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
5822 +               if (ret_val) {
5823 +                       DEBUGOUT("NVM Read Error\n");
5824 +                       goto out;
5825 +               }
5826 +
5827 +               alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
5828 +               alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
5829 +       }
5830 +
5831 +       /* if multicast bit is set, the alternate address will not be used */
5832 +       if (alt_mac_addr[0] & 0x01) {
5833 +               DEBUGOUT("Ignoring Alternate Mac Address with MC bit set\n");
5834 +               goto out;
5835 +       }
5836 +
5837 +       /*
5838 +        * We have a valid alternate MAC address, and we want to treat it the
5839 +        * same as the normal permanent MAC address stored by the HW into the
5840 +        * RAR. Do this by mapping this address into RAR0.
5841 +        */
5842 +       hw->mac.ops.rar_set(hw, alt_mac_addr, 0);
5843 +
5844 +out:
5845 +       return ret_val;
5846 +}
5847 +
5848 +/**
5849 + *  e1000_rar_set_generic - Set receive address register
5850 + *  @hw: pointer to the HW structure
5851 + *  @addr: pointer to the receive address
5852 + *  @index: receive address array register
5853 + *
5854 + *  Sets the receive address array register at index to the address passed
5855 + *  in by addr.
5856 + **/
5857 +void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
5858 +{
5859 +       u32 rar_low, rar_high;
5860 +
5861 +       DEBUGFUNC("e1000_rar_set_generic");
5862 +
5863 +       /*
5864 +        * HW expects these in little endian so we reverse the byte order
5865 +        * from network order (big endian) to little endian
5866 +        */
5867 +       rar_low = ((u32) addr[0] |
5868 +                  ((u32) addr[1] << 8) |
5869 +                  ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
5870 +
5871 +       rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
5872 +
5873 +       /* If MAC address zero, no need to set the AV bit */
5874 +       if (rar_low || rar_high)
5875 +               rar_high |= E1000_RAH_AV;
5876 +
5877 +       /*
5878 +        * Some bridges will combine consecutive 32-bit writes into
5879 +        * a single burst write, which will malfunction on some parts.
5880 +        * The flushes avoid this.
5881 +        */
5882 +       E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
5883 +       E1000_WRITE_FLUSH(hw);
5884 +       E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
5885 +       E1000_WRITE_FLUSH(hw);
5886 +}
5887 +
5888 +/**
5889 + *  e1000_mta_set_generic - Set multicast filter table address
5890 + *  @hw: pointer to the HW structure
5891 + *  @hash_value: determines the MTA register and bit to set
5892 + *
5893 + *  The multicast table address is a register array of 32-bit registers.
5894 + *  The hash_value is used to determine what register the bit is in, the
5895 + *  current value is read, the new bit is OR'd in and the new value is
5896 + *  written back into the register.
5897 + **/
5898 +void e1000_mta_set_generic(struct e1000_hw *hw, u32 hash_value)
5899 +{
5900 +       u32 hash_bit, hash_reg, mta;
5901 +
5902 +       DEBUGFUNC("e1000_mta_set_generic");
5903 +       /*
5904 +        * The MTA is a register array of 32-bit registers. It is
5905 +        * treated like an array of (32*mta_reg_count) bits.  We want to
5906 +        * set bit BitArray[hash_value]. So we figure out what register
5907 +        * the bit is in, read it, OR in the new bit, then write
5908 +        * back the new value.  The (hw->mac.mta_reg_count - 1) serves as a
5909 +        * mask to bits 31:5 of the hash value which gives us the
5910 +        * register we're modifying.  The hash bit within that register
5911 +        * is determined by the lower 5 bits of the hash value.
5912 +        */
5913 +       hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
5914 +       hash_bit = hash_value & 0x1F;
5915 +
5916 +       mta = E1000_READ_REG_ARRAY(hw, E1000_MTA, hash_reg);
5917 +
5918 +       mta |= (1 << hash_bit);
5919 +
5920 +       E1000_WRITE_REG_ARRAY(hw, E1000_MTA, hash_reg, mta);
5921 +       E1000_WRITE_FLUSH(hw);
5922 +}
5923 +
5924 +/**
5925 + *  e1000_update_mc_addr_list_generic - Update Multicast addresses
5926 + *  @hw: pointer to the HW structure
5927 + *  @mc_addr_list: array of multicast addresses to program
5928 + *  @mc_addr_count: number of multicast addresses to program
5929 + *
5930 + *  Updates entire Multicast Table Array.
5931 + *  The caller must have a packed mc_addr_list of multicast addresses.
5932 + **/
5933 +void e1000_update_mc_addr_list_generic(struct e1000_hw *hw,
5934 +                                       u8 *mc_addr_list, u32 mc_addr_count)
5935 +{
5936 +       u32 hash_value, hash_bit, hash_reg;
5937 +       int i;
5938 +
5939 +       DEBUGFUNC("e1000_update_mc_addr_list_generic");
5940 +
5941 +       /* clear mta_shadow */
5942 +       memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
5943 +
5944 +       /* update mta_shadow from mc_addr_list */
5945 +       for (i = 0; (u32) i < mc_addr_count; i++) {
5946 +               hash_value = e1000_hash_mc_addr_generic(hw, mc_addr_list);
5947 +
5948 +               hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
5949 +               hash_bit = hash_value & 0x1F;
5950 +
5951 +               hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
5952 +               mc_addr_list += (ETH_ADDR_LEN);
5953 +       }
5954 +
5955 +       /* replace the entire MTA table */
5956 +       for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
5957 +               E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]);
5958 +       E1000_WRITE_FLUSH(hw);
5959 +}
5960 +
5961 +/**
5962 + *  e1000_hash_mc_addr_generic - Generate a multicast hash value
5963 + *  @hw: pointer to the HW structure
5964 + *  @mc_addr: pointer to a multicast address
5965 + *
5966 + *  Generates a multicast address hash value which is used to determine
5967 + *  the multicast filter table array address and new table value.  See
5968 + *  e1000_mta_set_generic()
5969 + **/
5970 +u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr)
5971 +{
5972 +       u32 hash_value, hash_mask;
5973 +       u8 bit_shift = 0;
5974 +
5975 +       DEBUGFUNC("e1000_hash_mc_addr_generic");
5976 +
5977 +       /* Register count multiplied by bits per register */
5978 +       hash_mask = (hw->mac.mta_reg_count * 32) - 1;
5979 +
5980 +       /*
5981 +        * For a mc_filter_type of 0, bit_shift is the number of left-shifts
5982 +        * where 0xFF would still fall within the hash mask.
5983 +        */
5984 +       while (hash_mask >> bit_shift != 0xFF)
5985 +               bit_shift++;
5986 +
5987 +       /*
5988 +        * The portion of the address that is used for the hash table
5989 +        * is determined by the mc_filter_type setting.
5990 +        * The algorithm is such that there is a total of 8 bits of shifting.
5991 +        * The bit_shift for a mc_filter_type of 0 represents the number of
5992 +        * left-shifts where the MSB of mc_addr[5] would still fall within
5993 +        * the hash_mask.  Case 0 does this exactly.  Since there are a total
5994 +        * of 8 bits of shifting, then mc_addr[4] will shift right the
5995 +        * remaining number of bits. Thus 8 - bit_shift.  The rest of the
5996 +        * cases are a variation of this algorithm...essentially raising the
5997 +        * number of bits to shift mc_addr[5] left, while still keeping the
5998 +        * 8-bit shifting total.
5999 +        *
6000 +        * For example, given the following Destination MAC Address and an
6001 +        * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
6002 +        * we can see that the bit_shift for case 0 is 4.  These are the hash
6003 +        * values resulting from each mc_filter_type...
6004 +        * [0] [1] [2] [3] [4] [5]
6005 +        * 01  AA  00  12  34  56
6006 +        * LSB                 MSB
6007 +        *
6008 +        * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
6009 +        * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
6010 +        * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163
6011 +        * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
6012 +        */
6013 +       switch (hw->mac.mc_filter_type) {
6014 +       default:
6015 +       case 0:
6016 +               break;
6017 +       case 1:
6018 +               bit_shift += 1;
6019 +               break;
6020 +       case 2:
6021 +               bit_shift += 2;
6022 +               break;
6023 +       case 3:
6024 +               bit_shift += 4;
6025 +               break;
6026 +       }
6027 +
6028 +       hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
6029 +                                 (((u16) mc_addr[5]) << bit_shift)));
6030 +
6031 +       return hash_value;
6032 +}
6033 +
6034 +/**
6035 + *  e1000_clear_hw_cntrs_base_generic - Clear base hardware counters
6036 + *  @hw: pointer to the HW structure
6037 + *
6038 + *  Clears the base hardware counters by reading the counter registers.
6039 + **/
6040 +void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw)
6041 +{
6042 +       DEBUGFUNC("e1000_clear_hw_cntrs_base_generic");
6043 +
6044 +       E1000_READ_REG(hw, E1000_CRCERRS);
6045 +       E1000_READ_REG(hw, E1000_SYMERRS);
6046 +       E1000_READ_REG(hw, E1000_MPC);
6047 +       E1000_READ_REG(hw, E1000_SCC);
6048 +       E1000_READ_REG(hw, E1000_ECOL);
6049 +       E1000_READ_REG(hw, E1000_MCC);
6050 +       E1000_READ_REG(hw, E1000_LATECOL);
6051 +       E1000_READ_REG(hw, E1000_COLC);
6052 +       E1000_READ_REG(hw, E1000_DC);
6053 +       E1000_READ_REG(hw, E1000_SEC);
6054 +       E1000_READ_REG(hw, E1000_RLEC);
6055 +       E1000_READ_REG(hw, E1000_XONRXC);
6056 +       E1000_READ_REG(hw, E1000_XONTXC);
6057 +       E1000_READ_REG(hw, E1000_XOFFRXC);
6058 +       E1000_READ_REG(hw, E1000_XOFFTXC);
6059 +       E1000_READ_REG(hw, E1000_FCRUC);
6060 +       E1000_READ_REG(hw, E1000_GPRC);
6061 +       E1000_READ_REG(hw, E1000_BPRC);
6062 +       E1000_READ_REG(hw, E1000_MPRC);
6063 +       E1000_READ_REG(hw, E1000_GPTC);
6064 +       E1000_READ_REG(hw, E1000_GORCL);
6065 +       E1000_READ_REG(hw, E1000_GORCH);
6066 +       E1000_READ_REG(hw, E1000_GOTCL);
6067 +       E1000_READ_REG(hw, E1000_GOTCH);
6068 +       E1000_READ_REG(hw, E1000_RNBC);
6069 +       E1000_READ_REG(hw, E1000_RUC);
6070 +       E1000_READ_REG(hw, E1000_RFC);
6071 +       E1000_READ_REG(hw, E1000_ROC);
6072 +       E1000_READ_REG(hw, E1000_RJC);
6073 +       E1000_READ_REG(hw, E1000_TORL);
6074 +       E1000_READ_REG(hw, E1000_TORH);
6075 +       E1000_READ_REG(hw, E1000_TOTL);
6076 +       E1000_READ_REG(hw, E1000_TOTH);
6077 +       E1000_READ_REG(hw, E1000_TPR);
6078 +       E1000_READ_REG(hw, E1000_TPT);
6079 +       E1000_READ_REG(hw, E1000_MPTC);
6080 +       E1000_READ_REG(hw, E1000_BPTC);
6081 +}
6082 +
6083 +/**
6084 + *  e1000_check_for_copper_link_generic - Check for link (Copper)
6085 + *  @hw: pointer to the HW structure
6086 + *
6087 + *  Checks to see if the link status of the hardware has changed.  If a
6088 + *  change in link status has been detected, then we read the PHY registers
6089 + *  to get the current speed/duplex if link exists.
6090 + **/
6091 +s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw)
6092 +{
6093 +       struct e1000_mac_info *mac = &hw->mac;
6094 +       s32 ret_val;
6095 +       bool link;
6096 +
6097 +       DEBUGFUNC("e1000_check_for_copper_link");
6098 +
6099 +       /*
6100 +        * We only want to go out to the PHY registers to see if Auto-Neg
6101 +        * has completed and/or if our link status has changed.  The
6102 +        * get_link_status flag is set upon receiving a Link Status
6103 +        * Change or Rx Sequence Error interrupt.
6104 +        */
6105 +       if (!mac->get_link_status) {
6106 +               ret_val = E1000_SUCCESS;
6107 +               goto out;
6108 +       }
6109 +
6110 +       /*
6111 +        * First we want to see if the MII Status Register reports
6112 +        * link.  If so, then we want to get the current speed/duplex
6113 +        * of the PHY.
6114 +        */
6115 +       ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
6116 +       if (ret_val)
6117 +               goto out;
6118 +
6119 +       if (!link)
6120 +               goto out; /* No link detected */
6121 +
6122 +       mac->get_link_status = false;
6123 +
6124 +       /*
6125 +        * Check if there was DownShift, must be checked
6126 +        * immediately after link-up
6127 +        */
6128 +       e1000_check_downshift_generic(hw);
6129 +
6130 +       /*
6131 +        * If we are forcing speed/duplex, then we simply return since
6132 +        * we have already determined whether we have link or not.
6133 +        */
6134 +       if (!mac->autoneg) {
6135 +               ret_val = -E1000_ERR_CONFIG;
6136 +               goto out;
6137 +       }
6138 +
6139 +       /*
6140 +        * Auto-Neg is enabled.  Auto Speed Detection takes care
6141 +        * of MAC speed/duplex configuration.  So we only need to
6142 +        * configure Collision Distance in the MAC.
6143 +        */
6144 +       e1000_config_collision_dist_generic(hw);
6145 +
6146 +       /*
6147 +        * Configure Flow Control now that Auto-Neg has completed.
6148 +        * First, we need to restore the desired flow control
6149 +        * settings because we may have had to re-autoneg with a
6150 +        * different link partner.
6151 +        */
6152 +       ret_val = e1000_config_fc_after_link_up_generic(hw);
6153 +       if (ret_val)
6154 +               DEBUGOUT("Error configuring flow control\n");
6155 +
6156 +out:
6157 +       return ret_val;
6158 +}
6159 +
6160 +/**
6161 + *  e1000_check_for_fiber_link_generic - Check for link (Fiber)
6162 + *  @hw: pointer to the HW structure
6163 + *
6164 + *  Checks for link up on the hardware.  If link is not up and we have
6165 + *  a signal, then we need to force link up.
6166 + **/
6167 +s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw)
6168 +{
6169 +       struct e1000_mac_info *mac = &hw->mac;
6170 +       u32 rxcw;
6171 +       u32 ctrl;
6172 +       u32 status;
6173 +       s32 ret_val = E1000_SUCCESS;
6174 +
6175 +       DEBUGFUNC("e1000_check_for_fiber_link_generic");
6176 +
6177 +       ctrl = E1000_READ_REG(hw, E1000_CTRL);
6178 +       status = E1000_READ_REG(hw, E1000_STATUS);
6179 +       rxcw = E1000_READ_REG(hw, E1000_RXCW);
6180 +
6181 +       /*
6182 +        * If we don't have link (auto-negotiation failed or link partner
6183 +        * cannot auto-negotiate), the cable is plugged in (we have signal),
6184 +        * and our link partner is not trying to auto-negotiate with us (we
6185 +        * are receiving idles or data), we need to force link up. We also
6186 +        * need to give auto-negotiation time to complete, in case the cable
6187 +        * was just plugged in. The autoneg_failed flag does this.
6188 +        */
6189 +       /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
6190 +       if ((ctrl & E1000_CTRL_SWDPIN1) && (!(status & E1000_STATUS_LU)) &&
6191 +           (!(rxcw & E1000_RXCW_C))) {
6192 +               if (mac->autoneg_failed == 0) {
6193 +                       mac->autoneg_failed = 1;
6194 +                       goto out;
6195 +               }
6196 +               DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n");
6197 +
6198 +               /* Disable auto-negotiation in the TXCW register */
6199 +               E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE));
6200 +
6201 +               /* Force link-up and also force full-duplex. */
6202 +               ctrl = E1000_READ_REG(hw, E1000_CTRL);
6203 +               ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
6204 +               E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
6205 +
6206 +               /* Configure Flow Control after forcing link up. */
6207 +               ret_val = e1000_config_fc_after_link_up_generic(hw);
6208 +               if (ret_val) {
6209 +                       DEBUGOUT("Error configuring flow control\n");
6210 +                       goto out;
6211 +               }
6212 +       } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
6213 +               /*
6214 +                * If we are forcing link and we are receiving /C/ ordered
6215 +                * sets, re-enable auto-negotiation in the TXCW register
6216 +                * and disable forced link in the Device Control register
6217 +                * in an attempt to auto-negotiate with our link partner.
6218 +                */
6219 +               DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n");
6220 +               E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw);
6221 +               E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU));
6222 +
6223 +               mac->serdes_has_link = true;
6224 +       }
6225 +
6226 +out:
6227 +       return ret_val;
6228 +}
6229 +
6230 +/**
6231 + *  e1000_check_for_serdes_link_generic - Check for link (Serdes)
6232 + *  @hw: pointer to the HW structure
6233 + *
6234 + *  Checks for link up on the hardware.  If link is not up and we have
6235 + *  a signal, then we need to force link up.
6236 + **/
6237 +s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
6238 +{
6239 +       struct e1000_mac_info *mac = &hw->mac;
6240 +       u32 rxcw;
6241 +       u32 ctrl;
6242 +       u32 status;
6243 +       s32 ret_val = E1000_SUCCESS;
6244 +
6245 +       DEBUGFUNC("e1000_check_for_serdes_link_generic");
6246 +
6247 +       ctrl = E1000_READ_REG(hw, E1000_CTRL);
6248 +       status = E1000_READ_REG(hw, E1000_STATUS);
6249 +       rxcw = E1000_READ_REG(hw, E1000_RXCW);
6250 +
6251 +       /*
6252 +        * If we don't have link (auto-negotiation failed or link partner
6253 +        * cannot auto-negotiate), and our link partner is not trying to
6254 +        * auto-negotiate with us (we are receiving idles or data),
6255 +        * we need to force link up. We also need to give auto-negotiation
6256 +        * time to complete.
6257 +        */
6258 +       /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
6259 +       if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) {
6260 +               if (mac->autoneg_failed == 0) {
6261 +                       mac->autoneg_failed = 1;
6262 +                       goto out;
6263 +               }
6264 +               DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n");
6265 +
6266 +               /* Disable auto-negotiation in the TXCW register */
6267 +               E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE));
6268 +
6269 +               /* Force link-up and also force full-duplex. */
6270 +               ctrl = E1000_READ_REG(hw, E1000_CTRL);
6271 +               ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
6272 +               E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
6273 +
6274 +               /* Configure Flow Control after forcing link up. */
6275 +               ret_val = e1000_config_fc_after_link_up_generic(hw);
6276 +               if (ret_val) {
6277 +                       DEBUGOUT("Error configuring flow control\n");
6278 +                       goto out;
6279 +               }
6280 +       } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
6281 +               /*
6282 +                * If we are forcing link and we are receiving /C/ ordered
6283 +                * sets, re-enable auto-negotiation in the TXCW register
6284 +                * and disable forced link in the Device Control register
6285 +                * in an attempt to auto-negotiate with our link partner.
6286 +                */
6287 +               DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n");
6288 +               E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw);
6289 +               E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU));
6290 +
6291 +               mac->serdes_has_link = true;
6292 +       } else if (!(E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW))) {
6293 +               /*
6294 +                * If we force link for non-auto-negotiation switch, check
6295 +                * link status based on MAC synchronization for internal
6296 +                * serdes media type.
6297 +                */
6298 +               /* SYNCH bit and IV bit are sticky. */
6299 +               usec_delay(10);
6300 +               rxcw = E1000_READ_REG(hw, E1000_RXCW);
6301 +               if (rxcw & E1000_RXCW_SYNCH) {
6302 +                       if (!(rxcw & E1000_RXCW_IV)) {
6303 +                               mac->serdes_has_link = true;
6304 +                               DEBUGOUT("SERDES: Link up - forced.\n");
6305 +                       }
6306 +               } else {
6307 +                       mac->serdes_has_link = false;
6308 +                       DEBUGOUT("SERDES: Link down - force failed.\n");
6309 +               }
6310 +       }
6311 +
6312 +       if (E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW)) {
6313 +               status = E1000_READ_REG(hw, E1000_STATUS);
6314 +               if (status & E1000_STATUS_LU) {
6315 +                       /* SYNCH bit and IV bit are sticky, so reread rxcw. */
6316 +                       usec_delay(10);
6317 +                       rxcw = E1000_READ_REG(hw, E1000_RXCW);
6318 +                       if (rxcw & E1000_RXCW_SYNCH) {
6319 +                               if (!(rxcw & E1000_RXCW_IV)) {
6320 +                                       mac->serdes_has_link = true;
6321 +                                       DEBUGOUT("SERDES: Link up - autoneg "
6322 +                                          "completed successfully.\n");
6323 +                               } else {
6324 +                                       mac->serdes_has_link = false;
6325 +                                       DEBUGOUT("SERDES: Link down - invalid"
6326 +                                          " codewords detected in autoneg.\n");
6327 +                               }
6328 +                       } else {
6329 +                               mac->serdes_has_link = false;
6330 +                               DEBUGOUT("SERDES: Link down - no sync.\n");
6331 +                       }
6332 +               } else {
6333 +                       mac->serdes_has_link = false;
6334 +                       DEBUGOUT("SERDES: Link down - autoneg failed\n");
6335 +               }
6336 +       }
6337 +
6338 +out:
6339 +       return ret_val;
6340 +}
6341 +
6342 +/**
6343 + *  e1000_setup_link_generic - Setup flow control and link settings
6344 + *  @hw: pointer to the HW structure
6345 + *
6346 + *  Determines which flow control settings to use, then configures flow
6347 + *  control.  Calls the appropriate media-specific link configuration
6348 + *  function.  Assuming the adapter has a valid link partner, a valid link
6349 + *  should be established.  Assumes the hardware has previously been reset
6350 + *  and the transmitter and receiver are not enabled.
6351 + **/
6352 +s32 e1000_setup_link_generic(struct e1000_hw *hw)
6353 +{
6354 +       s32 ret_val = E1000_SUCCESS;
6355 +
6356 +       DEBUGFUNC("e1000_setup_link_generic");
6357 +
6358 +       /*
6359 +        * In the case of the phy reset being blocked, we already have a link.
6360 +        * We do not need to set it up again.
6361 +        */
6362 +       if (hw->phy.ops.check_reset_block)
6363 +               if (hw->phy.ops.check_reset_block(hw))
6364 +                       goto out;
6365 +
6366 +       /*
6367 +        * If requested flow control is set to default, set flow control
6368 +        * based on the EEPROM flow control settings.
6369 +        */
6370 +       if (hw->fc.requested_mode == e1000_fc_default) {
6371 +               ret_val = e1000_set_default_fc_generic(hw);
6372 +               if (ret_val)
6373 +                       goto out;
6374 +       }
6375 +
6376 +       /*
6377 +        * Save off the requested flow control mode for use later.  Depending
6378 +        * on the link partner's capabilities, we may or may not use this mode.
6379 +        */
6380 +       hw->fc.current_mode = hw->fc.requested_mode;
6381 +
6382 +       DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
6383 +               hw->fc.current_mode);
6384 +
6385 +       /* Call the necessary media_type subroutine to configure the link. */
6386 +       ret_val = hw->mac.ops.setup_physical_interface(hw);
6387 +       if (ret_val)
6388 +               goto out;
6389 +
6390 +       /*
6391 +        * Initialize the flow control address, type, and PAUSE timer
6392 +        * registers to their default values.  This is done even if flow
6393 +        * control is disabled, because it does not hurt anything to
6394 +        * initialize these registers.
6395 +        */
6396 +       DEBUGOUT("Initializing the Flow Control address, type and timer regs\n");
6397 +       E1000_WRITE_REG(hw, E1000_FCT, FLOW_CONTROL_TYPE);
6398 +       E1000_WRITE_REG(hw, E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
6399 +       E1000_WRITE_REG(hw, E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);
6400 +
6401 +       E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
6402 +
6403 +       ret_val = e1000_set_fc_watermarks_generic(hw);
6404 +
6405 +out:
6406 +       return ret_val;
6407 +}
6408 +
6409 +/**
6410 + *  e1000_setup_fiber_serdes_link_generic - Setup link for fiber/serdes
6411 + *  @hw: pointer to the HW structure
6412 + *
6413 + *  Configures collision distance and flow control for fiber and serdes
6414 + *  links.  Upon successful setup, poll for link.
6415 + **/
6416 +s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw)
6417 +{
6418 +       u32 ctrl;
6419 +       s32 ret_val = E1000_SUCCESS;
6420 +
6421 +       DEBUGFUNC("e1000_setup_fiber_serdes_link_generic");
6422 +
6423 +       ctrl = E1000_READ_REG(hw, E1000_CTRL);
6424 +
6425 +       /* Take the link out of reset */
6426 +       ctrl &= ~E1000_CTRL_LRST;
6427 +
6428 +       e1000_config_collision_dist_generic(hw);
6429 +
6430 +       ret_val = e1000_commit_fc_settings_generic(hw);
6431 +       if (ret_val)
6432 +               goto out;
6433 +
6434 +       /*
6435 +        * Since auto-negotiation is enabled, take the link out of reset (the
6436 +        * link will be in reset, because we previously reset the chip). This
6437 +        * will restart auto-negotiation.  If auto-negotiation is successful
6438 +        * then the link-up status bit will be set and the flow control enable
6439 +        * bits (RFCE and TFCE) will be set according to their negotiated value.
6440 +        */
6441 +       DEBUGOUT("Auto-negotiation enabled\n");
6442 +
6443 +       E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
6444 +       E1000_WRITE_FLUSH(hw);
6445 +       msec_delay(1);
6446 +
6447 +       /*
6448 +        * For these adapters, the SW definable pin 1 is set when the optics
6449 +        * detect a signal.  If we have a signal, then poll for a "Link-Up"
6450 +        * indication.
6451 +        */
6452 +       if (hw->phy.media_type == e1000_media_type_internal_serdes ||
6453 +           (E1000_READ_REG(hw, E1000_CTRL) & E1000_CTRL_SWDPIN1)) {
6454 +               ret_val = e1000_poll_fiber_serdes_link_generic(hw);
6455 +       } else {
6456 +               DEBUGOUT("No signal detected\n");
6457 +       }
6458 +
6459 +out:
6460 +       return ret_val;
6461 +}
6462 +
6463 +/**
6464 + *  e1000_config_collision_dist_generic - Configure collision distance
6465 + *  @hw: pointer to the HW structure
6466 + *
6467 + *  Configures the collision distance to the default value and is used
6468 + *  during link setup. Currently no func pointer exists and all
6469 + *  implementations are handled in the generic version of this function.
6470 + **/
6471 +void e1000_config_collision_dist_generic(struct e1000_hw *hw)
6472 +{
6473 +       u32 tctl;
6474 +
6475 +       DEBUGFUNC("e1000_config_collision_dist_generic");
6476 +
6477 +       tctl = E1000_READ_REG(hw, E1000_TCTL);
6478 +
6479 +       tctl &= ~E1000_TCTL_COLD;
6480 +       tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
6481 +
6482 +       E1000_WRITE_REG(hw, E1000_TCTL, tctl);
6483 +       E1000_WRITE_FLUSH(hw);
6484 +}
6485 +
6486 +/**
6487 + *  e1000_poll_fiber_serdes_link_generic - Poll for link up
6488 + *  @hw: pointer to the HW structure
6489 + *
6490 + *  Polls for link up by reading the status register, if link fails to come
6491 + *  up with auto-negotiation, then the link is forced if a signal is detected.
6492 + **/
6493 +static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
6494 +{
6495 +       struct e1000_mac_info *mac = &hw->mac;
6496 +       u32 i, status;
6497 +       s32 ret_val = E1000_SUCCESS;
6498 +
6499 +       DEBUGFUNC("e1000_poll_fiber_serdes_link_generic");
6500 +
6501 +       /*
6502 +        * If we have a signal (the cable is plugged in, or assumed true for
6503 +        * serdes media) then poll for a "Link-Up" indication in the Device
6504 +        * Status Register.  Time-out if a link isn't seen in 500 milliseconds
6505 +        * (Auto-negotiation should complete in less than 500 milliseconds even
6506 +        * if the other end is doing it in SW).
6507 +        */
6508 +       for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) {
6509 +               msec_delay(10);
6510 +               status = E1000_READ_REG(hw, E1000_STATUS);
6511 +               if (status & E1000_STATUS_LU)
6512 +                       break;
6513 +       }
6514 +       if (i == FIBER_LINK_UP_LIMIT) {
6515 +               DEBUGOUT("Never got a valid link from auto-neg!!!\n");
6516 +               mac->autoneg_failed = 1;
6517 +               /*
6518 +                * AutoNeg failed to achieve a link, so we'll call
6519 +                * mac->check_for_link. This routine will force the
6520 +                * link up if we detect a signal. This will allow us to
6521 +                * communicate with non-autonegotiating link partners.
6522 +                */
6523 +               ret_val = hw->mac.ops.check_for_link(hw);
6524 +               if (ret_val) {
6525 +                       DEBUGOUT("Error while checking for link\n");
6526 +                       goto out;
6527 +               }
6528 +               mac->autoneg_failed = 0;
6529 +       } else {
6530 +               mac->autoneg_failed = 0;
6531 +               DEBUGOUT("Valid Link Found\n");
6532 +       }
6533 +
6534 +out:
6535 +       return ret_val;
6536 +}
6537 +
6538 +/**
6539 + *  e1000_commit_fc_settings_generic - Configure flow control
6540 + *  @hw: pointer to the HW structure
6541 + *
6542 + *  Write the flow control settings to the Transmit Config Word Register (TXCW)
6543 + *  based on the flow control settings in e1000_mac_info.
6544 + **/
6545 +static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
6546 +{
6547 +       struct e1000_mac_info *mac = &hw->mac;
6548 +       u32 txcw;
6549 +       s32 ret_val = E1000_SUCCESS;
6550 +
6551 +       DEBUGFUNC("e1000_commit_fc_settings_generic");
6552 +
6553 +       /*
6554 +        * Check for a software override of the flow control settings, and
6555 +        * setup the device accordingly.  If auto-negotiation is enabled, then
6556 +        * software will have to set the "PAUSE" bits to the correct value in
6557 +        * the Transmit Config Word Register (TXCW) and re-start auto-
6558 +        * negotiation.  However, if auto-negotiation is disabled, then
6559 +        * software will have to manually configure the two flow control enable
6560 +        * bits in the CTRL register.
6561 +        *
6562 +        * The possible values of the "fc" parameter are:
6563 +        *      0:  Flow control is completely disabled
6564 +        *      1:  Rx flow control is enabled (we can receive pause frames,
6565 +        *          but not send pause frames).
6566 +        *      2:  Tx flow control is enabled (we can send pause frames but we
6567 +        *          do not support receiving pause frames).
6568 +        *      3:  Both Rx and Tx flow control (symmetric) are enabled.
6569 +        */
6570 +       switch (hw->fc.current_mode) {
6571 +       case e1000_fc_none:
6572 +               /* Flow control completely disabled by a software over-ride. */
6573 +               txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
6574 +               break;
6575 +       case e1000_fc_rx_pause:
6576 +               /*
6577 +                * Rx Flow control is enabled and Tx Flow control is disabled
6578 +                * by a software over-ride. Since there really isn't a way to
6579 +                * advertise that we are capable of Rx Pause ONLY, we will
6580 +                * advertise that we support both symmetric and asymmetric RX
6581 +                * PAUSE.  Later, we will disable the adapter's ability to send
6582 +                * PAUSE frames.
6583 +                */
6584 +               txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
6585 +               break;
6586 +       case e1000_fc_tx_pause:
6587 +               /*
6588 +                * Tx Flow control is enabled, and Rx Flow control is disabled,
6589 +                * by a software over-ride.
6590 +                */
6591 +               txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
6592 +               break;
6593 +       case e1000_fc_full:
6594 +               /*
6595 +                * Flow control (both Rx and Tx) is enabled by a software
6596 +                * over-ride.
6597 +                */
6598 +               txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
6599 +               break;
6600 +       default:
6601 +               DEBUGOUT("Flow control param set incorrectly\n");
6602 +               ret_val = -E1000_ERR_CONFIG;
6603 +               goto out;
6604 +               break;
6605 +       }
6606 +
6607 +       E1000_WRITE_REG(hw, E1000_TXCW, txcw);
6608 +       mac->txcw = txcw;
6609 +
6610 +out:
6611 +       return ret_val;
6612 +}
6613 +
6614 +/**
6615 + *  e1000_set_fc_watermarks_generic - Set flow control high/low watermarks
6616 + *  @hw: pointer to the HW structure
6617 + *
6618 + *  Sets the flow control high/low threshold (watermark) registers.  If
6619 + *  flow control XON frame transmission is enabled, then set XON frame
6620 + *  transmission as well.
6621 + **/
6622 +s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw)
6623 +{
6624 +       s32 ret_val = E1000_SUCCESS;
6625 +       u32 fcrtl = 0, fcrth = 0;
6626 +
6627 +       DEBUGFUNC("e1000_set_fc_watermarks_generic");
6628 +
6629 +       /*
6630 +        * Set the flow control receive threshold registers.  Normally,
6631 +        * these registers will be set to a default threshold that may be
6632 +        * adjusted later by the driver's runtime code.  However, if the
6633 +        * ability to transmit pause frames is not enabled, then these
6634 +        * registers will be set to 0.
6635 +        */
6636 +       if (hw->fc.current_mode & e1000_fc_tx_pause) {
6637 +               /*
6638 +                * We need to set up the Receive Threshold high and low water
6639 +                * marks as well as (optionally) enabling the transmission of
6640 +                * XON frames.
6641 +                */
6642 +               fcrtl = hw->fc.low_water;
6643 +               if (hw->fc.send_xon)
6644 +                       fcrtl |= E1000_FCRTL_XONE;
6645 +
6646 +               fcrth = hw->fc.high_water;
6647 +       }
6648 +       E1000_WRITE_REG(hw, E1000_FCRTL, fcrtl);
6649 +       E1000_WRITE_REG(hw, E1000_FCRTH, fcrth);
6650 +
6651 +       return ret_val;
6652 +}
6653 +
6654 +/**
6655 + *  e1000_set_default_fc_generic - Set flow control default values
6656 + *  @hw: pointer to the HW structure
6657 + *
6658 + *  Read the EEPROM for the default values for flow control and store the
6659 + *  values.
6660 + **/
6661 +static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
6662 +{
6663 +       s32 ret_val = E1000_SUCCESS;
6664 +       u16 nvm_data;
6665 +
6666 +       DEBUGFUNC("e1000_set_default_fc_generic");
6667 +
6668 +       /*
6669 +        * Read and store word 0x0F of the EEPROM. This word contains bits
6670 +        * that determine the hardware's default PAUSE (flow control) mode,
6671 +        * a bit that determines whether the HW defaults to enabling or
6672 +        * disabling auto-negotiation, and the direction of the
6673 +        * SW defined pins. If there is no SW over-ride of the flow
6674 +        * control setting, then the variable hw->fc will
6675 +        * be initialized based on a value in the EEPROM.
6676 +        */
6677 +       ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data);
6678 +
6679 +       if (ret_val) {
6680 +               DEBUGOUT("NVM Read Error\n");
6681 +               goto out;
6682 +       }
6683 +
6684 +       if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
6685 +               hw->fc.requested_mode = e1000_fc_none;
6686 +       else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
6687 +                NVM_WORD0F_ASM_DIR)
6688 +               hw->fc.requested_mode = e1000_fc_tx_pause;
6689 +       else
6690 +               hw->fc.requested_mode = e1000_fc_full;
6691 +
6692 +out:
6693 +       return ret_val;
6694 +}
6695 +
6696 +/**
6697 + *  e1000_force_mac_fc_generic - Force the MAC's flow control settings
6698 + *  @hw: pointer to the HW structure
6699 + *
6700 + *  Force the MAC's flow control settings.  Sets the TFCE and RFCE bits in the
6701 + *  device control register to reflect the adapter settings.  TFCE and RFCE
6702 + *  need to be explicitly set by software when a copper PHY is used because
6703 + *  autonegotiation is managed by the PHY rather than the MAC.  Software must
6704 + *  also configure these bits when link is forced on a fiber connection.
6705 + **/
6706 +s32 e1000_force_mac_fc_generic(struct e1000_hw *hw)
6707 +{
6708 +       u32 ctrl;
6709 +       s32 ret_val = E1000_SUCCESS;
6710 +
6711 +       DEBUGFUNC("e1000_force_mac_fc_generic");
6712 +
6713 +       ctrl = E1000_READ_REG(hw, E1000_CTRL);
6714 +
6715 +       /*
6716 +        * Because we didn't get link via the internal auto-negotiation
6717 +        * mechanism (we either forced link or we got link via PHY
6718 +        * auto-neg), we have to manually enable/disable transmit and
6719 +        * receive flow control.
6720 +        *
6721 +        * The "Case" statement below enables/disables flow control
6722 +        * according to the "hw->fc.current_mode" parameter.
6723 +        *
6724 +        * The possible values of the "fc" parameter are:
6725 +        *      0:  Flow control is completely disabled
6726 +        *      1:  Rx flow control is enabled (we can receive pause
6727 +        *          frames but not send pause frames).
6728 +        *      2:  Tx flow control is enabled (we can send pause frames
6729 +        *          but we do not receive pause frames).
6730 +        *      3:  Both Rx and Tx flow control (symmetric) is enabled.
6731 +        *  other:  No other values should be possible at this point.
6732 +        */
6733 +       DEBUGOUT1("hw->fc.current_mode = %u\n", hw->fc.current_mode);
6734 +
6735 +       switch (hw->fc.current_mode) {
6736 +       case e1000_fc_none:
6737 +               ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
6738 +               break;
6739 +       case e1000_fc_rx_pause:
6740 +               ctrl &= (~E1000_CTRL_TFCE);
6741 +               ctrl |= E1000_CTRL_RFCE;
6742 +               break;
6743 +       case e1000_fc_tx_pause:
6744 +               ctrl &= (~E1000_CTRL_RFCE);
6745 +               ctrl |= E1000_CTRL_TFCE;
6746 +               break;
6747 +       case e1000_fc_full:
6748 +               ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
6749 +               break;
6750 +       default:
6751 +               DEBUGOUT("Flow control param set incorrectly\n");
6752 +               ret_val = -E1000_ERR_CONFIG;
6753 +               goto out;
6754 +       }
6755 +
6756 +       E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
6757 +
6758 +out:
6759 +       return ret_val;
6760 +}
6761 +
6762 +/**
6763 + *  e1000_config_fc_after_link_up_generic - Configures flow control after link
6764 + *  @hw: pointer to the HW structure
6765 + *
6766 + *  Checks the status of auto-negotiation after link up to ensure that the
6767 + *  speed and duplex were not forced.  If the link needed to be forced, then
6768 + *  flow control needs to be forced also.  If auto-negotiation is enabled
6769 + *  and did not fail, then we configure flow control based on our link
6770 + *  partner.
6771 + **/
6772 +s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw)
6773 +{
6774 +       struct e1000_mac_info *mac = &hw->mac;
6775 +       s32 ret_val = E1000_SUCCESS;
6776 +       u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
6777 +       u16 speed, duplex;
6778 +
6779 +       DEBUGFUNC("e1000_config_fc_after_link_up_generic");
6780 +
6781 +       /*
6782 +        * Check for the case where we have fiber media and auto-neg failed
6783 +        * so we had to force link.  In this case, we need to force the
6784 +        * configuration of the MAC to match the "fc" parameter.
6785 +        */
6786 +       if (mac->autoneg_failed) {
6787 +               if (hw->phy.media_type == e1000_media_type_fiber ||
6788 +                   hw->phy.media_type == e1000_media_type_internal_serdes)
6789 +                       ret_val = e1000_force_mac_fc_generic(hw);
6790 +       } else {
6791 +               if (hw->phy.media_type == e1000_media_type_copper)
6792 +                       ret_val = e1000_force_mac_fc_generic(hw);
6793 +       }
6794 +
6795 +       if (ret_val) {
6796 +               DEBUGOUT("Error forcing flow control settings\n");
6797 +               goto out;
6798 +       }
6799 +
6800 +       /*
6801 +        * Check for the case where we have copper media and auto-neg is
6802 +        * enabled.  In this case, we need to check and see if Auto-Neg
6803 +        * has completed, and if so, how the PHY and link partner has
6804 +        * flow control configured.
6805 +        */
6806 +       if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
6807 +               /*
6808 +                * Read the MII Status Register and check to see if AutoNeg
6809 +                * has completed.  We read this twice because this reg has
6810 +                * some "sticky" (latched) bits.
6811 +                */
6812 +               ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg);
6813 +               if (ret_val)
6814 +                       goto out;
6815 +               ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg);
6816 +               if (ret_val)
6817 +                       goto out;
6818 +
6819 +               if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
6820 +                       DEBUGOUT("Copper PHY and Auto Neg "
6821 +                                "has not completed.\n");
6822 +                       goto out;
6823 +               }
6824 +
6825 +               /*
6826 +                * The AutoNeg process has completed, so we now need to
6827 +                * read both the Auto Negotiation Advertisement
6829 +                * Register (Address 4) and the Auto-Negotiation Base
6829 +                * Page Ability Register (Address 5) to determine how
6830 +                * flow control was negotiated.
6831 +                */
6832 +               ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
6833 +                                            &mii_nway_adv_reg);
6834 +               if (ret_val)
6835 +                       goto out;
6836 +               ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
6837 +                                            &mii_nway_lp_ability_reg);
6838 +               if (ret_val)
6839 +                       goto out;
6840 +
6841 +               /*
6842 +                * Two bits in the Auto Negotiation Advertisement Register
6843 +                * (Address 4) and two bits in the Auto Negotiation Base
6844 +                * Page Ability Register (Address 5) determine flow control
6845 +                * for both the PHY and the link partner.  The following
6846 +                * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
6847 +                * 1999, describes these PAUSE resolution bits and how flow
6848 +                * control is determined based upon these settings.
6849 +                * NOTE:  DC = Don't Care
6850 +                *
6851 +                *   LOCAL DEVICE  |   LINK PARTNER
6852 +                * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
6853 +                *-------|---------|-------|---------|--------------------
6854 +                *   0   |    0    |  DC   |   DC    | e1000_fc_none
6855 +                *   0   |    1    |   0   |   DC    | e1000_fc_none
6856 +                *   0   |    1    |   1   |    0    | e1000_fc_none
6857 +                *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
6858 +                *   1   |    0    |   0   |   DC    | e1000_fc_none
6859 +                *   1   |   DC    |   1   |   DC    | e1000_fc_full
6860 +                *   1   |    1    |   0   |    0    | e1000_fc_none
6861 +                *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
6862 +                *
6863 +                * Are both PAUSE bits set to 1?  If so, this implies
6864 +                * Symmetric Flow Control is enabled at both ends.  The
6865 +                * ASM_DIR bits are irrelevant per the spec.
6866 +                *
6867 +                * For Symmetric Flow Control:
6868 +                *
6869 +                *   LOCAL DEVICE  |   LINK PARTNER
6870 +                * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
6871 +                *-------|---------|-------|---------|--------------------
6872 +                *   1   |   DC    |   1   |   DC    | E1000_fc_full
6873 +                *
6874 +                */
6875 +               if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
6876 +                   (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
6877 +                       /*
6878 +                        * Now we need to check if the user selected Rx ONLY
6879 +                        * of pause frames.  In this case, we had to advertise
6880 +                        * FULL flow control because we could not advertise RX
6881 +                        * ONLY. Hence, we must now check to see if we need to
6882 +                        * turn OFF  the TRANSMISSION of PAUSE frames.
6883 +                        */
6884 +                       if (hw->fc.requested_mode == e1000_fc_full) {
6885 +                               hw->fc.current_mode = e1000_fc_full;
6886 +                               DEBUGOUT("Flow Control = FULL.\r\n");
6887 +                       } else {
6888 +                               hw->fc.current_mode = e1000_fc_rx_pause;
6889 +                               DEBUGOUT("Flow Control = "
6890 +                                        "RX PAUSE frames only.\r\n");
6891 +                       }
6892 +               }
6893 +               /*
6894 +                * For receiving PAUSE frames ONLY.
6895 +                *
6896 +                *   LOCAL DEVICE  |   LINK PARTNER
6897 +                * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
6898 +                *-------|---------|-------|---------|--------------------
6899 +                *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
6900 +                */
6901 +               else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
6902 +                         (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
6903 +                         (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
6904 +                         (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
6905 +                       hw->fc.current_mode = e1000_fc_tx_pause;
6906 +                       DEBUGOUT("Flow Control = TX PAUSE frames only.\r\n");
6907 +               }
6908 +               /*
6909 +                * For transmitting PAUSE frames ONLY.
6910 +                *
6911 +                *   LOCAL DEVICE  |   LINK PARTNER
6912 +                * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
6913 +                *-------|---------|-------|---------|--------------------
6914 +                *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
6915 +                */
6916 +               else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
6917 +                        (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
6918 +                        !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
6919 +                        (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
6920 +                       hw->fc.current_mode = e1000_fc_rx_pause;
6921 +                       DEBUGOUT("Flow Control = RX PAUSE frames only.\r\n");
6922 +               } else {
6923 +                       /*
6924 +                        * Per the IEEE spec, at this point flow control
6925 +                        * should be disabled.
6926 +                        */
6927 +                       hw->fc.current_mode = e1000_fc_none;
6928 +                       DEBUGOUT("Flow Control = NONE.\r\n");
6929 +               }
6930 +
6931 +               /*
6932 +                * Now we need to do one last check...  If we auto-
6933 +                * negotiated to HALF DUPLEX, flow control should not be
6934 +                * enabled per IEEE 802.3 spec.
6935 +                */
6936 +               ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex);
6937 +               if (ret_val) {
6938 +                       DEBUGOUT("Error getting link speed and duplex\n");
6939 +                       goto out;
6940 +               }
6941 +
6942 +               if (duplex == HALF_DUPLEX)
6943 +                       hw->fc.current_mode = e1000_fc_none;
6944 +
6945 +               /*
6946 +                * Now we call a subroutine to actually force the MAC
6947 +                * controller to use the correct flow control settings.
6948 +                */
6949 +               ret_val = e1000_force_mac_fc_generic(hw);
6950 +               if (ret_val) {
6951 +                       DEBUGOUT("Error forcing flow control settings\n");
6952 +                       goto out;
6953 +               }
6954 +       }
6955 +
6956 +out:
6957 +       return ret_val;
6958 +}
6959 +
6960 +/**
6961 + *  e1000_get_speed_and_duplex_copper_generic - Retrieve current speed/duplex
6962 + *  @hw: pointer to the HW structure
6963 + *  @speed: stores the current speed
6964 + *  @duplex: stores the current duplex
6965 + *
6966 + *  Read the status register for the current speed/duplex and store the current
6967 + *  speed and duplex for copper connections.
6968 + **/
6969 +s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed,
6970 +                                              u16 *duplex)
6971 +{
6972 +       u32 status;
6973 +
6974 +       DEBUGFUNC("e1000_get_speed_and_duplex_copper_generic");
6975 +
6976 +       status = E1000_READ_REG(hw, E1000_STATUS);
6977 +       if (status & E1000_STATUS_SPEED_1000) {
6978 +               *speed = SPEED_1000;
6979 +               DEBUGOUT("1000 Mbs, ");
6980 +       } else if (status & E1000_STATUS_SPEED_100) {
6981 +               *speed = SPEED_100;
6982 +               DEBUGOUT("100 Mbs, ");
6983 +       } else {
6984 +               *speed = SPEED_10;
6985 +               DEBUGOUT("10 Mbs, ");
6986 +       }
6987 +
6988 +       if (status & E1000_STATUS_FD) {
6989 +               *duplex = FULL_DUPLEX;
6990 +               DEBUGOUT("Full Duplex\n");
6991 +       } else {
6992 +               *duplex = HALF_DUPLEX;
6993 +               DEBUGOUT("Half Duplex\n");
6994 +       }
6995 +
6996 +       return E1000_SUCCESS;
6997 +}
6998 +
6999 +/**
7000 + *  e1000_get_speed_and_duplex_fiber_serdes_generic - Retrieve current speed/duplex
7001 + *  @hw: pointer to the HW structure
7002 + *  @speed: stores the current speed
7003 + *  @duplex: stores the current duplex
7004 + *
7005 + *  Sets the speed and duplex to gigabit full duplex (the only possible option)
7006 + *  for fiber/serdes links.
7007 + **/
7008 +s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw *hw,
7009 +                                                    u16 *speed, u16 *duplex)
7010 +{
7011 +       DEBUGFUNC("e1000_get_speed_and_duplex_fiber_serdes_generic");
7012 +
7013 +       *speed = SPEED_1000;
7014 +       *duplex = FULL_DUPLEX;
7015 +
7016 +       return E1000_SUCCESS;
7017 +}
7018 +
7019 +/**
7020 + *  e1000_get_hw_semaphore_generic - Acquire hardware semaphore
7021 + *  @hw: pointer to the HW structure
7022 + *
7023 + *  Acquire the HW semaphore to access the PHY or NVM
7024 + **/
7025 +s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw)
7026 +{
7027 +       u32 swsm;
7028 +       s32 ret_val = E1000_SUCCESS;
7029 +       s32 timeout = hw->nvm.word_size + 1;
7030 +       s32 i = 0;
7031 +
7032 +       DEBUGFUNC("e1000_get_hw_semaphore_generic");
7033 +
7034 +       /* Get the SW semaphore */
7035 +       while (i < timeout) {
7036 +               swsm = E1000_READ_REG(hw, E1000_SWSM);
7037 +               if (!(swsm & E1000_SWSM_SMBI))
7038 +                       break;
7039 +
7040 +               usec_delay(50);
7041 +               i++;
7042 +       }
7043 +
7044 +       if (i == timeout) {
7045 +               DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
7046 +               ret_val = -E1000_ERR_NVM;
7047 +               goto out;
7048 +       }
7049 +
7050 +       /* Get the FW semaphore. */
7051 +       for (i = 0; i < timeout; i++) {
7052 +               swsm = E1000_READ_REG(hw, E1000_SWSM);
7053 +               E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
7054 +
7055 +               /* Semaphore acquired if bit latched */
7056 +               if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI)
7057 +                       break;
7058 +
7059 +               usec_delay(50);
7060 +       }
7061 +
7062 +       if (i == timeout) {
7063 +               /* Release semaphores */
7064 +               e1000_put_hw_semaphore_generic(hw);
7065 +               DEBUGOUT("Driver can't access the NVM\n");
7066 +               ret_val = -E1000_ERR_NVM;
7067 +               goto out;
7068 +       }
7069 +
7070 +out:
7071 +       return ret_val;
7072 +}
7073 +
7074 +/**
7075 + *  e1000_put_hw_semaphore_generic - Release hardware semaphore
7076 + *  @hw: pointer to the HW structure
7077 + *
7078 + *  Release hardware semaphore used to access the PHY or NVM
7079 + **/
7080 +void e1000_put_hw_semaphore_generic(struct e1000_hw *hw)
7081 +{
7082 +       u32 swsm;
7083 +
7084 +       DEBUGFUNC("e1000_put_hw_semaphore_generic");
7085 +
7086 +       swsm = E1000_READ_REG(hw, E1000_SWSM);
7087 +
7088 +       swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
7089 +
7090 +       E1000_WRITE_REG(hw, E1000_SWSM, swsm);
7091 +}
7092 +
7093 +/**
7094 + *  e1000_get_auto_rd_done_generic - Check for auto read completion
7095 + *  @hw: pointer to the HW structure
7096 + *
7097 + *  Check EEPROM for Auto Read done bit.
7098 + **/
7099 +s32 e1000_get_auto_rd_done_generic(struct e1000_hw *hw)
7100 +{
7101 +       s32 i = 0;
7102 +       s32 ret_val = E1000_SUCCESS;
7103 +
7104 +       DEBUGFUNC("e1000_get_auto_rd_done_generic");
7105 +
7106 +       while (i < AUTO_READ_DONE_TIMEOUT) {
7107 +               if (E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_AUTO_RD)
7108 +                       break;
7109 +               msec_delay(1);
7110 +               i++;
7111 +       }
7112 +
7113 +       if (i == AUTO_READ_DONE_TIMEOUT) {
7114 +               DEBUGOUT("Auto read by HW from NVM has not completed.\n");
7115 +               ret_val = -E1000_ERR_RESET;
7116 +               goto out;
7117 +       }
7118 +
7119 +out:
7120 +       return ret_val;
7121 +}
7122 +
7123 +/**
7124 + *  e1000_valid_led_default_generic - Verify a valid default LED config
7125 + *  @hw: pointer to the HW structure
7126 + *  @data: pointer to the NVM (EEPROM)
7127 + *
7128 + *  Read the EEPROM for the current default LED configuration.  If the
7129 + *  LED configuration is not valid, set to a valid LED configuration.
7130 + **/
7131 +s32 e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data)
7132 +{
7133 +       s32 ret_val;
7134 +
7135 +       DEBUGFUNC("e1000_valid_led_default_generic");
7136 +
7137 +       ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
7138 +       if (ret_val) {
7139 +               DEBUGOUT("NVM Read Error\n");
7140 +               goto out;
7141 +       }
7142 +
7143 +       if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
7144 +               *data = ID_LED_DEFAULT;
7145 +
7146 +out:
7147 +       return ret_val;
7148 +}
7149 +
7150 +/**
7151 + *  e1000_id_led_init_generic - Derive LED control modes from the NVM default
7152 + *  @hw: pointer to the HW structure
7153 + *
7154 + **/
7155 +s32 e1000_id_led_init_generic(struct e1000_hw *hw)
7156 +{
7157 +       struct e1000_mac_info *mac = &hw->mac;
7158 +       s32 ret_val;
7159 +       const u32 ledctl_mask = 0x000000FF;
7160 +       const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
7161 +       const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
7162 +       u16 data, i, temp;
7163 +       const u16 led_mask = 0x0F;
7164 +
7165 +       DEBUGFUNC("e1000_id_led_init_generic");
7166 +
7167 +       ret_val = hw->nvm.ops.valid_led_default(hw, &data);
7168 +       if (ret_val)
7169 +               goto out;
7170 +
7171 +       mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
7172 +       mac->ledctl_mode1 = mac->ledctl_default;
7173 +       mac->ledctl_mode2 = mac->ledctl_default;
7174 +
7175 +       for (i = 0; i < 4; i++) {
7176 +               temp = (data >> (i << 2)) & led_mask;
7177 +               switch (temp) {
7178 +               case ID_LED_ON1_DEF2:
7179 +               case ID_LED_ON1_ON2:
7180 +               case ID_LED_ON1_OFF2:
7181 +                       mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
7182 +                       mac->ledctl_mode1 |= ledctl_on << (i << 3);
7183 +                       break;
7184 +               case ID_LED_OFF1_DEF2:
7185 +               case ID_LED_OFF1_ON2:
7186 +               case ID_LED_OFF1_OFF2:
7187 +                       mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
7188 +                       mac->ledctl_mode1 |= ledctl_off << (i << 3);
7189 +                       break;
7190 +               default:
7191 +                       /* Do nothing */
7192 +                       break;
7193 +               }
7194 +               switch (temp) {
7195 +               case ID_LED_DEF1_ON2:
7196 +               case ID_LED_ON1_ON2:
7197 +               case ID_LED_OFF1_ON2:
7198 +                       mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
7199 +                       mac->ledctl_mode2 |= ledctl_on << (i << 3);
7200 +                       break;
7201 +               case ID_LED_DEF1_OFF2:
7202 +               case ID_LED_ON1_OFF2:
7203 +               case ID_LED_OFF1_OFF2:
7204 +                       mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
7205 +                       mac->ledctl_mode2 |= ledctl_off << (i << 3);
7206 +                       break;
7207 +               default:
7208 +                       /* Do nothing */
7209 +                       break;
7210 +               }
7211 +       }
7212 +
7213 +out:
7214 +       return ret_val;
7215 +}
7216 +
7217 +/**
7218 + *  e1000_setup_led_generic - Configures SW controllable LED
7219 + *  @hw: pointer to the HW structure
7220 + *
7221 + *  This prepares the SW controllable LED for use and saves the current state
7222 + *  of the LED so it can be later restored.
7223 + **/
7224 +s32 e1000_setup_led_generic(struct e1000_hw *hw)
7225 +{
7226 +       u32 ledctl;
7227 +       s32 ret_val = E1000_SUCCESS;
7228 +
7229 +       DEBUGFUNC("e1000_setup_led_generic");
7230 +
7231 +       if (hw->mac.ops.setup_led != e1000_setup_led_generic) {
7232 +               ret_val = -E1000_ERR_CONFIG;
7233 +               goto out;
7234 +       }
7235 +
7236 +       if (hw->phy.media_type == e1000_media_type_fiber) {
7237 +               ledctl = E1000_READ_REG(hw, E1000_LEDCTL);
7238 +               hw->mac.ledctl_default = ledctl;
7239 +               /* Turn off LED0 */
7240 +               ledctl &= ~(E1000_LEDCTL_LED0_IVRT |
7241 +                           E1000_LEDCTL_LED0_BLINK |
7242 +                           E1000_LEDCTL_LED0_MODE_MASK);
7243 +               ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
7244 +                          E1000_LEDCTL_LED0_MODE_SHIFT);
7245 +               E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl);
7246 +       } else if (hw->phy.media_type == e1000_media_type_copper) {
7247 +               E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
7248 +       }
7249 +
7250 +out:
7251 +       return ret_val;
7252 +}
7253 +
7254 +/**
7255 + *  e1000_cleanup_led_generic - Set LED config to default operation
7256 + *  @hw: pointer to the HW structure
7257 + *
7258 + *  Remove the current LED configuration and set the LED configuration
7259 + *  to the default value, saved from the EEPROM.
7260 + **/
7261 +s32 e1000_cleanup_led_generic(struct e1000_hw *hw)
7262 +{
7263 +       s32 ret_val = E1000_SUCCESS;
7264 +
7265 +       DEBUGFUNC("e1000_cleanup_led_generic");
7266 +
7267 +       if (hw->mac.ops.cleanup_led != e1000_cleanup_led_generic) {
7268 +               ret_val = -E1000_ERR_CONFIG;
7269 +               goto out;
7270 +       }
7271 +
7272 +       E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
7273 +
7274 +out:
7275 +       return ret_val;
7276 +}
7277 +
7278 +/**
7279 + *  e1000_blink_led_generic - Blink LED
7280 + *  @hw: pointer to the HW structure
7281 + *
7282 + *  Blink the LEDs which are set to be on.
7283 + **/
7284 +s32 e1000_blink_led_generic(struct e1000_hw *hw)
7285 +{
7286 +       u32 ledctl_blink = 0;
7287 +       u32 i;
7288 +
7289 +       DEBUGFUNC("e1000_blink_led_generic");
7290 +
7291 +       if (hw->phy.media_type == e1000_media_type_fiber) {
7292 +               /* always blink LED0 for PCI-E fiber */
7293 +               ledctl_blink = E1000_LEDCTL_LED0_BLINK |
7294 +                    (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
7295 +       } else {
7296 +               /*
7297 +                * set the blink bit for each LED that's "on" (0x0E)
7298 +                * in ledctl_mode2
7299 +                */
7300 +               ledctl_blink = hw->mac.ledctl_mode2;
7301 +               for (i = 0; i < 4; i++)
7302 +                       if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
7303 +                           E1000_LEDCTL_MODE_LED_ON)
7304 +                               ledctl_blink |= (E1000_LEDCTL_LED0_BLINK <<
7305 +                                                (i * 8));
7306 +       }
7307 +
7308 +       E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl_blink);
7309 +
7310 +       return E1000_SUCCESS;
7311 +}
7312 +
7313 +/**
7314 + *  e1000_led_on_generic - Turn LED on
7315 + *  @hw: pointer to the HW structure
7316 + *
7317 + *  Turn LED on.
7318 + **/
7319 +s32 e1000_led_on_generic(struct e1000_hw *hw)
7320 +{
7321 +       u32 ctrl;
7322 +
7323 +       DEBUGFUNC("e1000_led_on_generic");
7324 +
7325 +       switch (hw->phy.media_type) {
7326 +       case e1000_media_type_fiber:
7327 +               ctrl = E1000_READ_REG(hw, E1000_CTRL);
7328 +               ctrl &= ~E1000_CTRL_SWDPIN0;
7329 +               ctrl |= E1000_CTRL_SWDPIO0;
7330 +               E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
7331 +               break;
7332 +       case e1000_media_type_copper:
7333 +               E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
7334 +               break;
7335 +       default:
7336 +               break;
7337 +       }
7338 +
7339 +       return E1000_SUCCESS;
7340 +}
7341 +
7342 +/**
7343 + *  e1000_led_off_generic - Turn LED off
7344 + *  @hw: pointer to the HW structure
7345 + *
7346 + *  Turn LED off.
7347 + **/
7348 +s32 e1000_led_off_generic(struct e1000_hw *hw)
7349 +{
7350 +       u32 ctrl;
7351 +
7352 +       DEBUGFUNC("e1000_led_off_generic");
7353 +
7354 +       switch (hw->phy.media_type) {
7355 +       case e1000_media_type_fiber:
7356 +               ctrl = E1000_READ_REG(hw, E1000_CTRL);
7357 +               ctrl |= E1000_CTRL_SWDPIN0;
7358 +               ctrl |= E1000_CTRL_SWDPIO0;
7359 +               E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
7360 +               break;
7361 +       case e1000_media_type_copper:
7362 +               E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
7363 +               break;
7364 +       default:
7365 +               break;
7366 +       }
7367 +
7368 +       return E1000_SUCCESS;
7369 +}
7370 +
7371 +/**
7372 + *  e1000_set_pcie_no_snoop_generic - Set PCI-express capabilities
7373 + *  @hw: pointer to the HW structure
7374 + *  @no_snoop: bitmap of snoop events
7375 + *
7376 + *  Set the PCI-express register to snoop for events enabled in 'no_snoop'.
7377 + **/
7378 +void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop)
7379 +{
7380 +       u32 gcr;
7381 +
7382 +       DEBUGFUNC("e1000_set_pcie_no_snoop_generic");
7383 +
7384 +       if (hw->bus.type != e1000_bus_type_pci_express)
7385 +               goto out;
7386 +
7387 +       if (no_snoop) {
7388 +               gcr = E1000_READ_REG(hw, E1000_GCR);
7389 +               gcr &= ~(PCIE_NO_SNOOP_ALL);
7390 +               gcr |= no_snoop;
7391 +               E1000_WRITE_REG(hw, E1000_GCR, gcr);
7392 +       }
7393 +out:
7394 +       return;
7395 +}
7396 +
7397 +/**
7398 + *  e1000_disable_pcie_master_generic - Disables PCI-express master access
7399 + *  @hw: pointer to the HW structure
7400 + *
7401 + *  Returns 0 (E1000_SUCCESS) if successful, else returns -10
7402 + *  (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused
7403 + *  the master requests to be disabled.
7404 + *
7405 + *  Disables PCI-Express master access and verifies there are no pending
7406 + *  requests.
7407 + **/
7408 +s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw)
7409 +{
7410 +       u32 ctrl;
7411 +       s32 timeout = MASTER_DISABLE_TIMEOUT;
7412 +       s32 ret_val = E1000_SUCCESS;
7413 +
7414 +       DEBUGFUNC("e1000_disable_pcie_master_generic");
7415 +
7416 +       if (hw->bus.type != e1000_bus_type_pci_express)
7417 +               goto out;
7418 +
7419 +       ctrl = E1000_READ_REG(hw, E1000_CTRL);
7420 +       ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
7421 +       E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
7422 +
7423 +       while (timeout) {
7424 +               if (!(E1000_READ_REG(hw, E1000_STATUS) &
7425 +                     E1000_STATUS_GIO_MASTER_ENABLE))
7426 +                       break;
7427 +               usec_delay(100);
7428 +               timeout--;
7429 +       }
7430 +
7431 +       if (!timeout) {
7432 +               DEBUGOUT("Master requests are pending.\n");
7433 +               ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING;
7434 +               goto out;
7435 +       }
7436 +
7437 +out:
7438 +       return ret_val;
7439 +}
7440 +
7441 +/**
7442 + *  e1000_reset_adaptive_generic - Reset Adaptive Interframe Spacing
7443 + *  @hw: pointer to the HW structure
7444 + *
7445 + *  Reset the Adaptive Interframe Spacing throttle to default values.
7446 + **/
7447 +void e1000_reset_adaptive_generic(struct e1000_hw *hw)
7448 +{
7449 +       struct e1000_mac_info *mac = &hw->mac;
7450 +
7451 +       DEBUGFUNC("e1000_reset_adaptive_generic");
7452 +
7453 +       if (!mac->adaptive_ifs) {
7454 +               DEBUGOUT("Not in Adaptive IFS mode!\n");
7455 +               goto out;
7456 +       }
7457 +
7458 +       mac->current_ifs_val = 0;
7459 +       mac->ifs_min_val = IFS_MIN;
7460 +       mac->ifs_max_val = IFS_MAX;
7461 +       mac->ifs_step_size = IFS_STEP;
7462 +       mac->ifs_ratio = IFS_RATIO;
7463 +
7464 +       mac->in_ifs_mode = false;
7465 +       E1000_WRITE_REG(hw, E1000_AIT, 0);
7466 +out:
7467 +       return;
7468 +}
7469 +
7470 +/**
7471 + *  e1000_update_adaptive_generic - Update Adaptive Interframe Spacing
7472 + *  @hw: pointer to the HW structure
7473 + *
7474 + *  Update the Adaptive Interframe Spacing Throttle value based on the
7475 + *  time between transmitted packets and time between collisions.
7476 + **/
7477 +void e1000_update_adaptive_generic(struct e1000_hw *hw)
7478 +{
7479 +       struct e1000_mac_info *mac = &hw->mac;
7480 +
7481 +       DEBUGFUNC("e1000_update_adaptive_generic");
7482 +
7483 +       if (!mac->adaptive_ifs) {
7484 +               DEBUGOUT("Not in Adaptive IFS mode!\n");
7485 +               goto out;
7486 +       }
7487 +
7488 +       if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
7489 +               if (mac->tx_packet_delta > MIN_NUM_XMITS) {
7490 +                       mac->in_ifs_mode = true;
7491 +                       if (mac->current_ifs_val < mac->ifs_max_val) {
7492 +                               if (!mac->current_ifs_val)
7493 +                                       mac->current_ifs_val = mac->ifs_min_val;
7494 +                               else
7495 +                                       mac->current_ifs_val +=
7496 +                                               mac->ifs_step_size;
7497 +                               E1000_WRITE_REG(hw, E1000_AIT, mac->current_ifs_val);
7498 +                       }
7499 +               }
7500 +       } else {
7501 +               if (mac->in_ifs_mode &&
7502 +                   (mac->tx_packet_delta <= MIN_NUM_XMITS)) {
7503 +                       mac->current_ifs_val = 0;
7504 +                       mac->in_ifs_mode = false;
7505 +                       E1000_WRITE_REG(hw, E1000_AIT, 0);
7506 +               }
7507 +       }
7508 +out:
7509 +       return;
7510 +}
7511 +
7512 +/**
7513 + *  e1000_validate_mdi_setting_generic - Verify MDI/MDIx settings
7514 + *  @hw: pointer to the HW structure
7515 + *
7516 + *  Verify that, when not using auto-negotiation, MDI/MDIx is correctly
7517 + *  set, which is forced to MDI mode only.
7518 + **/
7519 +static s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw)
7520 +{
7521 +       s32 ret_val = E1000_SUCCESS;
7522 +
7523 +       DEBUGFUNC("e1000_validate_mdi_setting_generic");
7524 +
7525 +       if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
7526 +               DEBUGOUT("Invalid MDI setting detected\n");
7527 +               hw->phy.mdix = 1;
7528 +               ret_val = -E1000_ERR_CONFIG;
7529 +               goto out;
7530 +       }
7531 +
7532 +out:
7533 +       return ret_val;
7534 +}
7535 +
7536 +/**
7537 + *  e1000_write_8bit_ctrl_reg_generic - Write an 8-bit CTRL register
7538 + *  @hw: pointer to the HW structure
7539 + *  @reg: 32bit register offset such as E1000_SCTL
7540 + *  @offset: register offset to write to
7541 + *  @data: data to write at register offset
7542 + *
7543 + *  Writes an address/data control type register.  There are several of these
7544 + *  and they all have the format address << 8 | data and bit 31 is polled for
7545 + *  completion.
7546 + **/
7547 +s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg,
7548 +                                      u32 offset, u8 data)
7549 +{
7550 +       u32 i, regvalue = 0;
7551 +       s32 ret_val = E1000_SUCCESS;
7552 +
7553 +       DEBUGFUNC("e1000_write_8bit_ctrl_reg_generic");
7554 +
7555 +       /* Set up the address and data */
7556 +       regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT);
7557 +       E1000_WRITE_REG(hw, reg, regvalue);
7558 +
7559 +       /* Poll the ready bit to see if the write completed */
7560 +       for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
7561 +               usec_delay(5);
7562 +               regvalue = E1000_READ_REG(hw, reg);
7563 +               if (regvalue & E1000_GEN_CTL_READY)
7564 +                       break;
7565 +       }
7566 +       if (!(regvalue & E1000_GEN_CTL_READY)) {
7567 +               DEBUGOUT1("Reg %08x did not indicate ready\n", reg);
7568 +               ret_val = -E1000_ERR_PHY;
7569 +               goto out;
7570 +       }
7571 +
7572 +out:
7573 +       return ret_val;
7574 +}
7575 Index: linux-2.6.22/drivers/net/igb/e1000_mac.h
7576 ===================================================================
7577 --- /dev/null   1970-01-01 00:00:00.000000000 +0000
7578 +++ linux-2.6.22/drivers/net/igb/e1000_mac.h    2009-12-18 12:39:22.000000000 -0500
7579 @@ -0,0 +1,80 @@
7580 +/*******************************************************************************
7581 +
7582 +  Intel(R) Gigabit Ethernet Linux driver
7583 +  Copyright(c) 2007-2009 Intel Corporation.
7584 +
7585 +  This program is free software; you can redistribute it and/or modify it
7586 +  under the terms and conditions of the GNU General Public License,
7587 +  version 2, as published by the Free Software Foundation.
7588 +
7589 +  This program is distributed in the hope it will be useful, but WITHOUT
7590 +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
7591 +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
7592 +  more details.
7593 +
7594 +  You should have received a copy of the GNU General Public License along with
7595 +  this program; if not, write to the Free Software Foundation, Inc.,
7596 +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
7597 +
7598 +  The full GNU General Public License is included in this distribution in
7599 +  the file called "COPYING".
7600 +
7601 +  Contact Information:
7602 +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
7603 +  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
7604 +
7605 +*******************************************************************************/
7606 +
7607 +#ifndef _E1000_MAC_H_
7608 +#define _E1000_MAC_H_
7609 +
7610 +/*
7611 + * Functions that should not be called directly from drivers but can be used
7612 + * by other files in this 'shared code'
7613 + */
7614 +void e1000_init_mac_ops_generic(struct e1000_hw *hw);
7615 +s32  e1000_blink_led_generic(struct e1000_hw *hw);
7616 +s32  e1000_check_for_copper_link_generic(struct e1000_hw *hw);
7617 +s32  e1000_check_for_fiber_link_generic(struct e1000_hw *hw);
7618 +s32  e1000_check_for_serdes_link_generic(struct e1000_hw *hw);
7619 +s32  e1000_cleanup_led_generic(struct e1000_hw *hw);
7620 +s32  e1000_config_fc_after_link_up_generic(struct e1000_hw *hw);
7621 +s32  e1000_disable_pcie_master_generic(struct e1000_hw *hw);
7622 +s32  e1000_force_mac_fc_generic(struct e1000_hw *hw);
7623 +s32  e1000_get_auto_rd_done_generic(struct e1000_hw *hw);
7624 +s32  e1000_get_bus_info_pcie_generic(struct e1000_hw *hw);
7625 +void e1000_set_lan_id_single_port(struct e1000_hw *hw);
7626 +s32  e1000_get_hw_semaphore_generic(struct e1000_hw *hw);
7627 +s32  e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed,
7628 +                                               u16 *duplex);
7629 +s32  e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw *hw,
7630 +                                                     u16 *speed, u16 *duplex);
7631 +s32  e1000_id_led_init_generic(struct e1000_hw *hw);
7632 +s32  e1000_led_on_generic(struct e1000_hw *hw);
7633 +s32  e1000_led_off_generic(struct e1000_hw *hw);
7634 +void e1000_update_mc_addr_list_generic(struct e1000_hw *hw,
7635 +                                      u8 *mc_addr_list, u32 mc_addr_count);
7636 +s32  e1000_set_fc_watermarks_generic(struct e1000_hw *hw);
7637 +s32  e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw);
7638 +s32  e1000_setup_led_generic(struct e1000_hw *hw);
7639 +s32  e1000_setup_link_generic(struct e1000_hw *hw);
7640 +s32  e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg,
7641 +                                       u32 offset, u8 data);
7642 +
7643 +u32  e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr);
7644 +
7645 +void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw);
7646 +void e1000_clear_vfta_generic(struct e1000_hw *hw);
7647 +void e1000_config_collision_dist_generic(struct e1000_hw *hw);
7648 +void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count);
7649 +void e1000_mta_set_generic(struct e1000_hw *hw, u32 hash_value);
7650 +void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw);
7651 +void e1000_put_hw_semaphore_generic(struct e1000_hw *hw);
7652 +void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
7653 +s32  e1000_check_alt_mac_addr_generic(struct e1000_hw *hw);
7654 +void e1000_reset_adaptive_generic(struct e1000_hw *hw);
7655 +void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop);
7656 +void e1000_update_adaptive_generic(struct e1000_hw *hw);
7657 +void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
7658 +
7659 +#endif
7660 Index: linux-2.6.22/drivers/net/igb/e1000_manage.c
7661 ===================================================================
7662 --- /dev/null   1970-01-01 00:00:00.000000000 +0000
7663 +++ linux-2.6.22/drivers/net/igb/e1000_manage.c 2009-12-18 12:39:22.000000000 -0500
7664 @@ -0,0 +1,383 @@
7665 +/*******************************************************************************
7666 +
7667 +  Intel(R) Gigabit Ethernet Linux driver
7668 +  Copyright(c) 2007-2009 Intel Corporation.
7669 +
7670 +  This program is free software; you can redistribute it and/or modify it
7671 +  under the terms and conditions of the GNU General Public License,
7672 +  version 2, as published by the Free Software Foundation.
7673 +
7674 +  This program is distributed in the hope it will be useful, but WITHOUT
7675 +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
7676 +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
7677 +  more details.
7678 +
7679 +  You should have received a copy of the GNU General Public License along with
7680 +  this program; if not, write to the Free Software Foundation, Inc.,
7681 +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
7682 +
7683 +  The full GNU General Public License is included in this distribution in
7684 +  the file called "COPYING".
7685 +
7686 +  Contact Information:
7687 +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
7688 +  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
7689 +
7690 +*******************************************************************************/
7691 +
7692 +#include "e1000_api.h"
7693 +
7694 +static u8 e1000_calculate_checksum(u8 *buffer, u32 length);
7695 +
7696 +/**
7697 + *  e1000_calculate_checksum - Calculate checksum for buffer
7698 + *  @buffer: pointer to EEPROM
7699 + *  @length: size of EEPROM to calculate a checksum for
7700 + *
7701 + *  Calculates the checksum for some buffer on a specified length.  The
7702 + *  checksum calculated is returned.
7703 + **/
7704 +static u8 e1000_calculate_checksum(u8 *buffer, u32 length)
7705 +{
7706 +       u32 i;
7707 +       u8  sum = 0;
7708 +
7709 +       DEBUGFUNC("e1000_calculate_checksum");
7710 +
7711 +       if (!buffer)
7712 +               return 0;
7713 +
7714 +       for (i = 0; i < length; i++)
7715 +               sum += buffer[i];
7716 +
7717 +       return (u8) (0 - sum);
7718 +}
7719 +
7720 +/**
7721 + *  e1000_mng_enable_host_if_generic - Checks host interface is enabled
7722 + *  @hw: pointer to the HW structure
7723 + *
7724 + *  Returns E1000_SUCCESS upon success, else -E1000_ERR_HOST_INTERFACE_COMMAND
7725 + *
7726 + *  This function checks whether the HOST IF is enabled for command operation
7727 + *  and also checks whether the previous command is completed.  It busy waits
7728 + *  in case of previous command is not completed.
7729 + **/
7730 +s32 e1000_mng_enable_host_if_generic(struct e1000_hw *hw)
7731 +{
7732 +       u32 hicr;
7733 +       s32 ret_val = E1000_SUCCESS;
7734 +       u8  i;
7735 +
7736 +       DEBUGFUNC("e1000_mng_enable_host_if_generic");
7737 +
7738 +       /* Check that the host interface is enabled. */
7739 +       hicr = E1000_READ_REG(hw, E1000_HICR);
7740 +       if ((hicr & E1000_HICR_EN) == 0) {
7741 +               DEBUGOUT("E1000_HOST_EN bit disabled.\n");
7742 +               ret_val = -E1000_ERR_HOST_INTERFACE_COMMAND;
7743 +               goto out;
7744 +       }
7745 +       /* check the previous command is completed */
7746 +       for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) {
7747 +               hicr = E1000_READ_REG(hw, E1000_HICR);
7748 +               if (!(hicr & E1000_HICR_C))
7749 +                       break;
7750 +               msec_delay_irq(1);
7751 +       }
7752 +
7753 +       if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
7754 +               DEBUGOUT("Previous command timeout failed .\n");
7755 +               ret_val = -E1000_ERR_HOST_INTERFACE_COMMAND;
7756 +               goto out;
7757 +       }
7758 +
7759 +out:
7760 +       return ret_val;
7761 +}
7762 +
7763 +/**
7764 + *  e1000_check_mng_mode_generic - Generic check management mode
7765 + *  @hw: pointer to the HW structure
7766 + *
7767 + *  Reads the firmware semaphore register and returns true (>0) if
7768 + *  manageability is enabled, else false (0).
7769 + **/
7770 +bool e1000_check_mng_mode_generic(struct e1000_hw *hw)
7771 +{
7772 +       u32 fwsm;
7773 +
7774 +       DEBUGFUNC("e1000_check_mng_mode_generic");
7775 +
7776 +       fwsm = E1000_READ_REG(hw, E1000_FWSM);
7777 +
7778 +       return (fwsm & E1000_FWSM_MODE_MASK) ==
7779 +               (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT);
7780 +}
7781 +
7782 +/**
7783 + *  e1000_enable_tx_pkt_filtering_generic - Enable packet filtering on TX
7784 + *  @hw: pointer to the HW structure
7785 + *
7786 + *  Enables packet filtering on transmit packets if manageability is enabled
7787 + *  and host interface is enabled.
7788 + **/
7789 +bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw)
7790 +{
7791 +       struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie;
7792 +       u32 *buffer = (u32 *)&hw->mng_cookie;
7793 +       u32 offset;
7794 +       s32 ret_val, hdr_csum, csum;
7795 +       u8 i, len;
7796 +       bool tx_filter = true;
7797 +
7798 +       DEBUGFUNC("e1000_enable_tx_pkt_filtering_generic");
7799 +
7800 +       /* No manageability, no filtering */
7801 +       if (!hw->mac.ops.check_mng_mode(hw)) {
7802 +               tx_filter = false;
7803 +               goto out;
7804 +       }
7805 +
7806 +       /*
7807 +        * If we can't read from the host interface for whatever
7808 +        * reason, disable filtering.
7809 +        */
7810 +       ret_val = hw->mac.ops.mng_enable_host_if(hw);
7811 +       if (ret_val != E1000_SUCCESS) {
7812 +               tx_filter = false;
7813 +               goto out;
7814 +       }
7815 +
7816 +       /* Read in the header.  Length and offset are in dwords. */
7817 +       len    = E1000_MNG_DHCP_COOKIE_LENGTH >> 2;
7818 +       offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2;
7819 +       for (i = 0; i < len; i++) {
7820 +               *(buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw,
7821 +                                                          E1000_HOST_IF,
7822 +                                                          offset + i);
7823 +       }
7824 +       hdr_csum = hdr->checksum;
7825 +       hdr->checksum = 0;
7826 +       csum = e1000_calculate_checksum((u8 *)hdr,
7827 +                                       E1000_MNG_DHCP_COOKIE_LENGTH);
7828 +       /*
7829 +        * If either the checksums or signature don't match, then
7830 +        * the cookie area isn't considered valid, in which case we
7831 +        * take the safe route of assuming Tx filtering is enabled.
7832 +        */
7833 +       if (hdr_csum != csum)
7834 +               goto out;
7835 +       if (hdr->signature != E1000_IAMT_SIGNATURE)
7836 +               goto out;
7837 +
7838 +       /* Cookie area is valid, make the final check for filtering. */
7839 +       if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING))
7840 +               tx_filter = false;
7841 +
7842 +out:
7843 +       hw->mac.tx_pkt_filtering = tx_filter;
7844 +       return tx_filter;
7845 +}
7846 +
7847 +/**
7848 + *  e1000_mng_write_dhcp_info_generic - Writes DHCP info to host interface
7849 + *  @hw: pointer to the HW structure
7850 + *  @buffer: pointer to the host interface
7851 + *  @length: size of the buffer
7852 + *
7853 + *  Writes the DHCP information to the host interface.
7854 + **/
7855 +s32 e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw, u8 *buffer,
7856 +                                      u16 length)
7857 +{
7858 +       struct e1000_host_mng_command_header hdr;
7859 +       s32 ret_val;
7860 +       u32 hicr;
7861 +
7862 +       DEBUGFUNC("e1000_mng_write_dhcp_info_generic");
7863 +
7864 +       hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
7865 +       hdr.command_length = length;
7866 +       hdr.reserved1 = 0;
7867 +       hdr.reserved2 = 0;
7868 +       hdr.checksum = 0;
7869 +
7870 +       /* Enable the host interface */
7871 +       ret_val = hw->mac.ops.mng_enable_host_if(hw);
7872 +       if (ret_val)
7873 +               goto out;
7874 +
7875 +       /* Populate the host interface with the contents of "buffer". */
7876 +       ret_val = hw->mac.ops.mng_host_if_write(hw, buffer, length,
7877 +                                         sizeof(hdr), &(hdr.checksum));
7878 +       if (ret_val)
7879 +               goto out;
7880 +
7881 +       /* Write the manageability command header */
7882 +       ret_val = hw->mac.ops.mng_write_cmd_header(hw, &hdr);
7883 +       if (ret_val)
7884 +               goto out;
7885 +
7886 +       /* Tell the ARC a new command is pending. */
7887 +       hicr = E1000_READ_REG(hw, E1000_HICR);
7888 +       E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C);
7889 +
7890 +out:
7891 +       return ret_val;
7892 +}
7893 +
7894 +/**
7895 + *  e1000_mng_write_cmd_header_generic - Writes manageability command header
7896 + *  @hw: pointer to the HW structure
7897 + *  @hdr: pointer to the host interface command header
7898 + *
7899 + *  Writes the command header after doing the checksum calculation.
7900 + **/
7901 +s32 e1000_mng_write_cmd_header_generic(struct e1000_hw *hw,
7902 +                                    struct e1000_host_mng_command_header *hdr)
7903 +{
7904 +       u16 i, length = sizeof(struct e1000_host_mng_command_header);
7905 +
7906 +       DEBUGFUNC("e1000_mng_write_cmd_header_generic");
7907 +
7908 +       /* Write the whole command header structure with new checksum. */
7909 +
7910 +       hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length);
7911 +
7912 +       length >>= 2;
7913 +       /* Write the relevant command block into the ram area. */
7914 +       for (i = 0; i < length; i++) {
7915 +               E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, i,
7916 +                                           *((u32 *) hdr + i));
7917 +               E1000_WRITE_FLUSH(hw);
7918 +       }
7919 +
7920 +       return E1000_SUCCESS;
7921 +}
7922 +
7923 +/**
7924 + *  e1000_mng_host_if_write_generic - Write to the manageability host interface
7925 + *  @hw: pointer to the HW structure
7926 + *  @buffer: pointer to the host interface buffer
7927 + *  @length: size of the buffer
7928 + *  @offset: location in the buffer to write to
7929 + *  @sum: sum of the data (not checksum)
7930 + *
7931 + *  This function writes the buffer content at the given offset on the host interface.
7932 + *  It also does alignment considerations to do the writes in most efficient
7933 + *  way.  Also accumulates the sum of the written data in the *sum parameter.
7934 + **/
7935 +s32 e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer,
7936 +                                    u16 length, u16 offset, u8 *sum)
7937 +{
7938 +       u8 *tmp;
7939 +       u8 *bufptr = buffer;
7940 +       u32 data = 0;
7941 +       s32 ret_val = E1000_SUCCESS;
7942 +       u16 remaining, i, j, prev_bytes;
7943 +
7944 +       DEBUGFUNC("e1000_mng_host_if_write_generic");
7945 +
7946 +       /* sum = only sum of the data and it is not checksum */
7947 +
7948 +       if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH) {
7949 +               ret_val = -E1000_ERR_PARAM;
7950 +               goto out;
7951 +       }
7952 +
7953 +       tmp = (u8 *)&data;
7954 +       prev_bytes = offset & 0x3;
7955 +       offset >>= 2;
7956 +
7957 +       if (prev_bytes) {
7958 +               data = E1000_READ_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset);
7959 +               for (j = prev_bytes; j < sizeof(u32); j++) {
7960 +                       *(tmp + j) = *bufptr++;
7961 +                       *sum += *(tmp + j);
7962 +               }
7963 +               E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset, data);
7964 +               length -= j - prev_bytes;
7965 +               offset++;
7966 +       }
7967 +
7968 +       remaining = length & 0x3;
7969 +       length -= remaining;
7970 +
7971 +       /* Calculate length in DWORDs */
7972 +       length >>= 2;
7973 +
7974 +       /*
7975 +        * The device driver writes the relevant command block into the
7976 +        * ram area.
7977 +        */
7978 +       for (i = 0; i < length; i++) {
7979 +               for (j = 0; j < sizeof(u32); j++) {
7980 +                       *(tmp + j) = *bufptr++;
7981 +                       *sum += *(tmp + j);
7982 +               }
7983 +
7984 +               E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i,
7985 +                                           data);
7986 +       }
7987 +       if (remaining) {
7988 +               for (j = 0; j < sizeof(u32); j++) {
7989 +                       if (j < remaining)
7990 +                               *(tmp + j) = *bufptr++;
7991 +                       else
7992 +                               *(tmp + j) = 0;
7993 +
7994 +                       *sum += *(tmp + j);
7995 +               }
7996 +               E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i, data);
7997 +       }
7998 +
7999 +out:
8000 +       return ret_val;
8001 +}
8002 +
8003 +/**
8004 + *  e1000_enable_mng_pass_thru - Enable processing of ARPs
8005 + *  @hw: pointer to the HW structure
8006 + *
8007 + *  Verifies the hardware needs to allow ARPs to be processed by the host.
8008 + **/
8009 +bool e1000_enable_mng_pass_thru(struct e1000_hw *hw)
8010 +{
8011 +       u32 manc;
8012 +       u32 fwsm, factps;
8013 +       bool ret_val = false;
8014 +
8015 +       DEBUGFUNC("e1000_enable_mng_pass_thru");
8016 +
8017 +       if (!hw->mac.asf_firmware_present)
8018 +               goto out;
8019 +
8020 +       manc = E1000_READ_REG(hw, E1000_MANC);
8021 +
8022 +       if (!(manc & E1000_MANC_RCV_TCO_EN) ||
8023 +           !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
8024 +               goto out;
8025 +
8026 +       if (hw->mac.arc_subsystem_valid) {
8027 +               fwsm = E1000_READ_REG(hw, E1000_FWSM);
8028 +               factps = E1000_READ_REG(hw, E1000_FACTPS);
8029 +
8030 +               if (!(factps & E1000_FACTPS_MNGCG) &&
8031 +                   ((fwsm & E1000_FWSM_MODE_MASK) ==
8032 +                    (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
8033 +                       ret_val = true;
8034 +                       goto out;
8035 +               }
8036 +       } else {
8037 +               if ((manc & E1000_MANC_SMBUS_EN) &&
8038 +                   !(manc & E1000_MANC_ASF_EN)) {
8039 +                       ret_val = true;
8040 +                       goto out;
8041 +               }
8042 +       }
8043 +
8044 +out:
8045 +       return ret_val;
8046 +}
8047 +
8048 Index: linux-2.6.22/drivers/net/igb/e1000_manage.h
8049 ===================================================================
8050 --- /dev/null   1970-01-01 00:00:00.000000000 +0000
8051 +++ linux-2.6.22/drivers/net/igb/e1000_manage.h 2009-12-18 12:39:22.000000000 -0500
8052 @@ -0,0 +1,81 @@
8053 +/*******************************************************************************
8054 +
8055 +  Intel(R) Gigabit Ethernet Linux driver
8056 +  Copyright(c) 2007-2009 Intel Corporation.
8057 +
8058 +  This program is free software; you can redistribute it and/or modify it
8059 +  under the terms and conditions of the GNU General Public License,
8060 +  version 2, as published by the Free Software Foundation.
8061 +
8062 +  This program is distributed in the hope it will be useful, but WITHOUT
8063 +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8064 +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
8065 +  more details.
8066 +
8067 +  You should have received a copy of the GNU General Public License along with
8068 +  this program; if not, write to the Free Software Foundation, Inc.,
8069 +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
8070 +
8071 +  The full GNU General Public License is included in this distribution in
8072 +  the file called "COPYING".
8073 +
8074 +  Contact Information:
8075 +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
8076 +  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
8077 +
8078 +*******************************************************************************/
8079 +
8080 +#ifndef _E1000_MANAGE_H_
8081 +#define _E1000_MANAGE_H_
8082 +
8083 +bool e1000_check_mng_mode_generic(struct e1000_hw *hw);
8084 +bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw);
8085 +s32  e1000_mng_enable_host_if_generic(struct e1000_hw *hw);
8086 +s32  e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer,
8087 +                                     u16 length, u16 offset, u8 *sum);
8088 +s32  e1000_mng_write_cmd_header_generic(struct e1000_hw *hw,
8089 +                                    struct e1000_host_mng_command_header *hdr);
8090 +s32  e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw,
8091 +                                       u8 *buffer, u16 length);
8092 +bool e1000_enable_mng_pass_thru(struct e1000_hw *hw);
8093 +
8094 +enum e1000_mng_mode {
8095 +       e1000_mng_mode_none = 0,
8096 +       e1000_mng_mode_asf,
8097 +       e1000_mng_mode_pt,
8098 +       e1000_mng_mode_ipmi,
8099 +       e1000_mng_mode_host_if_only
8100 +};
8101 +
8102 +#define E1000_FACTPS_MNGCG    0x20000000
8103 +
8104 +#define E1000_FWSM_MODE_MASK  0xE
8105 +#define E1000_FWSM_MODE_SHIFT 1
8106 +
8107 +#define E1000_MNG_IAMT_MODE                  0x3
8108 +#define E1000_MNG_DHCP_COOKIE_LENGTH         0x10
8109 +#define E1000_MNG_DHCP_COOKIE_OFFSET         0x6F0
8110 +#define E1000_MNG_DHCP_COMMAND_TIMEOUT       10
8111 +#define E1000_MNG_DHCP_TX_PAYLOAD_CMD        64
8112 +#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING 0x1
8113 +#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN    0x2
8114 +
8115 +#define E1000_VFTA_ENTRY_SHIFT               5
8116 +#define E1000_VFTA_ENTRY_MASK                0x7F
8117 +#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK      0x1F
8118 +
8119 +#define E1000_HI_MAX_BLOCK_BYTE_LENGTH       1792 /* Num of bytes in range */
8120 +#define E1000_HI_MAX_BLOCK_DWORD_LENGTH      448 /* Num of dwords in range */
8121 +#define E1000_HI_COMMAND_TIMEOUT             500 /* Process HI command limit */
8122 +
8123 +#define E1000_HICR_EN              0x01  /* Enable bit - RO */
8124 +/* Driver sets this bit when done to put command in RAM */
8125 +#define E1000_HICR_C               0x02
8126 +#define E1000_HICR_SV              0x04  /* Status Validity */
8127 +#define E1000_HICR_FW_RESET_ENABLE 0x40
8128 +#define E1000_HICR_FW_RESET        0x80
8129 +
8130 +/* Intel(R) Active Management Technology signature */
8131 +#define E1000_IAMT_SIGNATURE  0x544D4149
8132 +
8133 +#endif
8134 Index: linux-2.6.22/drivers/net/igb/e1000_mbx.c
8135 ===================================================================
8136 --- /dev/null   1970-01-01 00:00:00.000000000 +0000
8137 +++ linux-2.6.22/drivers/net/igb/e1000_mbx.c    2009-12-18 12:39:22.000000000 -0500
8138 @@ -0,0 +1,491 @@
8139 +/*******************************************************************************
8140 +
8141 +  Intel(R) Gigabit Ethernet Linux driver
8142 +  Copyright(c) 2007-2009 Intel Corporation.
8143 +
8144 +  This program is free software; you can redistribute it and/or modify it
8145 +  under the terms and conditions of the GNU General Public License,
8146 +  version 2, as published by the Free Software Foundation.
8147 +
8148 +  This program is distributed in the hope it will be useful, but WITHOUT
8149 +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8150 +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
8151 +  more details.
8152 +
8153 +  You should have received a copy of the GNU General Public License along with
8154 +  this program; if not, write to the Free Software Foundation, Inc.,
8155 +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
8156 +
8157 +  The full GNU General Public License is included in this distribution in
8158 +  the file called "COPYING".
8159 +
8160 +  Contact Information:
8161 +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
8162 +  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
8163 +
8164 +*******************************************************************************/
8165 +
8166 +#include "e1000_mbx.h"
8167 +
8168 +/**
8169 + *  e1000_read_mbx - Reads a message from the mailbox
8170 + *  @hw: pointer to the HW structure
8171 + *  @msg: The message buffer
8172 + *  @size: Length of buffer
8173 + *  @mbx_id: id of mailbox to read
8174 + *
8175 + *  returns SUCCESS if it successfully read message from buffer
8176 + **/
8177 +s32 e1000_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
8178 +{
8179 +       struct e1000_mbx_info *mbx = &hw->mbx;
8180 +       s32 ret_val = -E1000_ERR_MBX;
8181 +
8182 +       DEBUGFUNC("e1000_read_mbx");
8183 +
8184 +       /* limit read to size of mailbox */
8185 +       if (size > mbx->size)
8186 +               size = mbx->size;
8187 +
8188 +       if (mbx->ops.read)
8189 +               ret_val = mbx->ops.read(hw, msg, size, mbx_id);
8190 +
8191 +       return ret_val;
8192 +}
8193 +
8194 +/**
8195 + *  e1000_write_mbx - Write a message to the mailbox
8196 + *  @hw: pointer to the HW structure
8197 + *  @msg: The message buffer
8198 + *  @size: Length of buffer
8199 + *  @mbx_id: id of mailbox to write
8200 + *
8201 + *  returns SUCCESS if it successfully copied message into the buffer
8202 + **/
8203 +s32 e1000_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
8204 +{
8205 +       struct e1000_mbx_info *mbx = &hw->mbx;
8206 +       s32 ret_val = E1000_SUCCESS;
8207 +
8208 +       DEBUGFUNC("e1000_write_mbx");
8209 +
8210 +       if (size > mbx->size)
8211 +               ret_val = -E1000_ERR_MBX;
8212 +
8213 +       else if (mbx->ops.write)
8214 +               ret_val = mbx->ops.write(hw, msg, size, mbx_id);
8215 +
8216 +       return ret_val;
8217 +}
8218 +
8219 +/**
8220 + *  e1000_check_for_msg - checks to see if someone sent us mail
8221 + *  @hw: pointer to the HW structure
8222 + *  @mbx_id: id of mailbox to check
8223 + *
8224 + *  returns SUCCESS if the Status bit was found or else ERR_MBX
8225 + **/
8226 +s32 e1000_check_for_msg(struct e1000_hw *hw, u16 mbx_id)
8227 +{
8228 +       struct e1000_mbx_info *mbx = &hw->mbx;
8229 +       s32 ret_val = -E1000_ERR_MBX;
8230 +
8231 +       DEBUGFUNC("e1000_check_for_msg");
8232 +
8233 +       if (mbx->ops.check_for_msg)
8234 +               ret_val = mbx->ops.check_for_msg(hw, mbx_id);
8235 +
8236 +       return ret_val;
8237 +}
8238 +
8239 +/**
8240 + *  e1000_check_for_ack - checks to see if someone sent us ACK
8241 + *  @hw: pointer to the HW structure
8242 + *  @mbx_id: id of mailbox to check
8243 + *
8244 + *  returns SUCCESS if the Status bit was found or else ERR_MBX
8245 + **/
8246 +s32 e1000_check_for_ack(struct e1000_hw *hw, u16 mbx_id)
8247 +{
8248 +       struct e1000_mbx_info *mbx = &hw->mbx;
8249 +       s32 ret_val = -E1000_ERR_MBX;
8250 +
8251 +       DEBUGFUNC("e1000_check_for_ack");
8252 +
8253 +       if (mbx->ops.check_for_ack)
8254 +               ret_val = mbx->ops.check_for_ack(hw, mbx_id);
8255 +
8256 +       return ret_val;
8257 +}
8258 +
8259 +/**
8260 + *  e1000_check_for_rst - checks to see if other side has reset
8261 + *  @hw: pointer to the HW structure
8262 + *  @mbx_id: id of mailbox to check
8263 + *
8264 + *  returns SUCCESS if the Status bit was found or else ERR_MBX
8265 + **/
8266 +s32 e1000_check_for_rst(struct e1000_hw *hw, u16 mbx_id)
8267 +{
8268 +       struct e1000_mbx_info *mbx = &hw->mbx;
8269 +       s32 ret_val = -E1000_ERR_MBX;
8270 +
8271 +       DEBUGFUNC("e1000_check_for_rst");
8272 +
8273 +       if (mbx->ops.check_for_rst)
8274 +               ret_val = mbx->ops.check_for_rst(hw, mbx_id);
8275 +
8276 +       return ret_val;
8277 +}
8278 +
8279 +/**
8280 + *  e1000_poll_for_msg - Wait for message notification
8281 + *  @hw: pointer to the HW structure
8282 + *  @mbx_id: id of mailbox to write
8283 + *
8284 + *  returns SUCCESS if it successfully received a message notification
8285 + **/
8286 +static s32 e1000_poll_for_msg(struct e1000_hw *hw, u16 mbx_id)
8287 +{
8288 +       struct e1000_mbx_info *mbx = &hw->mbx;
8289 +       int countdown = mbx->timeout;
8290 +
8291 +       DEBUGFUNC("e1000_poll_for_msg");
8292 +
8293 +       if (!countdown || !mbx->ops.check_for_msg)
8294 +               goto out;
8295 +
8296 +       while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
8297 +               countdown--;
8298 +               if (!countdown)
8299 +                       break;
8300 +               usec_delay(mbx->usec_delay);
8301 +       }
8302 +
8303 +       /* if we failed, all future posted messages fail until reset */
8304 +       if (!countdown)
8305 +               mbx->timeout = 0;
8306 +out:
8307 +       return countdown ? E1000_SUCCESS : -E1000_ERR_MBX;
8308 +}
8309 +
8310 +/**
8311 + *  e1000_poll_for_ack - Wait for message acknowledgement
8312 + *  @hw: pointer to the HW structure
8313 + *  @mbx_id: id of mailbox to write
8314 + *
8315 + *  returns SUCCESS if it successfully received a message acknowledgement
8316 + **/
8317 +static s32 e1000_poll_for_ack(struct e1000_hw *hw, u16 mbx_id)
8318 +{
8319 +       struct e1000_mbx_info *mbx = &hw->mbx;
8320 +       int countdown = mbx->timeout;
8321 +
8322 +       DEBUGFUNC("e1000_poll_for_ack");
8323 +
8324 +       if (!countdown || !mbx->ops.check_for_ack)
8325 +               goto out;
8326 +
8327 +       while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
8328 +               countdown--;
8329 +               if (!countdown)
8330 +                       break;
8331 +               usec_delay(mbx->usec_delay);
8332 +       }
8333 +
8334 +       /* if we failed, all future posted messages fail until reset */
8335 +       if (!countdown)
8336 +               mbx->timeout = 0;
8337 +out:
8338 +       return countdown ? E1000_SUCCESS : -E1000_ERR_MBX;
8339 +}
8340 +
8341 +/**
8342 + *  e1000_read_posted_mbx - Wait for message notification and receive message
8343 + *  @hw: pointer to the HW structure
8344 + *  @msg: The message buffer
8345 + *  @size: Length of buffer
8346 + *  @mbx_id: id of mailbox to write
8347 + *
8348 + *  returns SUCCESS if it successfully received a message notification and
8349 + *  copied it into the receive buffer.
8350 + **/
8351 +s32 e1000_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
8352 +{
8353 +       struct e1000_mbx_info *mbx = &hw->mbx;
8354 +       s32 ret_val = -E1000_ERR_MBX;
8355 +
8356 +       DEBUGFUNC("e1000_read_posted_mbx");
8357 +
8358 +       if (!mbx->ops.read)
8359 +               goto out;
8360 +
8361 +       ret_val = e1000_poll_for_msg(hw, mbx_id);
8362 +
8363 +       /* if ack received read message, otherwise we timed out */
8364 +       if (!ret_val)
8365 +               ret_val = mbx->ops.read(hw, msg, size, mbx_id);
8366 +out:
8367 +       return ret_val;
8368 +}
8369 +
8370 +/**
8371 + *  e1000_write_posted_mbx - Write a message to the mailbox, wait for ack
8372 + *  @hw: pointer to the HW structure
8373 + *  @msg: The message buffer
8374 + *  @size: Length of buffer
8375 + *  @mbx_id: id of mailbox to write
8376 + *
8377 + *  returns SUCCESS if it successfully copied message into the buffer and
8378 + *  received an ack to that message within delay * timeout period
8379 + **/
8380 +s32 e1000_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
8381 +{
8382 +       struct e1000_mbx_info *mbx = &hw->mbx;
8383 +       s32 ret_val = -E1000_ERR_MBX;
8384 +
8385 +       DEBUGFUNC("e1000_write_posted_mbx");
8386 +
8387 +       /* exit if either we can't write or there isn't a defined timeout */
8388 +       if (!mbx->ops.write || !mbx->timeout)
8389 +               goto out;
8390 +
8391 +       /* send msg */
8392 +       ret_val = mbx->ops.write(hw, msg, size, mbx_id);
8393 +
8394 +       /* if msg sent wait until we receive an ack */
8395 +       if (!ret_val)
8396 +               ret_val = e1000_poll_for_ack(hw, mbx_id);
8397 +out:
8398 +       return ret_val;
8399 +}
8400 +
8401 +/**
8402 + *  e1000_init_mbx_ops_generic - Initialize mailbox function pointers
8403 + *  @hw: pointer to the HW structure
8404 + *
8405 + *  Sets up the posted read/write function pointers to generic versions
8406 + **/
8407 +void e1000_init_mbx_ops_generic(struct e1000_hw *hw)
8408 +{
8409 +       struct e1000_mbx_info *mbx = &hw->mbx;
8410 +       mbx->ops.read_posted = e1000_read_posted_mbx;
8411 +       mbx->ops.write_posted = e1000_write_posted_mbx;
8412 +}
8413 +
8414 +static s32 e1000_check_for_bit_pf(struct e1000_hw *hw, u32 mask)
8415 +{
8416 +       u32 mbvficr = E1000_READ_REG(hw, E1000_MBVFICR);
8417 +       s32 ret_val = -E1000_ERR_MBX;
8418 +
8419 +       if (mbvficr & mask) {
8420 +               ret_val = E1000_SUCCESS;
8421 +               E1000_WRITE_REG(hw, E1000_MBVFICR, mask);
8422 +       }
8423 +
8424 +       return ret_val;
8425 +}
8426 +
8427 +/**
8428 + *  e1000_check_for_msg_pf - checks to see if the VF has sent mail
8429 + *  @hw: pointer to the HW structure
8430 + *  @vf_number: the VF index
8431 + *
8432 + *  returns SUCCESS if the VF has set the Status bit or else ERR_MBX
8433 + **/
8434 +static s32 e1000_check_for_msg_pf(struct e1000_hw *hw, u16 vf_number)
8435 +{
8436 +       s32 ret_val = -E1000_ERR_MBX;
8437 +
8438 +       DEBUGFUNC("e1000_check_for_msg_pf");
8439 +
8440 +       if (!e1000_check_for_bit_pf(hw, E1000_MBVFICR_VFREQ_VF1 << vf_number)) {
8441 +               ret_val = E1000_SUCCESS;
8442 +               hw->mbx.stats.reqs++;
8443 +       }
8444 +
8445 +       return ret_val;
8446 +}
8447 +
8448 +/**
8449 + *  e1000_check_for_ack_pf - checks to see if the VF has ACKed
8450 + *  @hw: pointer to the HW structure
8451 + *  @vf_number: the VF index
8452 + *
8453 + *  returns SUCCESS if the VF has set the Status bit or else ERR_MBX
8454 + **/
8455 +static s32 e1000_check_for_ack_pf(struct e1000_hw *hw, u16 vf_number)
8456 +{
8457 +       s32 ret_val = -E1000_ERR_MBX;
8458 +
8459 +       DEBUGFUNC("e1000_check_for_ack_pf");
8460 +
8461 +       if (!e1000_check_for_bit_pf(hw, E1000_MBVFICR_VFACK_VF1 << vf_number)) {
8462 +               ret_val = E1000_SUCCESS;
8463 +               hw->mbx.stats.acks++;
8464 +       }
8465 +
8466 +       return ret_val;
8467 +}
8468 +
8469 +/**
8470 + *  e1000_check_for_rst_pf - checks to see if the VF has reset
8471 + *  @hw: pointer to the HW structure
8472 + *  @vf_number: the VF index
8473 + *
8474 + *  returns SUCCESS if the VF has set the Status bit or else ERR_MBX
8475 + **/
8476 +static s32 e1000_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number)
8477 +{
8478 +       u32 vflre = E1000_READ_REG(hw, E1000_VFLRE);
8479 +       s32 ret_val = -E1000_ERR_MBX;
8480 +
8481 +       DEBUGFUNC("e1000_check_for_rst_pf");
8482 +
8483 +       if (vflre & (1 << vf_number)) {
8484 +               ret_val = E1000_SUCCESS;
8485 +               E1000_WRITE_REG(hw, E1000_VFLRE, (1 << vf_number));
8486 +               hw->mbx.stats.rsts++;
8487 +       }
8488 +
8489 +       return ret_val;
8490 +}
8491 +
8492 +/**
8493 + *  e1000_obtain_mbx_lock_pf - obtain mailbox lock
8494 + *  @hw: pointer to the HW structure
8495 + *  @vf_number: the VF index
8496 + *
8497 + *  return SUCCESS if we obtained the mailbox lock
8498 + **/
8499 +static s32 e1000_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number)
8500 +{
8501 +       s32 ret_val = -E1000_ERR_MBX;
8502 +       u32 p2v_mailbox;
8503 +
8504 +       DEBUGFUNC("e1000_obtain_mbx_lock_pf");
8505 +
8506 +       /* Take ownership of the buffer */
8507 +       E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU);
8508 +
8509 +       /* reserve mailbox for vf use */
8510 +       p2v_mailbox = E1000_READ_REG(hw, E1000_P2VMAILBOX(vf_number));
8511 +       if (p2v_mailbox & E1000_P2VMAILBOX_PFU)
8512 +               ret_val = E1000_SUCCESS;
8513 +
8514 +       return ret_val;
8515 +}
8516 +
8517 +/**
8518 + *  e1000_write_mbx_pf - Places a message in the mailbox
8519 + *  @hw: pointer to the HW structure
8520 + *  @msg: The message buffer
8521 + *  @size: Length of buffer
8522 + *  @vf_number: the VF index
8523 + *
8524 + *  returns SUCCESS if it successfully copied message into the buffer
8525 + **/
8526 +static s32 e1000_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
8527 +                              u16 vf_number)
8528 +{
8529 +       s32 ret_val;
8530 +       u16 i;
8531 +
8532 +       DEBUGFUNC("e1000_write_mbx_pf");
8533 +
8534 +       /* lock the mailbox to prevent pf/vf race condition */
8535 +       ret_val = e1000_obtain_mbx_lock_pf(hw, vf_number);
8536 +       if (ret_val)
8537 +               goto out_no_write;
8538 +
8539 +       /* flush msg and acks as we are overwriting the message buffer */
8540 +       e1000_check_for_msg_pf(hw, vf_number);
8541 +       e1000_check_for_ack_pf(hw, vf_number);
8542 +
8543 +       /* copy the caller specified message to the mailbox memory buffer */
8544 +       for (i = 0; i < size; i++)
8545 +               E1000_WRITE_REG_ARRAY(hw, E1000_VMBMEM(vf_number), i, msg[i]);
8546 +
8547 +       /* Interrupt VF to tell it a message has been sent and release buffer */
8548 +       E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_STS);
8549 +
8550 +       /* update stats */
8551 +       hw->mbx.stats.msgs_tx++;
8552 +
8553 +out_no_write:
8554 +       return ret_val;
8555 +
8556 +}
8557 +
8558 +/**
8559 + *  e1000_read_mbx_pf - Read a message from the mailbox
8560 + *  @hw: pointer to the HW structure
8561 + *  @msg: The message buffer
8562 + *  @size: Length of buffer
8563 + *  @vf_number: the VF index
8564 + *
8565 + *  This function copies a message from the mailbox buffer to the caller's
8566 + *  memory buffer.  The presumption is that the caller knows that there was
8567 + *  a message due to a VF request so no polling for message is needed.
8568 + **/
8569 +static s32 e1000_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
8570 +                             u16 vf_number)
8571 +{
8572 +       s32 ret_val;
8573 +       u16 i;
8574 +
8575 +       DEBUGFUNC("e1000_read_mbx_pf");
8576 +
8577 +       /* lock the mailbox to prevent pf/vf race condition */
8578 +       ret_val = e1000_obtain_mbx_lock_pf(hw, vf_number);
8579 +       if (ret_val)
8580 +               goto out_no_read;
8581 +
8582 +       /* copy the message to the mailbox memory buffer */
8583 +       for (i = 0; i < size; i++)
8584 +               msg[i] = E1000_READ_REG_ARRAY(hw, E1000_VMBMEM(vf_number), i);
8585 +
8586 +       /* Acknowledge the message and release buffer */
8587 +       E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK);
8588 +
8589 +       /* update stats */
8590 +       hw->mbx.stats.msgs_rx++;
8591 +
8592 +out_no_read:
8593 +       return ret_val;
8594 +}
8595 +
8596 +/**
8597 + *  e1000_init_mbx_params_pf - set initial values for pf mailbox
8598 + *  @hw: pointer to the HW structure
8599 + *
8600 + *  Initializes the hw->mbx struct to correct values for pf mailbox
8601 + **/
8602 +s32 e1000_init_mbx_params_pf(struct e1000_hw *hw)
8603 +{
8604 +       struct e1000_mbx_info *mbx = &hw->mbx;
8605 +
8606 +       if (hw->mac.type == e1000_82576) {
8607 +               mbx->timeout = 0;
8608 +               mbx->usec_delay = 0;
8609 +
8610 +               mbx->size = E1000_VFMAILBOX_SIZE;
8611 +
8612 +               mbx->ops.read = e1000_read_mbx_pf;
8613 +               mbx->ops.write = e1000_write_mbx_pf;
8614 +               mbx->ops.read_posted = e1000_read_posted_mbx;
8615 +               mbx->ops.write_posted = e1000_write_posted_mbx;
8616 +               mbx->ops.check_for_msg = e1000_check_for_msg_pf;
8617 +               mbx->ops.check_for_ack = e1000_check_for_ack_pf;
8618 +               mbx->ops.check_for_rst = e1000_check_for_rst_pf;
8619 +
8620 +               mbx->stats.msgs_tx = 0;
8621 +               mbx->stats.msgs_rx = 0;
8622 +               mbx->stats.reqs = 0;
8623 +               mbx->stats.acks = 0;
8624 +               mbx->stats.rsts = 0;
8625 +       }
8626 +
8627 +       return E1000_SUCCESS;
8628 +}
8629 +
8630 Index: linux-2.6.22/drivers/net/igb/e1000_mbx.h
8631 ===================================================================
8632 --- /dev/null   1970-01-01 00:00:00.000000000 +0000
8633 +++ linux-2.6.22/drivers/net/igb/e1000_mbx.h    2009-12-18 12:39:22.000000000 -0500
8634 @@ -0,0 +1,87 @@
8635 +/*******************************************************************************
8636 +
8637 +  Intel(R) Gigabit Ethernet Linux driver
8638 +  Copyright(c) 2007-2009 Intel Corporation.
8639 +
8640 +  This program is free software; you can redistribute it and/or modify it
8641 +  under the terms and conditions of the GNU General Public License,
8642 +  version 2, as published by the Free Software Foundation.
8643 +
8644 +  This program is distributed in the hope it will be useful, but WITHOUT
8645 +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8646 +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
8647 +  more details.
8648 +
8649 +  You should have received a copy of the GNU General Public License along with
8650 +  this program; if not, write to the Free Software Foundation, Inc.,
8651 +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
8652 +
8653 +  The full GNU General Public License is included in this distribution in
8654 +  the file called "COPYING".
8655 +
8656 +  Contact Information:
8657 +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
8658 +  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
8659 +
8660 +*******************************************************************************/
8661 +
8662 +#ifndef _E1000_MBX_H_
8663 +#define _E1000_MBX_H_
8664 +
8665 +#include "e1000_api.h"
8666 +
8667 +#define E1000_P2VMAILBOX_STS   0x00000001 /* Initiate message send to VF */
8668 +#define E1000_P2VMAILBOX_ACK   0x00000002 /* Ack message recv'd from VF */
8669 +#define E1000_P2VMAILBOX_VFU   0x00000004 /* VF owns the mailbox buffer */
8670 +#define E1000_P2VMAILBOX_PFU   0x00000008 /* PF owns the mailbox buffer */
8671 +#define E1000_P2VMAILBOX_RVFU  0x00000010 /* Reset VFU - used when VF stuck */
8672 +
8673 +#define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */
8674 +#define E1000_MBVFICR_VFREQ_VF1  0x00000001 /* bit for VF 1 message */
8675 +#define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */
8676 +#define E1000_MBVFICR_VFACK_VF1  0x00010000 /* bit for VF 1 ack */
8677 +
8678 +#define E1000_VFMAILBOX_SIZE   16 /* 16 32 bit words - 64 bytes */
8679 +
8680 +/* If it's a E1000_VF_* msg then it originates in the VF and is sent to the
8681 + * PF.  The reverse is true if it is E1000_PF_*.
8682 + * Message ACK's are the value or'd with 0xF0000000
8683 + */
8684 +#define E1000_VT_MSGTYPE_ACK      0x80000000  /* Messages below or'd with
8685 +                                               * this are the ACK */
8686 +#define E1000_VT_MSGTYPE_NACK     0x40000000  /* Messages below or'd with
8687 +                                               * this are the NACK */
8688 +#define E1000_VT_MSGTYPE_CTS      0x20000000  /* Indicates that VF is still
8689 +                                                 clear to send requests */
8690 +#define E1000_VT_MSGINFO_SHIFT    16
8691 +/* bits 23:16 are used for extra info for certain messages */
8692 +#define E1000_VT_MSGINFO_MASK     (0xFF << E1000_VT_MSGINFO_SHIFT)
8693 +
8694 +#define E1000_VF_RESET            0x01 /* VF requests reset */
8695 +#define E1000_VF_SET_MAC_ADDR     0x02 /* VF requests to set MAC addr */
8696 +#define E1000_VF_SET_MULTICAST    0x03 /* VF requests to set MC addr */
8697 +#define E1000_VF_SET_MULTICAST_COUNT_MASK (0x1F << E1000_VT_MSGINFO_SHIFT)
8698 +#define E1000_VF_SET_MULTICAST_OVERFLOW   (0x80 << E1000_VT_MSGINFO_SHIFT)
8699 +#define E1000_VF_SET_VLAN         0x04 /* VF requests to set VLAN */
8700 +#define E1000_VF_SET_VLAN_ADD             (0x01 << E1000_VT_MSGINFO_SHIFT)
8701 +#define E1000_VF_SET_LPE          0x05 /* VF requests to set VMOLR.LPE */
8702 +#define E1000_VF_SET_PROMISC      0x06 /*VF requests to clear VMOLR.ROPE/MPME*/
8703 +#define E1000_VF_SET_PROMISC_UNICAST      (0x01 << E1000_VT_MSGINFO_SHIFT)
8704 +#define E1000_VF_SET_PROMISC_MULTICAST    (0x02 << E1000_VT_MSGINFO_SHIFT)
8705 +
8706 +#define E1000_PF_CONTROL_MSG      0x0100 /* PF control message */
8707 +
8708 +#define E1000_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
8709 +#define E1000_VF_MBX_INIT_DELAY   500  /* microseconds between retries */
8710 +
8711 +s32 e1000_read_mbx(struct e1000_hw *, u32 *, u16, u16);
8712 +s32 e1000_write_mbx(struct e1000_hw *, u32 *, u16, u16);
8713 +s32 e1000_read_posted_mbx(struct e1000_hw *, u32 *, u16, u16);
8714 +s32 e1000_write_posted_mbx(struct e1000_hw *, u32 *, u16, u16);
8715 +s32 e1000_check_for_msg(struct e1000_hw *, u16);
8716 +s32 e1000_check_for_ack(struct e1000_hw *, u16);
8717 +s32 e1000_check_for_rst(struct e1000_hw *, u16);
8718 +void e1000_init_mbx_ops_generic(struct e1000_hw *hw);
8719 +s32 e1000_init_mbx_params_pf(struct e1000_hw *);
8720 +
8721 +#endif /* _E1000_MBX_H_ */
8722 Index: linux-2.6.22/drivers/net/igb/e1000_nvm.c
8723 ===================================================================
8724 --- /dev/null   1970-01-01 00:00:00.000000000 +0000
8725 +++ linux-2.6.22/drivers/net/igb/e1000_nvm.c    2009-12-18 12:39:22.000000000 -0500
8726 @@ -0,0 +1,625 @@
8727 +/*******************************************************************************
8728 +
8729 +  Intel(R) Gigabit Ethernet Linux driver
8730 +  Copyright(c) 2007-2009 Intel Corporation.
8731 +
8732 +  This program is free software; you can redistribute it and/or modify it
8733 +  under the terms and conditions of the GNU General Public License,
8734 +  version 2, as published by the Free Software Foundation.
8735 +
8736 +  This program is distributed in the hope it will be useful, but WITHOUT
8737 +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8738 +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
8739 +  more details.
8740 +
8741 +  You should have received a copy of the GNU General Public License along with
8742 +  this program; if not, write to the Free Software Foundation, Inc.,
8743 +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
8744 +
8745 +  The full GNU General Public License is included in this distribution in
8746 +  the file called "COPYING".
8747 +
8748 +  Contact Information:
8749 +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
8750 +  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
8751 +
8752 +*******************************************************************************/
8753 +
8754 +#include "e1000_api.h"
8755 +
8756 +static void e1000_stop_nvm(struct e1000_hw *hw);
8757 +static void e1000_reload_nvm_generic(struct e1000_hw *hw);
8758 +
8759 +/**
8760 + *  e1000_init_nvm_ops_generic - Initialize NVM function pointers
8761 + *  @hw: pointer to the HW structure
8762 + *
8763 + *  Sets up the NVM function pointers to their generic implementations
8764 + **/
8765 +void e1000_init_nvm_ops_generic(struct e1000_hw *hw)
8766 +{
8767 +       struct e1000_nvm_info *nvm = &hw->nvm;
8768 +       DEBUGFUNC("e1000_init_nvm_ops_generic");
8769 +
8770 +       /* Initialize function pointers */
8771 +       nvm->ops.reload = e1000_reload_nvm_generic;
8772 +}
8773 +
8774 +/**
8775 + *  e1000_raise_eec_clk - Raise EEPROM clock
8776 + *  @hw: pointer to the HW structure
8777 + *  @eecd: pointer to the EEPROM
8778 + *
8779 + *  Enable/Raise the EEPROM clock bit.
8780 + **/
8781 +static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
8782 +{
8783 +       *eecd = *eecd | E1000_EECD_SK;
8784 +       E1000_WRITE_REG(hw, E1000_EECD, *eecd);
8785 +       E1000_WRITE_FLUSH(hw);
8786 +       usec_delay(hw->nvm.delay_usec);
8787 +}
8788 +
8789 +/**
8790 + *  e1000_lower_eec_clk - Lower EEPROM clock
8791 + *  @hw: pointer to the HW structure
8792 + *  @eecd: pointer to the EEPROM
8793 + *
8794 + *  Clear/Lower the EEPROM clock bit.
8795 + **/
8796 +static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
8797 +{
8798 +       *eecd = *eecd & ~E1000_EECD_SK;
8799 +       E1000_WRITE_REG(hw, E1000_EECD, *eecd);
8800 +       E1000_WRITE_FLUSH(hw);
8801 +       usec_delay(hw->nvm.delay_usec);
8802 +}
8803 +
8804 +/**
8805 + *  e1000_shift_out_eec_bits - Shift data bits out to the EEPROM
8806 + *  @hw: pointer to the HW structure
8807 + *  @data: data to send to the EEPROM
8808 + *  @count: number of bits to shift out
8809 + *
8810 + *  We need to shift 'count' bits out to the EEPROM.  So, the value in the
8811 + *  "data" parameter will be shifted out to the EEPROM one bit at a time.
8812 + *  In order to do this, "data" must be broken down into bits.
8813 + **/
8814 +static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
8815 +{
8816 +       struct e1000_nvm_info *nvm = &hw->nvm;
8817 +       u32 eecd = E1000_READ_REG(hw, E1000_EECD);
8818 +       u32 mask;
8819 +
8820 +       DEBUGFUNC("e1000_shift_out_eec_bits");
8821 +
8822 +       mask = 0x01 << (count - 1);
8823 +       if (nvm->type == e1000_nvm_eeprom_spi)
8824 +               eecd |= E1000_EECD_DO;
8825 +
8826 +       do {
8827 +               eecd &= ~E1000_EECD_DI;
8828 +
8829 +               if (data & mask)
8830 +                       eecd |= E1000_EECD_DI;
8831 +
8832 +               E1000_WRITE_REG(hw, E1000_EECD, eecd);
8833 +               E1000_WRITE_FLUSH(hw);
8834 +
8835 +               usec_delay(nvm->delay_usec);
8836 +
8837 +               e1000_raise_eec_clk(hw, &eecd);
8838 +               e1000_lower_eec_clk(hw, &eecd);
8839 +
8840 +               mask >>= 1;
8841 +       } while (mask);
8842 +
8843 +       eecd &= ~E1000_EECD_DI;
8844 +       E1000_WRITE_REG(hw, E1000_EECD, eecd);
8845 +}
8846 +
8847 +/**
8848 + *  e1000_shift_in_eec_bits - Shift data bits in from the EEPROM
8849 + *  @hw: pointer to the HW structure
8850 + *  @count: number of bits to shift in
8851 + *
8852 + *  In order to read a register from the EEPROM, we need to shift 'count' bits
8853 + *  in from the EEPROM.  Bits are "shifted in" by raising the clock input to
8854 + *  the EEPROM (setting the SK bit), and then reading the value of the data out
8855 + *  "DO" bit.  During this "shifting in" process the data in "DI" bit should
8856 + *  always be clear.
8857 + **/
8858 +static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
8859 +{
8860 +       u32 eecd;
8861 +       u32 i;
8862 +       u16 data;
8863 +
8864 +       DEBUGFUNC("e1000_shift_in_eec_bits");
8865 +
8866 +       eecd = E1000_READ_REG(hw, E1000_EECD);
8867 +
8868 +       eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
8869 +       data = 0;
8870 +
8871 +       for (i = 0; i < count; i++) {
8872 +               data <<= 1;
8873 +               e1000_raise_eec_clk(hw, &eecd);
8874 +
8875 +               eecd = E1000_READ_REG(hw, E1000_EECD);
8876 +
8877 +               eecd &= ~E1000_EECD_DI;
8878 +               if (eecd & E1000_EECD_DO)
8879 +                       data |= 1;
8880 +
8881 +               e1000_lower_eec_clk(hw, &eecd);
8882 +       }
8883 +
8884 +       return data;
8885 +}
8886 +
8887 +/**
8888 + *  e1000_poll_eerd_eewr_done - Poll for EEPROM read/write completion
8889 + *  @hw: pointer to the HW structure
8890 + *  @ee_reg: EEPROM flag for polling
8891 + *
8892 + *  Polls the EEPROM status bit for either read or write completion based
8893 + *  upon the value of 'ee_reg'.
8894 + **/
8895 +s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
8896 +{
8897 +       u32 attempts = 100000;
8898 +       u32 i, reg = 0;
8899 +       s32 ret_val = -E1000_ERR_NVM;
8900 +
8901 +       DEBUGFUNC("e1000_poll_eerd_eewr_done");
8902 +
8903 +       for (i = 0; i < attempts; i++) {
8904 +               if (ee_reg == E1000_NVM_POLL_READ)
8905 +                       reg = E1000_READ_REG(hw, E1000_EERD);
8906 +               else
8907 +                       reg = E1000_READ_REG(hw, E1000_EEWR);
8908 +
8909 +               if (reg & E1000_NVM_RW_REG_DONE) {
8910 +                       ret_val = E1000_SUCCESS;
8911 +                       break;
8912 +               }
8913 +
8914 +               usec_delay(5);
8915 +       }
8916 +
8917 +       return ret_val;
8918 +}
8919 +
8920 +/**
8921 + *  e1000_acquire_nvm_generic - Generic request for access to EEPROM
8922 + *  @hw: pointer to the HW structure
8923 + *
8924 + *  Set the EEPROM access request bit and wait for EEPROM access grant bit.
8925 + *  Return successful if access grant bit set, else clear the request for
8926 + *  EEPROM access and return -E1000_ERR_NVM (-1).
8927 + **/
8928 +s32 e1000_acquire_nvm_generic(struct e1000_hw *hw)
8929 +{
8930 +       u32 eecd = E1000_READ_REG(hw, E1000_EECD);
8931 +       s32 timeout = E1000_NVM_GRANT_ATTEMPTS;
8932 +       s32 ret_val = E1000_SUCCESS;
8933 +
8934 +       DEBUGFUNC("e1000_acquire_nvm_generic");
8935 +
8936 +       E1000_WRITE_REG(hw, E1000_EECD, eecd | E1000_EECD_REQ);
8937 +       eecd = E1000_READ_REG(hw, E1000_EECD);
8938 +
8939 +       while (timeout) {
8940 +               if (eecd & E1000_EECD_GNT)
8941 +                       break;
8942 +               usec_delay(5);
8943 +               eecd = E1000_READ_REG(hw, E1000_EECD);
8944 +               timeout--;
8945 +       }
8946 +
8947 +       if (!timeout) {
8948 +               eecd &= ~E1000_EECD_REQ;
8949 +               E1000_WRITE_REG(hw, E1000_EECD, eecd);
8950 +               DEBUGOUT("Could not acquire NVM grant\n");
8951 +               ret_val = -E1000_ERR_NVM;
8952 +       }
8953 +
8954 +       return ret_val;
8955 +}
8956 +
8957 +/**
8958 + *  e1000_standby_nvm - Return EEPROM to standby state
8959 + *  @hw: pointer to the HW structure
8960 + *
8961 + *  Return the EEPROM to a standby state.
8962 + **/
8963 +static void e1000_standby_nvm(struct e1000_hw *hw)
8964 +{
8965 +       struct e1000_nvm_info *nvm = &hw->nvm;
8966 +       u32 eecd = E1000_READ_REG(hw, E1000_EECD);
8967 +
8968 +       DEBUGFUNC("e1000_standby_nvm");
8969 +
8970 +       if (nvm->type == e1000_nvm_eeprom_spi) {
8971 +               /* Toggle CS to flush commands */
8972 +               eecd |= E1000_EECD_CS;
8973 +               E1000_WRITE_REG(hw, E1000_EECD, eecd);
8974 +               E1000_WRITE_FLUSH(hw);
8975 +               usec_delay(nvm->delay_usec);
8976 +               eecd &= ~E1000_EECD_CS;
8977 +               E1000_WRITE_REG(hw, E1000_EECD, eecd);
8978 +               E1000_WRITE_FLUSH(hw);
8979 +               usec_delay(nvm->delay_usec);
8980 +       }
8981 +}
8982 +
8983 +/**
8984 + *  e1000_stop_nvm - Terminate EEPROM command
8985 + *  @hw: pointer to the HW structure
8986 + *
8987 + *  Terminates the current command by inverting the EEPROM's chip select pin.
8988 + **/
8989 +static void e1000_stop_nvm(struct e1000_hw *hw)
8990 +{
8991 +       u32 eecd;
8992 +
8993 +       DEBUGFUNC("e1000_stop_nvm");
8994 +
8995 +       eecd = E1000_READ_REG(hw, E1000_EECD);
8996 +       if (hw->nvm.type == e1000_nvm_eeprom_spi) {
8997 +               /* Pull CS high */
8998 +               eecd |= E1000_EECD_CS;
8999 +               e1000_lower_eec_clk(hw, &eecd);
9000 +       }
9001 +}
9002 +
9003 +/**
9004 + *  e1000_release_nvm_generic - Release exclusive access to EEPROM
9005 + *  @hw: pointer to the HW structure
9006 + *
9007 + *  Stop any current commands to the EEPROM and clear the EEPROM request bit.
9008 + **/
9009 +void e1000_release_nvm_generic(struct e1000_hw *hw)
9010 +{
9011 +       u32 eecd;
9012 +
9013 +       DEBUGFUNC("e1000_release_nvm_generic");
9014 +
9015 +       e1000_stop_nvm(hw);
9016 +
9017 +       eecd = E1000_READ_REG(hw, E1000_EECD);
9018 +       eecd &= ~E1000_EECD_REQ;
9019 +       E1000_WRITE_REG(hw, E1000_EECD, eecd);
9020 +}
9021 +
9022 +/**
9023 + *  e1000_ready_nvm_eeprom - Prepares EEPROM for read/write
9024 + *  @hw: pointer to the HW structure
9025 + *
9026 + *  Sets up the EEPROM for reading and writing.
9027 + **/
9028 +static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
9029 +{
9030 +       struct e1000_nvm_info *nvm = &hw->nvm;
9031 +       u32 eecd = E1000_READ_REG(hw, E1000_EECD);
9032 +       s32 ret_val = E1000_SUCCESS;
9033 +       u16 timeout = 0;
9034 +       u8 spi_stat_reg;
9035 +
9036 +       DEBUGFUNC("e1000_ready_nvm_eeprom");
9037 +
9038 +       if (nvm->type == e1000_nvm_eeprom_spi) {
9039 +               /* Clear SK and CS */
9040 +               eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
9041 +               E1000_WRITE_REG(hw, E1000_EECD, eecd);
9042 +               usec_delay(1);
9043 +               timeout = NVM_MAX_RETRY_SPI;
9044 +
9045 +               /*
9046 +                * Read "Status Register" repeatedly until the LSB is cleared.
9047 +                * The EEPROM will signal that the command has been completed
9048 +                * by clearing bit 0 of the internal status register.  If it's
9049 +                * not cleared within 'timeout', then error out.
9050 +                */
9051 +               while (timeout) {
9052 +                       e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
9053 +                                                hw->nvm.opcode_bits);
9054 +                       spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8);
9055 +                       if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
9056 +                               break;
9057 +
9058 +                       usec_delay(5);
9059 +                       e1000_standby_nvm(hw);
9060 +                       timeout--;
9061 +               }
9062 +
9063 +               if (!timeout) {
9064 +                       DEBUGOUT("SPI NVM Status error\n");
9065 +                       ret_val = -E1000_ERR_NVM;
9066 +                       goto out;
9067 +               }
9068 +       }
9069 +
9070 +out:
9071 +       return ret_val;
9072 +}
9073 +
9074 +/**
9075 + *  e1000_read_nvm_eerd - Reads EEPROM using EERD register
9076 + *  @hw: pointer to the HW structure
9077 + *  @offset: offset of word in the EEPROM to read
9078 + *  @words: number of words to read
9079 + *  @data: word read from the EEPROM
9080 + *
9081 + *  Reads a 16 bit word from the EEPROM using the EERD register.
9082 + **/
9083 +s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
9084 +{
9085 +       struct e1000_nvm_info *nvm = &hw->nvm;
9086 +       u32 i, eerd = 0;
9087 +       s32 ret_val = E1000_SUCCESS;
9088 +
9089 +       DEBUGFUNC("e1000_read_nvm_eerd");
9090 +
9091 +       /*
9092 +        * A check for invalid values:  offset too large, too many words,
9093 +        * too many words for the offset, and not enough words.
9094 +        */
9095 +       if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
9096 +           (words == 0)) {
9097 +               DEBUGOUT("nvm parameter(s) out of bounds\n");
9098 +               ret_val = -E1000_ERR_NVM;
9099 +               goto out;
9100 +       }
9101 +
9102 +       for (i = 0; i < words; i++) {
9103 +               eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) +
9104 +                      E1000_NVM_RW_REG_START;
9105 +
9106 +               E1000_WRITE_REG(hw, E1000_EERD, eerd);
9107 +               ret_val = e1000_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
9108 +               if (ret_val)
9109 +                       break;
9110 +
9111 +               data[i] = (E1000_READ_REG(hw, E1000_EERD) >>
9112 +                          E1000_NVM_RW_REG_DATA);
9113 +       }
9114 +
9115 +out:
9116 +       return ret_val;
9117 +}
9118 +
9119 +/**
9120 + *  e1000_write_nvm_spi - Write to EEPROM using SPI
9121 + *  @hw: pointer to the HW structure
9122 + *  @offset: offset within the EEPROM to be written to
9123 + *  @words: number of words to write
9124 + *  @data: 16 bit word(s) to be written to the EEPROM
9125 + *
9126 + *  Writes data to EEPROM at offset using SPI interface.
9127 + *
9128 + *  If e1000_update_nvm_checksum is not called after this function, the
9129 + *  EEPROM will most likely contain an invalid checksum.
9130 + **/
9131 +s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
9132 +{
9133 +       struct e1000_nvm_info *nvm = &hw->nvm;
9134 +       s32 ret_val;
9135 +       u16 widx = 0;
9136 +
9137 +       DEBUGFUNC("e1000_write_nvm_spi");
9138 +
9139 +       /*
9140 +        * A check for invalid values:  offset too large, too many words,
9141 +        * and not enough words.
9142 +        */
9143 +       if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
9144 +           (words == 0)) {
9145 +               DEBUGOUT("nvm parameter(s) out of bounds\n");
9146 +               ret_val = -E1000_ERR_NVM;
9147 +               goto out;
9148 +       }
9149 +
9150 +       ret_val = nvm->ops.acquire(hw);
9151 +       if (ret_val)
9152 +               goto out;
9153 +
9154 +       while (widx < words) {
9155 +               u8 write_opcode = NVM_WRITE_OPCODE_SPI;
9156 +
9157 +               ret_val = e1000_ready_nvm_eeprom(hw);
9158 +               if (ret_val)
9159 +                       goto release;
9160 +
9161 +               e1000_standby_nvm(hw);
9162 +
9163 +               /* Send the WRITE ENABLE command (8 bit opcode) */
9164 +               e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
9165 +                                        nvm->opcode_bits);
9166 +
9167 +               e1000_standby_nvm(hw);
9168 +
9169 +               /*
9170 +                * Some SPI eeproms use the 8th address bit embedded in the
9171 +                * opcode
9172 +                */
9173 +               if ((nvm->address_bits == 8) && (offset >= 128))
9174 +                       write_opcode |= NVM_A8_OPCODE_SPI;
9175 +
9176 +               /* Send the Write command (8-bit opcode + addr) */
9177 +               e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
9178 +               e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
9179 +                                        nvm->address_bits);
9180 +
9181 +               /* Loop to allow for up to whole page write of eeprom */
9182 +               while (widx < words) {
9183 +                       u16 word_out = data[widx];
9184 +                       word_out = (word_out >> 8) | (word_out << 8);
9185 +                       e1000_shift_out_eec_bits(hw, word_out, 16);
9186 +                       widx++;
9187 +
9188 +                       if ((((offset + widx) * 2) % nvm->page_size) == 0) {
9189 +                               e1000_standby_nvm(hw);
9190 +                               break;
9191 +                       }
9192 +               }
9193 +       }
9194 +
9195 +       msec_delay(10);
9196 +release:
9197 +       nvm->ops.release(hw);
9198 +
9199 +out:
9200 +       return ret_val;
9201 +}
9202 +
9203 +/**
9204 + *  e1000_read_pba_num_generic - Read device part number
9205 + *  @hw: pointer to the HW structure
9206 + *  @pba_num: pointer to device part number
9207 + *
9208 + *  Reads the product board assembly (PBA) number from the EEPROM and stores
9209 + *  the value in pba_num.
9210 + **/
9211 +s32 e1000_read_pba_num_generic(struct e1000_hw *hw, u32 *pba_num)
9212 +{
9213 +       s32  ret_val;
9214 +       u16 nvm_data;
9215 +
9216 +       DEBUGFUNC("e1000_read_pba_num_generic");
9217 +
9218 +       ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
9219 +       if (ret_val) {
9220 +               DEBUGOUT("NVM Read Error\n");
9221 +               goto out;
9222 +       }
9223 +       *pba_num = (u32)(nvm_data << 16);
9224 +
9225 +       ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &nvm_data);
9226 +       if (ret_val) {
9227 +               DEBUGOUT("NVM Read Error\n");
9228 +               goto out;
9229 +       }
9230 +       *pba_num |= nvm_data;
9231 +
9232 +out:
9233 +       return ret_val;
9234 +}
9235 +
9236 +/**
9237 + *  e1000_read_mac_addr_generic - Read device MAC address
9238 + *  @hw: pointer to the HW structure
9239 + *
9240 + *  Reads the device MAC address from the EEPROM and stores the value.
9241 + *  Since devices with two ports use the same EEPROM, we increment the
9242 + *  last bit in the MAC address for the second port.
9243 + **/
9244 +s32 e1000_read_mac_addr_generic(struct e1000_hw *hw)
9245 +{
9246 +       u32 rar_high;
9247 +       u32 rar_low;
9248 +       u16 i;
9249 +
9250 +       rar_high = E1000_READ_REG(hw, E1000_RAH(0));
9251 +       rar_low = E1000_READ_REG(hw, E1000_RAL(0));
9252 +
9253 +       for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++)
9254 +               hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8));
9255 +
9256 +       for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++)
9257 +               hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8));
9258 +
9259 +       for (i = 0; i < ETH_ADDR_LEN; i++)
9260 +               hw->mac.addr[i] = hw->mac.perm_addr[i];
9261 +
9262 +       return E1000_SUCCESS;
9263 +}
9264 +
9265 +/**
9266 + *  e1000_validate_nvm_checksum_generic - Validate EEPROM checksum
9267 + *  @hw: pointer to the HW structure
9268 + *
9269 + *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
9270 + *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
9271 + **/
9272 +s32 e1000_validate_nvm_checksum_generic(struct e1000_hw *hw)
9273 +{
9274 +       s32 ret_val = E1000_SUCCESS;
9275 +       u16 checksum = 0;
9276 +       u16 i, nvm_data;
9277 +
9278 +       DEBUGFUNC("e1000_validate_nvm_checksum_generic");
9279 +
9280 +       for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
9281 +               ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
9282 +               if (ret_val) {
9283 +                       DEBUGOUT("NVM Read Error\n");
9284 +                       goto out;
9285 +               }
9286 +               checksum += nvm_data;
9287 +       }
9288 +
9289 +       if (checksum != (u16) NVM_SUM) {
9290 +               DEBUGOUT("NVM Checksum Invalid\n");
9291 +               ret_val = -E1000_ERR_NVM;
9292 +               goto out;
9293 +       }
9294 +
9295 +out:
9296 +       return ret_val;
9297 +}
9298 +
9299 +/**
9300 + *  e1000_update_nvm_checksum_generic - Update EEPROM checksum
9301 + *  @hw: pointer to the HW structure
9302 + *
9303 + *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
9304 + *  up to the checksum.  Then calculates the EEPROM checksum and writes the
9305 + *  value to the EEPROM.
9306 + **/
9307 +s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw)
9308 +{
9309 +       s32  ret_val;
9310 +       u16 checksum = 0;
9311 +       u16 i, nvm_data;
9312 +
9313 +       DEBUGFUNC("e1000_update_nvm_checksum");
9314 +
9315 +       for (i = 0; i < NVM_CHECKSUM_REG; i++) {
9316 +               ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
9317 +               if (ret_val) {
9318 +                       DEBUGOUT("NVM Read Error while updating checksum.\n");
9319 +                       goto out;
9320 +               }
9321 +               checksum += nvm_data;
9322 +       }
9323 +       checksum = (u16) NVM_SUM - checksum;
9324 +       ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum);
9325 +       if (ret_val)
9326 +               DEBUGOUT("NVM Write Error while updating checksum.\n");
9327 +
9328 +out:
9329 +       return ret_val;
9330 +}
9331 +
9332 +/**
9333 + *  e1000_reload_nvm_generic - Reloads EEPROM
9334 + *  @hw: pointer to the HW structure
9335 + *
9336 + *  Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
9337 + *  extended control register.
9338 + **/
9339 +static void e1000_reload_nvm_generic(struct e1000_hw *hw)
9340 +{
9341 +       u32 ctrl_ext;
9342 +
9343 +       DEBUGFUNC("e1000_reload_nvm_generic");
9344 +
9345 +       usec_delay(10);
9346 +       ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
9347 +       ctrl_ext |= E1000_CTRL_EXT_EE_RST;
9348 +       E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
9349 +       E1000_WRITE_FLUSH(hw);
9350 +}
9351 +
9352 Index: linux-2.6.22/drivers/net/igb/e1000_nvm.h
9353 ===================================================================
9354 --- /dev/null   1970-01-01 00:00:00.000000000 +0000
9355 +++ linux-2.6.22/drivers/net/igb/e1000_nvm.h    2009-12-18 12:39:22.000000000 -0500
9356 @@ -0,0 +1,50 @@
9357 +/*******************************************************************************
9358 +
9359 +  Intel(R) Gigabit Ethernet Linux driver
9360 +  Copyright(c) 2007-2009 Intel Corporation.
9361 +
9362 +  This program is free software; you can redistribute it and/or modify it
9363 +  under the terms and conditions of the GNU General Public License,
9364 +  version 2, as published by the Free Software Foundation.
9365 +
9366 +  This program is distributed in the hope it will be useful, but WITHOUT
9367 +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
9368 +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
9369 +  more details.
9370 +
9371 +  You should have received a copy of the GNU General Public License along with
9372 +  this program; if not, write to the Free Software Foundation, Inc.,
9373 +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
9374 +
9375 +  The full GNU General Public License is included in this distribution in
9376 +  the file called "COPYING".
9377 +
9378 +  Contact Information:
9379 +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
9380 +  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
9381 +
9382 +*******************************************************************************/
9383 +
9384 +#ifndef _E1000_NVM_H_
9385 +#define _E1000_NVM_H_
9386 +
9387 +void e1000_init_nvm_ops_generic(struct e1000_hw *hw);
9388 +s32  e1000_acquire_nvm_generic(struct e1000_hw *hw);
9389 +
9390 +s32  e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg);
9391 +s32  e1000_read_mac_addr_generic(struct e1000_hw *hw);
9392 +s32  e1000_read_pba_num_generic(struct e1000_hw *hw, u32 *pba_num);
9393 +s32  e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words,
9394 +                         u16 *data);
9395 +s32  e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data);
9396 +s32  e1000_validate_nvm_checksum_generic(struct e1000_hw *hw);
9397 +s32  e1000_write_nvm_eewr(struct e1000_hw *hw, u16 offset,
9398 +                          u16 words, u16 *data);
9399 +s32  e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words,
9400 +                         u16 *data);
9401 +s32  e1000_update_nvm_checksum_generic(struct e1000_hw *hw);
9402 +void e1000_release_nvm_generic(struct e1000_hw *hw);
9403 +
9404 +#define E1000_STM_OPCODE  0xDB00
9405 +
9406 +#endif
9407 Index: linux-2.6.22/drivers/net/igb/e1000_osdep.h
9408 ===================================================================
9409 --- /dev/null   1970-01-01 00:00:00.000000000 +0000
9410 +++ linux-2.6.22/drivers/net/igb/e1000_osdep.h  2009-12-18 12:39:22.000000000 -0500
9411 @@ -0,0 +1,122 @@
9412 +/*******************************************************************************
9413 +
9414 +  Intel(R) Gigabit Ethernet Linux driver
9415 +  Copyright(c) 2007-2009 Intel Corporation.
9416 +
9417 +  This program is free software; you can redistribute it and/or modify it
9418 +  under the terms and conditions of the GNU General Public License,
9419 +  version 2, as published by the Free Software Foundation.
9420 +
9421 +  This program is distributed in the hope it will be useful, but WITHOUT
9422 +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
9423 +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
9424 +  more details.
9425 +
9426 +  You should have received a copy of the GNU General Public License along with
9427 +  this program; if not, write to the Free Software Foundation, Inc.,
9428 +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
9429 +
9430 +  The full GNU General Public License is included in this distribution in
9431 +  the file called "COPYING".
9432 +
9433 +  Contact Information:
9434 +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
9435 +  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
9436 +
9437 +*******************************************************************************/
9438 +
9439 +
9440 +/* glue for the OS independent part of e1000
9441 + * includes register access macros
9442 + */
9443 +
9444 +#ifndef _E1000_OSDEP_H_
9445 +#define _E1000_OSDEP_H_
9446 +
9447 +#include <linux/pci.h>
9448 +#include <linux/delay.h>
9449 +#include <linux/interrupt.h>
9450 +#include <linux/if_ether.h>
9451 +#include <linux/sched.h>
9452 +#include "kcompat.h"
9453 +
9454 +#define usec_delay(x) udelay(x)
9455 +#ifndef msec_delay
9456 +#define msec_delay(x) do { \
9457 +       /* Don't mdelay in interrupt context! */ \
9458 +       if (in_interrupt()) \
9459 +               BUG(); \
9460 +       else \
9461 +               msleep(x); \
9462 +} while (0)
9463 +
9464 +/* Some workarounds require millisecond delays and are run during interrupt
9465 + * context.  Most notably, when establishing link, the phy may need tweaking
9466 + * but cannot process phy register reads/writes faster than millisecond
9467 + * intervals...and we establish link due to a "link status change" interrupt.
9468 + */
9469 +#define msec_delay_irq(x) mdelay(x)
9470 +#endif
9471 +
9472 +#define PCI_COMMAND_REGISTER   PCI_COMMAND
9473 +#define CMD_MEM_WRT_INVALIDATE PCI_COMMAND_INVALIDATE
9474 +#define ETH_ADDR_LEN           ETH_ALEN
9475 +
9476 +#ifdef __BIG_ENDIAN
9477 +#define E1000_BIG_ENDIAN __BIG_ENDIAN
9478 +#endif
9479 +
9480 +
9481 +#define DEBUGOUT(S)
9482 +#define DEBUGOUT1(S, A...)
9483 +
9484 +#define DEBUGFUNC(F) DEBUGOUT(F "\n")
9485 +#define DEBUGOUT2 DEBUGOUT1
9486 +#define DEBUGOUT3 DEBUGOUT2
9487 +#define DEBUGOUT7 DEBUGOUT3
9488 +
9489 +#define E1000_REGISTER(a, reg) reg
9490 +
9491 +#define E1000_WRITE_REG(a, reg, value) ( \
9492 +    writel((value), ((a)->hw_addr + E1000_REGISTER(a, reg))))
9493 +
9494 +#define E1000_READ_REG(a, reg) (readl((a)->hw_addr + E1000_REGISTER(a, reg)))
9495 +
9496 +#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) ( \
9497 +    writel((value), ((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 2))))
9498 +
9499 +#define E1000_READ_REG_ARRAY(a, reg, offset) ( \
9500 +    readl((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 2)))
9501 +
9502 +#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY
9503 +#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY
9504 +
9505 +#define E1000_WRITE_REG_ARRAY_WORD(a, reg, offset, value) ( \
9506 +    writew((value), ((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 1))))
9507 +
9508 +#define E1000_READ_REG_ARRAY_WORD(a, reg, offset) ( \
9509 +    readw((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 1)))
9510 +
9511 +#define E1000_WRITE_REG_ARRAY_BYTE(a, reg, offset, value) ( \
9512 +    writeb((value), ((a)->hw_addr + E1000_REGISTER(a, reg) + (offset))))
9513 +
9514 +#define E1000_READ_REG_ARRAY_BYTE(a, reg, offset) ( \
9515 +    readb((a)->hw_addr + E1000_REGISTER(a, reg) + (offset)))
9516 +
9517 +#define E1000_WRITE_REG_IO(a, reg, offset) do { \
9518 +    outl(reg, ((a)->io_base));                  \
9519 +    outl(offset, ((a)->io_base + 4));      } while (0)
9520 +
9521 +#define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, E1000_STATUS)
9522 +
9523 +#define E1000_WRITE_FLASH_REG(a, reg, value) ( \
9524 +    writel((value), ((a)->flash_address + reg)))
9525 +
9526 +#define E1000_WRITE_FLASH_REG16(a, reg, value) ( \
9527 +    writew((value), ((a)->flash_address + reg)))
9528 +
9529 +#define E1000_READ_FLASH_REG(a, reg) (readl((a)->flash_address + reg))
9530 +
9531 +#define E1000_READ_FLASH_REG16(a, reg) (readw((a)->flash_address + reg))
9532 +
9533 +#endif /* _E1000_OSDEP_H_ */
9534 Index: linux-2.6.22/drivers/net/igb/e1000_phy.c
9535 ===================================================================
9536 --- /dev/null   1970-01-01 00:00:00.000000000 +0000
9537 +++ linux-2.6.22/drivers/net/igb/e1000_phy.c    2009-12-18 12:39:22.000000000 -0500
9538 @@ -0,0 +1,2445 @@
9539 +/*******************************************************************************
9540 +
9541 +  Intel(R) Gigabit Ethernet Linux driver
9542 +  Copyright(c) 2007-2009 Intel Corporation.
9543 +
9544 +  This program is free software; you can redistribute it and/or modify it
9545 +  under the terms and conditions of the GNU General Public License,
9546 +  version 2, as published by the Free Software Foundation.
9547 +
9548 +  This program is distributed in the hope it will be useful, but WITHOUT
9549 +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
9550 +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
9551 +  more details.
9552 +
9553 +  You should have received a copy of the GNU General Public License along with
9554 +  this program; if not, write to the Free Software Foundation, Inc.,
9555 +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
9556 +
9557 +  The full GNU General Public License is included in this distribution in
9558 +  the file called "COPYING".
9559 +
9560 +  Contact Information:
9561 +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
9562 +  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
9563 +
9564 +*******************************************************************************/
9565 +
9566 +#include "e1000_api.h"
9567 +
9568 +static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw);
9569 +/* Cable length tables */
9570 +static const u16 e1000_m88_cable_length_table[] =
9571 +       { 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
9572 +#define M88E1000_CABLE_LENGTH_TABLE_SIZE \
9573 +                (sizeof(e1000_m88_cable_length_table) / \
9574 +                 sizeof(e1000_m88_cable_length_table[0]))
9575 +
9576 +static const u16 e1000_igp_2_cable_length_table[] =
9577 +    { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21,
9578 +      0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41,
9579 +      6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61,
9580 +      21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82,
9581 +      40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104,
9582 +      60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121,
9583 +      83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124,
9584 +      104, 109, 114, 118, 121, 124};
9585 +#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
9586 +                (sizeof(e1000_igp_2_cable_length_table) / \
9587 +                 sizeof(e1000_igp_2_cable_length_table[0]))
9588 +
9589 +/**
9590 + *  e1000_check_reset_block_generic - Check if PHY reset is blocked
9591 + *  @hw: pointer to the HW structure
9592 + *
9593 + *  Read the PHY management control register and check whether a PHY reset
9594 + *  is blocked.  If a reset is not blocked return E1000_SUCCESS, otherwise
9595 + *  return E1000_BLK_PHY_RESET (12).
9596 + **/
9597 +s32 e1000_check_reset_block_generic(struct e1000_hw *hw)
9598 +{
9599 +       u32 manc;
9600 +
9601 +       DEBUGFUNC("e1000_check_reset_block");
9602 +
9603 +       manc = E1000_READ_REG(hw, E1000_MANC);
9604 +
9605 +       return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
9606 +              E1000_BLK_PHY_RESET : E1000_SUCCESS;
9607 +}
9608 +
9609 +/**
9610 + *  e1000_get_phy_id - Retrieve the PHY ID and revision
9611 + *  @hw: pointer to the HW structure
9612 + *
9613 + *  Reads the PHY registers and stores the PHY ID and possibly the PHY
9614 + *  revision in the hardware structure.
9615 + **/
9616 +s32 e1000_get_phy_id(struct e1000_hw *hw)
9617 +{
9618 +       struct e1000_phy_info *phy = &hw->phy;
9619 +       s32 ret_val = E1000_SUCCESS;
9620 +       u16 phy_id;
9621 +
9622 +       DEBUGFUNC("e1000_get_phy_id");
9623 +
9624 +       if (!(phy->ops.read_reg))
9625 +               goto out;
9626 +
9627 +               ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id);
9628 +               if (ret_val)
9629 +                       goto out;
9630 +
9631 +               phy->id = (u32)(phy_id << 16);
9632 +               usec_delay(20);
9633 +               ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id);
9634 +               if (ret_val)
9635 +                       goto out;
9636 +
9637 +               phy->id |= (u32)(phy_id & PHY_REVISION_MASK);
9638 +               phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
9639 +
9640 +out:
9641 +       return ret_val;
9642 +}
9643 +
9644 +/**
9645 + *  e1000_phy_reset_dsp_generic - Reset PHY DSP
9646 + *  @hw: pointer to the HW structure
9647 + *
9648 + *  Reset the digital signal processor.
9649 + **/
9650 +s32 e1000_phy_reset_dsp_generic(struct e1000_hw *hw)
9651 +{
9652 +       s32 ret_val = E1000_SUCCESS;
9653 +
9654 +       DEBUGFUNC("e1000_phy_reset_dsp_generic");
9655 +
9656 +       if (!(hw->phy.ops.write_reg))
9657 +               goto out;
9658 +
9659 +       ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1);
9660 +       if (ret_val)
9661 +               goto out;
9662 +
9663 +       ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0);
9664 +
9665 +out:
9666 +       return ret_val;
9667 +}
9668 +
9669 +/**
9670 + *  e1000_read_phy_reg_mdic - Read MDI control register
9671 + *  @hw: pointer to the HW structure
9672 + *  @offset: register offset to be read
9673 + *  @data: pointer to the read data
9674 + *
9675 + *  Reads the MDI control register in the PHY at offset and stores the
9676 + *  information read to data.
9677 + **/
9678 +s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
9679 +{
9680 +       struct e1000_phy_info *phy = &hw->phy;
9681 +       u32 i, mdic = 0;
9682 +       s32 ret_val = E1000_SUCCESS;
9683 +
9684 +       DEBUGFUNC("e1000_read_phy_reg_mdic");
9685 +
9686 +       /*
9687 +        * Set up Op-code, Phy Address, and register offset in the MDI
9688 +        * Control register.  The MAC will take care of interfacing with the
9689 +        * PHY to retrieve the desired data.
9690 +        */
9691 +       mdic = ((offset << E1000_MDIC_REG_SHIFT) |
9692 +               (phy->addr << E1000_MDIC_PHY_SHIFT) |
9693 +               (E1000_MDIC_OP_READ));
9694 +
9695 +       E1000_WRITE_REG(hw, E1000_MDIC, mdic);
9696 +
9697 +       /*
9698 +        * Poll the ready bit to see if the MDI read completed
9699 +        * Increasing the time out as testing showed failures with
9700 +        * the lower time out
9701 +        */
9702 +       for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
9703 +               usec_delay(50);
9704 +               mdic = E1000_READ_REG(hw, E1000_MDIC);
9705 +               if (mdic & E1000_MDIC_READY)
9706 +                       break;
9707 +       }
9708 +       if (!(mdic & E1000_MDIC_READY)) {
9709 +               DEBUGOUT("MDI Read did not complete\n");
9710 +               ret_val = -E1000_ERR_PHY;
9711 +               goto out;
9712 +       }
9713 +       if (mdic & E1000_MDIC_ERROR) {
9714 +               DEBUGOUT("MDI Error\n");
9715 +               ret_val = -E1000_ERR_PHY;
9716 +               goto out;
9717 +       }
9718 +       *data = (u16) mdic;
9719 +
9720 +out:
9721 +       return ret_val;
9722 +}
9723 +
9724 +/**
9725 + *  e1000_write_phy_reg_mdic - Write MDI control register
9726 + *  @hw: pointer to the HW structure
9727 + *  @offset: register offset to write to
9728 + *  @data: data to write to register at offset
9729 + *
9730 + *  Writes data to MDI control register in the PHY at offset.
9731 + **/
9732 +s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
9733 +{
9734 +       struct e1000_phy_info *phy = &hw->phy;
9735 +       u32 i, mdic = 0;
9736 +       s32 ret_val = E1000_SUCCESS;
9737 +
9738 +       DEBUGFUNC("e1000_write_phy_reg_mdic");
9739 +
9740 +       /*
9741 +        * Set up Op-code, Phy Address, and register offset in the MDI
9742 +        * Control register.  The MAC will take care of interfacing with the
9743 +        * PHY to retrieve the desired data.
9744 +        */
9745 +       mdic = (((u32)data) |
9746 +               (offset << E1000_MDIC_REG_SHIFT) |
9747 +               (phy->addr << E1000_MDIC_PHY_SHIFT) |
9748 +               (E1000_MDIC_OP_WRITE));
9749 +
9750 +       E1000_WRITE_REG(hw, E1000_MDIC, mdic);
9751 +
9752 +       /*
9753 +        * Poll the ready bit to see if the MDI write completed
9754 +        * Increasing the time out as testing showed failures with
9755 +        * the lower time out
9756 +        */
9757 +       for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
9758 +               usec_delay(50);
9759 +               mdic = E1000_READ_REG(hw, E1000_MDIC);
9760 +               if (mdic & E1000_MDIC_READY)
9761 +                       break;
9762 +       }
9763 +       if (!(mdic & E1000_MDIC_READY)) {
9764 +               DEBUGOUT("MDI Write did not complete\n");
9765 +               ret_val = -E1000_ERR_PHY;
9766 +               goto out;
9767 +       }
9768 +       if (mdic & E1000_MDIC_ERROR) {
9769 +               DEBUGOUT("MDI Error\n");
9770 +               ret_val = -E1000_ERR_PHY;
9771 +               goto out;
9772 +       }
9773 +
9774 +out:
9775 +       return ret_val;
9776 +}
9777 +
9778 +/**
9779 + *  e1000_read_phy_reg_i2c - Read PHY register using i2c
9780 + *  @hw: pointer to the HW structure
9781 + *  @offset: register offset to be read
9782 + *  @data: pointer to the read data
9783 + *
9784 + *  Reads the PHY register at offset using the i2c interface and stores the
9785 + *  retrieved information in data.
9786 + **/
9787 +s32 e1000_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data)
9788 +{
9789 +       struct e1000_phy_info *phy = &hw->phy;
9790 +       u32 i, i2ccmd = 0;
9791 +
9792 +       DEBUGFUNC("e1000_read_phy_reg_i2c");
9793 +
9794 +       /*
9795 +        * Set up Op-code, Phy Address, and register address in the I2CCMD
9796 +        * register.  The MAC will take care of interfacing with the
9797 +        * PHY to retrieve the desired data.
9798 +        */
9799 +       i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
9800 +                 (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
9801 +                 (E1000_I2CCMD_OPCODE_READ));
9802 +
9803 +       E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
9804 +
9805 +       /* Poll the ready bit to see if the I2C read completed */
9806 +       for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
9807 +               usec_delay(50);
9808 +               i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD);
9809 +               if (i2ccmd & E1000_I2CCMD_READY)
9810 +                       break;
9811 +       }
9812 +       if (!(i2ccmd & E1000_I2CCMD_READY)) {
9813 +               DEBUGOUT("I2CCMD Read did not complete\n");
9814 +               return -E1000_ERR_PHY;
9815 +       }
9816 +       if (i2ccmd & E1000_I2CCMD_ERROR) {
9817 +               DEBUGOUT("I2CCMD Error bit set\n");
9818 +               return -E1000_ERR_PHY;
9819 +       }
9820 +
9821 +       /* Need to byte-swap the 16-bit value. */
9822 +       *data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00);
9823 +
9824 +       return E1000_SUCCESS;
9825 +}
9826 +
9827 +/**
9828 + *  e1000_write_phy_reg_i2c - Write PHY register using i2c
9829 + *  @hw: pointer to the HW structure
9830 + *  @offset: register offset to write to
9831 + *  @data: data to write at register offset
9832 + *
9833 + *  Writes the data to PHY register at the offset using the i2c interface.
9834 + **/
9835 +s32 e1000_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data)
9836 +{
9837 +       struct e1000_phy_info *phy = &hw->phy;
9838 +       u32 i, i2ccmd = 0;
9839 +       u16 phy_data_swapped;
9840 +
9841 +       DEBUGFUNC("e1000_write_phy_reg_i2c");
9842 +
9843 +       /* Swap the data bytes for the I2C interface */
9844 +       phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00);
9845 +
9846 +       /*
9847 +        * Set up Op-code, Phy Address, and register address in the I2CCMD
9848 +        * register.  The MAC will take care of interfacing with the
9849 +        * PHY to write the desired data.
9850 +        */
9851 +       i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
9852 +                 (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
9853 +                 E1000_I2CCMD_OPCODE_WRITE |
9854 +                 phy_data_swapped);
9855 +
9856 +       E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
9857 +
9858 +       /* Poll the ready bit to see if the I2C write completed */
9859 +       for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
9860 +               usec_delay(50);
9861 +               i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD);
9862 +               if (i2ccmd & E1000_I2CCMD_READY)
9863 +                       break;
9864 +       }
9865 +       if (!(i2ccmd & E1000_I2CCMD_READY)) {
9866 +               DEBUGOUT("I2CCMD Write did not complete\n");
9867 +               return -E1000_ERR_PHY;
9868 +       }
9869 +       if (i2ccmd & E1000_I2CCMD_ERROR) {
9870 +               DEBUGOUT("I2CCMD Error bit set\n");
9871 +               return -E1000_ERR_PHY;
9872 +       }
9873 +
9874 +       return E1000_SUCCESS;
9875 +}
9876 +
9877 +/**
9878 + *  e1000_read_phy_reg_m88 - Read m88 PHY register
9879 + *  @hw: pointer to the HW structure
9880 + *  @offset: register offset to be read
9881 + *  @data: pointer to the read data
9882 + *
9883 + *  Acquires semaphore, if necessary, then reads the PHY register at offset
9884 + *  and storing the retrieved information in data.  Release any acquired
9885 + *  semaphores before exiting.
9886 + **/
9887 +s32 e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data)
9888 +{
9889 +       s32 ret_val = E1000_SUCCESS;
9890 +
9891 +       DEBUGFUNC("e1000_read_phy_reg_m88");
9892 +
9893 +       if (!(hw->phy.ops.acquire))
9894 +               goto out;
9895 +
9896 +       ret_val = hw->phy.ops.acquire(hw);
9897 +       if (ret_val)
9898 +               goto out;
9899 +
9900 +       ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
9901 +                                         data);
9902 +
9903 +       hw->phy.ops.release(hw);
9904 +
9905 +out:
9906 +       return ret_val;
9907 +}
9908 +
9909 +/**
9910 + *  e1000_write_phy_reg_m88 - Write m88 PHY register
9911 + *  @hw: pointer to the HW structure
9912 + *  @offset: register offset to write to
9913 + *  @data: data to write at register offset
9914 + *
9915 + *  Acquires semaphore, if necessary, then writes the data to PHY register
9916 + *  at the offset.  Release any acquired semaphores before exiting.
9917 + **/
9918 +s32 e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data)
9919 +{
9920 +       s32 ret_val = E1000_SUCCESS;
9921 +
9922 +       DEBUGFUNC("e1000_write_phy_reg_m88");
9923 +
9924 +       if (!(hw->phy.ops.acquire))
9925 +               goto out;
9926 +
9927 +       ret_val = hw->phy.ops.acquire(hw);
9928 +       if (ret_val)
9929 +               goto out;
9930 +
9931 +       ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
9932 +                                          data);
9933 +
9934 +       hw->phy.ops.release(hw);
9935 +
9936 +out:
9937 +       return ret_val;
9938 +}
9939 +
9940 +/**
9941 + *  __e1000_read_phy_reg_igp - Read igp PHY register
9942 + *  @hw: pointer to the HW structure
9943 + *  @offset: register offset to be read
9944 + *  @data: pointer to the read data
9945 + *  @locked: semaphore has already been acquired or not
9946 + *
9947 + *  Acquires semaphore, if necessary, then reads the PHY register at offset
9948 + *  and stores the retrieved information in data.  Release any acquired
9949 + *  semaphores before exiting.
9950 + **/
9951 +static s32 __e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data,
9952 +                                    bool locked)
9953 +{
9954 +       s32 ret_val = E1000_SUCCESS;
9955 +
9956 +       DEBUGFUNC("__e1000_read_phy_reg_igp");
9957 +
9958 +       if (!locked) {
9959 +               if (!(hw->phy.ops.acquire))
9960 +                       goto out;
9961 +
9962 +               ret_val = hw->phy.ops.acquire(hw);
9963 +               if (ret_val)
9964 +                       goto out;
9965 +       }
9966 +
9967 +       if (offset > MAX_PHY_MULTI_PAGE_REG) {
9968 +               ret_val = e1000_write_phy_reg_mdic(hw,
9969 +                                                  IGP01E1000_PHY_PAGE_SELECT,
9970 +                                                  (u16)offset);
9971 +               if (ret_val)
9972 +                       goto release;
9973 +       }
9974 +
9975 +       ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
9976 +                                         data);
9977 +
9978 +release:
9979 +       if (!locked)
9980 +               hw->phy.ops.release(hw);
9981 +out:
9982 +       return ret_val;
9983 +}
9984 +/**
9985 + *  e1000_read_phy_reg_igp - Read igp PHY register
9986 + *  @hw: pointer to the HW structure
9987 + *  @offset: register offset to be read
9988 + *  @data: pointer to the read data
9989 + *
9990 + *  Acquires semaphore then reads the PHY register at offset and stores the
9991 + *  retrieved information in data.
9992 + *  Release the acquired semaphore before exiting.
9993 + **/
9994 +s32 e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
9995 +{
9996 +       return __e1000_read_phy_reg_igp(hw, offset, data, false);
9997 +}
9998 +
9999 +/**
10000 + *  e1000_read_phy_reg_igp_locked - Read igp PHY register
10001 + *  @hw: pointer to the HW structure
10002 + *  @offset: register offset to be read
10003 + *  @data: pointer to the read data
10004 + *
10005 + *  Reads the PHY register at offset and stores the retrieved information
10006 + *  in data.  Assumes semaphore already acquired.
10007 + **/
10008 +s32 e1000_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data)
10009 +{
10010 +       return __e1000_read_phy_reg_igp(hw, offset, data, true);
10011 +}
10012 +
10013 +/**
10014 + *  e1000_write_phy_reg_igp - Write igp PHY register
10015 + *  @hw: pointer to the HW structure
10016 + *  @offset: register offset to write to
10017 + *  @data: data to write at register offset
10018 + *  @locked: semaphore has already been acquired or not
10019 + *
10020 + *  Acquires semaphore, if necessary, then writes the data to PHY register
10021 + *  at the offset.  Release any acquired semaphores before exiting.
10022 + **/
10023 +static s32 __e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data,
10024 +                                     bool locked)
10025 +{
10026 +       s32 ret_val = E1000_SUCCESS;
10027 +
10028 +       DEBUGFUNC("e1000_write_phy_reg_igp");
10029 +
10030 +       if (!locked) {
10031 +               if (!(hw->phy.ops.acquire))
10032 +                       goto out;
10033 +
10034 +               ret_val = hw->phy.ops.acquire(hw);
10035 +               if (ret_val)
10036 +                       goto out;
10037 +       }
10038 +
10039 +       if (offset > MAX_PHY_MULTI_PAGE_REG) {
10040 +               ret_val = e1000_write_phy_reg_mdic(hw,
10041 +                                                  IGP01E1000_PHY_PAGE_SELECT,
10042 +                                                  (u16)offset);
10043 +               if (ret_val)
10044 +                       goto release;
10045 +       }
10046 +
10047 +       ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
10048 +                                          data);
10049 +
10050 +release:
10051 +       if (!locked)
10052 +               hw->phy.ops.release(hw);
10053 +
10054 +out:
10055 +       return ret_val;
10056 +}
10057 +
10058 +/**
10059 + *  e1000_write_phy_reg_igp - Write igp PHY register
10060 + *  @hw: pointer to the HW structure
10061 + *  @offset: register offset to write to
10062 + *  @data: data to write at register offset
10063 + *
10064 + *  Acquires semaphore then writes the data to PHY register
10065 + *  at the offset.  Release any acquired semaphores before exiting.
10066 + **/
10067 +s32 e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
10068 +{
10069 +       return __e1000_write_phy_reg_igp(hw, offset, data, false);
10070 +}
10071 +
10072 +/**
10073 + *  e1000_write_phy_reg_igp_locked - Write igp PHY register
10074 + *  @hw: pointer to the HW structure
10075 + *  @offset: register offset to write to
10076 + *  @data: data to write at register offset
10077 + *
10078 + *  Writes the data to PHY register at the offset.
10079 + *  Assumes semaphore already acquired.
10080 + **/
10081 +s32 e1000_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data)
10082 +{
10083 +       return __e1000_write_phy_reg_igp(hw, offset, data, true);
10084 +}
10085 +
10086 +/**
10087 + *  __e1000_read_kmrn_reg - Read kumeran register
10088 + *  @hw: pointer to the HW structure
10089 + *  @offset: register offset to be read
10090 + *  @data: pointer to the read data
10091 + *  @locked: semaphore has already been acquired or not
10092 + *
10093 + *  Acquires semaphore, if necessary.  Then reads the PHY register at offset
10094 + *  using the kumeran interface.  The information retrieved is stored in data.
10095 + *  Release any acquired semaphores before exiting.
10096 + **/
10097 +static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data,
10098 +                                 bool locked)
10099 +{
10100 +       u32 kmrnctrlsta;
10101 +       s32 ret_val = E1000_SUCCESS;
10102 +
10103 +       DEBUGFUNC("__e1000_read_kmrn_reg");
10104 +
10105 +       if (!locked) {
10106 +               if (!(hw->phy.ops.acquire))
10107 +                       goto out;
10108 +
10109 +               ret_val = hw->phy.ops.acquire(hw);
10110 +               if (ret_val)
10111 +                       goto out;
10112 +       }
10113 +
10114 +       kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
10115 +                      E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
10116 +       E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta);
10117 +
10118 +       usec_delay(2);
10119 +
10120 +       kmrnctrlsta = E1000_READ_REG(hw, E1000_KMRNCTRLSTA);
10121 +       *data = (u16)kmrnctrlsta;
10122 +
10123 +       if (!locked)
10124 +               hw->phy.ops.release(hw);
10125 +
10126 +out:
10127 +       return ret_val;
10128 +}
10129 +
10130 +/**
10131 + *  e1000_read_kmrn_reg_generic -  Read kumeran register
10132 + *  @hw: pointer to the HW structure
10133 + *  @offset: register offset to be read
10134 + *  @data: pointer to the read data
10135 + *
10136 + *  Acquires semaphore then reads the PHY register at offset using the
10137 + *  kumeran interface.  The information retrieved is stored in data.
10138 + *  Release the acquired semaphore before exiting.
10139 + **/
10140 +s32 e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data)
10141 +{
10142 +       return __e1000_read_kmrn_reg(hw, offset, data, false);
10143 +}
10144 +
10145 +/**
10146 + *  e1000_read_kmrn_reg_locked -  Read kumeran register
10147 + *  @hw: pointer to the HW structure
10148 + *  @offset: register offset to be read
10149 + *  @data: pointer to the read data
10150 + *
10151 + *  Reads the PHY register at offset using the kumeran interface.  The
10152 + *  information retrieved is stored in data.
10153 + *  Assumes semaphore already acquired.
10154 + **/
10155 +s32 e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data)
10156 +{
10157 +       return __e1000_read_kmrn_reg(hw, offset, data, true);
10158 +}
10159 +
10160 +/**
10161 + *  __e1000_write_kmrn_reg - Write kumeran register
10162 + *  @hw: pointer to the HW structure
10163 + *  @offset: register offset to write to
10164 + *  @data: data to write at register offset
10165 + *  @locked: semaphore has already been acquired or not
10166 + *
10167 + *  Acquires semaphore, if necessary.  Then write the data to PHY register
10168 + *  at the offset using the kumeran interface.  Release any acquired semaphores
10169 + *  before exiting.
10170 + **/
10171 +static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data,
10172 +                                  bool locked)
10173 +{
10174 +       u32 kmrnctrlsta;
10175 +       s32 ret_val = E1000_SUCCESS;
10176 +
10177 +       DEBUGFUNC("e1000_write_kmrn_reg_generic");
10178 +
10179 +       if (!locked) {
10180 +               if (!(hw->phy.ops.acquire))
10181 +                       goto out;
10182 +
10183 +               ret_val = hw->phy.ops.acquire(hw);
10184 +               if (ret_val)
10185 +                       goto out;
10186 +       }
10187 +
10188 +       kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
10189 +                      E1000_KMRNCTRLSTA_OFFSET) | data;
10190 +       E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta);
10191 +
10192 +       usec_delay(2);
10193 +
10194 +       if (!locked)
10195 +               hw->phy.ops.release(hw);
10196 +
10197 +out:
10198 +       return ret_val;
10199 +}
10200 +
10201 +/**
10202 + *  e1000_write_kmrn_reg_generic -  Write kumeran register
10203 + *  @hw: pointer to the HW structure
10204 + *  @offset: register offset to write to
10205 + *  @data: data to write at register offset
10206 + *
10207 + *  Acquires semaphore then writes the data to the PHY register at the offset
10208 + *  using the kumeran interface.  Release the acquired semaphore before exiting.
10209 + **/
10210 +s32 e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data)
10211 +{
10212 +       return __e1000_write_kmrn_reg(hw, offset, data, false);
10213 +}
10214 +
10215 +/**
10216 + *  e1000_write_kmrn_reg_locked -  Write kumeran register
10217 + *  @hw: pointer to the HW structure
10218 + *  @offset: register offset to write to
10219 + *  @data: data to write at register offset
10220 + *
10221 + *  Write the data to PHY register at the offset using the kumeran interface.
10222 + *  Assumes semaphore already acquired.
10223 + **/
10224 +s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data)
10225 +{
10226 +       return __e1000_write_kmrn_reg(hw, offset, data, true);
10227 +}
10228 +
10229 +/**
10230 + *  e1000_copper_link_setup_m88 - Setup m88 PHY's for copper link
10231 + *  @hw: pointer to the HW structure
10232 + *
10233 + *  Sets up MDI/MDI-X and polarity for m88 PHY's.  If necessary, transmit clock
10234 + *  and downshift values are set also.
10235 + **/
10236 +s32 e1000_copper_link_setup_m88(struct e1000_hw *hw)
10237 +{
10238 +       struct e1000_phy_info *phy = &hw->phy;
10239 +       s32 ret_val;
10240 +       u16 phy_data;
10241 +
10242 +       DEBUGFUNC("e1000_copper_link_setup_m88");
10243 +
10244 +       if (phy->reset_disable) {
10245 +               ret_val = E1000_SUCCESS;
10246 +               goto out;
10247 +       }
10248 +
10249 +       /* Enable CRS on TX. This must be set for half-duplex operation. */
10250 +       ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
10251 +       if (ret_val)
10252 +               goto out;
10253 +
10254 +       phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
10255 +
10256 +       /*
10257 +        * Options:
10258 +        *   MDI/MDI-X = 0 (default)
10259 +        *   0 - Auto for all speeds
10260 +        *   1 - MDI mode
10261 +        *   2 - MDI-X mode
10262 +        *   3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
10263 +        */
10264 +       phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
10265 +
10266 +       switch (phy->mdix) {
10267 +       case 1:
10268 +               phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
10269 +               break;
10270 +       case 2:
10271 +               phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
10272 +               break;
10273 +       case 3:
10274 +               phy_data |= M88E1000_PSCR_AUTO_X_1000T;
10275 +               break;
10276 +       case 0:
10277 +       default:
10278 +               phy_data |= M88E1000_PSCR_AUTO_X_MODE;
10279 +               break;
10280 +       }
10281 +
10282 +       /*
10283 +        * Options:
10284 +        *   disable_polarity_correction = 0 (default)
10285 +        *       Automatic Correction for Reversed Cable Polarity
10286 +        *   0 - Disabled
10287 +        *   1 - Enabled
10288 +        */
10289 +       phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
10290 +       if (phy->disable_polarity_correction == 1)
10291 +               phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
10292 +
10293 +       ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
10294 +       if (ret_val)
10295 +               goto out;
10296 +
10297 +       if (phy->revision < E1000_REVISION_4) {
10298 +               /*
10299 +                * Force TX_CLK in the Extended PHY Specific Control Register
10300 +                * to 25MHz clock.
10301 +                */
10302 +               ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
10303 +                                            &phy_data);
10304 +               if (ret_val)
10305 +                       goto out;
10306 +
10307 +               phy_data |= M88E1000_EPSCR_TX_CLK_25;
10308 +
10309 +               if ((phy->revision == E1000_REVISION_2) &&
10310 +                   (phy->id == M88E1111_I_PHY_ID)) {
10311 +                       /* 82573L PHY - set the downshift counter to 5x. */
10312 +                       phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK;
10313 +                       phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X;
10314 +               } else {
10315 +                       /* Configure Master and Slave downshift values */
10316 +                       phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK |
10317 +                                    M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK);
10318 +                       phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
10319 +                                    M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
10320 +               }
10321 +               ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
10322 +                                            phy_data);
10323 +               if (ret_val)
10324 +                       goto out;
10325 +       }
10326 +
10327 +       /* Commit the changes. */
10328 +       ret_val = phy->ops.commit(hw);
10329 +       if (ret_val) {
10330 +               DEBUGOUT("Error committing the PHY changes\n");
10331 +               goto out;
10332 +       }
10333 +
10334 +out:
10335 +       return ret_val;
10336 +}
10337 +
10338 +/**
10339 + *  e1000_copper_link_setup_igp - Setup igp PHY's for copper link
10340 + *  @hw: pointer to the HW structure
10341 + *
10342 + *  Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for
10343 + *  igp PHY's.
10344 + **/
10345 +s32 e1000_copper_link_setup_igp(struct e1000_hw *hw)
10346 +{
10347 +       struct e1000_phy_info *phy = &hw->phy;
10348 +       s32 ret_val;
10349 +       u16 data;
10350 +
10351 +       DEBUGFUNC("e1000_copper_link_setup_igp");
10352 +
10353 +       if (phy->reset_disable) {
10354 +               ret_val = E1000_SUCCESS;
10355 +               goto out;
10356 +       }
10357 +
10358 +       ret_val = hw->phy.ops.reset(hw);
10359 +       if (ret_val) {
10360 +               DEBUGOUT("Error resetting the PHY.\n");
10361 +               goto out;
10362 +       }
10363 +
10364 +       /*
10365 +        * Wait 100ms for MAC to configure PHY from NVM settings, to avoid
10366 +        * timeout issues when LFS is enabled.
10367 +        */
10368 +       msec_delay(100);
10369 +
10370 +       /*
10371 +        * The NVM settings will configure LPLU in D3 for
10372 +        * non-IGP1 PHYs.
10373 +        */
10374 +       if (phy->type == e1000_phy_igp) {
10375 +               /* disable lplu d3 during driver init */
10376 +               ret_val = hw->phy.ops.set_d3_lplu_state(hw, false);
10377 +               if (ret_val) {
10378 +                       DEBUGOUT("Error Disabling LPLU D3\n");
10379 +                       goto out;
10380 +               }
10381 +       }
10382 +
10383 +       /* disable lplu d0 during driver init */
10384 +       if (hw->phy.ops.set_d0_lplu_state) {
10385 +               ret_val = hw->phy.ops.set_d0_lplu_state(hw, false);
10386 +               if (ret_val) {
10387 +                       DEBUGOUT("Error Disabling LPLU D0\n");
10388 +                       goto out;
10389 +               }
10390 +       }
10391 +       /* Configure mdi-mdix settings */
10392 +       ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &data);
10393 +       if (ret_val)
10394 +               goto out;
10395 +
10396 +       data &= ~IGP01E1000_PSCR_AUTO_MDIX;
10397 +
10398 +       switch (phy->mdix) {
10399 +       case 1:
10400 +               data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
10401 +               break;
10402 +       case 2:
10403 +               data |= IGP01E1000_PSCR_FORCE_MDI_MDIX;
10404 +               break;
10405 +       case 0:
10406 +       default:
10407 +               data |= IGP01E1000_PSCR_AUTO_MDIX;
10408 +               break;
10409 +       }
10410 +       ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, data);
10411 +       if (ret_val)
10412 +               goto out;
10413 +
10414 +       /* set auto-master slave resolution settings */
10415 +       if (hw->mac.autoneg) {
10416 +               /*
10417 +                * when autonegotiation advertisement is only 1000Mbps then we
10418 +                * should disable SmartSpeed and enable Auto MasterSlave
10419 +                * resolution as hardware default.
10420 +                */
10421 +               if (phy->autoneg_advertised == ADVERTISE_1000_FULL) {
10422 +                       /* Disable SmartSpeed */
10423 +                       ret_val = phy->ops.read_reg(hw,
10424 +                                                    IGP01E1000_PHY_PORT_CONFIG,
10425 +                                                    &data);
10426 +                       if (ret_val)
10427 +                               goto out;
10428 +
10429 +                       data &= ~IGP01E1000_PSCFR_SMART_SPEED;
10430 +                       ret_val = phy->ops.write_reg(hw,
10431 +                                                    IGP01E1000_PHY_PORT_CONFIG,
10432 +                                                    data);
10433 +                       if (ret_val)
10434 +                               goto out;
10435 +
10436 +                       /* Set auto Master/Slave resolution process */
10437 +                       ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data);
10438 +                       if (ret_val)
10439 +                               goto out;
10440 +
10441 +                       data &= ~CR_1000T_MS_ENABLE;
10442 +                       ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data);
10443 +                       if (ret_val)
10444 +                               goto out;
10445 +               }
10446 +
10447 +               ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data);
10448 +               if (ret_val)
10449 +                       goto out;
10450 +
10451 +               /* load defaults for future use */
10452 +               phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ?
10453 +                       ((data & CR_1000T_MS_VALUE) ?
10454 +                       e1000_ms_force_master :
10455 +                       e1000_ms_force_slave) :
10456 +                       e1000_ms_auto;
10457 +
10458 +               switch (phy->ms_type) {
10459 +               case e1000_ms_force_master:
10460 +                       data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
10461 +                       break;
10462 +               case e1000_ms_force_slave:
10463 +                       data |= CR_1000T_MS_ENABLE;
10464 +                       data &= ~(CR_1000T_MS_VALUE);
10465 +                       break;
10466 +               case e1000_ms_auto:
10467 +                       data &= ~CR_1000T_MS_ENABLE;    /* fall through */
10468 +               default:
10469 +                       break;
10470 +               }
10471 +               ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data);
10472 +               if (ret_val)
10473 +                       goto out;
10474 +       }
10475 +
10476 +out:
10477 +       return ret_val;
10478 +}
10479 +
10480 +/**
10481 + *  e1000_copper_link_autoneg - Setup/Enable autoneg for copper link
10482 + *  @hw: pointer to the HW structure
10483 + *
10484 + *  Performs initial bounds checking on autoneg advertisement parameter, then
10485 + *  configure to advertise the full capability.  Setup the PHY to autoneg
10486 + *  and restart the negotiation process between the link partner.  If
10487 + *  autoneg_wait_to_complete, then wait for autoneg to complete before exiting.
10488 + **/
10489 +s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
10490 +{
10491 +       struct e1000_phy_info *phy = &hw->phy;
10492 +       s32 ret_val;
10493 +       u16 phy_ctrl;
10494 +
10495 +       DEBUGFUNC("e1000_copper_link_autoneg");
10496 +
10497 +       /*
10498 +        * Perform some bounds checking on the autoneg advertisement
10499 +        * parameter.
10500 +        */
10501 +       phy->autoneg_advertised &= phy->autoneg_mask;
10502 +
10503 +       /*
10504 +        * If autoneg_advertised is zero, we assume it was not defaulted
10505 +        * by the calling code so we set to advertise full capability.
10506 +        */
10507 +       if (phy->autoneg_advertised == 0)
10508 +               phy->autoneg_advertised = phy->autoneg_mask;
10509 +
10510 +       DEBUGOUT("Reconfiguring auto-neg advertisement params\n");
10511 +       ret_val = e1000_phy_setup_autoneg(hw);
10512 +       if (ret_val) {
10513 +               DEBUGOUT("Error Setting up Auto-Negotiation\n");
10514 +               goto out;
10515 +       }
10516 +       DEBUGOUT("Restarting Auto-Neg\n");
10517 +
10518 +       /*
10519 +        * Restart auto-negotiation by setting the Auto Neg Enable bit and
10520 +        * the Auto Neg Restart bit in the PHY control register.
10521 +        */
10522 +       ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
10523 +       if (ret_val)
10524 +               goto out;
10525 +
10526 +       phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
10527 +       ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
10528 +       if (ret_val)
10529 +               goto out;
10530 +
10531 +       /*
10532 +        * Does the user want to wait for Auto-Neg to complete here, or
10533 +        * check at a later time (for example, callback routine).
10534 +        */
10535 +       if (phy->autoneg_wait_to_complete) {
10536 +               ret_val = hw->mac.ops.wait_autoneg(hw);
10537 +               if (ret_val) {
10538 +                       DEBUGOUT("Error while waiting for "
10539 +                                "autoneg to complete\n");
10540 +                       goto out;
10541 +               }
10542 +       }
10543 +
10544 +       hw->mac.get_link_status = true;
10545 +
10546 +out:
10547 +       return ret_val;
10548 +}
10549 +
10550 +/**
10551 + *  e1000_phy_setup_autoneg - Configure PHY for auto-negotiation
10552 + *  @hw: pointer to the HW structure
10553 + *
10554 + *  Reads the MII auto-neg advertisement register and/or the 1000T control
10555 + *  register and if the PHY is already setup for auto-negotiation, then
10556 + *  return successful.  Otherwise, setup advertisement and flow control to
10557 + *  the appropriate values for the wanted auto-negotiation.
10558 + **/
10559 +static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
10560 +{
10561 +       struct e1000_phy_info *phy = &hw->phy;
10562 +       s32 ret_val;
10563 +       u16 mii_autoneg_adv_reg;
10564 +       u16 mii_1000t_ctrl_reg = 0;
10565 +
10566 +       DEBUGFUNC("e1000_phy_setup_autoneg");
10567 +
10568 +       phy->autoneg_advertised &= phy->autoneg_mask;
10569 +
10570 +       /* Read the MII Auto-Neg Advertisement Register (Address 4). */
10571 +       ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
10572 +       if (ret_val)
10573 +               goto out;
10574 +
10575 +       if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
10576 +               /* Read the MII 1000Base-T Control Register (Address 9). */
10577 +               ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL,
10578 +                                           &mii_1000t_ctrl_reg);
10579 +               if (ret_val)
10580 +                       goto out;
10581 +       }
10582 +
10583 +       /*
10584 +        * Need to parse both autoneg_advertised and fc and set up
10585 +        * the appropriate PHY registers.  First we will parse for
10586 +        * autoneg_advertised software override.  Since we can advertise
10587 +        * a plethora of combinations, we need to check each bit
10588 +        * individually.
10589 +        */
10590 +
10591 +       /*
10592 +        * First we clear all the 10/100 mb speed bits in the Auto-Neg
10593 +        * Advertisement Register (Address 4) and the 1000 mb speed bits in
10594 +        * the  1000Base-T Control Register (Address 9).
10595 +        */
10596 +       mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS |
10597 +                                NWAY_AR_100TX_HD_CAPS |
10598 +                                NWAY_AR_10T_FD_CAPS   |
10599 +                                NWAY_AR_10T_HD_CAPS);
10600 +       mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS);
10601 +
10602 +       DEBUGOUT1("autoneg_advertised %x\n", phy->autoneg_advertised);
10603 +
10604 +       /* Do we want to advertise 10 Mb Half Duplex? */
10605 +       if (phy->autoneg_advertised & ADVERTISE_10_HALF) {
10606 +               DEBUGOUT("Advertise 10mb Half duplex\n");
10607 +               mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
10608 +       }
10609 +
10610 +       /* Do we want to advertise 10 Mb Full Duplex? */
10611 +       if (phy->autoneg_advertised & ADVERTISE_10_FULL) {
10612 +               DEBUGOUT("Advertise 10mb Full duplex\n");
10613 +               mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
10614 +       }
10615 +
10616 +       /* Do we want to advertise 100 Mb Half Duplex? */
10617 +       if (phy->autoneg_advertised & ADVERTISE_100_HALF) {
10618 +               DEBUGOUT("Advertise 100mb Half duplex\n");
10619 +               mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
10620 +       }
10621 +
10622 +       /* Do we want to advertise 100 Mb Full Duplex? */
10623 +       if (phy->autoneg_advertised & ADVERTISE_100_FULL) {
10624 +               DEBUGOUT("Advertise 100mb Full duplex\n");
10625 +               mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
10626 +       }
10627 +
10628 +       /* We do not allow the Phy to advertise 1000 Mb Half Duplex */
10629 +       if (phy->autoneg_advertised & ADVERTISE_1000_HALF)
10630 +               DEBUGOUT("Advertise 1000mb Half duplex request denied!\n");
10631 +
10632 +       /* Do we want to advertise 1000 Mb Full Duplex? */
10633 +       if (phy->autoneg_advertised & ADVERTISE_1000_FULL) {
10634 +               DEBUGOUT("Advertise 1000mb Full duplex\n");
10635 +               mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
10636 +       }
10637 +
10638 +       /*
10639 +        * Check for a software override of the flow control settings, and
10640 +        * setup the PHY advertisement registers accordingly.  If
10641 +        * auto-negotiation is enabled, then software will have to set the
10642 +        * "PAUSE" bits to the correct value in the Auto-Negotiation
10643 +        * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-
10644 +        * negotiation.
10645 +        *
10646 +        * The possible values of the "fc" parameter are:
10647 +        *      0:  Flow control is completely disabled
10648 +        *      1:  Rx flow control is enabled (we can receive pause frames
10649 +        *          but not send pause frames).
10650 +        *      2:  Tx flow control is enabled (we can send pause frames
10651 +        *          but we do not support receiving pause frames).
10652 +        *      3:  Both Rx and Tx flow control (symmetric) are enabled.
10653 +        *  other:  No software override.  The flow control configuration
10654 +        *          in the EEPROM is used.
10655 +        */
10656 +       switch (hw->fc.current_mode) {
10657 +       case e1000_fc_none:
10658 +               /*
10659 +                * Flow control (Rx & Tx) is completely disabled by a
10660 +                * software over-ride.
10661 +                */
10662 +               mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
10663 +               break;
10664 +       case e1000_fc_rx_pause:
10665 +               /*
10666 +                * Rx Flow control is enabled, and Tx Flow control is
10667 +                * disabled, by a software over-ride.
10668 +                *
10669 +                * Since there really isn't a way to advertise that we are
10670 +                * capable of Rx Pause ONLY, we will advertise that we
10671 +                * support both symmetric and asymmetric Rx PAUSE.  Later
10672 +                * (in e1000_config_fc_after_link_up) we will disable the
10673 +                * hw's ability to send PAUSE frames.
10674 +                */
10675 +               mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
10676 +               break;
10677 +       case e1000_fc_tx_pause:
10678 +               /*
10679 +                * Tx Flow control is enabled, and Rx Flow control is
10680 +                * disabled, by a software over-ride.
10681 +                */
10682 +               mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
10683 +               mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
10684 +               break;
10685 +       case e1000_fc_full:
10686 +               /*
10687 +                * Flow control (both Rx and Tx) is enabled by a software
10688 +                * over-ride.
10689 +                */
10690 +               mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
10691 +               break;
10692 +       default:
10693 +               DEBUGOUT("Flow control param set incorrectly\n");
10694 +               ret_val = -E1000_ERR_CONFIG;
10695 +               goto out;
10696 +       }
10697 +
10698 +       ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
10699 +       if (ret_val)
10700 +               goto out;
10701 +
10702 +       DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
10703 +
10704 +       if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
10705 +               ret_val = phy->ops.write_reg(hw,
10706 +                                             PHY_1000T_CTRL,
10707 +                                             mii_1000t_ctrl_reg);
10708 +               if (ret_val)
10709 +                       goto out;
10710 +       }
10711 +
10712 +out:
10713 +       return ret_val;
10714 +}
10715 +
10716 +/**
10717 + *  e1000_setup_copper_link_generic - Configure copper link settings
10718 + *  @hw: pointer to the HW structure
10719 + *
10720 + *  Calls the appropriate function to configure the link for auto-neg or forced
10721 + *  speed and duplex.  Then we check for link, once link is established calls
10722 + *  to configure collision distance and flow control are called.  If link is
10723 + *  not established, we return -E1000_ERR_PHY (-2).
10724 + **/
10725 +s32 e1000_setup_copper_link_generic(struct e1000_hw *hw)
10726 +{
10727 +       s32 ret_val;
10728 +       bool link;
10729 +
10730 +       DEBUGFUNC("e1000_setup_copper_link_generic");
10731 +
10732 +       if (hw->mac.autoneg) {
10733 +               /*
10734 +                * Setup autoneg and flow control advertisement and perform
10735 +                * autonegotiation.
10736 +                */
10737 +               ret_val = e1000_copper_link_autoneg(hw);
10738 +               if (ret_val)
10739 +                       goto out;
10740 +       } else {
10741 +               /*
10742 +                * PHY will be set to 10H, 10F, 100H or 100F
10743 +                * depending on user settings.
10744 +                */
10745 +               DEBUGOUT("Forcing Speed and Duplex\n");
10746 +               ret_val = hw->phy.ops.force_speed_duplex(hw);
10747 +               if (ret_val) {
10748 +                       DEBUGOUT("Error Forcing Speed and Duplex\n");
10749 +                       goto out;
10750 +               }
10751 +       }
10752 +
10753 +       /*
10754 +        * Check link status. Wait up to 100 microseconds for link to become
10755 +        * valid.
10756 +        */
10757 +       ret_val = e1000_phy_has_link_generic(hw,
10758 +                                            COPPER_LINK_UP_LIMIT,
10759 +                                            10,
10760 +                                            &link);
10761 +       if (ret_val)
10762 +               goto out;
10763 +
10764 +       if (link) {
10765 +               DEBUGOUT("Valid link established!!!\n");
10766 +               e1000_config_collision_dist_generic(hw);
10767 +               ret_val = e1000_config_fc_after_link_up_generic(hw);
10768 +       } else {
10769 +               DEBUGOUT("Unable to establish link!!!\n");
10770 +       }
10771 +
10772 +out:
10773 +       return ret_val;
10774 +}
10775 +
10776 +/**
10777 + *  e1000_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY
10778 + *  @hw: pointer to the HW structure
10779 + *
10780 + *  Calls the PHY setup function to force speed and duplex.  Clears the
10781 + *  auto-crossover to force MDI manually.  Waits for link and returns
10782 + *  successful if link up is successful, else -E1000_ERR_PHY (-2).
10783 + **/
10784 +s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw)
10785 +{
10786 +       struct e1000_phy_info *phy = &hw->phy;
10787 +       s32 ret_val;
10788 +       u16 phy_data;
10789 +       bool link;
10790 +
10791 +       DEBUGFUNC("e1000_phy_force_speed_duplex_igp");
10792 +
10793 +       ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
10794 +       if (ret_val)
10795 +               goto out;
10796 +
10797 +       e1000_phy_force_speed_duplex_setup(hw, &phy_data);
10798 +
10799 +       ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
10800 +       if (ret_val)
10801 +               goto out;
10802 +
10803 +       /*
10804 +        * Clear Auto-Crossover to force MDI manually.  IGP requires MDI
10805 +        * forced whenever speed and duplex are forced.
10806 +        */
10807 +       ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
10808 +       if (ret_val)
10809 +               goto out;
10810 +
10811 +       phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
10812 +       phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
10813 +
10814 +       ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
10815 +       if (ret_val)
10816 +               goto out;
10817 +
10818 +       DEBUGOUT1("IGP PSCR: %X\n", phy_data);
10819 +
10820 +       usec_delay(1);
10821 +
10822 +       if (phy->autoneg_wait_to_complete) {
10823 +               DEBUGOUT("Waiting for forced speed/duplex link on IGP phy.\n");
10824 +
10825 +               ret_val = e1000_phy_has_link_generic(hw,
10826 +                                                    PHY_FORCE_LIMIT,
10827 +                                                    100000,
10828 +                                                    &link);
10829 +               if (ret_val)
10830 +                       goto out;
10831 +
10832 +               if (!link)
10833 +                       DEBUGOUT("Link taking longer than expected.\n");
10834 +
10835 +               /* Try once more */
10836 +               ret_val = e1000_phy_has_link_generic(hw,
10837 +                                                    PHY_FORCE_LIMIT,
10838 +                                                    100000,
10839 +                                                    &link);
10840 +               if (ret_val)
10841 +                       goto out;
10842 +       }
10843 +
10844 +out:
10845 +       return ret_val;
10846 +}
10847 +
10848 +/**
10849 + *  e1000_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY
10850 + *  @hw: pointer to the HW structure
10851 + *
10852 + *  Calls the PHY setup function to force speed and duplex.  Clears the
10853 + *  auto-crossover to force MDI manually.  Resets the PHY to commit the
10854 + *  changes.  If time expires while waiting for link up, we reset the DSP.
10855 + *  After reset, TX_CLK and CRS on Tx must be set.  Return successful upon
10856 + *  successful completion, else return corresponding error code.
10857 + **/
10858 +s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw)
10859 +{
10860 +       struct e1000_phy_info *phy = &hw->phy;
10861 +       s32 ret_val;
10862 +       u16 phy_data;
10863 +       bool link;
10864 +
10865 +       DEBUGFUNC("e1000_phy_force_speed_duplex_m88");
10866 +
10867 +       /*
10868 +        * Clear Auto-Crossover to force MDI manually.  M88E1000 requires MDI
10869 +        * forced whenever speed and duplex are forced.
10870 +        */
10871 +       ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
10872 +       if (ret_val)
10873 +               goto out;
10874 +
10875 +       phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
10876 +       ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
10877 +       if (ret_val)
10878 +               goto out;
10879 +
10880 +       DEBUGOUT1("M88E1000 PSCR: %X\n", phy_data);
10881 +
10882 +       ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
10883 +       if (ret_val)
10884 +               goto out;
10885 +
10886 +       e1000_phy_force_speed_duplex_setup(hw, &phy_data);
10887 +
10888 +       ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
10889 +       if (ret_val)
10890 +               goto out;
10891 +
10892 +       /* Reset the phy to commit changes. */
10893 +       ret_val = hw->phy.ops.commit(hw);
10894 +       if (ret_val)
10895 +               goto out;
10896 +
10897 +       if (phy->autoneg_wait_to_complete) {
10898 +               DEBUGOUT("Waiting for forced speed/duplex link on M88 phy.\n");
10899 +
10900 +               ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
10901 +                                                    100000, &link);
10902 +               if (ret_val)
10903 +                       goto out;
10904 +
10905 +               if (!link) {
10906 +                       /*
10907 +                        * We didn't get link.
10908 +                        * Reset the DSP and cross our fingers.
10909 +                        */
10910 +                       ret_val = phy->ops.write_reg(hw,
10911 +                                                     M88E1000_PHY_PAGE_SELECT,
10912 +                                                     0x001d);
10913 +                       if (ret_val)
10914 +                               goto out;
10915 +                       ret_val = e1000_phy_reset_dsp_generic(hw);
10916 +                       if (ret_val)
10917 +                               goto out;
10918 +               }
10919 +
10920 +               /* Try once more */
10921 +               ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
10922 +                                                    100000, &link);
10923 +               if (ret_val)
10924 +                       goto out;
10925 +       }
10926 +
10927 +       ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
10928 +       if (ret_val)
10929 +               goto out;
10930 +
10931 +       /*
10932 +        * Resetting the phy means we need to re-force TX_CLK in the
10933 +        * Extended PHY Specific Control Register to 25MHz clock from
10934 +        * the reset value of 2.5MHz.
10935 +        */
10936 +       phy_data |= M88E1000_EPSCR_TX_CLK_25;
10937 +       ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
10938 +       if (ret_val)
10939 +               goto out;
10940 +
10941 +       /*
10942 +        * In addition, we must re-enable CRS on Tx for both half and full
10943 +        * duplex.
10944 +        */
10945 +       ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
10946 +       if (ret_val)
10947 +               goto out;
10948 +
10949 +       phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
10950 +       ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
10951 +
10952 +out:
10953 +       return ret_val;
10954 +}
10955 +
10956 +/**
10957 + *  e1000_phy_force_speed_duplex_ife - Force PHY speed & duplex
10958 + *  @hw: pointer to the HW structure
10959 + *
10960 + *  Forces the speed and duplex settings of the PHY.
10961 + *  This is a function pointer entry point only called by
10962 + *  PHY setup routines.
10963 + **/
10964 +s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw)
10965 +{
10966 +       struct e1000_phy_info *phy = &hw->phy;
10967 +       s32 ret_val;
10968 +       u16 data;
10969 +       bool link;
10970 +
10971 +       DEBUGFUNC("e1000_phy_force_speed_duplex_ife");
10972 +
10973 +       if (phy->type != e1000_phy_ife) {
10974 +               ret_val = e1000_phy_force_speed_duplex_igp(hw);
10975 +               goto out;
10976 +       }
10977 +
10978 +       ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &data);
10979 +       if (ret_val)
10980 +               goto out;
10981 +
10982 +       e1000_phy_force_speed_duplex_setup(hw, &data);
10983 +
10984 +       ret_val = phy->ops.write_reg(hw, PHY_CONTROL, data);
10985 +       if (ret_val)
10986 +               goto out;
10987 +
10988 +       /* Disable MDI-X support for 10/100 */
10989 +       ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data);
10990 +       if (ret_val)
10991 +               goto out;
10992 +
10993 +       data &= ~IFE_PMC_AUTO_MDIX;
10994 +       data &= ~IFE_PMC_FORCE_MDIX;
10995 +
10996 +       ret_val = phy->ops.write_reg(hw, IFE_PHY_MDIX_CONTROL, data);
10997 +       if (ret_val)
10998 +               goto out;
10999 +
11000 +       DEBUGOUT1("IFE PMC: %X\n", data);
11001 +
11002 +       usec_delay(1);
11003 +
11004 +       if (phy->autoneg_wait_to_complete) {
11005 +               DEBUGOUT("Waiting for forced speed/duplex link on IFE phy.\n");
11006 +
11007 +               ret_val = e1000_phy_has_link_generic(hw,
11008 +                                                    PHY_FORCE_LIMIT,
11009 +                                                    100000,
11010 +                                                    &link);
11011 +               if (ret_val)
11012 +                       goto out;
11013 +
11014 +               if (!link)
11015 +                       DEBUGOUT("Link taking longer than expected.\n");
11016 +
11017 +               /* Try once more */
11018 +               ret_val = e1000_phy_has_link_generic(hw,
11019 +                                                    PHY_FORCE_LIMIT,
11020 +                                                    100000,
11021 +                                                    &link);
11022 +               if (ret_val)
11023 +                       goto out;
11024 +       }
11025 +
11026 +out:
11027 +       return ret_val;
11028 +}
11029 +
11030 +/**
11031 + *  e1000_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex
11032 + *  @hw: pointer to the HW structure
11033 + *  @phy_ctrl: pointer to current value of PHY_CONTROL
11034 + *
11035 + *  Forces speed and duplex on the PHY by doing the following: disable flow
11036 + *  control, force speed/duplex on the MAC, disable auto speed detection,
11037 + *  disable auto-negotiation, configure duplex, configure speed, configure
11038 + *  the collision distance, write configuration to CTRL register.  The
11039 + *  caller must write to the PHY_CONTROL register for these settings to
11040 + *  take affect.
11041 + **/
11042 +void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl)
11043 +{
11044 +       struct e1000_mac_info *mac = &hw->mac;
11045 +       u32 ctrl;
11046 +
11047 +       DEBUGFUNC("e1000_phy_force_speed_duplex_setup");
11048 +
11049 +       /* Turn off flow control when forcing speed/duplex */
11050 +       hw->fc.current_mode = e1000_fc_none;
11051 +
11052 +       /* Force speed/duplex on the mac */
11053 +       ctrl = E1000_READ_REG(hw, E1000_CTRL);
11054 +       ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
11055 +       ctrl &= ~E1000_CTRL_SPD_SEL;
11056 +
11057 +       /* Disable Auto Speed Detection */
11058 +       ctrl &= ~E1000_CTRL_ASDE;
11059 +
11060 +       /* Disable autoneg on the phy */
11061 +       *phy_ctrl &= ~MII_CR_AUTO_NEG_EN;
11062 +
11063 +       /* Forcing Full or Half Duplex? */
11064 +       if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) {
11065 +               ctrl &= ~E1000_CTRL_FD;
11066 +               *phy_ctrl &= ~MII_CR_FULL_DUPLEX;
11067 +               DEBUGOUT("Half Duplex\n");
11068 +       } else {
11069 +               ctrl |= E1000_CTRL_FD;
11070 +               *phy_ctrl |= MII_CR_FULL_DUPLEX;
11071 +               DEBUGOUT("Full Duplex\n");
11072 +       }
11073 +
11074 +       /* Forcing 10mb or 100mb? */
11075 +       if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) {
11076 +               ctrl |= E1000_CTRL_SPD_100;
11077 +               *phy_ctrl |= MII_CR_SPEED_100;
11078 +               *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
11079 +               DEBUGOUT("Forcing 100mb\n");
11080 +       } else {
11081 +               ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
11082 +               *phy_ctrl |= MII_CR_SPEED_10;
11083 +               *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
11084 +               DEBUGOUT("Forcing 10mb\n");
11085 +       }
11086 +
11087 +       e1000_config_collision_dist_generic(hw);
11088 +
11089 +       E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
11090 +}
11091 +
11092 +/**
11093 + *  e1000_set_d3_lplu_state_generic - Sets low power link up state for D3
11094 + *  @hw: pointer to the HW structure
11095 + *  @active: boolean used to enable/disable lplu
11096 + *
11097 + *  Success returns 0, Failure returns 1
11098 + *
11099 + *  The low power link up (lplu) state is set to the power management level D3
11100 + *  and SmartSpeed is disabled when active is true, else clear lplu for D3
11101 + *  and enable Smartspeed.  LPLU and Smartspeed are mutually exclusive.  LPLU
11102 + *  is used during Dx states where the power conservation is most important.
11103 + *  During driver activity, SmartSpeed should be enabled so performance is
11104 + *  maintained.
11105 + **/
11106 +s32 e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active)
11107 +{
11108 +       struct e1000_phy_info *phy = &hw->phy;
11109 +       s32 ret_val = E1000_SUCCESS;
11110 +       u16 data;
11111 +
11112 +       DEBUGFUNC("e1000_set_d3_lplu_state_generic");
11113 +
11114 +       if (!(hw->phy.ops.read_reg))
11115 +               goto out;
11116 +
11117 +       ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
11118 +       if (ret_val)
11119 +               goto out;
11120 +
11121 +       if (!active) {
11122 +               data &= ~IGP02E1000_PM_D3_LPLU;
11123 +               ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
11124 +                                            data);
11125 +               if (ret_val)
11126 +                       goto out;
11127 +               /*
11128 +                * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
11129 +                * during Dx states where the power conservation is most
11130 +                * important.  During driver activity we should enable
11131 +                * SmartSpeed, so performance is maintained.
11132 +                */
11133 +               if (phy->smart_speed == e1000_smart_speed_on) {
11134 +                       ret_val = phy->ops.read_reg(hw,
11135 +                                                   IGP01E1000_PHY_PORT_CONFIG,
11136 +                                                   &data);
11137 +                       if (ret_val)
11138 +                               goto out;
11139 +
11140 +                       data |= IGP01E1000_PSCFR_SMART_SPEED;
11141 +                       ret_val = phy->ops.write_reg(hw,
11142 +                                                    IGP01E1000_PHY_PORT_CONFIG,
11143 +                                                    data);
11144 +                       if (ret_val)
11145 +                               goto out;
11146 +               } else if (phy->smart_speed == e1000_smart_speed_off) {
11147 +                       ret_val = phy->ops.read_reg(hw,
11148 +                                                    IGP01E1000_PHY_PORT_CONFIG,
11149 +                                                    &data);
11150 +                       if (ret_val)
11151 +                               goto out;
11152 +
11153 +                       data &= ~IGP01E1000_PSCFR_SMART_SPEED;
11154 +                       ret_val = phy->ops.write_reg(hw,
11155 +                                                    IGP01E1000_PHY_PORT_CONFIG,
11156 +                                                    data);
11157 +                       if (ret_val)
11158 +                               goto out;
11159 +               }
11160 +       } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
11161 +                  (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
11162 +                  (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
11163 +               data |= IGP02E1000_PM_D3_LPLU;
11164 +               ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
11165 +                                             data);
11166 +               if (ret_val)
11167 +                       goto out;
11168 +
11169 +               /* When LPLU is enabled, we should disable SmartSpeed */
11170 +               ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
11171 +                                            &data);
11172 +               if (ret_val)
11173 +                       goto out;
11174 +
11175 +               data &= ~IGP01E1000_PSCFR_SMART_SPEED;
11176 +               ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
11177 +                                             data);
11178 +       }
11179 +
11180 +out:
11181 +       return ret_val;
11182 +}
11183 +
11184 +/**
11185 + *  e1000_check_downshift_generic - Checks whether a downshift in speed occurred
11186 + *  @hw: pointer to the HW structure
11187 + *
11188 + *  Success returns 0, Failure returns 1
11189 + *
11190 + *  A downshift is detected by querying the PHY link health.
11191 + **/
11192 +s32 e1000_check_downshift_generic(struct e1000_hw *hw)
11193 +{
11194 +       struct e1000_phy_info *phy = &hw->phy;
11195 +       s32 ret_val;
11196 +       u16 phy_data, offset, mask;
11197 +
11198 +       DEBUGFUNC("e1000_check_downshift_generic");
11199 +
11200 +       switch (phy->type) {
11201 +       case e1000_phy_m88:
11202 +       case e1000_phy_gg82563:
11203 +               offset  = M88E1000_PHY_SPEC_STATUS;
11204 +               mask    = M88E1000_PSSR_DOWNSHIFT;
11205 +               break;
11206 +       case e1000_phy_igp_2:
11207 +       case e1000_phy_igp:
11208 +       case e1000_phy_igp_3:
11209 +               offset  = IGP01E1000_PHY_LINK_HEALTH;
11210 +               mask    = IGP01E1000_PLHR_SS_DOWNGRADE;
11211 +               break;
11212 +       default:
11213 +               /* speed downshift not supported */
11214 +               phy->speed_downgraded = false;
11215 +               ret_val = E1000_SUCCESS;
11216 +               goto out;
11217 +       }
11218 +
11219 +       ret_val = phy->ops.read_reg(hw, offset, &phy_data);
11220 +
11221 +       if (!ret_val)
11222 +               phy->speed_downgraded = (phy_data & mask) ? true : false;
11223 +
11224 +out:
11225 +       return ret_val;
11226 +}
11227 +
11228 +/**
11229 + *  e1000_check_polarity_m88 - Checks the polarity.
11230 + *  @hw: pointer to the HW structure
11231 + *
11232 + *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
11233 + *
11234 + *  Polarity is determined based on the PHY specific status register.
11235 + **/
11236 +s32 e1000_check_polarity_m88(struct e1000_hw *hw)
11237 +{
11238 +       struct e1000_phy_info *phy = &hw->phy;
11239 +       s32 ret_val;
11240 +       u16 data;
11241 +
11242 +       DEBUGFUNC("e1000_check_polarity_m88");
11243 +
11244 +       ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &data);
11245 +
11246 +       if (!ret_val)
11247 +               phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY)
11248 +                                     ? e1000_rev_polarity_reversed
11249 +                                     : e1000_rev_polarity_normal;
11250 +
11251 +       return ret_val;
11252 +}
11253 +
11254 +/**
11255 + *  e1000_check_polarity_igp - Checks the polarity.
11256 + *  @hw: pointer to the HW structure
11257 + *
11258 + *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
11259 + *
11260 + *  Polarity is determined based on the PHY port status register, and the
11261 + *  current speed (since there is no polarity at 100Mbps).
11262 + **/
11263 +s32 e1000_check_polarity_igp(struct e1000_hw *hw)
11264 +{
11265 +       struct e1000_phy_info *phy = &hw->phy;
11266 +       s32 ret_val;
11267 +       u16 data, offset, mask;
11268 +
11269 +       DEBUGFUNC("e1000_check_polarity_igp");
11270 +
11271 +       /*
11272 +        * Polarity is determined based on the speed of
11273 +        * our connection.
11274 +        */
11275 +       ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
11276 +       if (ret_val)
11277 +               goto out;
11278 +
11279 +       if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
11280 +           IGP01E1000_PSSR_SPEED_1000MBPS) {
11281 +               offset  = IGP01E1000_PHY_PCS_INIT_REG;
11282 +               mask    = IGP01E1000_PHY_POLARITY_MASK;
11283 +       } else {
11284 +               /*
11285 +                * This really only applies to 10Mbps since
11286 +                * there is no polarity for 100Mbps (always 0).
11287 +                */
11288 +               offset  = IGP01E1000_PHY_PORT_STATUS;
11289 +               mask    = IGP01E1000_PSSR_POLARITY_REVERSED;
11290 +       }
11291 +
11292 +       ret_val = phy->ops.read_reg(hw, offset, &data);
11293 +
11294 +       if (!ret_val)
11295 +               phy->cable_polarity = (data & mask)
11296 +                                     ? e1000_rev_polarity_reversed
11297 +                                     : e1000_rev_polarity_normal;
11298 +
11299 +out:
11300 +       return ret_val;
11301 +}
11302 +
11303 +/**
11304 + *  e1000_check_polarity_ife - Check cable polarity for IFE PHY
11305 + *  @hw: pointer to the HW structure
11306 + *
11307 + *  Polarity is determined on the polarity reversal feature being enabled.
11308 + **/
11309 +s32 e1000_check_polarity_ife(struct e1000_hw *hw)
11310 +{
11311 +       struct e1000_phy_info *phy = &hw->phy;
11312 +       s32 ret_val;
11313 +       u16 phy_data, offset, mask;
11314 +
11315 +       DEBUGFUNC("e1000_check_polarity_ife");
11316 +
11317 +       /*
11318 +        * Polarity is determined based on the reversal feature being enabled.
11319 +        */
11320 +       if (phy->polarity_correction) {
11321 +               offset = IFE_PHY_EXTENDED_STATUS_CONTROL;
11322 +               mask = IFE_PESC_POLARITY_REVERSED;
11323 +       } else {
11324 +               offset = IFE_PHY_SPECIAL_CONTROL;
11325 +               mask = IFE_PSC_FORCE_POLARITY;
11326 +       }
11327 +
11328 +       ret_val = phy->ops.read_reg(hw, offset, &phy_data);
11329 +
11330 +       if (!ret_val)
11331 +               phy->cable_polarity = (phy_data & mask)
11332 +                                      ? e1000_rev_polarity_reversed
11333 +                                      : e1000_rev_polarity_normal;
11334 +
11335 +       return ret_val;
11336 +}
11337 +
11338 +/**
11339 + *  e1000_wait_autoneg_generic - Wait for auto-neg completion
11340 + *  @hw: pointer to the HW structure
11341 + *
11342 + *  Waits for auto-negotiation to complete or for the auto-negotiation time
11343 + *  limit to expire, which ever happens first.
11344 + **/
11345 +s32 e1000_wait_autoneg_generic(struct e1000_hw *hw)
11346 +{
11347 +       s32 ret_val = E1000_SUCCESS;
11348 +       u16 i, phy_status;
11349 +
11350 +       DEBUGFUNC("e1000_wait_autoneg_generic");
11351 +
11352 +       if (!(hw->phy.ops.read_reg))
11353 +               return E1000_SUCCESS;
11354 +
11355 +       /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */
11356 +       for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) {
11357 +               ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
11358 +               if (ret_val)
11359 +                       break;
11360 +               ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
11361 +               if (ret_val)
11362 +                       break;
11363 +               if (phy_status & MII_SR_AUTONEG_COMPLETE)
11364 +                       break;
11365 +               msec_delay(100);
11366 +       }
11367 +
11368 +       /*
11369 +        * PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
11370 +        * has completed.
11371 +        */
11372 +       return ret_val;
11373 +}
11374 +
11375 +/**
11376 + *  e1000_phy_has_link_generic - Polls PHY for link
11377 + *  @hw: pointer to the HW structure
11378 + *  @iterations: number of times to poll for link
11379 + *  @usec_interval: delay between polling attempts
11380 + *  @success: pointer to whether polling was successful or not
11381 + *
11382 + *  Polls the PHY status register for link, 'iterations' number of times.
11383 + **/
11384 +s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
11385 +                               u32 usec_interval, bool *success)
11386 +{
11387 +       s32 ret_val = E1000_SUCCESS;
11388 +       u16 i, phy_status;
11389 +
11390 +       DEBUGFUNC("e1000_phy_has_link_generic");
11391 +
11392 +       if (!(hw->phy.ops.read_reg))
11393 +               return E1000_SUCCESS;
11394 +
11395 +       for (i = 0; i < iterations; i++) {
11396 +               /*
11397 +                * Some PHYs require the PHY_STATUS register to be read
11398 +                * twice due to the link bit being sticky.  No harm doing
11399 +                * it across the board.
11400 +                */
11401 +               ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
11402 +               if (ret_val) {
11403 +                       /*
11404 +                        * If the first read fails, another entity may have
11405 +                        * ownership of the resources, wait and try again to
11406 +                        * see if they have relinquished the resources yet.
11407 +                        */
11408 +                       usec_delay(usec_interval);
11409 +               }
11410 +               ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
11411 +               if (ret_val)
11412 +                       break;
11413 +               if (phy_status & MII_SR_LINK_STATUS)
11414 +                       break;
11415 +               if (usec_interval >= 1000)
11416 +                       msec_delay_irq(usec_interval/1000);
11417 +               else
11418 +                       usec_delay(usec_interval);
11419 +       }
11420 +
11421 +       *success = (i < iterations) ? true : false;
11422 +
11423 +       return ret_val;
11424 +}
11425 +
11426 +/**
11427 + *  e1000_get_cable_length_m88 - Determine cable length for m88 PHY
11428 + *  @hw: pointer to the HW structure
11429 + *
11430 + *  Reads the PHY specific status register to retrieve the cable length
11431 + *  information.  The cable length is determined by averaging the minimum and
11432 + *  maximum values to get the "average" cable length.  The m88 PHY has five
11433 + *  possible cable length values, which are:
11434 + *     Register Value          Cable Length
11435 + *     0                       < 50 meters
11436 + *     1                       50 - 80 meters
11437 + *     2                       80 - 110 meters
11438 + *     3                       110 - 140 meters
11439 + *     4                       > 140 meters
11440 + **/
11441 +s32 e1000_get_cable_length_m88(struct e1000_hw *hw)
11442 +{
11443 +       struct e1000_phy_info *phy = &hw->phy;
11444 +       s32 ret_val;
11445 +       u16 phy_data, index;
11446 +
11447 +       DEBUGFUNC("e1000_get_cable_length_m88");
11448 +
11449 +       ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
11450 +       if (ret_val)
11451 +               goto out;
11452 +
11453 +       index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
11454 +               M88E1000_PSSR_CABLE_LENGTH_SHIFT;
11455 +       if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
11456 +               ret_val = -E1000_ERR_PHY;
11457 +               goto out;
11458 +       }
11459 +
11460 +       phy->min_cable_length = e1000_m88_cable_length_table[index];
11461 +       phy->max_cable_length = e1000_m88_cable_length_table[index + 1];
11462 +
11463 +       phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
11464 +
11465 +out:
11466 +       return ret_val;
11467 +}
11468 +
11469 +/**
11470 + *  e1000_get_cable_length_igp_2 - Determine cable length for igp2 PHY
11471 + *  @hw: pointer to the HW structure
11472 + *
11473 + *  The automatic gain control (agc) normalizes the amplitude of the
11474 + *  received signal, adjusting for the attenuation produced by the
11475 + *  cable.  By reading the AGC registers, which represent the
11476 + *  combination of coarse and fine gain value, the value can be put
11477 + *  into a lookup table to obtain the approximate cable length
11478 + *  for each channel.
11479 + **/
11480 +s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw)
11481 +{
11482 +       struct e1000_phy_info *phy = &hw->phy;
11483 +       s32 ret_val = E1000_SUCCESS;
11484 +       u16 phy_data, i, agc_value = 0;
11485 +       u16 cur_agc_index, max_agc_index = 0;
11486 +       u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
11487 +       u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] =
11488 +                                                        {IGP02E1000_PHY_AGC_A,
11489 +                                                         IGP02E1000_PHY_AGC_B,
11490 +                                                         IGP02E1000_PHY_AGC_C,
11491 +                                                         IGP02E1000_PHY_AGC_D};
11492 +
11493 +       DEBUGFUNC("e1000_get_cable_length_igp_2");
11494 +
11495 +       /* Read the AGC registers for all channels */
11496 +       for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
11497 +               ret_val = phy->ops.read_reg(hw, agc_reg_array[i], &phy_data);
11498 +               if (ret_val)
11499 +                       goto out;
11500 +
11501 +               /*
11502 +                * Getting bits 15:9, which represent the combination of
11503 +                * coarse and fine gain values.  The result is a number
11504 +                * that can be put into the lookup table to obtain the
11505 +                * approximate cable length.
11506 +                */
11507 +               cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
11508 +                               IGP02E1000_AGC_LENGTH_MASK;
11509 +
11510 +               /* Array index bound check. */
11511 +               if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) ||
11512 +                   (cur_agc_index == 0)) {
11513 +                       ret_val = -E1000_ERR_PHY;
11514 +                       goto out;
11515 +               }
11516 +
11517 +               /* Remove min & max AGC values from calculation. */
11518 +               if (e1000_igp_2_cable_length_table[min_agc_index] >
11519 +                   e1000_igp_2_cable_length_table[cur_agc_index])
11520 +                       min_agc_index = cur_agc_index;
11521 +               if (e1000_igp_2_cable_length_table[max_agc_index] <
11522 +                   e1000_igp_2_cable_length_table[cur_agc_index])
11523 +                       max_agc_index = cur_agc_index;
11524 +
11525 +               agc_value += e1000_igp_2_cable_length_table[cur_agc_index];
11526 +       }
11527 +
11528 +       agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] +
11529 +                     e1000_igp_2_cable_length_table[max_agc_index]);
11530 +       agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
11531 +
11532 +       /* Calculate cable length with the error range of +/- 10 meters. */
11533 +       phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
11534 +                                (agc_value - IGP02E1000_AGC_RANGE) : 0;
11535 +       phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE;
11536 +
11537 +       phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
11538 +
11539 +out:
11540 +       return ret_val;
11541 +}
11542 +
11543 +/**
11544 + *  e1000_get_phy_info_m88 - Retrieve PHY information
11545 + *  @hw: pointer to the HW structure
11546 + *
11547 + *  Valid for only copper links.  Read the PHY status register (sticky read)
11548 + *  to verify that link is up.  Read the PHY special control register to
11549 + *  determine the polarity and 10base-T extended distance.  Read the PHY
11550 + *  special status register to determine MDI/MDIx and current speed.  If
11551 + *  speed is 1000, then determine cable length, local and remote receiver.
11552 + **/
11553 +s32 e1000_get_phy_info_m88(struct e1000_hw *hw)
11554 +{
11555 +       struct e1000_phy_info *phy = &hw->phy;
11556 +       s32  ret_val;
11557 +       u16 phy_data;
11558 +       bool link;
11559 +
11560 +       DEBUGFUNC("e1000_get_phy_info_m88");
11561 +
11562 +       if (phy->media_type != e1000_media_type_copper) {
11563 +               DEBUGOUT("Phy info is only valid for copper media\n");
11564 +               ret_val = -E1000_ERR_CONFIG;
11565 +               goto out;
11566 +       }
11567 +
11568 +       ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
11569 +       if (ret_val)
11570 +               goto out;
11571 +
11572 +       if (!link) {
11573 +               DEBUGOUT("Phy info is only valid if link is up\n");
11574 +               ret_val = -E1000_ERR_CONFIG;
11575 +               goto out;
11576 +       }
11577 +
11578 +       ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
11579 +       if (ret_val)
11580 +               goto out;
11581 +
11582 +       phy->polarity_correction = (phy_data & M88E1000_PSCR_POLARITY_REVERSAL)
11583 +                                  ? true : false;
11584 +
11585 +       ret_val = e1000_check_polarity_m88(hw);
11586 +       if (ret_val)
11587 +               goto out;
11588 +
11589 +       ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
11590 +       if (ret_val)
11591 +               goto out;
11592 +
11593 +       phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX) ? true : false;
11594 +
11595 +       if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
11596 +               ret_val = hw->phy.ops.get_cable_length(hw);
11597 +               if (ret_val)
11598 +                       goto out;
11599 +
11600 +               ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &phy_data);
11601 +               if (ret_val)
11602 +                       goto out;
11603 +
11604 +               phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS)
11605 +                               ? e1000_1000t_rx_status_ok
11606 +                               : e1000_1000t_rx_status_not_ok;
11607 +
11608 +               phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS)
11609 +                                ? e1000_1000t_rx_status_ok
11610 +                                : e1000_1000t_rx_status_not_ok;
11611 +       } else {
11612 +               /* Set values to "undefined" */
11613 +               phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
11614 +               phy->local_rx = e1000_1000t_rx_status_undefined;
11615 +               phy->remote_rx = e1000_1000t_rx_status_undefined;
11616 +       }
11617 +
11618 +out:
11619 +       return ret_val;
11620 +}
11621 +
11622 +/**
11623 + *  e1000_get_phy_info_igp - Retrieve igp PHY information
11624 + *  @hw: pointer to the HW structure
11625 + *
11626 + *  Read PHY status to determine if link is up.  If link is up, then
11627 + *  set/determine 10base-T extended distance and polarity correction.  Read
11628 + *  PHY port status to determine MDI/MDIx and speed.  Based on the speed,
11629 + *  determine the cable length, local and remote receiver.
11630 + **/
11631 +s32 e1000_get_phy_info_igp(struct e1000_hw *hw)
11632 +{
11633 +       struct e1000_phy_info *phy = &hw->phy;
11634 +       s32 ret_val;
11635 +       u16 data;
11636 +       bool link;
11637 +
11638 +       DEBUGFUNC("e1000_get_phy_info_igp");
11639 +
11640 +       ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
11641 +       if (ret_val)
11642 +               goto out;
11643 +
11644 +       if (!link) {
11645 +               DEBUGOUT("Phy info is only valid if link is up\n");
11646 +               ret_val = -E1000_ERR_CONFIG;
11647 +               goto out;
11648 +       }
11649 +
11650 +       phy->polarity_correction = true;
11651 +
11652 +       ret_val = e1000_check_polarity_igp(hw);
11653 +       if (ret_val)
11654 +               goto out;
11655 +
11656 +       ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
11657 +       if (ret_val)
11658 +               goto out;
11659 +
11660 +       phy->is_mdix = (data & IGP01E1000_PSSR_MDIX) ? true : false;
11661 +
11662 +       if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
11663 +           IGP01E1000_PSSR_SPEED_1000MBPS) {
11664 +               ret_val = phy->ops.get_cable_length(hw);
11665 +               if (ret_val)
11666 +                       goto out;
11667 +
11668 +               ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
11669 +               if (ret_val)
11670 +                       goto out;
11671 +
11672 +               phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
11673 +                               ? e1000_1000t_rx_status_ok
11674 +                               : e1000_1000t_rx_status_not_ok;
11675 +
11676 +               phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
11677 +                                ? e1000_1000t_rx_status_ok
11678 +                                : e1000_1000t_rx_status_not_ok;
11679 +       } else {
11680 +               phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
11681 +               phy->local_rx = e1000_1000t_rx_status_undefined;
11682 +               phy->remote_rx = e1000_1000t_rx_status_undefined;
11683 +       }
11684 +
11685 +out:
11686 +       return ret_val;
11687 +}
11688 +
11689 +/**
11690 + *  e1000_phy_sw_reset_generic - PHY software reset
11691 + *  @hw: pointer to the HW structure
11692 + *
11693 + *  Does a software reset of the PHY by reading the PHY control register and
11694 + *  setting/write the control register reset bit to the PHY.
11695 + **/
11696 +s32 e1000_phy_sw_reset_generic(struct e1000_hw *hw)
11697 +{
11698 +       s32 ret_val = E1000_SUCCESS;
11699 +       u16 phy_ctrl;
11700 +
11701 +       DEBUGFUNC("e1000_phy_sw_reset_generic");
11702 +
11703 +       if (!(hw->phy.ops.read_reg))
11704 +               goto out;
11705 +
11706 +       ret_val = hw->phy.ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
11707 +       if (ret_val)
11708 +               goto out;
11709 +
11710 +       phy_ctrl |= MII_CR_RESET;
11711 +       ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
11712 +       if (ret_val)
11713 +               goto out;
11714 +
11715 +       usec_delay(1);
11716 +
11717 +out:
11718 +       return ret_val;
11719 +}
11720 +
11721 +/**
11722 + *  e1000_phy_hw_reset_generic - PHY hardware reset
11723 + *  @hw: pointer to the HW structure
11724 + *
11725 + *  Verify the reset block is not blocking us from resetting.  Acquire
11726 + *  semaphore (if necessary) and read/set/write the device control reset
11727 + *  bit in the PHY.  Wait the appropriate delay time for the device to
11728 + *  reset and release the semaphore (if necessary).
11729 + **/
11730 +s32 e1000_phy_hw_reset_generic(struct e1000_hw *hw)
11731 +{
11732 +       struct e1000_phy_info *phy = &hw->phy;
11733 +       s32 ret_val = E1000_SUCCESS;
11734 +       u32 ctrl;
11735 +
11736 +       DEBUGFUNC("e1000_phy_hw_reset_generic");
11737 +
11738 +       ret_val = phy->ops.check_reset_block(hw);
11739 +       if (ret_val) {
11740 +               ret_val = E1000_SUCCESS;
11741 +               goto out;
11742 +       }
11743 +
11744 +       ret_val = phy->ops.acquire(hw);
11745 +       if (ret_val)
11746 +               goto out;
11747 +
11748 +       ctrl = E1000_READ_REG(hw, E1000_CTRL);
11749 +       E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_PHY_RST);
11750 +       E1000_WRITE_FLUSH(hw);
11751 +
11752 +       usec_delay(phy->reset_delay_us);
11753 +
11754 +       E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
11755 +       E1000_WRITE_FLUSH(hw);
11756 +
11757 +       usec_delay(150);
11758 +
11759 +       phy->ops.release(hw);
11760 +
11761 +       ret_val = phy->ops.get_cfg_done(hw);
11762 +
11763 +out:
11764 +       return ret_val;
11765 +}
11766 +
11767 +/**
11768 + *  e1000_get_cfg_done_generic - Generic configuration done
11769 + *  @hw: pointer to the HW structure
11770 + *
11771 + *  Generic function to wait 10 milli-seconds for configuration to complete
11772 + *  and return success.
11773 + **/
11774 +s32 e1000_get_cfg_done_generic(struct e1000_hw *hw)
11775 +{
11776 +       DEBUGFUNC("e1000_get_cfg_done_generic");
11777 +
11778 +       msec_delay_irq(10);
11779 +
11780 +       return E1000_SUCCESS;
11781 +}
11782 +
11783 +/**
11784 + *  e1000_phy_init_script_igp3 - Inits the IGP3 PHY
11785 + *  @hw: pointer to the HW structure
11786 + *
11787 + *  Initializes an Intel Gigabit PHY3 when an EEPROM is not present.
11788 + **/
11789 +s32 e1000_phy_init_script_igp3(struct e1000_hw *hw)
11790 +{
11791 +       DEBUGOUT("Running IGP 3 PHY init script\n");
11792 +
11793 +       /* PHY init IGP 3 */
11794 +       /* Enable rise/fall, 10-mode work in class-A */
11795 +       hw->phy.ops.write_reg(hw, 0x2F5B, 0x9018);
11796 +       /* Remove all caps from Replica path filter */
11797 +       hw->phy.ops.write_reg(hw, 0x2F52, 0x0000);
11798 +       /* Bias trimming for ADC, AFE and Driver (Default) */
11799 +       hw->phy.ops.write_reg(hw, 0x2FB1, 0x8B24);
11800 +       /* Increase Hybrid poly bias */
11801 +       hw->phy.ops.write_reg(hw, 0x2FB2, 0xF8F0);
11802 +       /* Add 4% to Tx amplitude in Gig mode */
11803 +       hw->phy.ops.write_reg(hw, 0x2010, 0x10B0);
11804 +       /* Disable trimming (TTT) */
11805 +       hw->phy.ops.write_reg(hw, 0x2011, 0x0000);
11806 +       /* Poly DC correction to 94.6% + 2% for all channels */
11807 +       hw->phy.ops.write_reg(hw, 0x20DD, 0x249A);
11808 +       /* ABS DC correction to 95.9% */
11809 +       hw->phy.ops.write_reg(hw, 0x20DE, 0x00D3);
11810 +       /* BG temp curve trim */
11811 +       hw->phy.ops.write_reg(hw, 0x28B4, 0x04CE);
11812 +       /* Increasing ADC OPAMP stage 1 currents to max */
11813 +       hw->phy.ops.write_reg(hw, 0x2F70, 0x29E4);
11814 +       /* Force 1000 (required for enabling PHY regs configuration) */
11815 +       hw->phy.ops.write_reg(hw, 0x0000, 0x0140);
11816 +       /* Set upd_freq to 6 */
11817 +       hw->phy.ops.write_reg(hw, 0x1F30, 0x1606);
11818 +       /* Disable NPDFE */
11819 +       hw->phy.ops.write_reg(hw, 0x1F31, 0xB814);
11820 +       /* Disable adaptive fixed FFE (Default) */
11821 +       hw->phy.ops.write_reg(hw, 0x1F35, 0x002A);
11822 +       /* Enable FFE hysteresis */
11823 +       hw->phy.ops.write_reg(hw, 0x1F3E, 0x0067);
11824 +       /* Fixed FFE for short cable lengths */
11825 +       hw->phy.ops.write_reg(hw, 0x1F54, 0x0065);
11826 +       /* Fixed FFE for medium cable lengths */
11827 +       hw->phy.ops.write_reg(hw, 0x1F55, 0x002A);
11828 +       /* Fixed FFE for long cable lengths */
11829 +       hw->phy.ops.write_reg(hw, 0x1F56, 0x002A);
11830 +       /* Enable Adaptive Clip Threshold */
11831 +       hw->phy.ops.write_reg(hw, 0x1F72, 0x3FB0);
11832 +       /* AHT reset limit to 1 */
11833 +       hw->phy.ops.write_reg(hw, 0x1F76, 0xC0FF);
11834 +       /* Set AHT master delay to 127 msec */
11835 +       hw->phy.ops.write_reg(hw, 0x1F77, 0x1DEC);
11836 +       /* Set scan bits for AHT */
11837 +       hw->phy.ops.write_reg(hw, 0x1F78, 0xF9EF);
11838 +       /* Set AHT Preset bits */
11839 +       hw->phy.ops.write_reg(hw, 0x1F79, 0x0210);
11840 +       /* Change integ_factor of channel A to 3 */
11841 +       hw->phy.ops.write_reg(hw, 0x1895, 0x0003);
11842 +       /* Change prop_factor of channels BCD to 8 */
11843 +       hw->phy.ops.write_reg(hw, 0x1796, 0x0008);
11844 +       /* Change cg_icount + enable integbp for channels BCD */
11845 +       hw->phy.ops.write_reg(hw, 0x1798, 0xD008);
11846 +       /*
11847 +        * Change cg_icount + enable integbp + change prop_factor_master
11848 +        * to 8 for channel A
11849 +        */
11850 +       hw->phy.ops.write_reg(hw, 0x1898, 0xD918);
11851 +       /* Disable AHT in Slave mode on channel A */
11852 +       hw->phy.ops.write_reg(hw, 0x187A, 0x0800);
11853 +       /*
11854 +        * Enable LPLU and disable AN to 1000 in non-D0a states,
11855 +        * Enable SPD+B2B
11856 +        */
11857 +       hw->phy.ops.write_reg(hw, 0x0019, 0x008D);
11858 +       /* Enable restart AN on an1000_dis change */
11859 +       hw->phy.ops.write_reg(hw, 0x001B, 0x2080);
11860 +       /* Enable wh_fifo read clock in 10/100 modes */
11861 +       hw->phy.ops.write_reg(hw, 0x0014, 0x0045);
11862 +       /* Restart AN, Speed selection is 1000 */
11863 +       hw->phy.ops.write_reg(hw, 0x0000, 0x1340);
11864 +
11865 +       return E1000_SUCCESS;
11866 +}
11867 +
11868 +/**
11869 + *  e1000_get_phy_type_from_id - Get PHY type from id
11870 + *  @phy_id: phy_id read from the phy
11871 + *
11872 + *  Returns the phy type from the id.
11873 + **/
11874 +enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id)
11875 +{
11876 +       enum e1000_phy_type phy_type = e1000_phy_unknown;
11877 +
11878 +       switch (phy_id) {
11879 +       case M88E1000_I_PHY_ID:
11880 +       case M88E1000_E_PHY_ID:
11881 +       case M88E1111_I_PHY_ID:
11882 +       case M88E1011_I_PHY_ID:
11883 +               phy_type = e1000_phy_m88;
11884 +               break;
11885 +       case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */
11886 +               phy_type = e1000_phy_igp_2;
11887 +               break;
11888 +       case GG82563_E_PHY_ID:
11889 +               phy_type = e1000_phy_gg82563;
11890 +               break;
11891 +       case IGP03E1000_E_PHY_ID:
11892 +               phy_type = e1000_phy_igp_3;
11893 +               break;
11894 +       case IFE_E_PHY_ID:
11895 +       case IFE_PLUS_E_PHY_ID:
11896 +       case IFE_C_E_PHY_ID:
11897 +               phy_type = e1000_phy_ife;
11898 +               break;
11899 +       default:
11900 +               phy_type = e1000_phy_unknown;
11901 +               break;
11902 +       }
11903 +       return phy_type;
11904 +}
11905 +
11906 +/**
11907 + *  e1000_determine_phy_address - Determines PHY address.
11908 + *  @hw: pointer to the HW structure
11909 + *
11910 + *  This uses a trial and error method to loop through possible PHY
11911 + *  addresses. It tests each by reading the PHY ID registers and
11912 + *  checking for a match.
11913 + **/
11914 +s32 e1000_determine_phy_address(struct e1000_hw *hw)
11915 +{
11916 +       s32 ret_val = -E1000_ERR_PHY_TYPE;
11917 +       u32 phy_addr = 0;
11918 +       u32 i;
11919 +       enum e1000_phy_type phy_type = e1000_phy_unknown;
11920 +
11921 +       hw->phy.id = phy_type;
11922 +
11923 +       for (phy_addr = 0; phy_addr < E1000_MAX_PHY_ADDR; phy_addr++) {
11924 +               hw->phy.addr = phy_addr;
11925 +               i = 0;
11926 +
11927 +               do {
11928 +                       e1000_get_phy_id(hw);
11929 +                       phy_type = e1000_get_phy_type_from_id(hw->phy.id);
11930 +
11931 +                       /*
11932 +                        * If phy_type is valid, break - we found our
11933 +                        * PHY address
11934 +                        */
11935 +                       if (phy_type  != e1000_phy_unknown) {
11936 +                               ret_val = E1000_SUCCESS;
11937 +                               goto out;
11938 +                       }
11939 +                       msec_delay(1);
11940 +                       i++;
11941 +               } while (i < 10);
11942 +       }
11943 +
11944 +out:
11945 +       return ret_val;
11946 +}
11947 +
11948 +/**
11949 + * e1000_power_up_phy_copper - Restore copper link in case of PHY power down
11950 + * @hw: pointer to the HW structure
11951 + *
11952 + * In the case of a PHY power down to save power, or to turn off link during a
11953 + * driver unload, or wake on lan is not enabled, restore the link to previous
11954 + * settings.
11955 + **/
11956 +void e1000_power_up_phy_copper(struct e1000_hw *hw)
11957 +{
11958 +       u16 mii_reg = 0;
11959 +
11960 +       /* The PHY will retain its settings across a power down/up cycle */
11961 +       hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
11962 +       mii_reg &= ~MII_CR_POWER_DOWN;
11963 +       hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
11964 +}
11965 +
11966 +/**
11967 + * e1000_power_down_phy_copper - Power down copper PHY
11968 + * @hw: pointer to the HW structure
11969 + *
11970 + * Power down the PHY to save power, or to turn off link during a driver
11971 + * unload when wake on lan is not enabled, by setting the power-down bit
11972 + * in the PHY control register.
11973 + **/
11974 +void e1000_power_down_phy_copper(struct e1000_hw *hw)
11975 +{
11976 +       u16 mii_reg = 0;
11977 +
11978 +       /* The PHY will retain its settings across a power down/up cycle */
11979 +       hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
11980 +       mii_reg |= MII_CR_POWER_DOWN;
11981 +       hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
11982 +       msec_delay(1);
11983 +}
11984 Index: linux-2.6.22/drivers/net/igb/e1000_phy.h
11985 ===================================================================
11986 --- /dev/null   1970-01-01 00:00:00.000000000 +0000
11987 +++ linux-2.6.22/drivers/net/igb/e1000_phy.h    2009-12-18 12:39:22.000000000 -0500
11988 @@ -0,0 +1,163 @@
11989 +/*******************************************************************************
11990 +
11991 +  Intel(R) Gigabit Ethernet Linux driver
11992 +  Copyright(c) 2007-2009 Intel Corporation.
11993 +
11994 +  This program is free software; you can redistribute it and/or modify it
11995 +  under the terms and conditions of the GNU General Public License,
11996 +  version 2, as published by the Free Software Foundation.
11997 +
11998 +  This program is distributed in the hope it will be useful, but WITHOUT
11999 +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12000 +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12001 +  more details.
12002 +
12003 +  You should have received a copy of the GNU General Public License along with
12004 +  this program; if not, write to the Free Software Foundation, Inc.,
12005 +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
12006 +
12007 +  The full GNU General Public License is included in this distribution in
12008 +  the file called "COPYING".
12009 +
12010 +  Contact Information:
12011 +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
12012 +  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
12013 +
12014 +*******************************************************************************/
12015 +
12016 +#ifndef _E1000_PHY_H_
12017 +#define _E1000_PHY_H_
12018 +
12019 +void e1000_init_phy_ops_generic(struct e1000_hw *hw);
12020 +s32  e1000_check_downshift_generic(struct e1000_hw *hw);
12021 +s32  e1000_check_polarity_m88(struct e1000_hw *hw);
12022 +s32  e1000_check_polarity_igp(struct e1000_hw *hw);
12023 +s32  e1000_check_polarity_ife(struct e1000_hw *hw);
12024 +s32  e1000_check_reset_block_generic(struct e1000_hw *hw);
12025 +s32  e1000_copper_link_autoneg(struct e1000_hw *hw);
12026 +s32  e1000_copper_link_setup_igp(struct e1000_hw *hw);
12027 +s32  e1000_copper_link_setup_m88(struct e1000_hw *hw);
12028 +s32  e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw);
12029 +s32  e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw);
12030 +s32  e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw);
12031 +s32  e1000_get_cable_length_m88(struct e1000_hw *hw);
12032 +s32  e1000_get_cable_length_igp_2(struct e1000_hw *hw);
12033 +s32  e1000_get_cfg_done_generic(struct e1000_hw *hw);
12034 +s32  e1000_get_phy_id(struct e1000_hw *hw);
12035 +s32  e1000_get_phy_info_igp(struct e1000_hw *hw);
12036 +s32  e1000_get_phy_info_m88(struct e1000_hw *hw);
12037 +s32  e1000_phy_sw_reset_generic(struct e1000_hw *hw);
12038 +void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
12039 +s32  e1000_phy_hw_reset_generic(struct e1000_hw *hw);
12040 +s32  e1000_phy_reset_dsp_generic(struct e1000_hw *hw);
12041 +s32  e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data);
12042 +s32  e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data);
12043 +s32  e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
12044 +s32  e1000_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data);
12045 +s32  e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data);
12046 +s32  e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active);
12047 +s32  e1000_setup_copper_link_generic(struct e1000_hw *hw);
12048 +s32  e1000_wait_autoneg_generic(struct e1000_hw *hw);
12049 +s32  e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data);
12050 +s32  e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data);
12051 +s32  e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
12052 +s32  e1000_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data);
12053 +s32  e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data);
12054 +s32  e1000_phy_reset_dsp(struct e1000_hw *hw);
12055 +s32  e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
12056 +                                u32 usec_interval, bool *success);
12057 +s32  e1000_phy_init_script_igp3(struct e1000_hw *hw);
12058 +enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id);
12059 +s32  e1000_determine_phy_address(struct e1000_hw *hw);
12060 +void e1000_power_up_phy_copper(struct e1000_hw *hw);
12061 +void e1000_power_down_phy_copper(struct e1000_hw *hw);
12062 +s32  e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
12063 +s32  e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
12064 +s32  e1000_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data);
12065 +s32  e1000_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data);
12066 +
12067 +#define E1000_MAX_PHY_ADDR                4
12068 +
12069 +/* IGP01E1000 Specific Registers */
12070 +#define IGP01E1000_PHY_PORT_CONFIG        0x10 /* Port Config */
12071 +#define IGP01E1000_PHY_PORT_STATUS        0x11 /* Status */
12072 +#define IGP01E1000_PHY_PORT_CTRL          0x12 /* Control */
12073 +#define IGP01E1000_PHY_LINK_HEALTH        0x13 /* PHY Link Health */
12074 +#define IGP01E1000_GMII_FIFO              0x14 /* GMII FIFO */
12075 +#define IGP01E1000_PHY_CHANNEL_QUALITY    0x15 /* PHY Channel Quality */
12076 +#define IGP02E1000_PHY_POWER_MGMT         0x19 /* Power Management */
12077 +#define IGP01E1000_PHY_PAGE_SELECT        0x1F /* Page Select */
12078 +#define BM_PHY_PAGE_SELECT                22   /* Page Select for BM */
12079 +#define IGP_PAGE_SHIFT                    5
12080 +#define PHY_REG_MASK                      0x1F
12081 +
12082 +#define IGP01E1000_PHY_PCS_INIT_REG       0x00B4
12083 +#define IGP01E1000_PHY_POLARITY_MASK      0x0078
12084 +
12085 +#define IGP01E1000_PSCR_AUTO_MDIX         0x1000
12086 +#define IGP01E1000_PSCR_FORCE_MDI_MDIX    0x2000 /* 0=MDI, 1=MDIX */
12087 +
12088 +#define IGP01E1000_PSCFR_SMART_SPEED      0x0080
12089 +
12090 +/* Enable flexible speed on link-up */
12091 +#define IGP01E1000_GMII_FLEX_SPD          0x0010
12092 +#define IGP01E1000_GMII_SPD               0x0020 /* Enable SPD */
12093 +
12094 +#define IGP02E1000_PM_SPD                 0x0001 /* Smart Power Down */
12095 +#define IGP02E1000_PM_D0_LPLU             0x0002 /* For D0a states */
12096 +#define IGP02E1000_PM_D3_LPLU             0x0004 /* For all other states */
12097 +
12098 +#define IGP01E1000_PLHR_SS_DOWNGRADE      0x8000
12099 +
12100 +#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002
12101 +#define IGP01E1000_PSSR_MDIX              0x0800
12102 +#define IGP01E1000_PSSR_SPEED_MASK        0xC000
12103 +#define IGP01E1000_PSSR_SPEED_1000MBPS    0xC000
12104 +
12105 +#define IGP02E1000_PHY_CHANNEL_NUM        4
12106 +#define IGP02E1000_PHY_AGC_A              0x11B1
12107 +#define IGP02E1000_PHY_AGC_B              0x12B1
12108 +#define IGP02E1000_PHY_AGC_C              0x14B1
12109 +#define IGP02E1000_PHY_AGC_D              0x18B1
12110 +
12111 +#define IGP02E1000_AGC_LENGTH_SHIFT       9   /* Coarse - 15:13, Fine - 12:9 */
12112 +#define IGP02E1000_AGC_LENGTH_MASK        0x7F
12113 +#define IGP02E1000_AGC_RANGE              15
12114 +
12115 +#define IGP03E1000_PHY_MISC_CTRL          0x1B
12116 +#define IGP03E1000_PHY_MISC_DUPLEX_MANUAL_SET  0x1000 /* Manually Set Duplex */
12117 +
12118 +#define E1000_CABLE_LENGTH_UNDEFINED      0xFF
12119 +
12120 +#define E1000_KMRNCTRLSTA_OFFSET          0x001F0000
12121 +#define E1000_KMRNCTRLSTA_OFFSET_SHIFT    16
12122 +#define E1000_KMRNCTRLSTA_REN             0x00200000
12123 +#define E1000_KMRNCTRLSTA_DIAG_OFFSET     0x3    /* Kumeran Diagnostic */
12124 +#define E1000_KMRNCTRLSTA_TIMEOUTS        0x4    /* Kumeran Timeouts */
12125 +#define E1000_KMRNCTRLSTA_INBAND_PARAM    0x9    /* Kumeran InBand Parameters */
12126 +#define E1000_KMRNCTRLSTA_DIAG_NELPBK     0x1000 /* Nearend Loopback mode */
12127 +
12128 +#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10
12129 +#define IFE_PHY_SPECIAL_CONTROL     0x11 /* 100BaseTx PHY Special Control */
12130 +#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Control */
12131 +#define IFE_PHY_MDIX_CONTROL        0x1C /* MDI/MDI-X Control */
12132 +
12133 +/* IFE PHY Extended Status Control */
12134 +#define IFE_PESC_POLARITY_REVERSED    0x0100
12135 +
12136 +/* IFE PHY Special Control */
12137 +#define IFE_PSC_AUTO_POLARITY_DISABLE      0x0010
12138 +#define IFE_PSC_FORCE_POLARITY             0x0020
12139 +#define IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN 0x0100
12140 +
12141 +/* IFE PHY Special Control and LED Control */
12142 +#define IFE_PSCL_PROBE_MODE            0x0020
12143 +#define IFE_PSCL_PROBE_LEDS_OFF        0x0006 /* Force LEDs 0 and 2 off */
12144 +#define IFE_PSCL_PROBE_LEDS_ON         0x0007 /* Force LEDs 0 and 2 on */
12145 +
12146 +/* IFE PHY MDIX Control */
12147 +#define IFE_PMC_MDIX_STATUS      0x0020 /* 1=MDI-X, 0=MDI */
12148 +#define IFE_PMC_FORCE_MDIX       0x0040 /* 1=force MDI-X, 0=force MDI */
12149 +#define IFE_PMC_AUTO_MDIX        0x0080 /* 1=enable auto MDI/MDI-X, 0=disable */
12150 +
12151 +#endif
12152 Index: linux-2.6.22/drivers/net/igb/e1000_regs.h
12153 ===================================================================
12154 --- /dev/null   1970-01-01 00:00:00.000000000 +0000
12155 +++ linux-2.6.22/drivers/net/igb/e1000_regs.h   2009-12-18 12:39:22.000000000 -0500
12156 @@ -0,0 +1,484 @@
12157 +/*******************************************************************************
12158 +
12159 +  Intel(R) Gigabit Ethernet Linux driver
12160 +  Copyright(c) 2007-2009 Intel Corporation.
12161 +
12162 +  This program is free software; you can redistribute it and/or modify it
12163 +  under the terms and conditions of the GNU General Public License,
12164 +  version 2, as published by the Free Software Foundation.
12165 +
12166 +  This program is distributed in the hope it will be useful, but WITHOUT
12167 +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12168 +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12169 +  more details.
12170 +
12171 +  You should have received a copy of the GNU General Public License along with
12172 +  this program; if not, write to the Free Software Foundation, Inc.,
12173 +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
12174 +
12175 +  The full GNU General Public License is included in this distribution in
12176 +  the file called "COPYING".
12177 +
12178 +  Contact Information:
12179 +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
12180 +  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
12181 +
12182 +*******************************************************************************/
12183 +
12184 +#ifndef _E1000_REGS_H_
12185 +#define _E1000_REGS_H_
12186 +
12187 +#define E1000_CTRL     0x00000  /* Device Control - RW */
12188 +#define E1000_CTRL_DUP 0x00004  /* Device Control Duplicate (Shadow) - RW */
12189 +#define E1000_STATUS   0x00008  /* Device Status - RO */
12190 +#define E1000_EECD     0x00010  /* EEPROM/Flash Control - RW */
12191 +#define E1000_EERD     0x00014  /* EEPROM Read - RW */
12192 +#define E1000_CTRL_EXT 0x00018  /* Extended Device Control - RW */
12193 +#define E1000_FLA      0x0001C  /* Flash Access - RW */
12194 +#define E1000_MDIC     0x00020  /* MDI Control - RW */
12195 +#define E1000_SCTL     0x00024  /* SerDes Control - RW */
12196 +#define E1000_FCAL     0x00028  /* Flow Control Address Low - RW */
12197 +#define E1000_FCAH     0x0002C  /* Flow Control Address High -RW */
12198 +#define E1000_FEXT     0x0002C  /* Future Extended - RW */
12199 +#define E1000_FEXTNVM  0x00028  /* Future Extended NVM - RW */
12200 +#define E1000_FCT      0x00030  /* Flow Control Type - RW */
12201 +#define E1000_CONNSW   0x00034  /* Copper/Fiber switch control - RW */
12202 +#define E1000_VET      0x00038  /* VLAN Ether Type - RW */
12203 +#define E1000_ICR      0x000C0  /* Interrupt Cause Read - R/clr */
12204 +#define E1000_ITR      0x000C4  /* Interrupt Throttling Rate - RW */
12205 +#define E1000_ICS      0x000C8  /* Interrupt Cause Set - WO */
12206 +#define E1000_IMS      0x000D0  /* Interrupt Mask Set - RW */
12207 +#define E1000_IMC      0x000D8  /* Interrupt Mask Clear - WO */
12208 +#define E1000_IAM      0x000E0  /* Interrupt Acknowledge Auto Mask */
12209 +#define E1000_RCTL     0x00100  /* Rx Control - RW */
12210 +#define E1000_FCTTV    0x00170  /* Flow Control Transmit Timer Value - RW */
12211 +#define E1000_TXCW     0x00178  /* Tx Configuration Word - RW */
12212 +#define E1000_RXCW     0x00180  /* Rx Configuration Word - RO */
12213 +#define E1000_EICR     0x01580  /* Ext. Interrupt Cause Read - R/clr */
12214 +#define E1000_EITR(_n) (0x01680 + (0x4 * (_n)))
12215 +#define E1000_EICS     0x01520  /* Ext. Interrupt Cause Set - WO */
12216 +#define E1000_EIMS     0x01524  /* Ext. Interrupt Mask Set/Read - RW */
12217 +#define E1000_EIMC     0x01528  /* Ext. Interrupt Mask Clear - WO */
12218 +#define E1000_EIAC     0x0152C  /* Ext. Interrupt Auto Clear - RW */
12219 +#define E1000_EIAM     0x01530  /* Ext. Interrupt Ack Auto Clear Mask - RW */
12220 +#define E1000_GPIE     0x01514  /* General Purpose Interrupt Enable - RW */
12221 +#define E1000_IVAR0    0x01700  /* Interrupt Vector Allocation (array) - RW */
12222 +#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */
12223 +#define E1000_TCTL     0x00400  /* Tx Control - RW */
12224 +#define E1000_TCTL_EXT 0x00404  /* Extended Tx Control - RW */
12225 +#define E1000_TIPG     0x00410  /* Tx Inter-packet gap -RW */
12226 +#define E1000_TBT      0x00448  /* Tx Burst Timer - RW */
12227 +#define E1000_AIT      0x00458  /* Adaptive Interframe Spacing Throttle - RW */
12228 +#define E1000_LEDCTL   0x00E00  /* LED Control - RW */
12229 +#define E1000_EXTCNF_CTRL  0x00F00  /* Extended Configuration Control */
12230 +#define E1000_EXTCNF_SIZE  0x00F08  /* Extended Configuration Size */
12231 +#define E1000_PHY_CTRL     0x00F10  /* PHY Control Register in CSR */
12232 +#define E1000_PBA      0x01000  /* Packet Buffer Allocation - RW */
12233 +#define E1000_PBS      0x01008  /* Packet Buffer Size */
12234 +#define E1000_EEMNGCTL 0x01010  /* MNG EEprom Control */
12235 +#define E1000_EEARBC   0x01024  /* EEPROM Auto Read Bus Control */
12236 +#define E1000_FLASHT   0x01028  /* FLASH Timer Register */
12237 +#define E1000_EEWR     0x0102C  /* EEPROM Write Register - RW */
12238 +#define E1000_FLSWCTL  0x01030  /* FLASH control register */
12239 +#define E1000_FLSWDATA 0x01034  /* FLASH data register */
12240 +#define E1000_FLSWCNT  0x01038  /* FLASH Access Counter */
12241 +#define E1000_FLOP     0x0103C  /* FLASH Opcode Register */
12242 +#define E1000_I2CCMD   0x01028  /* SFPI2C Command Register - RW */
12243 +#define E1000_I2CPARAMS 0x0102C /* SFPI2C Parameters Register - RW */
12244 +#define E1000_WDSTP    0x01040  /* Watchdog Setup - RW */
12245 +#define E1000_SWDSTS   0x01044  /* SW Device Status - RW */
12246 +#define E1000_FRTIMER  0x01048  /* Free Running Timer - RW */
12247 +#define E1000_TCPTIMER 0x0104C  /* TCP Timer - RW */
12248 +#define E1000_VPDDIAG  0x01060  /* VPD Diagnostic - RO */
12249 +#define E1000_ICR_V2   0x01500  /* Interrupt Cause - new location - RC */
12250 +#define E1000_ICS_V2   0x01504  /* Interrupt Cause Set - new location - WO */
12251 +#define E1000_IMS_V2   0x01508  /* Interrupt Mask Set/Read - new location - RW */
12252 +#define E1000_IMC_V2   0x0150C  /* Interrupt Mask Clear - new location - WO */
12253 +#define E1000_IAM_V2   0x01510  /* Interrupt Ack Auto Mask - new location - RW */
12254 +#define E1000_ERT      0x02008  /* Early Rx Threshold - RW */
12255 +#define E1000_FCRTL    0x02160  /* Flow Control Receive Threshold Low - RW */
12256 +#define E1000_FCRTH    0x02168  /* Flow Control Receive Threshold High - RW */
12257 +#define E1000_PSRCTL   0x02170  /* Packet Split Receive Control - RW */
12258 +#define E1000_RDFPCQ(_n)  (0x02430 + (0x4 * (_n)))
12259 +#define E1000_PBRTH    0x02458  /* PB Rx Arbitration Threshold - RW */
12260 +#define E1000_FCRTV    0x02460  /* Flow Control Refresh Timer Value - RW */
12261 +/* Split and Replication Rx Control - RW */
12262 +#define E1000_RDPUMB   0x025CC  /* DMA Rx Descriptor uC Mailbox - RW */
12263 +#define E1000_RDPUAD   0x025D0  /* DMA Rx Descriptor uC Addr Command - RW */
12264 +#define E1000_RDPUWD   0x025D4  /* DMA Rx Descriptor uC Data Write - RW */
12265 +#define E1000_RDPURD   0x025D8  /* DMA Rx Descriptor uC Data Read - RW */
12266 +#define E1000_RDPUCTL  0x025DC  /* DMA Rx Descriptor uC Control - RW */
12267 +#define E1000_PBDIAG   0x02458  /* Packet Buffer Diagnostic - RW */
12268 +#define E1000_RXPBS    0x02404  /* Rx Packet Buffer Size - RW */
12269 +#define E1000_RDTR     0x02820  /* Rx Delay Timer - RW */
12270 +#define E1000_RADV     0x0282C  /* Rx Interrupt Absolute Delay Timer - RW */
12271 +/*
12272 + * Convenience macros
12273 + *
12274 + * Note: "_n" is the queue number of the register to be written to.
12275 + *
12276 + * Example usage:
12277 + * E1000_RDBAL_REG(current_rx_queue)
12278 + */
12279 +#define E1000_RDBAL(_n)      ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \
12280 +                                         (0x0C000 + ((_n) * 0x40)))
12281 +#define E1000_RDBAH(_n)      ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \
12282 +                                         (0x0C004 + ((_n) * 0x40)))
12283 +#define E1000_RDLEN(_n)      ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \
12284 +                                         (0x0C008 + ((_n) * 0x40)))
12285 +#define E1000_SRRCTL(_n)     ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \
12286 +                                         (0x0C00C + ((_n) * 0x40)))
12287 +#define E1000_RDH(_n)        ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \
12288 +                                         (0x0C010 + ((_n) * 0x40)))
12289 +#define E1000_RXCTL(_n)      ((_n) < 4 ? (0x02814 + ((_n) * 0x100)) : \
12290 +                                         (0x0C014 + ((_n) * 0x40)))
12291 +#define E1000_DCA_RXCTRL(_n) E1000_RXCTL(_n)
12292 +#define E1000_RDT(_n)        ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \
12293 +                                         (0x0C018 + ((_n) * 0x40)))
12294 +#define E1000_RXDCTL(_n)     ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \
12295 +                                         (0x0C028 + ((_n) * 0x40)))
12296 +#define E1000_RQDPC(_n)      ((_n) < 4 ? (0x02830 + ((_n) * 0x100)) : \
12297 +                                         (0x0C030 + ((_n) * 0x40)))
12298 +#define E1000_TDBAL(_n)      ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \
12299 +                                         (0x0E000 + ((_n) * 0x40)))
12300 +#define E1000_TDBAH(_n)      ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \
12301 +                                         (0x0E004 + ((_n) * 0x40)))
12302 +#define E1000_TDLEN(_n)      ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \
12303 +                                         (0x0E008 + ((_n) * 0x40)))
12304 +#define E1000_TDH(_n)        ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \
12305 +                                         (0x0E010 + ((_n) * 0x40)))
12306 +#define E1000_TXCTL(_n)      ((_n) < 4 ? (0x03814 + ((_n) * 0x100)) : \
12307 +                                         (0x0E014 + ((_n) * 0x40)))
12308 +#define E1000_DCA_TXCTRL(_n) E1000_TXCTL(_n)
12309 +#define E1000_TDT(_n)        ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \
12310 +                                         (0x0E018 + ((_n) * 0x40)))
12311 +#define E1000_TXDCTL(_n)     ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \
12312 +                                         (0x0E028 + ((_n) * 0x40)))
12313 +#define E1000_TDWBAL(_n)     ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) : \
12314 +                                         (0x0E038 + ((_n) * 0x40)))
12315 +#define E1000_TDWBAH(_n)     ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) : \
12316 +                                         (0x0E03C + ((_n) * 0x40)))
12317 +#define E1000_TARC(_n)                   (0x03840 + ((_n) * 0x100))
12318 +#define E1000_RSRPD    0x02C00  /* Rx Small Packet Detect - RW */
12319 +#define E1000_RAID     0x02C08  /* Receive Ack Interrupt Delay - RW */
12320 +#define E1000_TXDMAC   0x03000  /* Tx DMA Control - RW */
12321 +#define E1000_KABGTXD  0x03004  /* AFE Band Gap Transmit Ref Data */
12322 +#define E1000_PSRTYPE(_i)       (0x05480 + ((_i) * 4))
12323 +#define E1000_RAL(_i)  (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
12324 +                                       (0x054E0 + ((_i - 16) * 8)))
12325 +#define E1000_RAH(_i)  (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
12326 +                                       (0x054E4 + ((_i - 16) * 8)))
12327 +#define E1000_IP4AT_REG(_i)     (0x05840 + ((_i) * 8))
12328 +#define E1000_IP6AT_REG(_i)     (0x05880 + ((_i) * 4))
12329 +#define E1000_WUPM_REG(_i)      (0x05A00 + ((_i) * 4))
12330 +#define E1000_FFMT_REG(_i)      (0x09000 + ((_i) * 8))
12331 +#define E1000_FFVT_REG(_i)      (0x09800 + ((_i) * 8))
12332 +#define E1000_FFLT_REG(_i)      (0x05F00 + ((_i) * 8))
12333 +#define E1000_PBSLAC   0x03100  /* Packet Buffer Slave Access Control */
12334 +#define E1000_PBSLAD(_n)  (0x03110 + (0x4 * (_n)))  /* Packet Buffer DWORD (_n) */
12335 +#define E1000_TXPBS    0x03404  /* Tx Packet Buffer Size - RW */
12336 +#define E1000_TDFH     0x03410  /* Tx Data FIFO Head - RW */
12337 +#define E1000_TDFT     0x03418  /* Tx Data FIFO Tail - RW */
12338 +#define E1000_TDFHS    0x03420  /* Tx Data FIFO Head Saved - RW */
12339 +#define E1000_TDFTS    0x03428  /* Tx Data FIFO Tail Saved - RW */
12340 +#define E1000_TDFPC    0x03430  /* Tx Data FIFO Packet Count - RW */
12341 +#define E1000_TDPUMB   0x0357C  /* DMA Tx Descriptor uC Mail Box - RW */
12342 +#define E1000_TDPUAD   0x03580  /* DMA Tx Descriptor uC Addr Command - RW */
12343 +#define E1000_TDPUWD   0x03584  /* DMA Tx Descriptor uC Data Write - RW */
12344 +#define E1000_TDPURD   0x03588  /* DMA Tx Descriptor uC Data  Read  - RW */
12345 +#define E1000_TDPUCTL  0x0358C  /* DMA Tx Descriptor uC Control - RW */
12346 +#define E1000_DTXCTL   0x03590  /* DMA Tx Control - RW */
12347 +#define E1000_DTXTCPFLGL 0x0359C /* DMA Tx Control flag low - RW */
12348 +#define E1000_DTXTCPFLGH 0x035A0 /* DMA Tx Control flag high - RW */
12349 +#define E1000_DTXMXSZRQ  0x03540 /* DMA Tx Max Total Allow Size Requests - RW */
12350 +#define E1000_TIDV     0x03820  /* Tx Interrupt Delay Value - RW */
12351 +#define E1000_TADV     0x0382C  /* Tx Interrupt Absolute Delay Val - RW */
12352 +#define E1000_TSPMT    0x03830  /* TCP Segmentation PAD & Min Threshold - RW */
12353 +#define E1000_CRCERRS  0x04000  /* CRC Error Count - R/clr */
12354 +#define E1000_ALGNERRC 0x04004  /* Alignment Error Count - R/clr */
12355 +#define E1000_SYMERRS  0x04008  /* Symbol Error Count - R/clr */
12356 +#define E1000_RXERRC   0x0400C  /* Receive Error Count - R/clr */
12357 +#define E1000_MPC      0x04010  /* Missed Packet Count - R/clr */
12358 +#define E1000_SCC      0x04014  /* Single Collision Count - R/clr */
12359 +#define E1000_ECOL     0x04018  /* Excessive Collision Count - R/clr */
12360 +#define E1000_MCC      0x0401C  /* Multiple Collision Count - R/clr */
12361 +#define E1000_LATECOL  0x04020  /* Late Collision Count - R/clr */
12362 +#define E1000_COLC     0x04028  /* Collision Count - R/clr */
12363 +#define E1000_DC       0x04030  /* Defer Count - R/clr */
12364 +#define E1000_TNCRS    0x04034  /* Tx-No CRS - R/clr */
12365 +#define E1000_SEC      0x04038  /* Sequence Error Count - R/clr */
12366 +#define E1000_CEXTERR  0x0403C  /* Carrier Extension Error Count - R/clr */
12367 +#define E1000_RLEC     0x04040  /* Receive Length Error Count - R/clr */
12368 +#define E1000_XONRXC   0x04048  /* XON Rx Count - R/clr */
12369 +#define E1000_XONTXC   0x0404C  /* XON Tx Count - R/clr */
12370 +#define E1000_XOFFRXC  0x04050  /* XOFF Rx Count - R/clr */
12371 +#define E1000_XOFFTXC  0x04054  /* XOFF Tx Count - R/clr */
12372 +#define E1000_FCRUC    0x04058  /* Flow Control Rx Unsupported Count- R/clr */
12373 +#define E1000_PRC64    0x0405C  /* Packets Rx (64 bytes) - R/clr */
12374 +#define E1000_PRC127   0x04060  /* Packets Rx (65-127 bytes) - R/clr */
12375 +#define E1000_PRC255   0x04064  /* Packets Rx (128-255 bytes) - R/clr */
12376 +#define E1000_PRC511   0x04068  /* Packets Rx (256-511 bytes) - R/clr */
12377 +#define E1000_PRC1023  0x0406C  /* Packets Rx (512-1023 bytes) - R/clr */
12378 +#define E1000_PRC1522  0x04070  /* Packets Rx (1024-1522 bytes) - R/clr */
12379 +#define E1000_GPRC     0x04074  /* Good Packets Rx Count - R/clr */
12380 +#define E1000_BPRC     0x04078  /* Broadcast Packets Rx Count - R/clr */
12381 +#define E1000_MPRC     0x0407C  /* Multicast Packets Rx Count - R/clr */
12382 +#define E1000_GPTC     0x04080  /* Good Packets Tx Count - R/clr */
12383 +#define E1000_GORCL    0x04088  /* Good Octets Rx Count Low - R/clr */
12384 +#define E1000_GORCH    0x0408C  /* Good Octets Rx Count High - R/clr */
12385 +#define E1000_GOTCL    0x04090  /* Good Octets Tx Count Low - R/clr */
12386 +#define E1000_GOTCH    0x04094  /* Good Octets Tx Count High - R/clr */
12387 +#define E1000_RNBC     0x040A0  /* Rx No Buffers Count - R/clr */
12388 +#define E1000_RUC      0x040A4  /* Rx Undersize Count - R/clr */
12389 +#define E1000_RFC      0x040A8  /* Rx Fragment Count - R/clr */
12390 +#define E1000_ROC      0x040AC  /* Rx Oversize Count - R/clr */
12391 +#define E1000_RJC      0x040B0  /* Rx Jabber Count - R/clr */
12392 +#define E1000_MGTPRC   0x040B4  /* Management Packets Rx Count - R/clr */
12393 +#define E1000_MGTPDC   0x040B8  /* Management Packets Dropped Count - R/clr */
12394 +#define E1000_MGTPTC   0x040BC  /* Management Packets Tx Count - R/clr */
12395 +#define E1000_TORL     0x040C0  /* Total Octets Rx Low - R/clr */
12396 +#define E1000_TORH     0x040C4  /* Total Octets Rx High - R/clr */
12397 +#define E1000_TOTL     0x040C8  /* Total Octets Tx Low - R/clr */
12398 +#define E1000_TOTH     0x040CC  /* Total Octets Tx High - R/clr */
12399 +#define E1000_TPR      0x040D0  /* Total Packets Rx - R/clr */
12400 +#define E1000_TPT      0x040D4  /* Total Packets Tx - R/clr */
12401 +#define E1000_PTC64    0x040D8  /* Packets Tx (64 bytes) - R/clr */
12402 +#define E1000_PTC127   0x040DC  /* Packets Tx (65-127 bytes) - R/clr */
12403 +#define E1000_PTC255   0x040E0  /* Packets Tx (128-255 bytes) - R/clr */
12404 +#define E1000_PTC511   0x040E4  /* Packets Tx (256-511 bytes) - R/clr */
12405 +#define E1000_PTC1023  0x040E8  /* Packets Tx (512-1023 bytes) - R/clr */
12406 +#define E1000_PTC1522  0x040EC  /* Packets Tx (1024-1522 Bytes) - R/clr */
12407 +#define E1000_MPTC     0x040F0  /* Multicast Packets Tx Count - R/clr */
12408 +#define E1000_BPTC     0x040F4  /* Broadcast Packets Tx Count - R/clr */
12409 +#define E1000_TSCTC    0x040F8  /* TCP Segmentation Context Tx - R/clr */
12410 +#define E1000_TSCTFC   0x040FC  /* TCP Segmentation Context Tx Fail - R/clr */
12411 +#define E1000_IAC      0x04100  /* Interrupt Assertion Count */
12412 +#define E1000_ICRXPTC  0x04104  /* Interrupt Cause Rx Pkt Timer Expire Count */
12413 +#define E1000_ICRXATC  0x04108  /* Interrupt Cause Rx Abs Timer Expire Count */
12414 +#define E1000_ICTXPTC  0x0410C  /* Interrupt Cause Tx Pkt Timer Expire Count */
12415 +#define E1000_ICTXATC  0x04110  /* Interrupt Cause Tx Abs Timer Expire Count */
12416 +#define E1000_ICTXQEC  0x04118  /* Interrupt Cause Tx Queue Empty Count */
12417 +#define E1000_ICTXQMTC 0x0411C  /* Interrupt Cause Tx Queue Min Thresh Count */
12418 +#define E1000_ICRXDMTC 0x04120  /* Interrupt Cause Rx Desc Min Thresh Count */
12419 +#define E1000_ICRXOC   0x04124  /* Interrupt Cause Receiver Overrun Count */
12420 +
12421 +#define E1000_LSECTXUT        0x04300  /* LinkSec Tx Untagged Packet Count - OutPktsUntagged */
12422 +#define E1000_LSECTXPKTE      0x04304  /* LinkSec Encrypted Tx Packets Count - OutPktsEncrypted */
12423 +#define E1000_LSECTXPKTP      0x04308  /* LinkSec Protected Tx Packet Count - OutPktsProtected */
12424 +#define E1000_LSECTXOCTE      0x0430C  /* LinkSec Encrypted Tx Octets Count - OutOctetsEncrypted */
12425 +#define E1000_LSECTXOCTP      0x04310  /* LinkSec Protected Tx Octets Count - OutOctetsProtected */
12426 +#define E1000_LSECRXUT        0x04314  /* LinkSec Untagged non-Strict Rx Packet Count - InPktsUntagged/InPktsNoTag */
12427 +#define E1000_LSECRXOCTD      0x0431C  /* LinkSec Rx Octets Decrypted Count - InOctetsDecrypted */
12428 +#define E1000_LSECRXOCTV      0x04320  /* LinkSec Rx Octets Validated - InOctetsValidated */
12429 +#define E1000_LSECRXBAD       0x04324  /* LinkSec Rx Bad Tag - InPktsBadTag */
12430 +#define E1000_LSECRXNOSCI     0x04328  /* LinkSec Rx Packet No SCI Count - InPktsNoSci */
12431 +#define E1000_LSECRXUNSCI     0x0432C  /* LinkSec Rx Packet Unknown SCI Count - InPktsUnknownSci */
12432 +#define E1000_LSECRXUNCH      0x04330  /* LinkSec Rx Unchecked Packets Count - InPktsUnchecked */
12433 +#define E1000_LSECRXDELAY     0x04340  /* LinkSec Rx Delayed Packet Count - InPktsDelayed */
12434 +#define E1000_LSECRXLATE      0x04350  /* LinkSec Rx Late Packets Count - InPktsLate */
12435 +#define E1000_LSECRXOK(_n)    (0x04360 + (0x04 * (_n))) /* LinkSec Rx Packet OK Count - InPktsOk */
12436 +#define E1000_LSECRXINV(_n)   (0x04380 + (0x04 * (_n))) /* LinkSec Rx Invalid Count - InPktsInvalid */
12437 +#define E1000_LSECRXNV(_n)    (0x043A0 + (0x04 * (_n))) /* LinkSec Rx Not Valid Count - InPktsNotValid */
12438 +#define E1000_LSECRXUNSA      0x043C0  /* LinkSec Rx Unused SA Count - InPktsUnusedSa */
12439 +#define E1000_LSECRXNUSA      0x043D0  /* LinkSec Rx Not Using SA Count - InPktsNotUsingSa */
12440 +#define E1000_LSECTXCAP       0x0B000  /* LinkSec Tx Capabilities Register - RO */
12441 +#define E1000_LSECRXCAP       0x0B300  /* LinkSec Rx Capabilities Register - RO */
12442 +#define E1000_LSECTXCTRL      0x0B004  /* LinkSec Tx Control - RW */
12443 +#define E1000_LSECRXCTRL      0x0B304  /* LinkSec Rx Control - RW */
12444 +#define E1000_LSECTXSCL       0x0B008  /* LinkSec Tx SCI Low - RW */
12445 +#define E1000_LSECTXSCH       0x0B00C  /* LinkSec Tx SCI High - RW */
12446 +#define E1000_LSECTXSA        0x0B010  /* LinkSec Tx SA0 - RW */
12447 +#define E1000_LSECTXPN0       0x0B018  /* LinkSec Tx SA PN 0 - RW */
12448 +#define E1000_LSECTXPN1       0x0B01C  /* LinkSec Tx SA PN 1 - RW */
12449 +#define E1000_LSECRXSCL       0x0B3D0  /* LinkSec Rx SCI Low - RW */
12450 +#define E1000_LSECRXSCH       0x0B3E0  /* LinkSec Rx SCI High - RW */
12451 +#define E1000_LSECTXKEY0(_n)  (0x0B020 + (0x04 * (_n))) /* LinkSec Tx 128-bit Key 0 - WO */
12452 +#define E1000_LSECTXKEY1(_n)  (0x0B030 + (0x04 * (_n))) /* LinkSec Tx 128-bit Key 1 - WO */
12453 +#define E1000_LSECRXSA(_n)    (0x0B310 + (0x04 * (_n))) /* LinkSec Rx SAs - RW */
12454 +#define E1000_LSECRXPN(_n)    (0x0B330 + (0x04 * (_n))) /* LinkSec Rx SAs - RW */
12455 +/*
12456 + * LinkSec Rx Keys  - where _n is the SA no. and _m the 4 dwords of the 128 bit
12457 + * key - RW.
12458 + */
12459 +#define E1000_LSECRXKEY(_n, _m) (0x0B350 + (0x10 * (_n)) + (0x04 * (_m)))
12460 +
12461 +#define E1000_SSVPC             0x041A0  /* Switch Security Violation Packet Count */
12462 +#define E1000_IPSCTRL           0xB430   /* IpSec Control Register */
12463 +#define E1000_IPSRXCMD          0x0B408  /* IPSec Rx Command Register - RW */
12464 +#define E1000_IPSRXIDX          0x0B400  /* IPSec Rx Index - RW */
12465 +#define E1000_IPSRXIPADDR(_n)   (0x0B420+ (0x04 * (_n)))  /* IPSec Rx IPv4/v6 Address - RW */
12466 +#define E1000_IPSRXKEY(_n)      (0x0B410 + (0x04 * (_n))) /* IPSec Rx 128-bit Key - RW */
12467 +#define E1000_IPSRXSALT         0x0B404  /* IPSec Rx Salt - RW */
12468 +#define E1000_IPSRXSPI          0x0B40C  /* IPSec Rx SPI - RW */
12469 +#define E1000_IPSTXKEY(_n)      (0x0B460 + (0x04 * (_n))) /* IPSec Tx 128-bit Key - RW */
12470 +#define E1000_IPSTXSALT         0x0B454  /* IPSec Tx Salt - RW */
12471 +#define E1000_IPSTXIDX          0x0B450  /* IPSec Tx SA IDX - RW */
12472 +#define E1000_PCS_CFG0    0x04200  /* PCS Configuration 0 - RW */
12473 +#define E1000_PCS_LCTL    0x04208  /* PCS Link Control - RW */
12474 +#define E1000_PCS_LSTAT   0x0420C  /* PCS Link Status - RO */
12475 +#define E1000_CBTMPC      0x0402C  /* Circuit Breaker Tx Packet Count */
12476 +#define E1000_HTDPMC      0x0403C  /* Host Transmit Discarded Packets */
12477 +#define E1000_CBRDPC      0x04044  /* Circuit Breaker Rx Dropped Count */
12478 +#define E1000_CBRMPC      0x040FC  /* Circuit Breaker Rx Packet Count */
12479 +#define E1000_RPTHC       0x04104  /* Rx Packets To Host */
12480 +#define E1000_HGPTC       0x04118  /* Host Good Packets Tx Count */
12481 +#define E1000_HTCBDPC     0x04124  /* Host Tx Circuit Breaker Dropped Count */
12482 +#define E1000_HGORCL      0x04128  /* Host Good Octets Received Count Low */
12483 +#define E1000_HGORCH      0x0412C  /* Host Good Octets Received Count High */
12484 +#define E1000_HGOTCL      0x04130  /* Host Good Octets Transmit Count Low */
12485 +#define E1000_HGOTCH      0x04134  /* Host Good Octets Transmit Count High */
12486 +#define E1000_LENERRS     0x04138  /* Length Errors Count */
12487 +#define E1000_SCVPC       0x04228  /* SerDes/SGMII Code Violation Pkt Count */
12488 +#define E1000_HRMPC       0x0A018  /* Header Redirection Missed Packet Count */
12489 +#define E1000_PCS_ANADV   0x04218  /* AN advertisement - RW */
12490 +#define E1000_PCS_LPAB    0x0421C  /* Link Partner Ability - RW */
12491 +#define E1000_PCS_NPTX    0x04220  /* AN Next Page Transmit - RW */
12492 +#define E1000_PCS_LPABNP  0x04224  /* Link Partner Ability Next Page - RW */
12493 +#define E1000_1GSTAT_RCV  0x04228  /* 1GSTAT Code Violation Packet Count - RW */
12494 +#define E1000_RXCSUM   0x05000  /* Rx Checksum Control - RW */
12495 +#define E1000_RLPML    0x05004  /* Rx Long Packet Max Length */
12496 +#define E1000_RFCTL    0x05008  /* Receive Filter Control */
12497 +#define E1000_MTA      0x05200  /* Multicast Table Array - RW Array */
12498 +#define E1000_RA       0x05400  /* Receive Address - RW Array */
12499 +#define E1000_RA2      0x054E0  /* 2nd half of receive address array - RW Array */
12500 +#define E1000_VFTA     0x05600  /* VLAN Filter Table Array - RW Array */
12501 +#define E1000_VT_CTL   0x0581C  /* VMDq Control - RW */
12502 +#define E1000_VFQA0    0x0B000  /* VLAN Filter Queue Array 0 - RW Array */
12503 +#define E1000_VFQA1    0x0B200  /* VLAN Filter Queue Array 1 - RW Array */
12504 +#define E1000_WUC      0x05800  /* Wakeup Control - RW */
12505 +#define E1000_WUFC     0x05808  /* Wakeup Filter Control - RW */
12506 +#define E1000_WUS      0x05810  /* Wakeup Status - RO */
12507 +#define E1000_MANC     0x05820  /* Management Control - RW */
12508 +#define E1000_IPAV     0x05838  /* IP Address Valid - RW */
12509 +#define E1000_IP4AT    0x05840  /* IPv4 Address Table - RW Array */
12510 +#define E1000_IP6AT    0x05880  /* IPv6 Address Table - RW Array */
12511 +#define E1000_WUPL     0x05900  /* Wakeup Packet Length - RW */
12512 +#define E1000_WUPM     0x05A00  /* Wakeup Packet Memory - RO A */
12513 +#define E1000_PBACL    0x05B68  /* MSIx PBA Clear - Read/Write 1's to clear */
12514 +#define E1000_FFLT     0x05F00  /* Flexible Filter Length Table - RW Array */
12515 +#define E1000_HOST_IF  0x08800  /* Host Interface */
12516 +#define E1000_FFMT     0x09000  /* Flexible Filter Mask Table - RW Array */
12517 +#define E1000_FFVT     0x09800  /* Flexible Filter Value Table - RW Array */
12518 +#define E1000_FHFT(_n)  (0x09000 + (_n * 0x100)) /* Flexible Host Filter Table */
12519 +#define E1000_FHFT_EXT(_n) (0x09A00 + (_n * 0x100)) /* Ext Flexible Host Filter Table */
12520 +
12521 +
12522 +#define E1000_KMRNCTRLSTA 0x00034 /* MAC-PHY interface - RW */
12523 +#define E1000_MDPHYA      0x0003C /* PHY address - RW */
12524 +#define E1000_MANC2H      0x05860 /* Management Control To Host - RW */
12525 +#define E1000_SW_FW_SYNC  0x05B5C /* Software-Firmware Synchronization - RW */
12526 +#define E1000_CCMCTL      0x05B48 /* CCM Control Register */
12527 +#define E1000_GIOCTL      0x05B44 /* GIO Analog Control Register */
12528 +#define E1000_SCCTL       0x05B4C /* PCIc PLL Configuration Register */
12529 +#define E1000_GCR         0x05B00 /* PCI-Ex Control */
12530 +#define E1000_GCR2        0x05B64 /* PCI-Ex Control #2 */
12531 +#define E1000_GSCL_1    0x05B10 /* PCI-Ex Statistic Control #1 */
12532 +#define E1000_GSCL_2    0x05B14 /* PCI-Ex Statistic Control #2 */
12533 +#define E1000_GSCL_3    0x05B18 /* PCI-Ex Statistic Control #3 */
12534 +#define E1000_GSCL_4    0x05B1C /* PCI-Ex Statistic Control #4 */
12535 +#define E1000_FACTPS    0x05B30 /* Function Active and Power State to MNG */
12536 +#define E1000_SWSM      0x05B50 /* SW Semaphore */
12537 +#define E1000_FWSM      0x05B54 /* FW Semaphore */
12538 +#define E1000_SWSM2     0x05B58 /* Driver-only SW semaphore (not used by BOOT agents) */
12539 +#define E1000_DCA_ID    0x05B70 /* DCA Requester ID Information - RO */
12540 +#define E1000_DCA_CTRL  0x05B74 /* DCA Control - RW */
12541 +#define E1000_FFLT_DBG  0x05F04 /* Debug Register */
12542 +#define E1000_HICR      0x08F00 /* Host Interface Control */
12543 +
12544 +/* RSS registers */
12545 +#define E1000_CPUVEC    0x02C10 /* CPU Vector Register - RW */
12546 +#define E1000_MRQC      0x05818 /* Multiple Receive Control - RW */
12547 +#define E1000_IMIR(_i)      (0x05A80 + ((_i) * 4))  /* Immediate Interrupt */
12548 +#define E1000_IMIREXT(_i)   (0x05AA0 + ((_i) * 4))  /* Immediate Interrupt Ext*/
12549 +#define E1000_IMIRVP    0x05AC0 /* Immediate Interrupt Rx VLAN Priority - RW */
12550 +#define E1000_MSIXBM(_i)    (0x01600 + ((_i) * 4)) /* MSI-X Allocation Register
12551 +                                                    * (_i) - RW */
12552 +#define E1000_MSIXTADD(_i)  (0x0C000 + ((_i) * 0x10)) /* MSI-X Table entry addr
12553 +                                                       * low reg - RW */
12554 +#define E1000_MSIXTUADD(_i) (0x0C004 + ((_i) * 0x10)) /* MSI-X Table entry addr
12555 +                                                       * upper reg - RW */
12556 +#define E1000_MSIXTMSG(_i)  (0x0C008 + ((_i) * 0x10)) /* MSI-X Table entry
12557 +                                                       * message reg - RW */
12558 +#define E1000_MSIXVCTRL(_i) (0x0C00C + ((_i) * 0x10)) /* MSI-X Table entry
12559 +                                                       * vector ctrl reg - RW */
12560 +#define E1000_MSIXPBA    0x0E000 /* MSI-X Pending bit array */
12561 +#define E1000_RETA(_i)  (0x05C00 + ((_i) * 4)) /* Redirection Table - RW */
12562 +#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW */
12563 +#define E1000_RSSIM     0x05864 /* RSS Interrupt Mask */
12564 +#define E1000_RSSIR     0x05868 /* RSS Interrupt Request */
12565 +/* VT Registers */
12566 +#define E1000_SWPBS     0x03004 /* Switch Packet Buffer Size - RW */
12567 +#define E1000_MBVFICR   0x00C80 /* Mailbox VF Cause - RWC */
12568 +#define E1000_MBVFIMR   0x00C84 /* Mailbox VF int Mask - RW */
12569 +#define E1000_VFLRE     0x00C88 /* VF Register Events - RWC */
12570 +#define E1000_VFRE      0x00C8C /* VF Receive Enables */
12571 +#define E1000_VFTE      0x00C90 /* VF Transmit Enables */
12572 +#define E1000_QDE       0x02408 /* Queue Drop Enable - RW */
12573 +#define E1000_DTXSWC    0x03500 /* DMA Tx Switch Control - RW */
12574 +#define E1000_RPLOLR    0x05AF0 /* Replication Offload - RW */
12575 +#define E1000_UTA       0x0A000 /* Unicast Table Array - RW */
12576 +#define E1000_IOVTCL    0x05BBC /* IOV Control Register */
12577 +#define E1000_VMRCTL    0X05D80 /* Virtual Mirror Rule Control */
12578 +/* These act per VF so an array friendly macro is used */
12579 +#define E1000_V2PMAILBOX(_n)   (0x00C40 + (4 * (_n)))
12580 +#define E1000_P2VMAILBOX(_n)   (0x00C00 + (4 * (_n)))
12581 +#define E1000_VMBMEM(_n)       (0x00800 + (64 * (_n)))
12582 +#define E1000_VFVMBMEM(_n)     (0x00800 + (_n))
12583 +#define E1000_VMOLR(_n)        (0x05AD0 + (4 * (_n)))
12584 +#define E1000_VLVF(_n)         (0x05D00 + (4 * (_n))) /* VLAN Virtual Machine
12585 +                                                       * Filter - RW */
12586 +/* Time Sync */
12587 +#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */
12588 +#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */
12589 +#define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */
12590 +#define E1000_RXSTMPL    0x0B624 /* Rx timestamp Low - RO */
12591 +#define E1000_RXSTMPH    0x0B628 /* Rx timestamp High - RO */
12592 +#define E1000_RXSATRL    0x0B62C /* Rx timestamp attribute low - RO */
12593 +#define E1000_RXSATRH    0x0B630 /* Rx timestamp attribute high - RO */
12594 +#define E1000_TXSTMPL    0x0B618 /* Tx timestamp value Low - RO */
12595 +#define E1000_TXSTMPH    0x0B61C /* Tx timestamp value High - RO */
12596 +#define E1000_SYSTIML    0x0B600 /* System time register Low - RO */
12597 +#define E1000_SYSTIMH    0x0B604 /* System time register High - RO */
12598 +#define E1000_TIMINCA    0x0B608 /* Increment attributes register - RW */
12599 +
12600 +/* Filtering Registers */
12601 +#define E1000_SAQF(_n)  (0x05980 + (4 * (_n))) /* Source Address Queue Fltr */
12602 +#define E1000_DAQF(_n)  (0x059A0 + (4 * (_n))) /* Dest Address Queue Fltr */
12603 +#define E1000_SPQF(_n)  (0x059C0 + (4 * (_n))) /* Source Port Queue Fltr */
12604 +#define E1000_FTQF(_n)  (0x059E0 + (4 * (_n))) /* 5-tuple Queue Fltr */
12605 +#define E1000_TTQF(_n)  (0x059E0 + (4 * (_n))) /* 2-tuple Queue Fltr */
12606 +#define E1000_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */
12607 +#define E1000_ETQF(_n)  (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */
12608 +
12609 +#define E1000_RTTDCS            0x3600  /* Reedtown Tx Desc plane control and status */
12610 +#define E1000_RTTPCS            0x3474  /* Reedtown Tx Packet Plane control and status */
12611 +#define E1000_RTRPCS            0x2474  /* Rx packet plane control and status */
12612 +#define E1000_RTRUP2TC          0x05AC4 /* Rx User Priority to Traffic Class */
12613 +#define E1000_RTTUP2TC          0x0418  /* Transmit User Priority to Traffic Class */
12614 +#define E1000_RTTDTCRC(_n)      (0x3610 + ((_n) * 4)) /* Tx Desc plane TC Rate-scheduler config */
12615 +#define E1000_RTTPTCRC(_n)      (0x3480 + ((_n) * 4)) /* Tx Packet plane TC Rate-Scheduler Config */
12616 +#define E1000_RTRPTCRC(_n)      (0x2480 + ((_n) * 4)) /* Rx Packet plane TC Rate-Scheduler Config */
12617 +#define E1000_RTTDTCRS(_n)      (0x3630 + ((_n) * 4)) /* Tx Desc Plane TC Rate-Scheduler Status */
12618 +#define E1000_RTTDTCRM(_n)      (0x3650 + ((_n) * 4)) /* Tx Desc Plane TC Rate-Scheduler MMW */
12619 +#define E1000_RTTPTCRS(_n)      (0x34A0 + ((_n) * 4)) /* Tx Packet plane TC Rate-Scheduler Status */
12620 +#define E1000_RTTPTCRM(_n)      (0x34C0 + ((_n) * 4)) /* Tx Packet plane TC Rate-scheduler MMW */
12621 +#define E1000_RTRPTCRS(_n)      (0x24A0 + ((_n) * 4)) /* Rx Packet plane TC Rate-Scheduler Status */
12622 +#define E1000_RTRPTCRM(_n)      (0x24C0 + ((_n) * 4)) /* Rx Packet plane TC Rate-Scheduler MMW */
12623 +#define E1000_RTTDVMRM(_n)      (0x3670 + ((_n) * 4)) /* Tx Desc plane VM Rate-Scheduler MMW*/
12624 +#define E1000_RTTBCNRM(_n)      (0x3690 + ((_n) * 4)) /* Tx BCN Rate-Scheduler MMW */
12625 +#define E1000_RTTDQSEL          0x3604  /* Tx Desc Plane Queue Select */
12626 +#define E1000_RTTDVMRC          0x3608  /* Tx Desc Plane VM Rate-Scheduler Config */
12627 +#define E1000_RTTDVMRS          0x360C  /* Tx Desc Plane VM Rate-Scheduler Status */
12628 +#define E1000_RTTBCNRC          0x36B0  /* Tx BCN Rate-Scheduler Config */
12629 +#define E1000_RTTBCNRS          0x36B4  /* Tx BCN Rate-Scheduler Status */
12630 +#define E1000_RTTBCNCR          0xB200  /* Tx BCN Control Register */
12631 +#define E1000_RTTBCNTG          0x35A4  /* Tx BCN Tagging */
12632 +#define E1000_RTTBCNCP          0xB208  /* Tx BCN Congestion point */
12633 +#define E1000_RTRBCNCR          0xB20C  /* Rx BCN Control Register */
12634 +#define E1000_RTTBCNRD          0x36B8  /* Tx BCN Rate Drift */
12635 +#define E1000_PFCTOP            0x1080  /* Priority Flow Control Type and Opcode */
12636 +#define E1000_RTTBCNIDX         0xB204  /* Tx BCN Congestion Point */
12637 +#define E1000_RTTBCNACH         0x0B214 /* Tx BCN Control High */
12638 +#define E1000_RTTBCNACL         0x0B210 /* Tx BCN Control Low */
12639 +
12640 +#endif
12641 Index: linux-2.6.22/drivers/net/igb/igb.h
12642 ===================================================================
12643 --- /dev/null   1970-01-01 00:00:00.000000000 +0000
12644 +++ linux-2.6.22/drivers/net/igb/igb.h  2009-12-18 12:39:22.000000000 -0500
12645 @@ -0,0 +1,444 @@
12646 +/*******************************************************************************
12647 +
12648 +  Intel(R) Gigabit Ethernet Linux driver
12649 +  Copyright(c) 2007-2009 Intel Corporation.
12650 +
12651 +  This program is free software; you can redistribute it and/or modify it
12652 +  under the terms and conditions of the GNU General Public License,
12653 +  version 2, as published by the Free Software Foundation.
12654 +
12655 +  This program is distributed in the hope it will be useful, but WITHOUT
12656 +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12657 +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12658 +  more details.
12659 +
12660 +  You should have received a copy of the GNU General Public License along with
12661 +  this program; if not, write to the Free Software Foundation, Inc.,
12662 +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
12663 +
12664 +  The full GNU General Public License is included in this distribution in
12665 +  the file called "COPYING".
12666 +
12667 +  Contact Information:
12668 +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
12669 +  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
12670 +
12671 +*******************************************************************************/
12672 +
12673 +
12674 +/* Linux PRO/1000 Ethernet Driver main header file */
12675 +
12676 +#ifndef _IGB_H_
12677 +#define _IGB_H_
12678 +
12679 +#include <linux/pci.h>
12680 +#include <linux/netdevice.h>
12681 +#include <linux/vmalloc.h>
12682 +
12683 +#ifdef SIOCETHTOOL
12684 +#include <linux/ethtool.h>
12685 +#endif
12686 +
12687 +#ifdef SIOCSHWTSTAMP
12688 +#include <linux/clocksource.h>
12689 +#include <linux/timecompare.h>
12690 +#include <linux/net_tstamp.h>
12691 +#endif
12692 +struct igb_adapter;
12693 +
12694 +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
12695 +#define IGB_DCA
12696 +#endif
12697 +#ifdef IGB_DCA
12698 +#include <linux/dca.h>
12699 +#endif
12700 +
12701 +
12702 +#ifdef IGB_LRO
12703 +#undef IGB_LRO
12704 +#ifdef NETIF_F_LRO
12705 +#if defined(CONFIG_INET_LRO) || defined(CONFIG_INET_LRO_MODULE)
12706 +#include <linux/inet_lro.h>
12707 +#define MAX_LRO_DESCRIPTORS               8
12708 +#define IGB_LRO
12709 +#endif
12710 +#endif
12711 +#endif /* IGB_LRO */
12712 +
12713 +#include "kcompat.h"
12714 +
12715 +#include "e1000_api.h"
12716 +#include "e1000_82575.h"
12717 +
12718 +#define IGB_ERR(args...) printk(KERN_ERR "igb: " args)
12719 +
12720 +#define PFX "igb: "
12721 +#define DPRINTK(nlevel, klevel, fmt, args...) \
12722 +       (void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
12723 +       printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
12724 +               __FUNCTION__ , ## args))
12725 +
12726 +/* Interrupt defines */
12727 +#define IGB_START_ITR                    648 /* ~6000 ints/sec */
12728 +
12729 +/* Interrupt modes, as used by the IntMode paramter */
12730 +#define IGB_INT_MODE_LEGACY                0
12731 +#define IGB_INT_MODE_MSI                   1
12732 +#define IGB_INT_MODE_MSIX                  2
12733 +
12734 +#define HW_PERF
12735 +/* TX/RX descriptor defines */
12736 +#define IGB_DEFAULT_TXD                  256
12737 +#define IGB_MIN_TXD                       80
12738 +#define IGB_MAX_TXD                     4096
12739 +
12740 +#define IGB_DEFAULT_RXD                  256
12741 +#define IGB_MIN_RXD                       80
12742 +#define IGB_MAX_RXD                     4096
12743 +
12744 +#define IGB_MIN_ITR_USECS                 10 /* 100k irq/sec */
12745 +#define IGB_MAX_ITR_USECS               8191 /* 120  irq/sec */
12746 +
12747 +#define NON_Q_VECTORS                      1
12748 +#define MAX_Q_VECTORS                      8
12749 +
12750 +/* Transmit and receive queues */
12751 +#define IGB_MAX_RX_QUEUES                  (adapter->vfs_allocated_count ? 2 : \
12752 +                                           (hw->mac.type > e1000_82575 ? 8 : 4))
12753 +#define IGB_ABS_MAX_TX_QUEUES              8
12754 +#define IGB_MAX_TX_QUEUES                  IGB_MAX_RX_QUEUES
12755 +
12756 +#define IGB_MAX_VF_MC_ENTRIES              30
12757 +#define IGB_MAX_VF_FUNCTIONS               8
12758 +#define IGB_MAX_VFTA_ENTRIES               128
12759 +#define IGB_MAX_UTA_ENTRIES                128
12760 +#define MAX_EMULATION_MAC_ADDRS            16
12761 +#define OUI_LEN                            3
12762 +
12763 +struct vf_data_storage {
12764 +       unsigned char vf_mac_addresses[ETH_ALEN];
12765 +       u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES];
12766 +       u16 num_vf_mc_hashes;
12767 +       u16 default_vf_vlan_id;
12768 +       u16 vlans_enabled;
12769 +       unsigned char em_mac_addresses[MAX_EMULATION_MAC_ADDRS * ETH_ALEN];
12770 +       u32 uta_table_copy[IGB_MAX_UTA_ENTRIES];
12771 +       u32 flags;
12772 +       unsigned long last_nack;
12773 +};
12774 +
12775 +#define IGB_VF_FLAG_CTS            0x00000001 /* VF is clear to send data */
12776 +#define IGB_VF_FLAG_UNI_PROMISC    0x00000002 /* VF has unicast promisc */
12777 +#define IGB_VF_FLAG_MULTI_PROMISC  0x00000004 /* VF has multicast promisc */
12778 +
12779 +/* RX descriptor control thresholds.
12780 + * PTHRESH - MAC will consider prefetch if it has fewer than this number of
12781 + *           descriptors available in its onboard memory.
12782 + *           Setting this to 0 disables RX descriptor prefetch.
12783 + * HTHRESH - MAC will only prefetch if there are at least this many descriptors
12784 + *           available in host memory.
12785 + *           If PTHRESH is 0, this should also be 0.
12786 + * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back
12787 + *           descriptors until either it has this many to write back, or the
12788 + *           ITR timer expires.
12789 + */
12790 +#define IGB_RX_PTHRESH                    (hw->mac.type <= e1000_82576 ? 16 : 8)
12791 +#define IGB_RX_HTHRESH                     8
12792 +#define IGB_RX_WTHRESH                     1
12793 +#define IGB_TX_PTHRESH                     8
12794 +#define IGB_TX_HTHRESH                     1
12795 +#define IGB_TX_WTHRESH                     ((hw->mac.type == e1000_82576 && \
12796 +                                             adapter->msix_entries) ? 0 : 16) 
12797 +
12798 +/* this is the size past which hardware will drop packets when setting LPE=0 */
12799 +#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
12800 +
12801 +/* Supported Rx Buffer Sizes */
12802 +#define IGB_RXBUFFER_128   128    /* Used for packet split */
12803 +#define IGB_RXBUFFER_256   256    /* Used for packet split */
12804 +#define IGB_RXBUFFER_512   512
12805 +#define IGB_RXBUFFER_1024  1024
12806 +#define IGB_RXBUFFER_2048  2048
12807 +#define IGB_RXBUFFER_4096  4096
12808 +#define IGB_RXBUFFER_8192  8192
12809 +#define IGB_RXBUFFER_16384 16384
12810 +
12811 +/* Packet Buffer allocations */
12812 +#define IGB_PBA_BYTES_SHIFT 0xA
12813 +#define IGB_TX_HEAD_ADDR_SHIFT 7
12814 +#define IGB_PBA_TX_MASK 0xFFFF0000
12815 +
12816 +#define IGB_FC_PAUSE_TIME 0x0680 /* 858 usec */
12817 +
12818 +/* How many Tx Descriptors do we need to call netif_wake_queue ? */
12819 +#define IGB_TX_QUEUE_WAKE      32
12820 +/* How many Rx Buffers do we bundle into one write to the hardware ? */
12821 +#define IGB_RX_BUFFER_WRITE    16      /* Must be power of 2 */
12822 +
12823 +#define AUTO_ALL_MODES            0
12824 +#define IGB_EEPROM_APME         0x0400
12825 +
12826 +#ifndef IGB_MASTER_SLAVE
12827 +/* Switch to override PHY master/slave setting */
12828 +#define IGB_MASTER_SLAVE       e1000_ms_hw_default
12829 +#endif
12830 +
12831 +#define IGB_MNG_VLAN_NONE -1
12832 +
12833 +/* wrapper around a pointer to a socket buffer,
12834 + * so a DMA handle can be stored along with the buffer */
12835 +struct igb_buffer {
12836 +       struct sk_buff *skb;
12837 +       dma_addr_t dma;
12838 +       dma_addr_t page_dma;
12839 +       union {
12840 +               /* TX */
12841 +               struct {
12842 +                       unsigned long time_stamp;
12843 +                       u16 length;
12844 +                       u16 next_to_watch;
12845 +               };
12846 +
12847 +#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
12848 +               /* RX */
12849 +               struct {
12850 +                       unsigned long page_offset;
12851 +                       struct page *page;
12852 +               };
12853 +#endif
12854 +       };
12855 +};
12856 +
12857 +struct igb_queue_stats {
12858 +       u64 packets;
12859 +       u64 bytes;
12860 +};
12861 +
12862 +struct igb_q_vector {
12863 +       struct igb_adapter *adapter; /* backlink */
12864 +       struct igb_ring *rx_ring;
12865 +       struct igb_ring *tx_ring;
12866 +       struct napi_struct napi;
12867 +
12868 +       u32 eims_value;
12869 +       u16 cpu;
12870 +
12871 +       u16 itr_val;
12872 +       u8 set_itr;
12873 +       u8 itr_shift;
12874 +       void __iomem *itr_register;
12875 +
12876 +       char name[IFNAMSIZ + 9];
12877 +#ifndef HAVE_NETDEV_NAPI_LIST
12878 +       struct net_device poll_dev;
12879 +#endif
12880 +};
12881 +
12882 +struct igb_ring {
12883 +       struct igb_q_vector *q_vector; /* backlink to q_vector */
12884 +       struct pci_dev *pdev;          /* pci device for dma mapping */
12885 +       dma_addr_t dma;                /* phys address of the ring */
12886 +       void *desc;                    /* descriptor ring memory */
12887 +       unsigned int size;             /* length of desc. ring in bytes */
12888 +       u16 count;                     /* number of desc. in the ring */
12889 +       u16 next_to_use;
12890 +       u16 next_to_clean;
12891 +       u8 queue_index;
12892 +       u8 reg_idx;
12893 +       void __iomem *head;
12894 +       void __iomem *tail;
12895 +       struct igb_buffer *buffer_info; /* array of buffer info structs */
12896 +
12897 +       unsigned int total_bytes;
12898 +       unsigned int total_packets;
12899 +
12900 +       struct igb_queue_stats stats;
12901 +
12902 +       union {
12903 +               /* TX */
12904 +               struct {
12905 +                       unsigned int restart_queue;
12906 +                       u32 ctx_idx;
12907 +                       bool detect_tx_hung;
12908 +               };
12909 +               /* RX */
12910 +               struct {
12911 +                       u64 hw_csum_err;
12912 +                       u64 hw_csum_good;
12913 +                       u32 rx_buffer_len;
12914 +                       u16 rx_ps_hdr_size;
12915 +                       bool rx_csum;
12916 +#ifdef IGB_LRO
12917 +                       struct net_lro_mgr lro_mgr;
12918 +                       bool lro_used;
12919 +#endif
12920 +               };
12921 +       };
12922 +};
12923 +
12924 +
12925 +#define IGB_ADVTXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)
12926 +
12927 +#define IGB_DESC_UNUSED(R) \
12928 +       ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
12929 +       (R)->next_to_clean - (R)->next_to_use - 1)
12930 +
12931 +#define E1000_RX_DESC_ADV(R, i)            \
12932 +       (&(((union e1000_adv_rx_desc *)((R).desc))[i]))
12933 +#define E1000_TX_DESC_ADV(R, i)            \
12934 +       (&(((union e1000_adv_tx_desc *)((R).desc))[i]))
12935 +#define E1000_TX_CTXTDESC_ADV(R, i)        \
12936 +       (&(((struct e1000_adv_tx_context_desc *)((R).desc))[i]))
12937 +#define E1000_GET_DESC(R, i, type)     (&(((struct type *)((R).desc))[i]))
12938 +#define E1000_TX_DESC(R, i)            E1000_GET_DESC(R, i, e1000_tx_desc)
12939 +#define E1000_RX_DESC(R, i)            E1000_GET_DESC(R, i, e1000_rx_desc)
12940 +
12941 +#define MAX_MSIX_COUNT 10
12942 +/* board specific private data structure */
12943 +
12944 +struct igb_adapter {
12945 +       struct timer_list watchdog_timer;
12946 +       struct timer_list phy_info_timer;
12947 +       struct vlan_group *vlgrp;
12948 +       u16 mng_vlan_id;
12949 +       u32 bd_number;
12950 +       u32 wol;
12951 +       u32 en_mng_pt;
12952 +       u16 link_speed;
12953 +       u16 link_duplex;
12954 +
12955 +       unsigned int total_tx_bytes;
12956 +       unsigned int total_tx_packets;
12957 +       unsigned int total_rx_bytes;
12958 +       unsigned int total_rx_packets;
12959 +       /* Interrupt Throttle Rate */
12960 +       u32 itr;
12961 +       u32 itr_setting;
12962 +       u16 tx_itr;
12963 +       u16 rx_itr;
12964 +
12965 +       struct work_struct reset_task;
12966 +       struct work_struct watchdog_task;
12967 +       bool fc_autoneg;
12968 +       u8  tx_timeout_factor;
12969 +#ifdef ETHTOOL_PHYS_ID
12970 +       struct timer_list blink_timer;
12971 +       unsigned long led_status;
12972 +#endif
12973 +
12974 +       /* TX */
12975 +       struct igb_ring *tx_ring;      /* One per active queue */
12976 +       unsigned int restart_queue;
12977 +       unsigned long tx_queue_len;
12978 +       u32 tx_timeout_count;
12979 +
12980 +       /* RX */
12981 +       struct igb_ring *rx_ring;      /* One per active queue */
12982 +       int num_tx_queues;
12983 +       int num_rx_queues;
12984 +
12985 +       u64 hw_csum_err;
12986 +       u64 hw_csum_good;
12987 +       u32 alloc_rx_buff_failed;
12988 +       u32 max_frame_size;
12989 +       u32 min_frame_size;
12990 +
12991 +       /* OS defined structs */
12992 +       struct net_device *netdev;
12993 +       struct pci_dev *pdev;
12994 +       struct net_device_stats net_stats;
12995 +#ifdef SIOCSHWTSTAMP
12996 +       struct cyclecounter cycles;
12997 +       struct timecounter clock;
12998 +       struct timecompare compare;
12999 +       struct hwtstamp_config hwtstamp_config;
13000 +#endif
13001 +
13002 +       /* structs defined in e1000_hw.h */
13003 +       struct e1000_hw hw;
13004 +       struct e1000_hw_stats stats;
13005 +       struct e1000_phy_info phy_info;
13006 +       struct e1000_phy_stats phy_stats;
13007 +
13008 +#ifdef ETHTOOL_TEST
13009 +       u32 test_icr;
13010 +       struct igb_ring test_tx_ring;
13011 +       struct igb_ring test_rx_ring;
13012 +#endif
13013 +
13014 +
13015 +       int msg_enable;
13016 +       struct msix_entry *msix_entries;
13017 +       int int_mode;
13018 +       u32 eims_enable_mask;
13019 +       u32 eims_other;
13020 +       u32 lli_port;
13021 +       u32 lli_size;
13022 +       unsigned long state;
13023 +       unsigned int flags;
13024 +       u32 eeprom_wol;
13025 +       u32 *config_space;
13026 +#ifdef HAVE_TX_MQ
13027 +       struct igb_ring *multi_tx_table[IGB_ABS_MAX_TX_QUEUES];
13028 +#endif /* HAVE_TX_MQ */
13029 +#ifdef IGB_LRO
13030 +       unsigned int lro_max_aggr;
13031 +       unsigned int lro_aggregated;
13032 +       unsigned int lro_flushed;
13033 +       unsigned int lro_no_desc;
13034 +#endif
13035 +       u16 tx_ring_count;
13036 +       u16 rx_ring_count;
13037 +       unsigned int vfs_allocated_count;
13038 +       struct vf_data_storage *vf_data;
13039 +       u32 RSS_queues;
13040 +       u32 VMDQ_queues;
13041 +       unsigned int num_q_vectors;
13042 +       struct igb_q_vector *q_vector[MAX_Q_VECTORS];
13043 +};
13044 +
13045 +
13046 +#define IGB_FLAG_HAS_MSI           (1 << 0)
13047 +#define IGB_FLAG_MSI_ENABLE        (1 << 1)
13048 +#define IGB_FLAG_DCA_ENABLED       (1 << 3)
13049 +#define IGB_FLAG_LLI_PUSH          (1 << 4)
13050 +#define IGB_FLAG_IN_NETPOLL        (1 << 5)
13051 +#define IGB_FLAG_QUAD_PORT_A       (1 << 6)
13052 +#define IGB_FLAG_QUEUE_PAIRS       (1 << 7)
13053 +
13054 +#define IGB_82576_TSYNC_SHIFT 19
13055 +enum e1000_state_t {
13056 +       __IGB_TESTING,
13057 +       __IGB_RESETTING,
13058 +       __IGB_DOWN
13059 +};
13060 +
13061 +extern char igb_driver_name[];
13062 +extern char igb_driver_version[];
13063 +
13064 +extern int igb_up(struct igb_adapter *);
13065 +extern void igb_down(struct igb_adapter *);
13066 +extern void igb_reinit_locked(struct igb_adapter *);
13067 +extern void igb_reset(struct igb_adapter *);
13068 +extern int igb_set_spd_dplx(struct igb_adapter *, u16);
13069 +extern int igb_setup_tx_resources(struct igb_ring *);
13070 +extern int igb_setup_rx_resources(struct igb_ring *);
13071 +extern void igb_free_tx_resources(struct igb_ring *);
13072 +extern void igb_free_rx_resources(struct igb_ring *);
13073 +extern void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
13074 +extern void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
13075 +extern void igb_setup_tctl(struct igb_adapter *);
13076 +extern void igb_setup_rctl(struct igb_adapter *);
13077 +extern int igb_alloc_rx_buffers_adv(struct igb_ring *, int);
13078 +extern void igb_update_stats(struct igb_adapter *);
13079 +extern void igb_set_ethtool_ops(struct net_device *);
13080 +extern void igb_check_options(struct igb_adapter *);
13081 +#ifdef ETHTOOL_OPS_COMPAT
13082 +extern int ethtool_ioctl(struct ifreq *);
13083 +#endif
13084 +extern int igb_set_vf_mac(struct igb_adapter *adapter,
13085 +                          int vf, unsigned char *mac_addr);
13086 +extern s32 igb_vlvf_set(struct igb_adapter *, u32, bool, u32);
13087 +extern void igb_configure_vt_default_pool(struct igb_adapter *adapter);
13088 +
13089 +#endif /* _IGB_H_ */
13090 Index: linux-2.6.22/drivers/net/igb/igb_ethtool.c
13091 ===================================================================
13092 --- /dev/null   1970-01-01 00:00:00.000000000 +0000
13093 +++ linux-2.6.22/drivers/net/igb/igb_ethtool.c  2009-12-18 12:39:22.000000000 -0500
13094 @@ -0,0 +1,1953 @@
13095 +/*******************************************************************************
13096 +
13097 +  Intel(R) Gigabit Ethernet Linux driver
13098 +  Copyright(c) 2007-2009 Intel Corporation.
13099 +
13100 +  This program is free software; you can redistribute it and/or modify it
13101 +  under the terms and conditions of the GNU General Public License,
13102 +  version 2, as published by the Free Software Foundation.
13103 +
13104 +  This program is distributed in the hope it will be useful, but WITHOUT
13105 +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13106 +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13107 +  more details.
13108 +
13109 +  You should have received a copy of the GNU General Public License along with
13110 +  this program; if not, write to the Free Software Foundation, Inc.,
13111 +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
13112 +
13113 +  The full GNU General Public License is included in this distribution in
13114 +  the file called "COPYING".
13115 +
13116 +  Contact Information:
13117 +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
13118 +  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
13119 +
13120 +*******************************************************************************/
13121 +
13122 +/* ethtool support for igb */
13123 +
13124 +#include <linux/netdevice.h>
13125 +#include <linux/vmalloc.h>
13126 +
13127 +#ifdef SIOCETHTOOL
13128 +#include <linux/ethtool.h>
13129 +
13130 +#include "igb.h"
13131 +#include "igb_regtest.h"
13132 +#include <linux/if_vlan.h>
13133 +
13134 +#ifdef ETHTOOL_OPS_COMPAT
13135 +#include "kcompat_ethtool.c"
13136 +#endif
13137 +
13138 +#ifdef ETHTOOL_GSTATS
13139 +struct igb_stats {
13140 +       char stat_string[ETH_GSTRING_LEN];
13141 +       int sizeof_stat;
13142 +       int stat_offset;
13143 +};
13144 +
13145 +#define IGB_STAT(m) sizeof(((struct igb_adapter *)0)->m), \
13146 +                     offsetof(struct igb_adapter, m)
13147 +static const struct igb_stats igb_gstrings_stats[] = {
13148 +       { "rx_packets", IGB_STAT(stats.gprc) },
13149 +       { "tx_packets", IGB_STAT(stats.gptc) },
13150 +       { "rx_bytes", IGB_STAT(stats.gorc) },
13151 +       { "tx_bytes", IGB_STAT(stats.gotc) },
13152 +       { "rx_broadcast", IGB_STAT(stats.bprc) },
13153 +       { "tx_broadcast", IGB_STAT(stats.bptc) },
13154 +       { "rx_multicast", IGB_STAT(stats.mprc) },
13155 +       { "tx_multicast", IGB_STAT(stats.mptc) },
13156 +       { "rx_errors", IGB_STAT(net_stats.rx_errors) },
13157 +       { "tx_errors", IGB_STAT(net_stats.tx_errors) },
13158 +       { "tx_dropped", IGB_STAT(net_stats.tx_dropped) },
13159 +       { "multicast", IGB_STAT(stats.mprc) },
13160 +       { "collisions", IGB_STAT(stats.colc) },
13161 +       { "rx_length_errors", IGB_STAT(net_stats.rx_length_errors) },
13162 +       { "rx_over_errors", IGB_STAT(net_stats.rx_over_errors) },
13163 +       { "rx_crc_errors", IGB_STAT(stats.crcerrs) },
13164 +       { "rx_frame_errors", IGB_STAT(net_stats.rx_frame_errors) },
13165 +       { "rx_no_buffer_count", IGB_STAT(stats.rnbc) },
13166 +       { "rx_missed_errors", IGB_STAT(stats.mpc) },
13167 +       { "tx_aborted_errors", IGB_STAT(stats.ecol) },
13168 +       { "tx_carrier_errors", IGB_STAT(stats.tncrs) },
13169 +       { "tx_fifo_errors", IGB_STAT(net_stats.tx_fifo_errors) },
13170 +       { "tx_heartbeat_errors", IGB_STAT(net_stats.tx_heartbeat_errors) },
13171 +       { "tx_window_errors", IGB_STAT(stats.latecol) },
13172 +       { "tx_abort_late_coll", IGB_STAT(stats.latecol) },
13173 +       { "tx_deferred_ok", IGB_STAT(stats.dc) },
13174 +       { "tx_single_coll_ok", IGB_STAT(stats.scc) },
13175 +       { "tx_multi_coll_ok", IGB_STAT(stats.mcc) },
13176 +       { "tx_timeout_count", IGB_STAT(tx_timeout_count) },
13177 +       { "tx_restart_queue", IGB_STAT(restart_queue) },
13178 +       { "rx_long_length_errors", IGB_STAT(stats.roc) },
13179 +       { "rx_short_length_errors", IGB_STAT(stats.ruc) },
13180 +       { "rx_align_errors", IGB_STAT(stats.algnerrc) },
13181 +       { "tx_tcp_seg_good", IGB_STAT(stats.tsctc) },
13182 +       { "tx_tcp_seg_failed", IGB_STAT(stats.tsctfc) },
13183 +       { "rx_flow_control_xon", IGB_STAT(stats.xonrxc) },
13184 +       { "rx_flow_control_xoff", IGB_STAT(stats.xoffrxc) },
13185 +       { "tx_flow_control_xon", IGB_STAT(stats.xontxc) },
13186 +       { "tx_flow_control_xoff", IGB_STAT(stats.xofftxc) },
13187 +       { "rx_long_byte_count", IGB_STAT(stats.gorc) },
13188 +       { "rx_csum_offload_good", IGB_STAT(hw_csum_good) },
13189 +       { "rx_csum_offload_errors", IGB_STAT(hw_csum_err) },
13190 +       { "tx_dma_out_of_sync", IGB_STAT(stats.doosync) },
13191 +       { "alloc_rx_buff_failed", IGB_STAT(alloc_rx_buff_failed) },
13192 +       { "tx_smbus", IGB_STAT(stats.mgptc) },
13193 +       { "rx_smbus", IGB_STAT(stats.mgprc) },
13194 +       { "dropped_smbus", IGB_STAT(stats.mgpdc) },
13195 +#ifdef IGB_LRO
13196 +       { "lro_aggregated", IGB_STAT(lro_aggregated) },
13197 +       { "lro_flushed", IGB_STAT(lro_flushed) },
13198 +       { "lro_no_desc", IGB_STAT(lro_no_desc) },
13199 +#endif
13200 +};
13201 +
13202 +#define IGB_QUEUE_STATS_LEN \
13203 +        ((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues + \
13204 +         ((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues) * \
13205 +       (sizeof(struct igb_queue_stats) / sizeof(u64)))
13206 +#define IGB_GLOBAL_STATS_LEN   \
13207 +       (sizeof(igb_gstrings_stats) / sizeof(struct igb_stats))
13208 +#define IGB_STATS_LEN (IGB_GLOBAL_STATS_LEN + IGB_QUEUE_STATS_LEN)
13209 +#endif /* ETHTOOL_GSTATS */
13210 +#ifdef ETHTOOL_TEST
13211 +static const char igb_gstrings_test[][ETH_GSTRING_LEN] = {
13212 +       "Register test  (offline)", "Eeprom test    (offline)",
13213 +       "Interrupt test (offline)", "Loopback test  (offline)",
13214 +       "Link test   (on/offline)"
13215 +};
13216 +#define IGB_TEST_LEN (sizeof(igb_gstrings_test) / ETH_GSTRING_LEN)
13217 +#endif /* ETHTOOL_TEST */
13218 +
13219 +static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
13220 +{
13221 +       struct igb_adapter *adapter = netdev_priv(netdev);
13222 +       struct e1000_hw *hw = &adapter->hw;
13223 +       u32 status;
13224 +
13225 +       if (hw->phy.media_type == e1000_media_type_copper) {
13226 +
13227 +               ecmd->supported = (SUPPORTED_10baseT_Half |
13228 +                                  SUPPORTED_10baseT_Full |
13229 +                                  SUPPORTED_100baseT_Half |
13230 +                                  SUPPORTED_100baseT_Full |
13231 +                                  SUPPORTED_1000baseT_Full|
13232 +                                  SUPPORTED_Autoneg |
13233 +                                  SUPPORTED_TP);
13234 +               ecmd->advertising = ADVERTISED_TP;
13235 +
13236 +               if (hw->mac.autoneg == 1) {
13237 +                       ecmd->advertising |= ADVERTISED_Autoneg;
13238 +                       /* the e1000 autoneg seems to match ethtool nicely */
13239 +                       ecmd->advertising |= hw->phy.autoneg_advertised;
13240 +               }
13241 +
13242 +               ecmd->port = PORT_TP;
13243 +               ecmd->phy_address = hw->phy.addr;
13244 +       } else {
13245 +               ecmd->supported   = (SUPPORTED_1000baseT_Full |
13246 +                                    SUPPORTED_FIBRE |
13247 +                                    SUPPORTED_Autoneg);
13248 +
13249 +               ecmd->advertising = (ADVERTISED_1000baseT_Full |
13250 +                                    ADVERTISED_FIBRE |
13251 +                                    ADVERTISED_Autoneg);
13252 +
13253 +               ecmd->port = PORT_FIBRE;
13254 +       }
13255 +
13256 +       ecmd->transceiver = XCVR_INTERNAL;
13257 +
13258 +       status = E1000_READ_REG(hw, E1000_STATUS);
13259 +
13260 +       if (status & E1000_STATUS_LU) {
13261 +
13262 +               if ((status & E1000_STATUS_SPEED_1000) ||
13263 +                   hw->phy.media_type != e1000_media_type_copper)
13264 +                       ecmd->speed = SPEED_1000;
13265 +               else if (status & E1000_STATUS_SPEED_100)
13266 +                       ecmd->speed = SPEED_100;
13267 +               else
13268 +                       ecmd->speed = SPEED_10;
13269 +
13270 +               if ((status & E1000_STATUS_FD) ||
13271 +                   hw->phy.media_type != e1000_media_type_copper)
13272 +                       ecmd->duplex = DUPLEX_FULL;
13273 +               else
13274 +                       ecmd->duplex = DUPLEX_HALF;
13275 +       } else {
13276 +               ecmd->speed = -1;
13277 +               ecmd->duplex = -1;
13278 +       }
13279 +
13280 +       ecmd->autoneg = hw->mac.autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
13281 +       return 0;
13282 +}
13283 +
13284 +static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
13285 +{
13286 +       struct igb_adapter *adapter = netdev_priv(netdev);
13287 +       struct e1000_hw *hw = &adapter->hw;
13288 +
13289 +       /* When SoL/IDER sessions are active, autoneg/speed/duplex
13290 +        * cannot be changed */
13291 +       if (e1000_check_reset_block(hw)) {
13292 +               DPRINTK(DRV, ERR, "Cannot change link characteristics "
13293 +                       "when SoL/IDER is active.\n");
13294 +               return -EINVAL;
13295 +       }
13296 +
13297 +       while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
13298 +               msleep(1);
13299 +
13300 +       if (ecmd->autoneg == AUTONEG_ENABLE) {
13301 +               hw->mac.autoneg = 1;
13302 +               hw->phy.autoneg_advertised = ecmd->advertising |
13303 +                                            ADVERTISED_TP |
13304 +                                            ADVERTISED_Autoneg;
13305 +               ecmd->advertising = hw->phy.autoneg_advertised;
13306 +               if (adapter->fc_autoneg)
13307 +                       hw->fc.requested_mode = e1000_fc_default;
13308 +       } else {
13309 +               if (igb_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) {
13310 +                       clear_bit(__IGB_RESETTING, &adapter->state);
13311 +                       return -EINVAL;
13312 +               }
13313 +       }
13314 +
13315 +       /* reset the link */
13316 +       if (netif_running(adapter->netdev)) {
13317 +               igb_down(adapter);
13318 +               igb_up(adapter);
13319 +       } else
13320 +               igb_reset(adapter);
13321 +
13322 +       clear_bit(__IGB_RESETTING, &adapter->state);
13323 +       return 0;
13324 +}
13325 +
13326 +static void igb_get_pauseparam(struct net_device *netdev,
13327 +                               struct ethtool_pauseparam *pause)
13328 +{
13329 +       struct igb_adapter *adapter = netdev_priv(netdev);
13330 +       struct e1000_hw *hw = &adapter->hw;
13331 +
13332 +       pause->autoneg =
13333 +               (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
13334 +
13335 +       if (hw->fc.current_mode == e1000_fc_rx_pause)
13336 +               pause->rx_pause = 1;
13337 +       else if (hw->fc.current_mode == e1000_fc_tx_pause)
13338 +               pause->tx_pause = 1;
13339 +       else if (hw->fc.current_mode == e1000_fc_full) {
13340 +               pause->rx_pause = 1;
13341 +               pause->tx_pause = 1;
13342 +       }
13343 +}
13344 +
13345 +static int igb_set_pauseparam(struct net_device *netdev,
13346 +                              struct ethtool_pauseparam *pause)
13347 +{
13348 +       struct igb_adapter *adapter = netdev_priv(netdev);
13349 +       struct e1000_hw *hw = &adapter->hw;
13350 +       int retval = 0;
13351 +
13352 +       adapter->fc_autoneg = pause->autoneg;
13353 +
13354 +       while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
13355 +               msleep(1);
13356 +
13357 +       if (adapter->fc_autoneg == AUTONEG_ENABLE) {
13358 +               hw->fc.requested_mode = e1000_fc_default;
13359 +               if (netif_running(adapter->netdev)) {
13360 +                       igb_down(adapter);
13361 +                       igb_up(adapter);
13362 +               } else {
13363 +                       igb_reset(adapter);
13364 +               }
13365 +       } else {
13366 +               if (pause->rx_pause && pause->tx_pause)
13367 +                       hw->fc.requested_mode = e1000_fc_full;
13368 +               else if (pause->rx_pause && !pause->tx_pause)
13369 +                       hw->fc.requested_mode = e1000_fc_rx_pause;
13370 +               else if (!pause->rx_pause && pause->tx_pause)
13371 +                       hw->fc.requested_mode = e1000_fc_tx_pause;
13372 +               else if (!pause->rx_pause && !pause->tx_pause)
13373 +                       hw->fc.requested_mode = e1000_fc_none;
13374 +
13375 +               hw->fc.current_mode = hw->fc.requested_mode;
13376 +
13377 +               retval = ((hw->phy.media_type == e1000_media_type_copper) ?
13378 +                         e1000_force_mac_fc(hw) : hw->mac.ops.setup_link(hw));
13379 +       }
13380 +
13381 +       clear_bit(__IGB_RESETTING, &adapter->state);
13382 +       return retval;
13383 +}
13384 +
13385 +static u32 igb_get_rx_csum(struct net_device *netdev)
13386 +{
13387 +       struct igb_adapter *adapter = netdev_priv(netdev);
13388 +       return adapter->rx_ring[0].rx_csum;
13389 +}
13390 +
13391 +static int igb_set_rx_csum(struct net_device *netdev, u32 data)
13392 +{
13393 +       struct igb_adapter *adapter = netdev_priv(netdev);
13394 +       int i;
13395 +
13396 +       for (i = 0; i < adapter->num_rx_queues; i++)
13397 +               adapter->rx_ring[i].rx_csum = !!data;
13398 +
13399 +       return 0;
13400 +}
13401 +
13402 +static u32 igb_get_tx_csum(struct net_device *netdev)
13403 +{
13404 +       return (netdev->features & NETIF_F_IP_CSUM) != 0;
13405 +}
13406 +
13407 +static int igb_set_tx_csum(struct net_device *netdev, u32 data)
13408 +{
13409 +       struct igb_adapter *adapter = netdev_priv(netdev);
13410 +
13411 +       if (data) {
13412 +#ifdef NETIF_F_IPV6_CSUM
13413 +               netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
13414 +               if (adapter->hw.mac.type >= e1000_82576)
13415 +                       netdev->features |= NETIF_F_SCTP_CSUM;
13416 +       } else {
13417 +               netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
13418 +                                     NETIF_F_SCTP_CSUM);
13419 +#else
13420 +               netdev->features |= NETIF_F_IP_CSUM;
13421 +               if (adapter->hw.mac.type >= e1000_82576)
13422 +                       netdev->features |= NETIF_F_SCTP_CSUM;
13423 +       } else {
13424 +               netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_SCTP_CSUM);
13425 +#endif
13426 +       }
13427 +
13428 +       return 0;
13429 +}
13430 +
13431 +#ifdef NETIF_F_TSO
13432 +static int igb_set_tso(struct net_device *netdev, u32 data)
13433 +{
13434 +       struct igb_adapter *adapter = netdev_priv(netdev);
13435 +       int i;
13436 +       struct net_device *v_netdev;
13437 +
13438 +       if (data) {
13439 +               netdev->features |= NETIF_F_TSO;
13440 +#ifdef NETIF_F_TSO6
13441 +               netdev->features |= NETIF_F_TSO6;
13442 +#endif
13443 +       } else {
13444 +               netdev->features &= ~NETIF_F_TSO;
13445 +#ifdef NETIF_F_TSO6
13446 +               netdev->features &= ~NETIF_F_TSO6;
13447 +#endif
13448 +               /* disable TSO on all VLANs if they're present */
13449 +               if (!adapter->vlgrp)
13450 +                       goto tso_out;
13451 +               for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
13452 +                       v_netdev = vlan_group_get_device(adapter->vlgrp, i);
13453 +                       if (!v_netdev)
13454 +                               continue;
13455 +
13456 +                       v_netdev->features &= ~NETIF_F_TSO;
13457 +#ifdef NETIF_F_TSO6
13458 +                       v_netdev->features &= ~NETIF_F_TSO6;
13459 +#endif
13460 +                       vlan_group_set_device(adapter->vlgrp, i, v_netdev);
13461 +               }
13462 +       }
13463 +
13464 +tso_out:
13465 +       DPRINTK(PROBE, INFO, "TSO is %s\n", data ? "Enabled" : "Disabled");
13466 +       return 0;
13467 +}
13468 +#endif /* NETIF_F_TSO */
13469 +
13470 +static u32 igb_get_msglevel(struct net_device *netdev)
13471 +{
13472 +       struct igb_adapter *adapter = netdev_priv(netdev);
13473 +       return adapter->msg_enable;
13474 +}
13475 +
13476 +static void igb_set_msglevel(struct net_device *netdev, u32 data)
13477 +{
13478 +       struct igb_adapter *adapter = netdev_priv(netdev);
13479 +       adapter->msg_enable = data;
13480 +}
13481 +
13482 +static int igb_get_regs_len(struct net_device *netdev)
13483 +{
13484 +#define IGB_REGS_LEN 551
13485 +       return IGB_REGS_LEN * sizeof(u32);
13486 +}
13487 +
13488 +static void igb_get_regs(struct net_device *netdev,
13489 +                        struct ethtool_regs *regs, void *p)
13490 +{
13491 +       struct igb_adapter *adapter = netdev_priv(netdev);
13492 +       struct e1000_hw *hw = &adapter->hw;
13493 +       u32 *regs_buff = p;
13494 +       u8 i;
13495 +
13496 +       memset(p, 0, IGB_REGS_LEN * sizeof(u32));
13497 +
13498 +       regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;
13499 +
13500 +       /* General Registers */
13501 +       regs_buff[0] = E1000_READ_REG(hw, E1000_CTRL);
13502 +       regs_buff[1] = E1000_READ_REG(hw, E1000_STATUS);
13503 +       regs_buff[2] = E1000_READ_REG(hw, E1000_CTRL_EXT);
13504 +       regs_buff[3] = E1000_READ_REG(hw, E1000_MDIC);
13505 +       regs_buff[4] = E1000_READ_REG(hw, E1000_SCTL);
13506 +       regs_buff[5] = E1000_READ_REG(hw, E1000_CONNSW);
13507 +       regs_buff[6] = E1000_READ_REG(hw, E1000_VET);
13508 +       regs_buff[7] = E1000_READ_REG(hw, E1000_LEDCTL);
13509 +       regs_buff[8] = E1000_READ_REG(hw, E1000_PBA);
13510 +       regs_buff[9] = E1000_READ_REG(hw, E1000_PBS);
13511 +       regs_buff[10] = E1000_READ_REG(hw, E1000_FRTIMER);
13512 +       regs_buff[11] = E1000_READ_REG(hw, E1000_TCPTIMER);
13513 +
13514 +       /* NVM Register */
13515 +       regs_buff[12] = E1000_READ_REG(hw, E1000_EECD);
13516 +
13517 +       /* Interrupt */
13518 +       /* Reading EICS for EICR because they read the
13519 +        * same but EICS does not clear on read */
13520 +       regs_buff[13] = E1000_READ_REG(hw, E1000_EICS);
13521 +       regs_buff[14] = E1000_READ_REG(hw, E1000_EICS);
13522 +       regs_buff[15] = E1000_READ_REG(hw, E1000_EIMS);
13523 +       regs_buff[16] = E1000_READ_REG(hw, E1000_EIMC);
13524 +       regs_buff[17] = E1000_READ_REG(hw, E1000_EIAC);
13525 +       regs_buff[18] = E1000_READ_REG(hw, E1000_EIAM);
13526 +       /* Reading ICS for ICR because they read the
13527 +        * same but ICS does not clear on read */
13528 +       regs_buff[19] = E1000_READ_REG(hw, E1000_ICS);
13529 +       regs_buff[20] = E1000_READ_REG(hw, E1000_ICS);
13530 +       regs_buff[21] = E1000_READ_REG(hw, E1000_IMS);
13531 +       regs_buff[22] = E1000_READ_REG(hw, E1000_IMC);
13532 +       regs_buff[23] = E1000_READ_REG(hw, E1000_IAC);
13533 +       regs_buff[24] = E1000_READ_REG(hw, E1000_IAM);
13534 +       regs_buff[25] = E1000_READ_REG(hw, E1000_IMIRVP);
13535 +
13536 +       /* Flow Control */
13537 +       regs_buff[26] = E1000_READ_REG(hw, E1000_FCAL);
13538 +       regs_buff[27] = E1000_READ_REG(hw, E1000_FCAH);
13539 +       regs_buff[28] = E1000_READ_REG(hw, E1000_FCTTV);
13540 +       regs_buff[29] = E1000_READ_REG(hw, E1000_FCRTL);
13541 +       regs_buff[30] = E1000_READ_REG(hw, E1000_FCRTH);
13542 +       regs_buff[31] = E1000_READ_REG(hw, E1000_FCRTV);
13543 +
13544 +       /* Receive */
13545 +       regs_buff[32] = E1000_READ_REG(hw, E1000_RCTL);
13546 +       regs_buff[33] = E1000_READ_REG(hw, E1000_RXCSUM);
13547 +       regs_buff[34] = E1000_READ_REG(hw, E1000_RLPML);
13548 +       regs_buff[35] = E1000_READ_REG(hw, E1000_RFCTL);
13549 +       regs_buff[36] = E1000_READ_REG(hw, E1000_MRQC);
13550 +       regs_buff[37] = E1000_READ_REG(hw, E1000_VT_CTL);
13551 +
13552 +       /* Transmit */
13553 +       regs_buff[38] = E1000_READ_REG(hw, E1000_TCTL);
13554 +       regs_buff[39] = E1000_READ_REG(hw, E1000_TCTL_EXT);
13555 +       regs_buff[40] = E1000_READ_REG(hw, E1000_TIPG);
13556 +       regs_buff[41] = E1000_READ_REG(hw, E1000_DTXCTL);
13557 +
13558 +       /* Wake Up */
13559 +       regs_buff[42] = E1000_READ_REG(hw, E1000_WUC);
13560 +       regs_buff[43] = E1000_READ_REG(hw, E1000_WUFC);
13561 +       regs_buff[44] = E1000_READ_REG(hw, E1000_WUS);
13562 +       regs_buff[45] = E1000_READ_REG(hw, E1000_IPAV);
13563 +       regs_buff[46] = E1000_READ_REG(hw, E1000_WUPL);
13564 +
13565 +       /* MAC */
13566 +       regs_buff[47] = E1000_READ_REG(hw, E1000_PCS_CFG0);
13567 +       regs_buff[48] = E1000_READ_REG(hw, E1000_PCS_LCTL);
13568 +       regs_buff[49] = E1000_READ_REG(hw, E1000_PCS_LSTAT);
13569 +       regs_buff[50] = E1000_READ_REG(hw, E1000_PCS_ANADV);
13570 +       regs_buff[51] = E1000_READ_REG(hw, E1000_PCS_LPAB);
13571 +       regs_buff[52] = E1000_READ_REG(hw, E1000_PCS_NPTX);
13572 +       regs_buff[53] = E1000_READ_REG(hw, E1000_PCS_LPABNP);
13573 +
13574 +       /* Statistics */
13575 +       regs_buff[54] = adapter->stats.crcerrs;
13576 +       regs_buff[55] = adapter->stats.algnerrc;
13577 +       regs_buff[56] = adapter->stats.symerrs;
13578 +       regs_buff[57] = adapter->stats.rxerrc;
13579 +       regs_buff[58] = adapter->stats.mpc;
13580 +       regs_buff[59] = adapter->stats.scc;
13581 +       regs_buff[60] = adapter->stats.ecol;
13582 +       regs_buff[61] = adapter->stats.mcc;
13583 +       regs_buff[62] = adapter->stats.latecol;
13584 +       regs_buff[63] = adapter->stats.colc;
13585 +       regs_buff[64] = adapter->stats.dc;
13586 +       regs_buff[65] = adapter->stats.tncrs;
13587 +       regs_buff[66] = adapter->stats.sec;
13588 +       regs_buff[67] = adapter->stats.htdpmc;
13589 +       regs_buff[68] = adapter->stats.rlec;
13590 +       regs_buff[69] = adapter->stats.xonrxc;
13591 +       regs_buff[70] = adapter->stats.xontxc;
13592 +       regs_buff[71] = adapter->stats.xoffrxc;
13593 +       regs_buff[72] = adapter->stats.xofftxc;
13594 +       regs_buff[73] = adapter->stats.fcruc;
13595 +       regs_buff[74] = adapter->stats.prc64;
13596 +       regs_buff[75] = adapter->stats.prc127;
13597 +       regs_buff[76] = adapter->stats.prc255;
13598 +       regs_buff[77] = adapter->stats.prc511;
13599 +       regs_buff[78] = adapter->stats.prc1023;
13600 +       regs_buff[79] = adapter->stats.prc1522;
13601 +       regs_buff[80] = adapter->stats.gprc;
13602 +       regs_buff[81] = adapter->stats.bprc;
13603 +       regs_buff[82] = adapter->stats.mprc;
13604 +       regs_buff[83] = adapter->stats.gptc;
13605 +       regs_buff[84] = adapter->stats.gorc;
13606 +       regs_buff[86] = adapter->stats.gotc;
13607 +       regs_buff[88] = adapter->stats.rnbc;
13608 +       regs_buff[89] = adapter->stats.ruc;
13609 +       regs_buff[90] = adapter->stats.rfc;
13610 +       regs_buff[91] = adapter->stats.roc;
13611 +       regs_buff[92] = adapter->stats.rjc;
13612 +       regs_buff[93] = adapter->stats.mgprc;
13613 +       regs_buff[94] = adapter->stats.mgpdc;
13614 +       regs_buff[95] = adapter->stats.mgptc;
13615 +       regs_buff[96] = adapter->stats.tor;
13616 +       regs_buff[98] = adapter->stats.tot;
13617 +       regs_buff[100] = adapter->stats.tpr;
13618 +       regs_buff[101] = adapter->stats.tpt;
13619 +       regs_buff[102] = adapter->stats.ptc64;
13620 +       regs_buff[103] = adapter->stats.ptc127;
13621 +       regs_buff[104] = adapter->stats.ptc255;
13622 +       regs_buff[105] = adapter->stats.ptc511;
13623 +       regs_buff[106] = adapter->stats.ptc1023;
13624 +       regs_buff[107] = adapter->stats.ptc1522;
13625 +       regs_buff[108] = adapter->stats.mptc;
13626 +       regs_buff[109] = adapter->stats.bptc;
13627 +       regs_buff[110] = adapter->stats.tsctc;
13628 +       regs_buff[111] = adapter->stats.iac;
13629 +       regs_buff[112] = adapter->stats.rpthc;
13630 +       regs_buff[113] = adapter->stats.hgptc;
13631 +       regs_buff[114] = adapter->stats.hgorc;
13632 +       regs_buff[116] = adapter->stats.hgotc;
13633 +       regs_buff[118] = adapter->stats.lenerrs;
13634 +       regs_buff[119] = adapter->stats.scvpc;
13635 +       regs_buff[120] = adapter->stats.hrmpc;
13636 +
13637 +       for (i = 0; i < 4; i++)
13638 +               regs_buff[121 + i] = E1000_READ_REG(hw, E1000_SRRCTL(i));
13639 +       for (i = 0; i < 4; i++)
13640 +               regs_buff[125 + i] = E1000_READ_REG(hw, E1000_PSRTYPE(i));
13641 +       for (i = 0; i < 4; i++)
13642 +               regs_buff[129 + i] = E1000_READ_REG(hw, E1000_RDBAL(i));
13643 +       for (i = 0; i < 4; i++)
13644 +               regs_buff[133 + i] = E1000_READ_REG(hw, E1000_RDBAH(i));
13645 +       for (i = 0; i < 4; i++)
13646 +               regs_buff[137 + i] = E1000_READ_REG(hw, E1000_RDLEN(i));
13647 +       for (i = 0; i < 4; i++)
13648 +               regs_buff[141 + i] = E1000_READ_REG(hw, E1000_RDH(i));
13649 +       for (i = 0; i < 4; i++)
13650 +               regs_buff[145 + i] = E1000_READ_REG(hw, E1000_RDT(i));
13651 +       for (i = 0; i < 4; i++)
13652 +               regs_buff[149 + i] = E1000_READ_REG(hw, E1000_RXDCTL(i));
13653 +
13654 +       for (i = 0; i < 10; i++)
13655 +               regs_buff[153 + i] = E1000_READ_REG(hw, E1000_EITR(i));
13656 +       for (i = 0; i < 8; i++)
13657 +               regs_buff[163 + i] = E1000_READ_REG(hw, E1000_IMIR(i));
13658 +       for (i = 0; i < 8; i++)
13659 +               regs_buff[171 + i] = E1000_READ_REG(hw, E1000_IMIREXT(i));
13660 +       for (i = 0; i < 16; i++)
13661 +               regs_buff[179 + i] = E1000_READ_REG(hw, E1000_RAL(i));
13662 +       for (i = 0; i < 16; i++)
13663 +               regs_buff[195 + i] = E1000_READ_REG(hw, E1000_RAH(i));
13664 +
13665 +       for (i = 0; i < 4; i++)
13666 +               regs_buff[211 + i] = E1000_READ_REG(hw, E1000_TDBAL(i));
13667 +       for (i = 0; i < 4; i++)
13668 +               regs_buff[215 + i] = E1000_READ_REG(hw, E1000_TDBAH(i));
13669 +       for (i = 0; i < 4; i++)
13670 +               regs_buff[219 + i] = E1000_READ_REG(hw, E1000_TDLEN(i));
13671 +       for (i = 0; i < 4; i++)
13672 +               regs_buff[223 + i] = E1000_READ_REG(hw, E1000_TDH(i));
13673 +       for (i = 0; i < 4; i++)
13674 +               regs_buff[227 + i] = E1000_READ_REG(hw, E1000_TDT(i));
13675 +       for (i = 0; i < 4; i++)
13676 +               regs_buff[231 + i] = E1000_READ_REG(hw, E1000_TXDCTL(i));
13677 +       for (i = 0; i < 4; i++)
13678 +               regs_buff[235 + i] = E1000_READ_REG(hw, E1000_TDWBAL(i));
13679 +       for (i = 0; i < 4; i++)
13680 +               regs_buff[239 + i] = E1000_READ_REG(hw, E1000_TDWBAH(i));
13681 +       for (i = 0; i < 4; i++)
13682 +               regs_buff[243 + i] = E1000_READ_REG(hw, E1000_DCA_TXCTRL(i));
13683 +
13684 +       for (i = 0; i < 4; i++)
13685 +               regs_buff[247 + i] = E1000_READ_REG(hw, E1000_IP4AT_REG(i));
13686 +       for (i = 0; i < 4; i++)
13687 +               regs_buff[251 + i] = E1000_READ_REG(hw, E1000_IP6AT_REG(i));
13688 +       for (i = 0; i < 32; i++)
13689 +               regs_buff[255 + i] = E1000_READ_REG(hw, E1000_WUPM_REG(i));
13690 +       for (i = 0; i < 128; i++)
13691 +               regs_buff[287 + i] = E1000_READ_REG(hw, E1000_FFMT_REG(i));
13692 +       for (i = 0; i < 128; i++)
13693 +               regs_buff[415 + i] = E1000_READ_REG(hw, E1000_FFVT_REG(i));
13694 +       for (i = 0; i < 4; i++)
13695 +               regs_buff[543 + i] = E1000_READ_REG(hw, E1000_FFLT_REG(i));
13696 +
13697 +       regs_buff[547] = E1000_READ_REG(hw, E1000_TDFH);
13698 +       regs_buff[548] = E1000_READ_REG(hw, E1000_TDFT);
13699 +       regs_buff[549] = E1000_READ_REG(hw, E1000_TDFHS);
13700 +       regs_buff[550] = E1000_READ_REG(hw, E1000_TDFPC);
13701 +
13702 +}
13703 +
13704 +static int igb_get_eeprom_len(struct net_device *netdev)
13705 +{
13706 +       struct igb_adapter *adapter = netdev_priv(netdev);
13707 +       return adapter->hw.nvm.word_size * 2;
13708 +}
13709 +
13710 +static int igb_get_eeprom(struct net_device *netdev,
13711 +                          struct ethtool_eeprom *eeprom, u8 *bytes)
13712 +{
13713 +       struct igb_adapter *adapter = netdev_priv(netdev);
13714 +       struct e1000_hw *hw = &adapter->hw;
13715 +       u16 *eeprom_buff;
13716 +       int first_word, last_word;
13717 +       int ret_val = 0;
13718 +       u16 i;
13719 +
13720 +       if (eeprom->len == 0)
13721 +               return -EINVAL;
13722 +
13723 +       eeprom->magic = hw->vendor_id | (hw->device_id << 16);
13724 +
13725 +       first_word = eeprom->offset >> 1;
13726 +       last_word = (eeprom->offset + eeprom->len - 1) >> 1;
13727 +
13728 +       eeprom_buff = kmalloc(sizeof(u16) *
13729 +                       (last_word - first_word + 1), GFP_KERNEL);
13730 +       if (!eeprom_buff)
13731 +               return -ENOMEM;
13732 +
13733 +       if (hw->nvm.type == e1000_nvm_eeprom_spi)
13734 +               ret_val = e1000_read_nvm(hw, first_word,
13735 +                                        last_word - first_word + 1,
13736 +                                        eeprom_buff);
13737 +       else {
13738 +               for (i = 0; i < last_word - first_word + 1; i++) {
13739 +                       ret_val = e1000_read_nvm(hw, first_word + i, 1,
13740 +                                                     &eeprom_buff[i]);
13741 +                       if (ret_val)
13742 +                               break;
13743 +               }
13744 +       }
13745 +
13746 +       /* Device's eeprom is always little-endian, word addressable */
13747 +       for (i = 0; i < last_word - first_word + 1; i++)
13748 +               le16_to_cpus(&eeprom_buff[i]);
13749 +
13750 +       memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1),
13751 +                       eeprom->len);
13752 +       kfree(eeprom_buff);
13753 +
13754 +       return ret_val;
13755 +}
13756 +
13757 +static int igb_set_eeprom(struct net_device *netdev,
13758 +                          struct ethtool_eeprom *eeprom, u8 *bytes)
13759 +{
13760 +       struct igb_adapter *adapter = netdev_priv(netdev);
13761 +       struct e1000_hw *hw = &adapter->hw;
13762 +       u16 *eeprom_buff;
13763 +       void *ptr;
13764 +       int max_len, first_word, last_word, ret_val = 0;
13765 +       u16 i;
13766 +
13767 +       if (eeprom->len == 0)
13768 +               return -EOPNOTSUPP;
13769 +
13770 +       if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
13771 +               return -EFAULT;
13772 +
13773 +       max_len = hw->nvm.word_size * 2;
13774 +
13775 +       first_word = eeprom->offset >> 1;
13776 +       last_word = (eeprom->offset + eeprom->len - 1) >> 1;
13777 +       eeprom_buff = kmalloc(max_len, GFP_KERNEL);
13778 +       if (!eeprom_buff)
13779 +               return -ENOMEM;
13780 +
13781 +       ptr = (void *)eeprom_buff;
13782 +
13783 +       if (eeprom->offset & 1) {
13784 +               /* need read/modify/write of first changed EEPROM word */
13785 +               /* only the second byte of the word is being modified */
13786 +               ret_val = e1000_read_nvm(hw, first_word, 1,
13787 +                                           &eeprom_buff[0]);
13788 +               ptr++;
13789 +       }
13790 +       if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
13791 +               /* need read/modify/write of last changed EEPROM word */
13792 +               /* only the first byte of the word is being modified */
13793 +               ret_val = e1000_read_nvm(hw, last_word, 1,
13794 +                                 &eeprom_buff[last_word - first_word]);
13795 +       }
13796 +
13797 +       /* Device's eeprom is always little-endian, word addressable */
13798 +       for (i = 0; i < last_word - first_word + 1; i++)
13799 +               le16_to_cpus(&eeprom_buff[i]);
13800 +
13801 +       memcpy(ptr, bytes, eeprom->len);
13802 +
13803 +       for (i = 0; i < last_word - first_word + 1; i++)
13804 +               cpu_to_le16s(&eeprom_buff[i]);
13805 +
13806 +       ret_val = e1000_write_nvm(hw, first_word,
13807 +                                 last_word - first_word + 1, eeprom_buff);
13808 +
13809 +       /* Update the checksum over the first part of the EEPROM if needed
13810 +        * (NOTE: the "82573 shadow RAM" note here was e1000e copy-paste) */
13811 +       if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG)))
13812 +               e1000_update_nvm_checksum(hw);
13813 +
13814 +       kfree(eeprom_buff);
13815 +       return ret_val;
13816 +}
13817 +
13818 +static void igb_get_drvinfo(struct net_device *netdev,
13819 +                            struct ethtool_drvinfo *drvinfo)
13820 +{
13821 +       struct igb_adapter *adapter = netdev_priv(netdev);
13822 +       u16 eeprom_data;
13823 +
13824 +       strncpy(drvinfo->driver,  igb_driver_name, 32);
13825 +       strncpy(drvinfo->version, igb_driver_version, 32);
13826 +
13827 +       /* EEPROM image version # is reported as firmware version # for
13828 +        * 82575 controllers */
13829 +       e1000_read_nvm(&adapter->hw, 5, 1, &eeprom_data);
13830 +       snprintf(drvinfo->fw_version, 32, "%d.%d-%d",
13831 +                (eeprom_data & 0xF000) >> 12,
13832 +                (eeprom_data & 0x0FF0) >> 4,
13833 +                eeprom_data & 0x000F);
13834 +
13835 +       strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
13836 +       drvinfo->n_stats = IGB_STATS_LEN;
13837 +       drvinfo->testinfo_len = IGB_TEST_LEN;
13838 +       drvinfo->regdump_len = igb_get_regs_len(netdev);
13839 +       drvinfo->eedump_len = igb_get_eeprom_len(netdev);
13840 +}
13841 +
13842 +static void igb_get_ringparam(struct net_device *netdev,
13843 +                              struct ethtool_ringparam *ring)
13844 +{
13845 +       struct igb_adapter *adapter = netdev_priv(netdev);
13846 +
13847 +       ring->rx_max_pending = IGB_MAX_RXD;
13848 +       ring->tx_max_pending = IGB_MAX_TXD;
13849 +       ring->rx_mini_max_pending = 0;
13850 +       ring->rx_jumbo_max_pending = 0;
13851 +       ring->rx_pending = adapter->rx_ring_count;
13852 +       ring->tx_pending = adapter->tx_ring_count;
13853 +       ring->rx_mini_pending = 0;
13854 +       ring->rx_jumbo_pending = 0;
13855 +}
13856 +
13857 +static int igb_set_ringparam(struct net_device *netdev,
13858 +                             struct ethtool_ringparam *ring)
13859 +{
13860 +       struct igb_adapter *adapter = netdev_priv(netdev);
13861 +       struct igb_ring *temp_ring;
13862 +       int i, err;
13863 +       u16 new_rx_count, new_tx_count;
13864 +
13865 +       if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
13866 +               return -EINVAL;
13867 +
13868 +       new_rx_count = min(ring->rx_pending, (u32)IGB_MAX_RXD);
13869 +       new_rx_count = max(new_rx_count, (u16)IGB_MIN_RXD);
13870 +       new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);
13871 +
13872 +       new_tx_count = min(ring->tx_pending, (u32)IGB_MAX_TXD);
13873 +       new_tx_count = max(new_tx_count, (u16)IGB_MIN_TXD);
13874 +       new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);
13875 +
13876 +       if ((new_tx_count == adapter->tx_ring_count) &&
13877 +           (new_rx_count == adapter->rx_ring_count)) {
13878 +               /* nothing to do */
13879 +               return 0;
13880 +       }
13881 +
13882 +       if (adapter->num_tx_queues > adapter->num_rx_queues)
13883 +               temp_ring = vmalloc(adapter->num_tx_queues * sizeof(struct igb_ring));
13884 +       else
13885 +               temp_ring = vmalloc(adapter->num_rx_queues * sizeof(struct igb_ring));
13886 +       if (!temp_ring)
13887 +               return -ENOMEM;
13888 +
13889 +       while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
13890 +               msleep(1);
13891 +
13892 +       if (netif_running(adapter->netdev))
13893 +               igb_down(adapter);
13894 +
13895 +       /*
13896 +        * We can't just free everything and then setup again,
13897 +        * because the ISRs in MSI-X mode get passed pointers
13898 +        * to the tx and rx ring structs.
13899 +        */
13900 +       if (new_tx_count != adapter->tx_ring_count) {
13901 +               memcpy(temp_ring, adapter->tx_ring,
13902 +                      adapter->num_tx_queues * sizeof(struct igb_ring));
13903 +
13904 +               for (i = 0; i < adapter->num_tx_queues; i++) {
13905 +                       temp_ring[i].count = new_tx_count;
13906 +                       err = igb_setup_tx_resources(&temp_ring[i]);
13907 +                       if (err) {
13908 +                               while (i) {
13909 +                                       i--;
13910 +                                       igb_free_tx_resources(&temp_ring[i]);
13911 +                               }
13912 +                               goto err_setup;
13913 +                       }
13914 +               }
13915 +
13916 +               for (i = 0; i < adapter->num_tx_queues; i++)
13917 +                       igb_free_tx_resources(&adapter->tx_ring[i]);
13918 +
13919 +               memcpy(adapter->tx_ring, temp_ring,
13920 +                      adapter->num_tx_queues * sizeof(struct igb_ring));
13921 +
13922 +               adapter->tx_ring_count = new_tx_count;
13923 +       }
13924 +
13925 +       if (new_rx_count != adapter->rx_ring_count) {
13926 +               memcpy(temp_ring, adapter->rx_ring,
13927 +                      adapter->num_rx_queues * sizeof(struct igb_ring));
13928 +
13929 +               for (i = 0; i < adapter->num_rx_queues; i++) {
13930 +                       temp_ring[i].count = new_rx_count;
13931 +                       err = igb_setup_rx_resources(&temp_ring[i]);
13932 +                       if (err) {
13933 +                               while (i) {
13934 +                                       i--;
13935 +                                       igb_free_rx_resources(&temp_ring[i]);
13936 +                               }
13937 +                               goto err_setup;
13938 +                       }
13939 +
13940 +               }
13941 +
13942 +               for (i = 0; i < adapter->num_rx_queues; i++)
13943 +                       igb_free_rx_resources(&adapter->rx_ring[i]);
13944 +
13945 +               memcpy(adapter->rx_ring, temp_ring,
13946 +                      adapter->num_rx_queues * sizeof(struct igb_ring));
13947 +
13948 +               adapter->rx_ring_count = new_rx_count;
13949 +       }
13950 +
13951 +       err = 0;
13952 +err_setup:
13953 +       if (netif_running(adapter->netdev))
13954 +               igb_up(adapter);
13955 +
13956 +       clear_bit(__IGB_RESETTING, &adapter->state);
13957 +       vfree(temp_ring);
13958 +       return err;
13959 +}
13960 +
13961 +static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
13962 +                            int reg, u32 mask, u32 write)
13963 +{
13964 +       struct e1000_hw *hw = &adapter->hw;
13965 +       u32 pat, val;
13966 +       static const u32 _test[] =
13967 +               {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
13968 +       for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {
13969 +               E1000_WRITE_REG(hw, reg, (_test[pat] & write));
13970 +               val = E1000_READ_REG(hw, reg);
13971 +               if (val != (_test[pat] & write & mask)) {
13972 +                       DPRINTK(DRV, ERR, "pattern test reg %04X failed: got "
13973 +                               "0x%08X expected 0x%08X\n",
13974 +                               E1000_REGISTER(hw, reg), val,
13975 +                               (_test[pat] & write & mask));
13976 +                       *data = E1000_REGISTER(hw, reg);
13977 +                       return 1;
13978 +               }
13979 +       }
13980 +
13981 +       return 0;
13982 +}
13983 +
13984 +static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
13985 +                             int reg, u32 mask, u32 write)
13986 +{
13987 +       struct e1000_hw *hw = &adapter->hw;
13988 +       u32 val;
13989 +       E1000_WRITE_REG(hw, reg, write & mask);
13990 +       val = E1000_READ_REG(hw, reg);
13991 +       if ((write & mask) != (val & mask)) {
13992 +               DPRINTK(DRV, ERR, "set/check reg %04X test failed: got 0x%08X "
13993 +                       "expected 0x%08X\n", reg, (val & mask), (write & mask));
13994 +               *data = E1000_REGISTER(hw, reg);
13995 +               return 1;
13996 +       }
13997 +
13998 +       return 0;
13999 +}
14000 +
14001 +#define REG_PATTERN_TEST(reg, mask, write)                                     \
14002 +       do {                                                                   \
14003 +               if (reg_pattern_test(adapter, data, reg, mask, write))         \
14004 +                       return 1;                                              \
14005 +       } while (0)
14006 +
14007 +#define REG_SET_AND_CHECK(reg, mask, write)                                    \
14008 +       do {                                                                   \
14009 +               if (reg_set_and_check(adapter, data, reg, mask, write))              \
14010 +                       return 1;                                              \
14011 +       } while (0)
14012 +
14013 +static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
14014 +{
14015 +       struct e1000_hw *hw = &adapter->hw;
14016 +       struct igb_reg_test *test;
14017 +       u32 value, before, after;
14018 +       u32 i, toggle;
14019 +
14020 +       switch (adapter->hw.mac.type) {
14021 +       case e1000_82576:
14022 +               test = reg_test_82576;
14023 +               toggle = 0x7FFFF3FF;
14024 +               break;
14025 +       default:
14026 +               test = reg_test_82575;
14027 +               toggle = 0x7FFFF3FF;
14028 +               break;
14029 +       }
14030 +
14031 +       /* Because the status register is such a special case,
14032 +        * we handle it separately from the rest of the register
14033 +        * tests.  Some bits are read-only, some toggle, and some
14034 +        * are writable on newer MACs.
14035 +        */
14036 +       before = E1000_READ_REG(hw, E1000_STATUS);
14037 +       value = (E1000_READ_REG(hw, E1000_STATUS) & toggle);
14038 +       E1000_WRITE_REG(hw, E1000_STATUS, toggle);
14039 +       after = E1000_READ_REG(hw, E1000_STATUS) & toggle;
14040 +       if (value != after) {
14041 +               DPRINTK(DRV, ERR, "failed STATUS register test got: "
14042 +                       "0x%08X expected: 0x%08X\n", after, value);
14043 +               *data = 1;
14044 +               return 1;
14045 +       }
14046 +       /* restore previous status */
14047 +       E1000_WRITE_REG(hw, E1000_STATUS, before);
14048 +
14049 +       /* Perform the remainder of the register test, looping through
14050 +        * the test table until we either fail or reach the null entry.
14051 +        */
14052 +       while (test->reg) {
14053 +               for (i = 0; i < test->array_len; i++) {
14054 +                       switch (test->test_type) {
14055 +                       case PATTERN_TEST:
14056 +                               REG_PATTERN_TEST(test->reg +
14057 +                                               (i * test->reg_offset),
14058 +                                               test->mask,
14059 +                                               test->write);
14060 +                               break;
14061 +                       case SET_READ_TEST:
14062 +                               REG_SET_AND_CHECK(test->reg +
14063 +                                               (i * test->reg_offset),
14064 +                                               test->mask,
14065 +                                               test->write);
14066 +                               break;
14067 +                       case WRITE_NO_TEST:
14068 +                               writel(test->write,
14069 +                                      (adapter->hw.hw_addr + test->reg)
14070 +                                       + (i * test->reg_offset));
14071 +                               break;
14072 +                       case TABLE32_TEST:
14073 +                               REG_PATTERN_TEST(test->reg + (i * 4),
14074 +                                               test->mask,
14075 +                                               test->write);
14076 +                               break;
14077 +                       case TABLE64_TEST_LO:
14078 +                               REG_PATTERN_TEST(test->reg + (i * 8),
14079 +                                               test->mask,
14080 +                                               test->write);
14081 +                               break;
14082 +                       case TABLE64_TEST_HI:
14083 +                               REG_PATTERN_TEST((test->reg + 4) + (i * 8),
14084 +                                               test->mask,
14085 +                                               test->write);
14086 +                               break;
14087 +                       }
14088 +               }
14089 +               test++;
14090 +       }
14091 +
14092 +       *data = 0;
14093 +       return 0;
14094 +}
14095 +
14096 +static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data)
14097 +{
14098 +       u16 temp;
14099 +       u16 checksum = 0;
14100 +       u16 i;
14101 +
14102 +       *data = 0;
14103 +       /* Read and add up the contents of the EEPROM */
14104 +       for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
14105 +               if ((e1000_read_nvm(&adapter->hw, i, 1, &temp)) < 0) {
14106 +                       *data = 1;
14107 +                       break;
14108 +               }
14109 +               checksum += temp;
14110 +       }
14111 +
14112 +       /* If Checksum is not Correct return error else test passed */
14113 +       if ((checksum != (u16) NVM_SUM) && !(*data))
14114 +               *data = 2;
14115 +
14116 +       return *data;
14117 +}
14118 +
14119 +static irqreturn_t igb_test_intr(int irq, void *data)
14120 +{
14121 +       struct igb_adapter *adapter = (struct igb_adapter *) data;
14122 +       struct e1000_hw *hw = &adapter->hw;
14123 +
14124 +       adapter->test_icr |= E1000_READ_REG(hw, E1000_ICR);
14125 +
14126 +       return IRQ_HANDLED;
14127 +}
14128 +
14129 +static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
14130 +{
14131 +       struct e1000_hw *hw = &adapter->hw;
14132 +       struct net_device *netdev = adapter->netdev;
14133 +       u32 mask, ics_mask, i = 0, shared_int = TRUE;
14134 +       u32 irq = adapter->pdev->irq;
14135 +
14136 +       *data = 0;
14137 +
14138 +       /* Hook up test interrupt handler just for this test */
14139 +       if (adapter->msix_entries) {
14140 +               if (request_irq(adapter->msix_entries[0].vector,
14141 +                               &igb_test_intr, 0, netdev->name, adapter)) {
14142 +                       *data = 1;
14143 +                       return -1;
14144 +               }
14145 +       } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
14146 +               shared_int = FALSE;
14147 +               if (request_irq(irq, &igb_test_intr, 0, netdev->name, adapter)) {
14148 +                       *data = 1;
14149 +                       return -1;
14150 +               }
14151 +       } else if (!request_irq(irq, &igb_test_intr, IRQF_PROBE_SHARED,
14152 +                               netdev->name, adapter)) {
14153 +               shared_int = FALSE;
14154 +       } else if (request_irq(irq, &igb_test_intr, IRQF_SHARED,
14155 +                netdev->name, adapter)) {
14156 +               *data = 1;
14157 +               return -1;
14158 +       }
14159 +       DPRINTK(HW, INFO, "testing %s interrupt\n",
14160 +               (shared_int ? "shared" : "unshared"));
14161 +
14162 +       /* Disable all the interrupts */
14163 +       E1000_WRITE_REG(hw, E1000_IMC, ~0);
14164 +       msleep(10);
14165 +
14166 +       /* Define all writable bits for ICS */
14167 +       switch (hw->mac.type) {
14168 +       case e1000_82575:
14169 +               ics_mask = 0x37F47EDD;
14170 +               break;
14171 +       case e1000_82576:
14172 +               ics_mask = 0x77D4FBFD;
14173 +               break;
14174 +       default:
14175 +               ics_mask = 0x7FFFFFFF;
14176 +               break;
14177 +       }
14178 +
14179 +       /* Test each interrupt */
14180 +       for (; i < 31; i++) {
14181 +               /* Interrupt to test */
14182 +               mask = 1 << i;
14183 +
14184 +               if (!(mask & ics_mask))
14185 +                       continue;
14186 +
14187 +               if (!shared_int) {
14188 +                       /* Disable the interrupt to be reported in
14189 +                        * the cause register and then force the same
14190 +                        * interrupt and see if one gets posted.  If
14191 +                        * an interrupt was posted to the bus, the
14192 +                        * test failed.
14193 +                        */
14194 +                       adapter->test_icr = 0;
14195 +
14196 +                       /* Flush any pending interrupts */
14197 +                       E1000_WRITE_REG(hw, E1000_ICR, ~0);
14198 +
14199 +                       E1000_WRITE_REG(hw, E1000_IMC, mask);
14200 +                       E1000_WRITE_REG(hw, E1000_ICS, mask);
14201 +                       msleep(10);
14202 +
14203 +                       if (adapter->test_icr & mask) {
14204 +                               *data = 3;
14205 +                               break;
14206 +                       }
14207 +               }
14208 +
14209 +               /* Enable the interrupt to be reported in
14210 +                * the cause register and then force the same
14211 +                * interrupt and see if one gets posted.  If
14212 +                * an interrupt was not posted to the bus, the
14213 +                * test failed.
14214 +                */
14215 +               adapter->test_icr = 0;
14216 +
14217 +               /* Flush any pending interrupts */
14218 +               E1000_WRITE_REG(hw, E1000_ICR, ~0);
14219 +
14220 +               E1000_WRITE_REG(hw, E1000_IMS, mask);
14221 +               E1000_WRITE_REG(hw, E1000_ICS, mask);
14222 +               msleep(10);
14223 +
14224 +               if (!(adapter->test_icr & mask)) {
14225 +                       *data = 4;
14226 +                       break;
14227 +               }
14228 +
14229 +               if (!shared_int) {
14230 +                       /* Disable the other interrupts to be reported in
14231 +                        * the cause register and then force the other
14232 +                        * interrupts and see if any get posted.  If
14233 +                        * an interrupt was posted to the bus, the
14234 +                        * test failed.
14235 +                        */
14236 +                       adapter->test_icr = 0;
14237 +
14238 +                       /* Flush any pending interrupts */
14239 +                       E1000_WRITE_REG(hw, E1000_ICR, ~0);
14240 +
14241 +                       E1000_WRITE_REG(hw, E1000_IMC, ~mask);
14242 +                       E1000_WRITE_REG(hw, E1000_ICS, ~mask);
14243 +                       msleep(10);
14244 +
14245 +                       if (adapter->test_icr & mask) {
14246 +                               *data = 5;
14247 +                               break;
14248 +                       }
14249 +               }
14250 +       }
14251 +
14252 +       /* Disable all the interrupts */
14253 +       E1000_WRITE_REG(hw, E1000_IMC, ~0);
14254 +       msleep(10);
14255 +
14256 +       /* Unhook test interrupt handler */
14257 +       if (adapter->msix_entries)
14258 +               free_irq(adapter->msix_entries[0].vector, adapter);
14259 +       else
14260 +               free_irq(irq, adapter);
14261 +
14262 +       return *data;
14263 +}
14264 +
14265 +static void igb_free_desc_rings(struct igb_adapter *adapter)
14266 +{
14267 +       igb_free_tx_resources(&adapter->test_tx_ring);
14268 +       igb_free_rx_resources(&adapter->test_rx_ring);
14269 +}
14270 +
14271 +static int igb_setup_desc_rings(struct igb_adapter *adapter)
14272 +{
14273 +       struct igb_ring *tx_ring = &adapter->test_tx_ring;
14274 +       struct igb_ring *rx_ring = &adapter->test_rx_ring;
14275 +       int i, ret_val;
14276 +
14277 +       /* Setup Tx descriptor ring and Tx buffers */
14278 +       tx_ring->count = IGB_DEFAULT_TXD;
14279 +       tx_ring->pdev = adapter->pdev;
14280 +       tx_ring->reg_idx = adapter->vfs_allocated_count;
14281 +
14282 +       if (igb_setup_tx_resources(tx_ring)) {
14283 +               ret_val = 1;
14284 +               goto err_nomem;
14285 +       }
14286 +
14287 +       igb_setup_tctl(adapter);
14288 +       igb_configure_tx_ring(adapter, tx_ring);
14289 +
14290 +       for (i = 0; i < tx_ring->count; i++) {
14291 +               union e1000_adv_tx_desc *tx_desc;
14292 +               unsigned int size = 1024;
14293 +               struct sk_buff *skb = alloc_skb(size, GFP_KERNEL);
14294 +
14295 +               if (!skb) {
14296 +                       ret_val = 2;
14297 +                       goto err_nomem;
14298 +               }
14299 +               skb_put(skb, size);
14300 +               tx_ring->buffer_info[i].skb = skb;
14301 +               tx_ring->buffer_info[i].length = skb->len;
14302 +               tx_ring->buffer_info[i].dma =
14303 +                       pci_map_single(tx_ring->pdev, skb->data, skb->len,
14304 +                                      PCI_DMA_TODEVICE);
14305 +               tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
14306 +               tx_desc->read.buffer_addr =
14307 +                       cpu_to_le64(tx_ring->buffer_info[i].dma);
14308 +               tx_desc->read.olinfo_status =
14309 +                       cpu_to_le32(skb->len << E1000_ADVTXD_PAYLEN_SHIFT);
14310 +               tx_desc->read.cmd_type_len = cpu_to_le32(skb->len);
14311 +               tx_desc->read.cmd_type_len |=
14312 +                       cpu_to_le32(E1000_ADVTXD_DTYP_DATA |
14313 +                                   E1000_ADVTXD_DCMD_DEXT);
14314 +               tx_desc->read.cmd_type_len |=
14315 +                       cpu_to_le32(IGB_ADVTXD_DCMD |
14316 +                                   E1000_ADVTXD_DTYP_DATA |
14317 +                                   E1000_ADVTXD_DCMD_IFCS |
14318 +                                   E1000_ADVTXD_DCMD_DEXT);
14319 +       }
14320 +
14321 +       /* Setup Rx descriptor ring and Rx buffers */
14322 +       rx_ring->count = IGB_DEFAULT_RXD;
14323 +       rx_ring->pdev = adapter->pdev;
14324 +       rx_ring->rx_buffer_len = IGB_RXBUFFER_2048;
14325 +       rx_ring->reg_idx = adapter->vfs_allocated_count;
14326 +
14327 +       if (igb_setup_rx_resources(rx_ring)) {
14328 +               ret_val = 3;
14329 +               goto err_nomem;
14330 +       }
14331 +
14332 +       /* set the default queue to queue 0 of PF */
14333 +       E1000_WRITE_REG(&adapter->hw, E1000_MRQC,
14334 +                       adapter->vfs_allocated_count << 3); 
14335 +
14336 +       /* enable receive ring */
14337 +       igb_setup_rctl(adapter);
14338 +       igb_configure_rx_ring(adapter, rx_ring);
14339 +
14340 +       if (igb_alloc_rx_buffers_adv(rx_ring, rx_ring->count)) {
14341 +               ret_val = 4;
14342 +               goto err_nomem;
14343 +       }
14344 +
14345 +
14346 +       return 0;
14347 +
14348 +err_nomem:
14349 +       igb_free_desc_rings(adapter);
14350 +       return ret_val;
14351 +}
14352 +
14353 +static void igb_phy_disable_receiver(struct igb_adapter *adapter)
14354 +{
14355 +       /* Write out to PHY registers 29 and 30 to disable the Receiver. */
14356 +       e1000_write_phy_reg(&adapter->hw, 29, 0x001F);
14357 +       e1000_write_phy_reg(&adapter->hw, 30, 0x8FFC);
14358 +       e1000_write_phy_reg(&adapter->hw, 29, 0x001A);
14359 +       e1000_write_phy_reg(&adapter->hw, 30, 0x8FF0);
14360 +}
14361 +
14362 +static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
14363 +{
14364 +       struct e1000_hw *hw = &adapter->hw;
14365 +       u32 ctrl_reg = 0;
14366 +
14367 +       hw->mac.autoneg = FALSE;
14368 +
14369 +       if (hw->phy.type == e1000_phy_m88) {
14370 +               /* Auto-MDI/MDIX Off */
14371 +               e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
14372 +               /* reset to update Auto-MDI/MDIX */
14373 +               e1000_write_phy_reg(hw, PHY_CONTROL, 0x9140);
14374 +               /* autoneg off */
14375 +               e1000_write_phy_reg(hw, PHY_CONTROL, 0x8140);
14376 +       }
14377 +
14378 +       ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
14379 +
14380 +       /* force 1000, set loopback */
14381 +       e1000_write_phy_reg(hw, PHY_CONTROL, 0x4140);
14382 +
14383 +       /* Now set up the MAC to the same speed/duplex as the PHY. */
14384 +       ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
14385 +       ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
14386 +       ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
14387 +                    E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
14388 +                    E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
14389 +                    E1000_CTRL_FD |     /* Force Duplex to FULL */
14390 +                    E1000_CTRL_SLU);    /* Set link up enable bit */
14391 +
14392 +       if (hw->phy.type == e1000_phy_m88)
14393 +               ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
14394 +
14395 +       E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
14396 +
14397 +       /* Disable the receiver on the PHY so when a cable is plugged in, the
14398 +        * PHY does not begin to autoneg when a cable is reconnected to the NIC.
14399 +        */
14400 +       if (hw->phy.type == e1000_phy_m88)
14401 +               igb_phy_disable_receiver(adapter);
14402 +
14403 +       udelay(500);
14404 +
14405 +       return 0;
14406 +}
14407 +
14408 +static int igb_set_phy_loopback(struct igb_adapter *adapter)
14409 +{
14410 +       return igb_integrated_phy_loopback(adapter);
14411 +}
14412 +
14413 +static int igb_setup_loopback_test(struct igb_adapter *adapter)
14414 +{
14415 +       struct e1000_hw *hw = &adapter->hw;
14416 +       u32 reg;
14417 +
14418 +       reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
14419 +
14420 +       /* use CTRL_EXT to identify link type as SGMII can appear as copper */
14421 +       if (reg & E1000_CTRL_EXT_LINK_MODE_MASK) {
14422 +               reg = E1000_READ_REG(hw, E1000_RCTL);
14423 +               reg |= E1000_RCTL_LBM_TCVR;
14424 +               E1000_WRITE_REG(hw, E1000_RCTL, reg);
14425 +
14426 +               E1000_WRITE_REG(hw, E1000_SCTL, E1000_ENABLE_SERDES_LOOPBACK);
14427 +
14428 +               reg = E1000_READ_REG(hw, E1000_CTRL);
14429 +               reg &= ~(E1000_CTRL_RFCE |
14430 +                        E1000_CTRL_TFCE |
14431 +                        E1000_CTRL_LRST);
14432 +               reg |= E1000_CTRL_SLU |
14433 +                      E1000_CTRL_FD;
14434 +               E1000_WRITE_REG(hw, E1000_CTRL, reg);
14435 +
14436 +               /* Unset switch control to serdes energy detect */
14437 +               reg = E1000_READ_REG(hw, E1000_CONNSW);
14438 +               reg &= ~E1000_CONNSW_ENRGSRC;
14439 +               E1000_WRITE_REG(hw, E1000_CONNSW, reg);
14440 +
14441 +               /* Set PCS register for forced speed */
14442 +               reg = E1000_READ_REG(hw, E1000_PCS_LCTL);
14443 +               reg &= ~E1000_PCS_LCTL_AN_ENABLE;     /* Disable Autoneg*/
14444 +               reg |= E1000_PCS_LCTL_FLV_LINK_UP |   /* Force link up */
14445 +                      E1000_PCS_LCTL_FSV_1000 |      /* Force 1000    */
14446 +                      E1000_PCS_LCTL_FDV_FULL |      /* SerDes Full duplex */
14447 +                      E1000_PCS_LCTL_FSD |           /* Force Speed */
14448 +                      E1000_PCS_LCTL_FORCE_LINK;     /* Force Link */
14449 +               E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg);
14450 +
14451 +               return 0;
14452 +       }
14453 +
14454 +       return igb_set_phy_loopback(adapter);
14455 +}
14456 +
14457 +static void igb_loopback_cleanup(struct igb_adapter *adapter)
14458 +{
14459 +       struct e1000_hw *hw = &adapter->hw;
14460 +       u32 rctl;
14461 +       u16 phy_reg;
14462 +
14463 +       rctl = E1000_READ_REG(hw, E1000_RCTL);
14464 +       rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
14465 +       E1000_WRITE_REG(hw, E1000_RCTL, rctl);
14466 +
14467 +       hw->mac.autoneg = TRUE;
14468 +       e1000_read_phy_reg(hw, PHY_CONTROL, &phy_reg);
14469 +       if (phy_reg & MII_CR_LOOPBACK) {
14470 +               phy_reg &= ~MII_CR_LOOPBACK;
14471 +               e1000_write_phy_reg(hw, PHY_CONTROL, phy_reg);
14472 +               e1000_phy_commit(hw);
14473 +       }
14474 +}
14475 +
14476 +static void igb_create_lbtest_frame(struct sk_buff *skb,
14477 +                                    unsigned int frame_size)
14478 +{
14479 +       memset(skb->data, 0xFF, frame_size);
14480 +       frame_size &= ~1;
14481 +       memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
14482 +       memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
14483 +       memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
14484 +}
14485 +
14486 +static int igb_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
14487 +{
14488 +       frame_size &= ~1;
14489 +       if (*(skb->data + 3) == 0xFF) {
14490 +               if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
14491 +                  (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
14492 +                       return 0;
14493 +               }
14494 +       }
14495 +       return 13;
14496 +}
14497 +
14498 +static int igb_run_loopback_test(struct igb_adapter *adapter)
14499 +{
14500 +       struct igb_ring *tx_ring = &adapter->test_tx_ring;
14501 +       struct igb_ring *rx_ring = &adapter->test_rx_ring;
14502 +       int i, j, k, l, lc, good_cnt, ret_val = 0;
14503 +       unsigned long time;
14504 +
14505 +       writel(rx_ring->count - 1, rx_ring->tail);
14506 +
14507 +       /* Calculate the loop count based on the largest descriptor ring
14508 +        * The idea is to wrap the largest ring a number of times using 64
14509 +        * send/receive pairs during each loop
14510 +        */
14511 +
14512 +       if (rx_ring->count <= tx_ring->count)
14513 +               lc = ((tx_ring->count / 64) * 2) + 1;
14514 +       else
14515 +               lc = ((rx_ring->count / 64) * 2) + 1;
14516 +
14517 +       k = l = 0;
14518 +       for (j = 0; j <= lc; j++) { /* loop count loop */
14519 +               for (i = 0; i < 64; i++) { /* send the packets */
14520 +                       igb_create_lbtest_frame(tx_ring->buffer_info[k].skb,
14521 +                                               1024);
14522 +                       pci_dma_sync_single_for_device(tx_ring->pdev,
14523 +                               tx_ring->buffer_info[k].dma,
14524 +                               tx_ring->buffer_info[k].length,
14525 +                               PCI_DMA_TODEVICE);
14526 +                       if (unlikely(++k == tx_ring->count))
14527 +                               k = 0;
14528 +               }
14529 +               writel(k, tx_ring->tail);
14530 +               msleep(200);
14531 +
14532 +               time = jiffies; /* set the start time for the receive */
14533 +               good_cnt = 0;
14534 +               do { /* receive the sent packets */
14535 +                       pci_dma_sync_single_for_cpu(rx_ring->pdev,
14536 +                                       rx_ring->buffer_info[l].dma,
14537 +                                       rx_ring->rx_buffer_len,
14538 +                                       PCI_DMA_FROMDEVICE);
14539 +
14540 +                       ret_val = igb_check_lbtest_frame(
14541 +                                            rx_ring->buffer_info[l].skb, 1024);
14542 +                       if (!ret_val)
14543 +                               good_cnt++;
14544 +                       if (unlikely(++l == rx_ring->count))
14545 +                               l = 0;
14546 +                       /* time + 20 msecs (200 msecs on 2.4) is more than
14547 +                        * enough time to complete the receives, if it's
14548 +                        * exceeded, break and error off
14549 +                        */
14550 +               } while (good_cnt < 64 && jiffies < (time + 20));
14551 +               if (good_cnt != 64) {
14552 +                       ret_val = 13; /* ret_val is the same as mis-compare */
14553 +                       break;
14554 +               }
14555 +               if (jiffies >= (time + 20)) {
14556 +                       ret_val = 14; /* error code for time out error */
14557 +                       break;
14558 +               }
14559 +       } /* end loop count loop */
14560 +       return ret_val;
14561 +}
14562 +
14563 +static int igb_loopback_test(struct igb_adapter *adapter, u64 *data)
14564 +{
14565 +       /* PHY loopback cannot be performed if SoL/IDER
14566 +        * sessions are active */
14567 +       if (e1000_check_reset_block(&adapter->hw)) {
14568 +               DPRINTK(DRV, ERR, "Cannot do PHY loopback test "
14569 +                       "when SoL/IDER is active.\n");
14570 +               *data = 0;
14571 +               goto out;
14572 +       }
14573 +       *data = igb_setup_desc_rings(adapter);
14574 +       if (*data)
14575 +               goto out;
14576 +       *data = igb_setup_loopback_test(adapter);
14577 +       if (*data)
14578 +               goto err_loopback;
14579 +       *data = igb_run_loopback_test(adapter);
14580 +       igb_loopback_cleanup(adapter);
14581 +
14582 +err_loopback:
14583 +       igb_free_desc_rings(adapter);
14584 +out:
14585 +       return *data;
14586 +}
14587 +
14588 +static int igb_link_test(struct igb_adapter *adapter, u64 *data)
14589 +{
14590 +       struct e1000_hw *hw = &adapter->hw;
14591 +       *data = 0;
14592 +       if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
14593 +               int i = 0;
14594 +               adapter->hw.mac.serdes_has_link = FALSE;
14595 +
14596 +               /* On some blade server designs, link establishment
14597 +                * could take as long as 2-3 minutes */
14598 +               do {
14599 +                       e1000_check_for_link(&adapter->hw);
14600 +                       if (adapter->hw.mac.serdes_has_link)
14601 +                               return *data;
14602 +                       msleep(20);
14603 +               } while (i++ < 3750);
14604 +
14605 +               *data = 1;
14606 +       } else {
14607 +               e1000_check_for_link(&adapter->hw);
14608 +               if (adapter->hw.mac.autoneg)
14609 +                       msleep(4000);
14610 +
14611 +               if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU))
14612 +                       *data = 1;
14613 +       }
14614 +       return *data;
14615 +}
14616 +
14617 +static void igb_diag_test(struct net_device *netdev,
14618 +                          struct ethtool_test *eth_test, u64 *data)
14619 +{
14620 +       struct igb_adapter *adapter = netdev_priv(netdev);
14621 +       u16 autoneg_advertised;
14622 +       u8 forced_speed_duplex, autoneg;
14623 +       bool if_running = netif_running(netdev);
14624 +
14625 +       set_bit(__IGB_TESTING, &adapter->state);
14626 +       if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
14627 +               /* Offline tests */
14628 +
14629 +               /* save speed, duplex, autoneg settings */
14630 +               autoneg_advertised = adapter->hw.phy.autoneg_advertised;
14631 +               forced_speed_duplex = adapter->hw.mac.forced_speed_duplex;
14632 +               autoneg = adapter->hw.mac.autoneg;
14633 +
14634 +               DPRINTK(HW, INFO, "offline testing starting\n");
14635 +
14636 +               /* Link test performed before hardware reset so autoneg doesn't
14637 +                * interfere with test result */
14638 +               if (igb_link_test(adapter, &data[4]))
14639 +                       eth_test->flags |= ETH_TEST_FL_FAILED;
14640 +
14641 +               if (if_running)
14642 +                       /* indicate we're in test mode */
14643 +                       dev_close(netdev);
14644 +               else
14645 +                       igb_reset(adapter);
14646 +
14647 +               if (igb_reg_test(adapter, &data[0]))
14648 +                       eth_test->flags |= ETH_TEST_FL_FAILED;
14649 +
14650 +               igb_reset(adapter);
14651 +               if (igb_eeprom_test(adapter, &data[1]))
14652 +                       eth_test->flags |= ETH_TEST_FL_FAILED;
14653 +
14654 +               igb_reset(adapter);
14655 +               if (igb_intr_test(adapter, &data[2]))
14656 +                       eth_test->flags |= ETH_TEST_FL_FAILED;
14657 +
14658 +               igb_reset(adapter);
14659 +               if (igb_loopback_test(adapter, &data[3]))
14660 +                       eth_test->flags |= ETH_TEST_FL_FAILED;
14661 +
14662 +               /* restore speed, duplex, autoneg settings */
14663 +               adapter->hw.phy.autoneg_advertised = autoneg_advertised;
14664 +               adapter->hw.mac.forced_speed_duplex = forced_speed_duplex;
14665 +               adapter->hw.mac.autoneg = autoneg;
14666 +
14667 +               /* force this routine to wait until autoneg complete/timeout */
14668 +               adapter->hw.phy.autoneg_wait_to_complete = TRUE;
14669 +               igb_reset(adapter);
14670 +               adapter->hw.phy.autoneg_wait_to_complete = FALSE;
14671 +
14672 +               clear_bit(__IGB_TESTING, &adapter->state);
14673 +               if (if_running)
14674 +                       dev_open(netdev);
14675 +       } else {
14676 +               DPRINTK(HW, INFO, "online testing starting\n");
14677 +               /* Online tests */
14678 +               if (igb_link_test(adapter, &data[4]))
14679 +                       eth_test->flags |= ETH_TEST_FL_FAILED;
14680 +
14681 +               /* Online tests aren't run; pass by default */
14682 +               data[0] = 0;
14683 +               data[1] = 0;
14684 +               data[2] = 0;
14685 +               data[3] = 0;
14686 +
14687 +               clear_bit(__IGB_TESTING, &adapter->state);
14688 +       }
14689 +       msleep_interruptible(4 * 1000);
14690 +}
14691 +
14692 +static int igb_wol_exclusion(struct igb_adapter *adapter,
14693 +                             struct ethtool_wolinfo *wol)
14694 +{
14695 +       struct e1000_hw *hw = &adapter->hw;
14696 +       int retval = 1; /* fail by default */
14697 +
14698 +       switch (hw->device_id) {
14699 +       case E1000_DEV_ID_82575GB_QUAD_COPPER:
14700 +               /* WoL not supported */
14701 +               wol->supported = 0;
14702 +               break;
14703 +       case E1000_DEV_ID_82575EB_FIBER_SERDES:
14704 +       case E1000_DEV_ID_82576_FIBER:
14705 +       case E1000_DEV_ID_82576_SERDES:
14706 +               /* Wake events not supported on port B */
14707 +               if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FUNC_1) {
14708 +                       wol->supported = 0;
14709 +                       break;
14710 +               }
14711 +               /* return success for non excluded adapter ports */
14712 +               retval = 0;
14713 +               break;
14714 +       case E1000_DEV_ID_82576_QUAD_COPPER:
14715 +               /* quad port adapters only support WoL on port A */
14716 +               if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) {
14717 +                       wol->supported = 0;
14718 +                       break;
14719 +               }
14720 +               /* return success for non excluded adapter ports */
14721 +               retval = 0;
14722 +               break;
14723 +       default:
14724 +               /* dual port cards only support WoL on port A from now on
14725 +                * unless it was enabled in the eeprom for port B
14726 +                * so exclude FUNC_1 ports from having WoL enabled */
14727 +               if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FUNC_1 &&
14728 +                   !adapter->eeprom_wol) {
14729 +                       wol->supported = 0;
14730 +                       break;
14731 +               }
14732 +
14733 +               retval = 0;
14734 +       }
14735 +
14736 +       return retval;
14737 +}
14738 +
14739 +static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
14740 +{
14741 +       struct igb_adapter *adapter = netdev_priv(netdev);
14742 +
14743 +       wol->supported = WAKE_UCAST | WAKE_MCAST |
14744 +                        WAKE_BCAST | WAKE_MAGIC;
14745 +       wol->wolopts = 0;
14746 +
14747 +       /* this function will set ->supported = 0 and return 1 if wol is not
14748 +        * supported by this hardware */
14749 +       if (igb_wol_exclusion(adapter, wol) ||
14750 +           !device_can_wakeup(&adapter->pdev->dev))
14751 +               return;
14752 +
14753 +       /* apply any specific unsupported masks here */
14754 +       switch (adapter->hw.device_id) {
14755 +       default:
14756 +               break;
14757 +       }
14758 +
14759 +       if (adapter->wol & E1000_WUFC_EX)
14760 +               wol->wolopts |= WAKE_UCAST;
14761 +       if (adapter->wol & E1000_WUFC_MC)
14762 +               wol->wolopts |= WAKE_MCAST;
14763 +       if (adapter->wol & E1000_WUFC_BC)
14764 +               wol->wolopts |= WAKE_BCAST;
14765 +       if (adapter->wol & E1000_WUFC_MAG)
14766 +               wol->wolopts |= WAKE_MAGIC;
14767 +
14768 +       return;
14769 +}
14770 +
14771 +static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
14772 +{
14773 +       struct igb_adapter *adapter = netdev_priv(netdev);
14774 +
14775 +       if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
14776 +               return -EOPNOTSUPP;
14777 +
14778 +       if (igb_wol_exclusion(adapter, wol))
14779 +               return wol->wolopts ? -EOPNOTSUPP : 0;
14780 +
14781 +       /* these settings will always override what we currently have */
14782 +       adapter->wol = 0;
14783 +
14784 +       if (wol->wolopts & WAKE_UCAST)
14785 +               adapter->wol |= E1000_WUFC_EX;
14786 +       if (wol->wolopts & WAKE_MCAST)
14787 +               adapter->wol |= E1000_WUFC_MC;
14788 +       if (wol->wolopts & WAKE_BCAST)
14789 +               adapter->wol |= E1000_WUFC_BC;
14790 +       if (wol->wolopts & WAKE_MAGIC)
14791 +               adapter->wol |= E1000_WUFC_MAG;
14792 +       device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
14793 +
14794 +       return 0;
14795 +}
14796 +
14797 +/* bit defines for adapter->led_status */
14798 +#define IGB_LED_ON             0
14799 +
14800 +static int igb_phys_id(struct net_device *netdev, u32 data)
14801 +{
14802 +       struct igb_adapter *adapter = netdev_priv(netdev);
14803 +       struct e1000_hw *hw = &adapter->hw;
14804 +       unsigned long timeout;
14805 +
14806 +       timeout = data * 1000;
14807 +
14808 +       /*
14809 +        *  msleep_interruptable only accepts unsigned int so we are limited
14810 +        * in how long a duration we can wait
14811 +        */
14812 +       if (!timeout || timeout > UINT_MAX)
14813 +               timeout = UINT_MAX;
14814 +
14815 +       e1000_blink_led(hw);
14816 +       msleep_interruptible(timeout);
14817 +
14818 +       e1000_led_off(hw);
14819 +       clear_bit(IGB_LED_ON, &adapter->led_status);
14820 +       e1000_cleanup_led(hw);
14821 +
14822 +       return 0;
14823 +}
14824 +
14825 +static int igb_set_coalesce(struct net_device *netdev,
14826 +                           struct ethtool_coalesce *ec)
14827 +{
14828 +       struct igb_adapter *adapter = netdev_priv(netdev);
14829 +       int i;
14830 +
14831 +       if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
14832 +           ((ec->rx_coalesce_usecs > 3) &&
14833 +            (ec->rx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
14834 +           (ec->rx_coalesce_usecs == 2))
14835 +               return -EINVAL;
14836 +
14837 +       /* convert to rate of irq's per second */
14838 +       if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) {
14839 +               adapter->itr = IGB_START_ITR;
14840 +               adapter->itr_setting = ec->rx_coalesce_usecs;
14841 +       } else {
14842 +               adapter->itr = ec->rx_coalesce_usecs << 2;
14843 +               adapter->itr_setting = adapter->itr;
14844 +       }
14845 +
14846 +       for (i = 0; i < adapter->num_q_vectors; i++) {
14847 +               struct igb_q_vector *q_vector = adapter->q_vector[i];
14848 +               q_vector->itr_val = adapter->itr;
14849 +               q_vector->set_itr = 1;
14850 +       }
14851 +
14852 +       return 0;
14853 +}
14854 +
14855 +static int igb_get_coalesce(struct net_device *netdev,
14856 +                           struct ethtool_coalesce *ec)
14857 +{
14858 +       struct igb_adapter *adapter = netdev_priv(netdev);
14859 +
14860 +       if (adapter->itr_setting <= 3)
14861 +               ec->rx_coalesce_usecs = adapter->itr_setting;
14862 +       else
14863 +               ec->rx_coalesce_usecs = adapter->itr_setting >> 2;
14864 +
14865 +       return 0;
14866 +}
14867 +
14868 +static int igb_nway_reset(struct net_device *netdev)
14869 +{
14870 +       struct igb_adapter *adapter = netdev_priv(netdev);
14871 +       if (netif_running(netdev))
14872 +               igb_reinit_locked(adapter);
14873 +       return 0;
14874 +}
14875 +
14876 +#ifdef HAVE_ETHTOOL_GET_SSET_COUNT
14877 +static int igb_get_sset_count(struct net_device *netdev, int sset)
14878 +{
14879 +       switch (sset) {
14880 +       case ETH_SS_STATS:
14881 +               return IGB_STATS_LEN;
14882 +       case ETH_SS_TEST:
14883 +               return IGB_TEST_LEN;
14884 +       default:
14885 +               return -ENOTSUPP;
14886 +       }
14887 +}
14888 +#else
14889 +static int igb_get_stats_count(struct net_device *netdev)
14890 +{
14891 +       return IGB_STATS_LEN;
14892 +}
14893 +
14894 +static int igb_diag_test_count(struct net_device *netdev)
14895 +{
14896 +       return IGB_TEST_LEN;
14897 +}
14898 +#endif
14899 +
14900 +static void igb_get_ethtool_stats(struct net_device *netdev,
14901 +                                  struct ethtool_stats *stats, u64 *data)
14902 +{
14903 +       struct igb_adapter *adapter = netdev_priv(netdev);
14904 +       u64 *queue_stat;
14905 +       int stat_count = sizeof(struct igb_queue_stats) / sizeof(u64);
14906 +       int j;
14907 +       int i;
14908 +       u64 restart_queue = 0, hw_csum_err = 0, hw_csum_good = 0;
14909 +#ifdef IGB_LRO
14910 +       int aggregated = 0, flushed = 0, no_desc = 0;
14911 +#endif
14912 +
14913 +       /* collect tx ring stats */
14914 +       for (i = 0; i < adapter->num_tx_queues; i++)
14915 +               restart_queue += adapter->tx_ring[i].restart_queue;
14916 +       adapter->restart_queue = restart_queue;
14917 +
14918 +
14919 +       for (i = 0; i < adapter->num_rx_queues; i++) {
14920 +               hw_csum_err += adapter->rx_ring[i].hw_csum_err;
14921 +               hw_csum_good += adapter->rx_ring[i].hw_csum_good;
14922 +#ifdef IGB_LRO
14923 +               aggregated += adapter->rx_ring[i].lro_mgr.stats.aggregated;
14924 +               flushed += adapter->rx_ring[i].lro_mgr.stats.flushed;
14925 +               no_desc += adapter->rx_ring[i].lro_mgr.stats.no_desc;
14926 +       }
14927 +       adapter->lro_aggregated = aggregated;
14928 +       adapter->lro_flushed = flushed;
14929 +       adapter->lro_no_desc = no_desc;
14930 +#else
14931 +       }
14932 +#endif
14933 +       adapter->hw_csum_err = hw_csum_err;
14934 +       adapter->hw_csum_good = hw_csum_good;
14935 +
14936 +       igb_update_stats(adapter);
14937 +
14938 +       for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
14939 +               char *p = (char *)adapter+igb_gstrings_stats[i].stat_offset;
14940 +               data[i] = (igb_gstrings_stats[i].sizeof_stat ==
14941 +                       sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
14942 +       }
14943 +       for (j = 0; j < adapter->num_tx_queues; j++) {
14944 +               int k;
14945 +               queue_stat = (u64 *)&adapter->tx_ring[j].stats;
14946 +               for (k = 0; k < stat_count; k++)
14947 +                       data[i + k] = queue_stat[k];
14948 +               i += k;
14949 +       }
14950 +       for (j = 0; j < adapter->num_rx_queues; j++) {
14951 +               int k;
14952 +               queue_stat = (u64 *)&adapter->rx_ring[j].stats;
14953 +               for (k = 0; k < stat_count; k++)
14954 +                       data[i + k] = queue_stat[k];
14955 +               i += k;
14956 +       }
14957 +}
14958 +
14959 +static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
14960 +{
14961 +       struct igb_adapter *adapter = netdev_priv(netdev);
14962 +       u8 *p = data;
14963 +       int i;
14964 +
14965 +       switch (stringset) {
14966 +       case ETH_SS_TEST:
14967 +               memcpy(data, *igb_gstrings_test,
14968 +                       IGB_TEST_LEN*ETH_GSTRING_LEN);
14969 +               break;
14970 +       case ETH_SS_STATS:
14971 +               for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
14972 +                       memcpy(p, igb_gstrings_stats[i].stat_string,
14973 +                              ETH_GSTRING_LEN);
14974 +                       p += ETH_GSTRING_LEN;
14975 +               }
14976 +               for (i = 0; i < adapter->num_tx_queues; i++) {
14977 +                       sprintf(p, "tx_queue_%u_packets", i);
14978 +                       p += ETH_GSTRING_LEN;
14979 +                       sprintf(p, "tx_queue_%u_bytes", i);
14980 +                       p += ETH_GSTRING_LEN;
14981 +               }
14982 +               for (i = 0; i < adapter->num_rx_queues; i++) {
14983 +                       sprintf(p, "rx_queue_%u_packets", i);
14984 +                       p += ETH_GSTRING_LEN;
14985 +                       sprintf(p, "rx_queue_%u_bytes", i);
14986 +                       p += ETH_GSTRING_LEN;
14987 +               }
14988 +/*             BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */
14989 +               break;
14990 +       }
14991 +}
14992 +
14993 +static struct ethtool_ops igb_ethtool_ops = {
14994 +       .get_settings           = igb_get_settings,
14995 +       .set_settings           = igb_set_settings,
14996 +       .get_drvinfo            = igb_get_drvinfo,
14997 +       .get_regs_len           = igb_get_regs_len,
14998 +       .get_regs               = igb_get_regs,
14999 +       .get_wol                = igb_get_wol,
15000 +       .set_wol                = igb_set_wol,
15001 +       .get_msglevel           = igb_get_msglevel,
15002 +       .set_msglevel           = igb_set_msglevel,
15003 +       .nway_reset             = igb_nway_reset,
15004 +       .get_link               = ethtool_op_get_link,
15005 +       .get_eeprom_len         = igb_get_eeprom_len,
15006 +       .get_eeprom             = igb_get_eeprom,
15007 +       .set_eeprom             = igb_set_eeprom,
15008 +       .get_ringparam          = igb_get_ringparam,
15009 +       .set_ringparam          = igb_set_ringparam,
15010 +       .get_pauseparam         = igb_get_pauseparam,
15011 +       .set_pauseparam         = igb_set_pauseparam,
15012 +       .get_rx_csum            = igb_get_rx_csum,
15013 +       .set_rx_csum            = igb_set_rx_csum,
15014 +       .get_tx_csum            = igb_get_tx_csum,
15015 +       .set_tx_csum            = igb_set_tx_csum,
15016 +       .get_sg                 = ethtool_op_get_sg,
15017 +       .set_sg                 = ethtool_op_set_sg,
15018 +#ifdef NETIF_F_TSO
15019 +       .get_tso                = ethtool_op_get_tso,
15020 +       .set_tso                = igb_set_tso,
15021 +#endif
15022 +#ifdef HAVE_ETHTOOL_GET_SSET_COUNT
15023 +       .get_sset_count         = igb_get_sset_count,
15024 +#else
15025 +       .get_stats_count        = igb_get_stats_count,
15026 +       .self_test_count        = igb_diag_test_count,
15027 +#endif
15028 +       .self_test              = igb_diag_test,
15029 +       .get_strings            = igb_get_strings,
15030 +       .phys_id                = igb_phys_id,
15031 +       .get_ethtool_stats      = igb_get_ethtool_stats,
15032 +#ifdef ETHTOOL_GPERMADDR
15033 +       .get_perm_addr          = ethtool_op_get_perm_addr,
15034 +#endif
15035 +       .get_coalesce           = igb_get_coalesce,
15036 +       .set_coalesce           = igb_set_coalesce,
15037 +#ifdef NETIF_F_LRO
15038 +       .get_flags              = ethtool_op_get_flags,
15039 +       .set_flags              = ethtool_op_set_flags,
15040 +#endif
15041 +};
15042 +
15043 +void igb_set_ethtool_ops(struct net_device *netdev)
15044 +{
15045 +       SET_ETHTOOL_OPS(netdev, &igb_ethtool_ops);
15046 +}
15047 +#endif /* SIOCETHTOOL */
15048 Index: linux-2.6.22/drivers/net/igb/igb_main.c
15049 ===================================================================
15050 --- /dev/null   1970-01-01 00:00:00.000000000 +0000
15051 +++ linux-2.6.22/drivers/net/igb/igb_main.c     2009-12-18 12:39:22.000000000 -0500
15052 @@ -0,0 +1,6250 @@
15053 +/*******************************************************************************
15054 +
15055 +  Intel(R) Gigabit Ethernet Linux driver
15056 +  Copyright(c) 2007-2009 Intel Corporation.
15057 +
15058 +  This program is free software; you can redistribute it and/or modify it
15059 +  under the terms and conditions of the GNU General Public License,
15060 +  version 2, as published by the Free Software Foundation.
15061 +
15062 +  This program is distributed in the hope it will be useful, but WITHOUT
15063 +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15064 +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
15065 +  more details.
15066 +
15067 +  You should have received a copy of the GNU General Public License along with
15068 +  this program; if not, write to the Free Software Foundation, Inc.,
15069 +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
15070 +
15071 +  The full GNU General Public License is included in this distribution in
15072 +  the file called "COPYING".
15073 +
15074 +  Contact Information:
15075 +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
15076 +  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
15077 +
15078 +*******************************************************************************/
15079 +
15080 +#include <linux/module.h>
15081 +#include <linux/types.h>
15082 +#include <linux/init.h>
15083 +#include <linux/vmalloc.h>
15084 +#include <linux/pagemap.h>
15085 +#include <linux/netdevice.h>
15086 +#include <linux/tcp.h>
15087 +#ifdef NETIF_F_TSO
15088 +#include <net/checksum.h>
15089 +#ifdef NETIF_F_TSO6
15090 +#include <linux/ipv6.h>
15091 +#include <net/ip6_checksum.h>
15092 +#endif
15093 +#endif
15094 +#ifdef SIOCGMIIPHY
15095 +#include <linux/mii.h>
15096 +#endif
15097 +#ifdef SIOCETHTOOL
15098 +#include <linux/ethtool.h>
15099 +#endif
15100 +#include <linux/if_vlan.h>
15101 +
15102 +#include "igb.h"
15103 +
15104 +#define DRV_DEBUG
15105 +#define DRV_HW_PERF
15106 +#define VERSION_SUFFIX
15107 +
15108 +#define DRV_VERSION "2.0.6" VERSION_SUFFIX DRV_DEBUG DRV_HW_PERF
15109 +
15110 +char igb_driver_name[] = "igb";
15111 +char igb_driver_version[] = DRV_VERSION;
15112 +static const char igb_driver_string[] =
15113 +                                "Intel(R) Gigabit Ethernet Network Driver";
15114 +static const char igb_copyright[] = "Copyright (c) 2007-2009 Intel Corporation.";
15115 +
15116 +static struct pci_device_id igb_pci_tbl[] = {
15117 +       { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576) },
15118 +       { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS) },
15119 +       { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES) },
15120 +       { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER) },
15121 +       { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES) },
15122 +       { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD) },
15123 +       { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER) },
15124 +       { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER) },
15125 +       { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES) },
15126 +       { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER) },
15127 +       /* required last entry */
15128 +       {0, }
15129 +};
15130 +
15131 +MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
15132 +
15133 +void igb_reset(struct igb_adapter *);
15134 +static int igb_setup_all_tx_resources(struct igb_adapter *);
15135 +static int igb_setup_all_rx_resources(struct igb_adapter *);
15136 +static void igb_free_all_tx_resources(struct igb_adapter *);
15137 +static void igb_free_all_rx_resources(struct igb_adapter *);
15138 +static void igb_setup_mrqc(struct igb_adapter *);
15139 +void igb_update_stats(struct igb_adapter *);
15140 +static int igb_probe(struct pci_dev *, const struct pci_device_id *);
15141 +static void __devexit igb_remove(struct pci_dev *pdev);
15142 +static int igb_sw_init(struct igb_adapter *);
15143 +static int igb_open(struct net_device *);
15144 +static int igb_close(struct net_device *);
15145 +static void igb_configure_tx(struct igb_adapter *);
15146 +static void igb_configure_rx(struct igb_adapter *);
15147 +static void igb_clean_all_tx_rings(struct igb_adapter *);
15148 +static void igb_clean_all_rx_rings(struct igb_adapter *);
15149 +static void igb_clean_tx_ring(struct igb_ring *);
15150 +static void igb_clean_rx_ring(struct igb_ring *);
15151 +static void igb_set_rx_mode(struct net_device *);
15152 +static void igb_update_phy_info(unsigned long);
15153 +static void igb_watchdog(unsigned long);
15154 +static void igb_watchdog_task(struct work_struct *);
15155 +static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *, struct net_device *,
15156 +                                           struct igb_ring *);
15157 +static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
15158 +static struct net_device_stats *igb_get_stats(struct net_device *);
15159 +static int igb_change_mtu(struct net_device *, int);
15160 +static int igb_set_mac(struct net_device *, void *);
15161 +static void igb_set_uta(struct igb_adapter *adapter);
15162 +static irqreturn_t igb_intr(int irq, void *);
15163 +static irqreturn_t igb_intr_msi(int irq, void *);
15164 +static irqreturn_t igb_msix_other(int irq, void *);
15165 +static irqreturn_t igb_msix_ring(int irq, void *);
15166 +#ifdef IGB_DCA
15167 +static void igb_update_dca(struct igb_q_vector *);
15168 +static void igb_setup_dca(struct igb_adapter *);
15169 +#endif /* IGB_DCA */
15170 +static bool igb_clean_tx_irq(struct igb_q_vector *);
15171 +static int igb_poll(struct napi_struct *, int);
15172 +static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int *, int);
15173 +#ifdef IGB_LRO
15174 +static int igb_get_skb_hdr(struct sk_buff *skb, void **, void **, u64 *, void *);
15175 +#endif
15176 +static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
15177 +static void igb_tx_timeout(struct net_device *);
15178 +static void igb_reset_task(struct work_struct *);
15179 +static void igb_vlan_rx_register(struct net_device *, struct vlan_group *);
15180 +static void igb_vlan_rx_add_vid(struct net_device *, u16);
15181 +static void igb_vlan_rx_kill_vid(struct net_device *, u16);
15182 +static void igb_restore_vlan(struct igb_adapter *);
15183 +static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8);
15184 +static void igb_ping_all_vfs(struct igb_adapter *);
15185 +static void igb_msg_task(struct igb_adapter *);
15186 +static void igb_vmm_control(struct igb_adapter *);
15187 +static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
15188 +static void igb_vf_configuration(struct pci_dev *, unsigned int);
15189 +
15190 +#ifdef CONFIG_PM
15191 +static int igb_suspend(struct pci_dev *, pm_message_t);
15192 +static int igb_resume(struct pci_dev *);
15193 +#endif
15194 +#ifndef USE_REBOOT_NOTIFIER
15195 +static void igb_shutdown(struct pci_dev *);
15196 +#else
15197 +static int igb_notify_reboot(struct notifier_block *, unsigned long, void *);
15198 +static struct notifier_block igb_notifier_reboot = {
15199 +       .notifier_call  = igb_notify_reboot,
15200 +       .next           = NULL,
15201 +       .priority       = 0
15202 +};
15203 +#endif
15204 +#ifdef IGB_DCA
15205 +static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
15206 +static struct notifier_block dca_notifier = {
15207 +       .notifier_call  = igb_notify_dca,
15208 +       .next           = NULL,
15209 +       .priority       = 0
15210 +};
15211 +#endif
15212 +
15213 +#ifdef CONFIG_NET_POLL_CONTROLLER
15214 +/* for netdump / net console */
15215 +static void igb_netpoll (struct net_device *);
15216 +#endif
15217 +
15218 +#ifdef HAVE_PCI_ERS
15219 +static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
15220 +                     pci_channel_state_t);
15221 +static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
15222 +static void igb_io_resume(struct pci_dev *);
15223 +
15224 +static struct pci_error_handlers igb_err_handler = {
15225 +       .error_detected = igb_io_error_detected,
15226 +       .slot_reset = igb_io_slot_reset,
15227 +       .resume = igb_io_resume,
15228 +};
15229 +#endif
15230 +
15231 +
15232 +static struct pci_driver igb_driver = {
15233 +       .name     = igb_driver_name,
15234 +       .id_table = igb_pci_tbl,
15235 +       .probe    = igb_probe,
15236 +       .remove   = __devexit_p(igb_remove),
15237 +#ifdef CONFIG_PM
15238 +       /* Power Managment Hooks */
15239 +       .suspend  = igb_suspend,
15240 +       .resume   = igb_resume,
15241 +#endif
15242 +#ifndef USE_REBOOT_NOTIFIER
15243 +       .shutdown = igb_shutdown,
15244 +#endif
15245 +#ifdef HAVE_PCI_ERS
15246 +       .err_handler = &igb_err_handler,
15247 +#endif
15248 +};
15249 +
15250 +MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
15251 +MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
15252 +MODULE_LICENSE("GPL");
15253 +MODULE_VERSION(DRV_VERSION);
15254 +
15255 +static void igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add)
15256 +{
15257 +       struct e1000_host_mng_dhcp_cookie *mng_cookie = &hw->mng_cookie;
15258 +       u32 index = (vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK;
15259 +       u32 mask = 1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
15260 +       u32 vfta;
15261 +
15262 +       /*
15263 +        * if this is the management vlan the only option is to add it in so
15264 +        * that the management pass through will continue to work
15265 +        */
15266 +       if ((mng_cookie->status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
15267 +           (vid == mng_cookie->vlan_id))
15268 +               add = TRUE;
15269 +
15270 +       vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
15271 +       if (add)
15272 +               vfta |= mask;
15273 +       else
15274 +               vfta &= ~mask;
15275 +
15276 +       e1000_write_vfta(hw, index, vfta);
15277 +}
15278 +
15279 +#ifdef SIOCSHWTSTAMP
15280 +/**
15281 + * igb_read_clock - read raw cycle counter (to be used by time counter)
15282 + */
15283 +static cycle_t igb_read_clock(const struct cyclecounter *tc)
15284 +{
15285 +       struct igb_adapter *adapter =
15286 +               container_of(tc, struct igb_adapter, cycles);
15287 +       struct e1000_hw *hw = &adapter->hw;
15288 +       u64 stamp = 0;
15289 +       int shift = 0;
15290 +
15291 +       stamp |= (u64)E1000_READ_REG(hw, E1000_SYSTIML) << shift;
15292 +       stamp |= (u64)E1000_READ_REG(hw, E1000_SYSTIMH) << (shift + 32);
15293 +       return stamp;
15294 +}
15295 +
15296 +#endif /* SIOCSHWTSTAMP */
15297 +static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
15298 +module_param(debug, int, 0);
15299 +MODULE_PARM_DESC(debug, "Debug level (0=none, ..., 16=all)");
15300 +
15301 +/**
15302 + * igb_init_module - Driver Registration Routine
15303 + *
15304 + * igb_init_module is the first routine called when the driver is
15305 + * loaded. All it does is register with the PCI subsystem.
15306 + **/
15307 +static int __init igb_init_module(void)
15308 +{
15309 +       int ret;
15310 +       printk(KERN_INFO "%s - version %s\n",
15311 +              igb_driver_string, igb_driver_version);
15312 +
15313 +       printk(KERN_INFO "%s\n", igb_copyright);
15314 +
15315 +#ifdef IGB_DCA
15316 +       dca_register_notify(&dca_notifier);
15317 +#endif
15318 +       ret = pci_register_driver(&igb_driver);
15319 +#ifdef USE_REBOOT_NOTIFIER
15320 +       if (ret >= 0) {
15321 +               register_reboot_notifier(&igb_notifier_reboot);
15322 +       }
15323 +#endif
15324 +       return ret;
15325 +}
15326 +
15327 +module_init(igb_init_module);
15328 +
15329 +/**
15330 + * igb_exit_module - Driver Exit Cleanup Routine
15331 + *
15332 + * igb_exit_module is called just before the driver is removed
15333 + * from memory.
15334 + **/
15335 +static void __exit igb_exit_module(void)
15336 +{
15337 +#ifdef IGB_DCA
15338 +       dca_unregister_notify(&dca_notifier);
15339 +#endif
15340 +#ifdef USE_REBOOT_NOTIFIER
15341 +       unregister_reboot_notifier(&igb_notifier_reboot);
15342 +#endif
15343 +       pci_unregister_driver(&igb_driver);
15344 +}
15345 +
15346 +module_exit(igb_exit_module);
15347 +
15348 +/**
15349 + * igb_cache_ring_register - Descriptor ring to register mapping
15350 + * @adapter: board private structure to initialize
15351 + *
15352 + * Once we know the feature-set enabled for the device, we'll cache
15353 + * the register offset the descriptor ring is assigned to.
15354 + **/
15355 +static void igb_cache_ring_register(struct igb_adapter *adapter)
15356 +{
15357 +       int i = 0, j = 0;
15358 +       u32 rbase_offset = adapter->vfs_allocated_count;
15359 +
15360 +       switch (adapter->hw.mac.type) {
15361 +       case e1000_82576:
15362 +               /* The queues are allocated for virtualization such that VF 0
15363 +                * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
15364 +                * In order to avoid collision we start at the first free queue
15365 +                * and continue consuming queues in the same sequence
15366 +                */
15367 +               if ((adapter->RSS_queues > 1) && adapter->VMDQ_queues) {
15368 +                       for (; i < adapter->RSS_queues; i++)
15369 +                               adapter->rx_ring[i].reg_idx = rbase_offset +
15370 +                                       ((i & 0x1) << 3) + (i >> 1);
15371 +#ifdef HAVE_TX_MQ
15372 +                       for (; j < adapter->RSS_queues; j++)
15373 +                               adapter->tx_ring[j].reg_idx = rbase_offset +
15374 +                                       ((j & 0x1) << 3) + (j >> 1);
15375 +#endif
15376 +               }
15377 +       case e1000_82575:
15378 +       default:
15379 +               for (; i < adapter->num_rx_queues; i++)
15380 +                       adapter->rx_ring[i].reg_idx = rbase_offset + i;
15381 +               for (; j < adapter->num_tx_queues; j++)
15382 +                       adapter->tx_ring[j].reg_idx = rbase_offset + j;
15383 +               break;
15384 +       }
15385 +}
15386 +
15387 +static void igb_free_queues(struct igb_adapter *adapter)
15388 +{
15389 +       kfree(adapter->tx_ring);
15390 +       kfree(adapter->rx_ring);
15391 +
15392 +       adapter->tx_ring = NULL;
15393 +       adapter->rx_ring = NULL;
15394 +
15395 +       adapter->num_rx_queues = 0;
15396 +       adapter->num_tx_queues = 0;
15397 +
15398 +}
15399 +
15400 +/**
15401 + * igb_alloc_queues - Allocate memory for all rings
15402 + * @adapter: board private structure to initialize
15403 + *
15404 + * We allocate one ring per queue at run-time since we don't know the
15405 + * number of queues at compile-time.
15406 + **/
15407 +static int igb_alloc_queues(struct igb_adapter *adapter)
15408 +{
15409 +       int i;
15410 +
15411 +       adapter->tx_ring = kcalloc(adapter->num_tx_queues,
15412 +                                  sizeof(struct igb_ring), GFP_KERNEL);
15413 +       if (!adapter->tx_ring)
15414 +               goto err;
15415 +
15416 +       adapter->rx_ring = kcalloc(adapter->num_rx_queues,
15417 +                                  sizeof(struct igb_ring), GFP_KERNEL);
15418 +       if (!adapter->rx_ring)
15419 +               goto err;
15420 +
15421 +       for (i = 0; i < adapter->num_tx_queues; i++) {
15422 +               struct igb_ring *ring = &(adapter->tx_ring[i]);
15423 +               ring->count = adapter->tx_ring_count;
15424 +               ring->queue_index = i;
15425 +               ring->pdev = adapter->pdev;
15426 +               /* For 82575, context index must be unique per ring. */
15427 +               if (adapter->hw.mac.type == e1000_82575)
15428 +                       ring->ctx_idx = i << 4;
15429 +
15430 +       }
15431 +       for (i = 0; i < adapter->num_rx_queues; i++) {
15432 +               struct igb_ring *ring = &(adapter->rx_ring[i]);
15433 +               ring->count = adapter->rx_ring_count;
15434 +               ring->queue_index = i;
15435 +               ring->pdev = adapter->pdev;
15436 +               ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
15437 +               ring->rx_ps_hdr_size = 0; /* disable packet split */
15438 +               ring->rx_csum = true;     /* enable rx checksum */
15439 +
15440 +#ifdef IGB_LRO
15441 +               /* Intitial LRO Settings */
15442 +               ring->lro_mgr.max_aggr = adapter->lro_max_aggr;
15443 +               ring->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS;
15444 +               ring->lro_mgr.get_skb_header = igb_get_skb_hdr;
15445 +               ring->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
15446 +               ring->lro_mgr.dev = adapter->netdev;
15447 +               ring->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
15448 +               ring->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
15449 +#endif
15450 +       }
15451 +
15452 +       igb_cache_ring_register(adapter);
15453 +
15454 +       return E1000_SUCCESS;
15455 +
15456 +err:
15457 +       igb_free_queues(adapter);
15458 +
15459 +       return -ENOMEM;
15460 +}
15461 +
15462 +static void igb_configure_lli(struct igb_adapter *adapter)
15463 +{
15464 +       struct e1000_hw *hw = &adapter->hw;
15465 +       u16 port;
15466 +
15467 +       /* LLI should only be enabled for MSI-X or MSI interrupts */
15468 +       if (!adapter->msix_entries && !(adapter->flags & IGB_FLAG_HAS_MSI))
15469 +               return;
15470 +
15471 +       if (adapter->lli_port) {
15472 +               /* use filter 0 for port */
15473 +               port = htons((u16)adapter->lli_port);
15474 +               E1000_WRITE_REG(hw, E1000_IMIR(0),
15475 +                       (port | E1000_IMIR_PORT_IM_EN));
15476 +               E1000_WRITE_REG(hw, E1000_IMIREXT(0),
15477 +                       (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
15478 +       }
15479 +
15480 +       if (adapter->flags & IGB_FLAG_LLI_PUSH) {
15481 +               /* use filter 1 for push flag */
15482 +               E1000_WRITE_REG(hw, E1000_IMIR(1),
15483 +                       (E1000_IMIR_PORT_BP | E1000_IMIR_PORT_IM_EN));
15484 +               E1000_WRITE_REG(hw, E1000_IMIREXT(1),
15485 +                       (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_PSH));
15486 +       }
15487 +
15488 +       if (adapter->lli_size) {
15489 +               /* use filter 2 for size */
15490 +               E1000_WRITE_REG(hw, E1000_IMIR(2),
15491 +                       (E1000_IMIR_PORT_BP | E1000_IMIR_PORT_IM_EN));
15492 +               E1000_WRITE_REG(hw, E1000_IMIREXT(2),
15493 +                       (adapter->lli_size | E1000_IMIREXT_CTRL_BP));
15494 +       }
15495 +
15496 +}
15497 +
15498 +#define IGB_N0_QUEUE -1
15499 +static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
15500 +{
15501 +       u32 msixbm = 0;
15502 +       struct igb_adapter *adapter = q_vector->adapter;
15503 +       struct e1000_hw *hw = &adapter->hw;
15504 +       u32 ivar, index;
15505 +       int rx_queue = IGB_N0_QUEUE;
15506 +       int tx_queue = IGB_N0_QUEUE;
15507 +
15508 +       if (q_vector->rx_ring)
15509 +               rx_queue = q_vector->rx_ring->reg_idx;
15510 +       if (q_vector->tx_ring)
15511 +               tx_queue = q_vector->tx_ring->reg_idx;
15512 +
15513 +       switch (hw->mac.type) {
15514 +       case e1000_82575:
15515 +               /* The 82575 assigns vectors using a bitmask, which matches the
15516 +                  bitmask for the EICR/EIMS/EIMC registers.  To assign one
15517 +                  or more queues to a vector, we write the appropriate bits
15518 +                  into the MSIXBM register for that vector. */
15519 +               if (rx_queue > IGB_N0_QUEUE)
15520 +                       msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
15521 +               if (tx_queue > IGB_N0_QUEUE)
15522 +                       msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
15523 +               E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), msix_vector, msixbm);
15524 +               q_vector->eims_value = msixbm;
15525 +               break;
15526 +       case e1000_82576:
15527 +               /* 82576 uses a table-based method for assigning vectors.
15528 +                  Each queue has a single entry in the table to which we write
15529 +                  a vector number along with a "valid" bit.  Sadly, the layout
15530 +                  of the table is somewhat counterintuitive. */
15531 +               if (rx_queue > IGB_N0_QUEUE) {
15532 +                       index = (rx_queue & 0x7);
15533 +                       ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
15534 +                       if (rx_queue < 8) {
15535 +                               /* vector goes into low byte of register */
15536 +                               ivar = ivar & 0xFFFFFF00;
15537 +                               ivar |= msix_vector | E1000_IVAR_VALID;
15538 +                       } else {
15539 +                               /* vector goes into third byte of register */
15540 +                               ivar = ivar & 0xFF00FFFF;
15541 +                               ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
15542 +                       }
15543 +                       E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
15544 +               }
15545 +               if (tx_queue > IGB_N0_QUEUE) {
15546 +                       index = (tx_queue & 0x7);
15547 +                       ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
15548 +                       if (tx_queue < 8) {
15549 +                               /* vector goes into second byte of register */
15550 +                               ivar = ivar & 0xFFFF00FF;
15551 +                               ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
15552 +                       } else {
15553 +                               /* vector goes into high byte of register */
15554 +                               ivar = ivar & 0x00FFFFFF;
15555 +                               ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
15556 +                       }
15557 +                       E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
15558 +               }
15559 +               q_vector->eims_value = 1 << msix_vector;
15560 +               break;
15561 +       default:
15562 +               BUG();
15563 +               break;
15564 +       }
15565 +}
15566 +
15567 +/**
15568 + * igb_configure_msix - Configure MSI-X hardware
15569 + *
15570 + * igb_configure_msix sets up the hardware to properly
15571 + * generate MSI-X interrupts.
15572 + **/
15573 +static void igb_configure_msix(struct igb_adapter *adapter)
15574 +{
15575 +       u32 tmp;
15576 +       int i, vector = 0;
15577 +       struct e1000_hw *hw = &adapter->hw;
15578 +
15579 +       adapter->eims_enable_mask = 0;
15580 +
15581 +       /* set vector for other causes, i.e. link changes */
15582 +       switch (hw->mac.type) {
15583 +       case e1000_82575:
15584 +               tmp = E1000_READ_REG(hw, E1000_CTRL_EXT);
15585 +               /* enable MSI-X PBA support*/
15586 +               tmp |= E1000_CTRL_EXT_PBA_CLR;
15587 +
15588 +               /* Auto-Mask interrupts upon ICR read. */
15589 +               tmp |= E1000_CTRL_EXT_EIAME;
15590 +               tmp |= E1000_CTRL_EXT_IRCA;
15591 +
15592 +               E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmp);
15593 +
15594 +               /* enable msix_other interrupt */
15595 +               E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), vector++,
15596 +                                     E1000_EIMS_OTHER);
15597 +               adapter->eims_other = E1000_EIMS_OTHER;
15598 +
15599 +               break;
15600 +
15601 +       case e1000_82576:
15602 +               /* Turn on MSI-X capability first, or our settings
15603 +                * won't stick.  And it will take days to debug. */
15604 +               E1000_WRITE_REG(hw, E1000_GPIE, E1000_GPIE_MSIX_MODE |
15605 +                               E1000_GPIE_PBA | E1000_GPIE_EIAME |
15606 +                               E1000_GPIE_NSICR);
15607 +
15608 +               /* enable msix_other interrupt */
15609 +               adapter->eims_other = 1 << vector;
15610 +               tmp = (vector++ | E1000_IVAR_VALID) << 8;
15611 +
15612 +               E1000_WRITE_REG(hw, E1000_IVAR_MISC, tmp);
15613 +               break;
15614 +       default:
15615 +               /* do nothing, since nothing else supports MSI-X */
15616 +               break;
15617 +       } /* switch (hw->mac.type) */
15618 +
15619 +       adapter->eims_enable_mask |= adapter->eims_other;
15620 +
15621 +       for (i = 0; i < adapter->num_q_vectors; i++) {
15622 +               struct igb_q_vector *q_vector = adapter->q_vector[i];
15623 +               igb_assign_vector(q_vector, vector++);
15624 +               adapter->eims_enable_mask |= q_vector->eims_value;
15625 +       }
15626 +
15627 +       E1000_WRITE_FLUSH(hw);
15628 +}
15629 +
15630 +/**
15631 + * igb_request_msix - Initialize MSI-X interrupts
15632 + *
15633 + * igb_request_msix allocates MSI-X vectors and requests interrupts from the
15634 + * kernel.
15635 + **/
15636 +static int igb_request_msix(struct igb_adapter *adapter)
15637 +{
15638 +       struct net_device *netdev = adapter->netdev;
15639 +       struct e1000_hw *hw = &adapter->hw;
15640 +       int i, err = 0, vector = 0;
15641 +
15642 +       err = request_irq(adapter->msix_entries[vector].vector,
15643 +                         &igb_msix_other, 0, netdev->name, adapter);
15644 +       if (err)
15645 +               goto out;
15646 +       vector++;
15647 +
15648 +       for (i = 0; i < adapter->num_q_vectors; i++) {
15649 +               struct igb_q_vector *q_vector = adapter->q_vector[i];
15650 +
15651 +               q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);
15652 +
15653 +               if (q_vector->rx_ring && q_vector->tx_ring)
15654 +                       sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
15655 +                               q_vector->rx_ring->queue_index);
15656 +               else if (q_vector->tx_ring)
15657 +                       sprintf(q_vector->name, "%s-tx-%u", netdev->name,
15658 +                               q_vector->tx_ring->queue_index);
15659 +               else if (q_vector->rx_ring)
15660 +                       sprintf(q_vector->name, "%s-rx-%u", netdev->name,
15661 +                               q_vector->rx_ring->queue_index);
15662 +               else
15663 +                       sprintf(q_vector->name, "%s-unused", netdev->name);
15664 +
15665 +               err = request_irq(adapter->msix_entries[vector].vector,
15666 +                                 &igb_msix_ring, 0, q_vector->name,
15667 +                                 q_vector);
15668 +               if (err)
15669 +                       goto out;
15670 +               vector++;
15671 +       }
15672 +
15673 +       igb_configure_msix(adapter);
15674 +       return 0;
15675 +out:
15676 +       return err;
15677 +}
15678 +
15679 +static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
15680 +{
15681 +       if (adapter->msix_entries) {
15682 +               pci_disable_msix(adapter->pdev);
15683 +               kfree(adapter->msix_entries);
15684 +               adapter->msix_entries = NULL;
15685 +       } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
15686 +               pci_disable_msi(adapter->pdev);
15687 +       }
15688 +
15689 +       adapter->num_rx_queues = 0;
15690 +       adapter->num_tx_queues = 0;
15691 +
15692 +       return;
15693 +}
15694 +
15695 +/**
15696 + * igb_free_q_vectors - Free memory allocated for interrupt vectors
15697 + * @adapter: board private structure to initialize
15698 + *
15699 + * This function frees the memory allocated to the q_vectors.  In addition if
15700 + * NAPI is enabled it will delete any references to the NAPI struct prior
15701 + * to freeing the q_vector.
15702 + **/
15703 +static void igb_free_q_vectors(struct igb_adapter *adapter)
15704 +{
15705 +       int v_idx;
15706 +
15707 +       for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
15708 +               struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
15709 +               adapter->q_vector[v_idx] = NULL;
15710 +               netif_napi_del(&q_vector->napi);
15711 +               kfree(q_vector);
15712 +       }
15713 +       adapter->num_q_vectors = 0;
15714 +}
15715 +
15716 +/**
15717 + * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
15718 + *
15719 + * This function resets the device so that it has 0 rx queues, tx queues, and
15720 + * MSI-X interrupts allocated.
15721 + */
15722 +static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
15723 +{
15724 +       igb_free_queues(adapter);
15725 +       igb_free_q_vectors(adapter);
15726 +       igb_reset_interrupt_capability(adapter);
15727 +}
15728 +
15729 +/**
15730 + * igb_set_interrupt_capability - set MSI or MSI-X if supported
15731 + *
15732 + * Attempt to configure interrupts using the best available
15733 + * capabilities of the hardware and kernel.
15734 + **/
15735 +static void igb_set_interrupt_capability(struct igb_adapter *adapter)
15736 +{
15737 +       int err;
15738 +       int numvecs, i;
15739 +
15740 +       /* Number of supported queues. */
15741 +       adapter->num_rx_queues = adapter->RSS_queues;
15742 +
15743 +       if (adapter->VMDQ_queues > 1)
15744 +               adapter->num_rx_queues += adapter->VMDQ_queues - 1;
15745 +
15746 +#ifdef HAVE_TX_MQ
15747 +       adapter->num_tx_queues = adapter->num_rx_queues;
15748 +#else
15749 +       adapter->num_tx_queues = max_t(u32, 1, adapter->VMDQ_queues);
15750 +#endif
15751 +
15752 +       switch (adapter->int_mode) {
15753 +       case IGB_INT_MODE_MSIX:
15754 +               /* start with one vector for every rx queue */
15755 +               numvecs = adapter->num_rx_queues;
15756 +
15757 +               /* if tx handler is separate add 1 for every tx queue */
15758 +               if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
15759 +                       numvecs += adapter->num_tx_queues;
15760 +
15761 +               /* store the number of vectors reserved for queues */
15762 +               adapter->num_q_vectors = numvecs;
15763 +
15764 +               /* add 1 vector for link status interrupts */
15765 +               numvecs++;
15766 +               adapter->msix_entries = kcalloc(numvecs,
15767 +                                               sizeof(struct msix_entry),
15768 +                                               GFP_KERNEL);
15769 +               if (adapter->msix_entries) {
15770 +                       for (i = 0; i < numvecs; i++)
15771 +                               adapter->msix_entries[i].entry = i;
15772 +
15773 +                       err = pci_enable_msix(adapter->pdev,
15774 +                                             adapter->msix_entries, numvecs);
15775 +                       if (err == 0)
15776 +                               break;
15777 +               }
15778 +               /* MSI-X failed, so fall through and try MSI */
15779 +               DPRINTK(PROBE, WARNING, "Failed to initialize MSI-X interrupts."
15780 +                       "  Falling back to MSI interrupts.\n");
15781 +               igb_reset_interrupt_capability(adapter);
15782 +       case IGB_INT_MODE_MSI:
15783 +               if (!pci_enable_msi(adapter->pdev))
15784 +                       adapter->flags |= IGB_FLAG_HAS_MSI;
15785 +               else
15786 +                       DPRINTK(PROBE, WARNING, "Failed to initialize MSI "
15787 +                               "interrupts. Falling back to legacy interrupts.\n");
15788 +               /* Fall through */
15789 +       case IGB_INT_MODE_LEGACY:
15790 +               /* disable advanced features and set number of queues to 1 */
15791 +               adapter->vfs_allocated_count = 0;
15792 +               adapter->VMDQ_queues = 0;
15793 +               adapter->RSS_queues = 1;
15794 +               adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
15795 +               adapter->num_rx_queues = 1;
15796 +               adapter->num_tx_queues = 1;
15797 +               adapter->num_q_vectors = 1;
15798 +               /* Don't do anything; this is system default */
15799 +               break;
15800 +       }
15801 +
15802 +#ifdef HAVE_TX_MQ
15803 +       /* Notify the stack of the (possibly) reduced Tx Queue count. */
15804 +#ifdef CONFIG_NETDEVICES_MULTIQUEUE
15805 +       adapter->netdev->egress_subqueue_count =
15806 +               min_t(u32, adapter->num_tx_queues, adapter->RSS_queues);
15807 +#else
15808 +       adapter->netdev->real_num_tx_queues =
15809 +               min_t(u32, adapter->num_tx_queues, adapter->RSS_queues);
15810 +#endif
15811 +#endif
15812 +
15813 +       return;
15814 +}
15815 +
15816 +/**
15817 + * igb_alloc_q_vectors - Allocate memory for interrupt vectors
15818 + * @adapter: board private structure to initialize
15819 + *
15820 + * We allocate one q_vector per queue interrupt.  If allocation fails we
15821 + * return -ENOMEM.
15822 + **/
15823 +static int igb_alloc_q_vectors(struct igb_adapter *adapter)
15824 +{
15825 +       struct igb_q_vector *q_vector;
15826 +       struct e1000_hw *hw = &adapter->hw;
15827 +       int v_idx;
15828 +
15829 +       for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
15830 +               q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL);
15831 +               if (!q_vector)
15832 +                       goto err_out;
15833 +               q_vector->adapter = adapter;
15834 +               q_vector->itr_val = adapter->itr;
15835 +               q_vector->itr_shift = (hw->mac.type == e1000_82575) ? 16 : 0;
15836 +               q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
15837 +               q_vector->set_itr = 1;
15838 +               netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
15839 +               adapter->q_vector[v_idx] = q_vector;
15840 +       }
15841 +       return 0;
15842 +
15843 +err_out:
15844 +       while (v_idx) {
15845 +               v_idx--;
15846 +               q_vector = adapter->q_vector[v_idx];
15847 +               netif_napi_del(&q_vector->napi);
15848 +               kfree(q_vector);
15849 +               adapter->q_vector[v_idx] = NULL;
15850 +       }
15851 +       return -ENOMEM;
15852 +}
15853 +
15854 +/**
15855 + * igb_map_ring_to_vector - maps allocated queues to vectors
15856 + *
15857 + * This function maps the recently allocated queues to vectors.
15858 + **/
15859 +static int igb_map_ring_to_vector(struct igb_adapter *adapter)
15860 +{
15861 +       struct igb_q_vector *q_vector;
15862 +       int i;
15863 +       int v_idx = 0;
15864 +
15865 +       if ((adapter->num_q_vectors  < adapter->num_rx_queues) ||
15866 +           (adapter->num_q_vectors < adapter->num_tx_queues))
15867 +               return -ENOMEM;
15868 +
15869 +       if (adapter->num_q_vectors == (adapter->num_rx_queues + adapter->num_tx_queues)) {
15870 +               for (i = 0; i < adapter->num_tx_queues; i++) {
15871 +                       q_vector = adapter->q_vector[v_idx++];
15872 +                       adapter->tx_ring[i].q_vector = q_vector;
15873 +                       q_vector->tx_ring = &adapter->tx_ring[i];
15874 +               }
15875 +               for (i = 0; i < adapter->num_rx_queues; i++) {
15876 +                       q_vector = adapter->q_vector[v_idx++];
15877 +                       adapter->rx_ring[i].q_vector = q_vector;
15878 +                       q_vector->rx_ring = &adapter->rx_ring[i];
15879 +                       q_vector->rx_ring->q_vector = q_vector;
15880 +               }
15881 +       } else {
15882 +               for (i = 0; i < adapter->num_rx_queues; i++) {
15883 +                       q_vector = adapter->q_vector[v_idx++];
15884 +                       adapter->rx_ring[i].q_vector = q_vector;
15885 +                       q_vector->rx_ring = &adapter->rx_ring[i];
15886 +                       if (i < adapter->num_tx_queues) {
15887 +                               adapter->tx_ring[i].q_vector = q_vector;
15888 +                               q_vector->tx_ring = &adapter->tx_ring[i];
15889 +                       }
15890 +               }
15891 +               for (; i < adapter->num_tx_queues; i++) {
15892 +                       q_vector = adapter->q_vector[v_idx++];
15893 +                       adapter->tx_ring[i].q_vector = q_vector;
15894 +                       q_vector->tx_ring = &adapter->tx_ring[i];
15895 +               }
15896 +       }
15897 +       return 0;
15898 +}
15899 +
15900 +/**
15901 + * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
15902 + *
15903 + * This function initializes the interrupts and allocates all of the queues.
15904 + **/
15905 +static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
15906 +{
15907 +       int err;
15908 +
15909 +       igb_set_interrupt_capability(adapter);
15910 +
15911 +       err = igb_alloc_q_vectors(adapter);
15912 +       if (err) {
15913 +               DPRINTK(PROBE, ERR, "Unable to allocate memory for q_vectors\n");
15914 +               goto err_alloc_q_vectors;
15915 +       }
15916 +
15917 +       err = igb_alloc_queues(adapter);
15918 +       if (err) {
15919 +               DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
15920 +               goto err_alloc_queues;
15921 +       }
15922 +
15923 +       err = igb_map_ring_to_vector(adapter);
15924 +       if (err) {
15925 +               DPRINTK(PROBE, ERR, "Invalid q_vector to ring mapping\n");
15926 +               goto err_map_queues;
15927 +       }
15928 +
15929 +
15930 +       return 0;
15931 +err_map_queues:
15932 +       igb_free_queues(adapter);
15933 +err_alloc_queues:
15934 +       igb_free_q_vectors(adapter);
15935 +err_alloc_q_vectors:
15936 +       igb_reset_interrupt_capability(adapter);
15937 +       return err;
15938 +}
15939 +
15940 +/**
15941 + * igb_request_irq - initialize interrupts
15942 + *
15943 + * Attempts to configure interrupts using the best available
15944 + * capabilities of the hardware and kernel.
15945 + **/
15946 +static int igb_request_irq(struct igb_adapter *adapter)
15947 +{
15948 +       struct net_device *netdev = adapter->netdev;
15949 +       struct e1000_hw *hw = &adapter->hw;
15950 +       int err = 0;
15951 +
15952 +       if (adapter->msix_entries) {
15953 +               err = igb_request_msix(adapter);
15954 +               if (!err)
15955 +                       goto request_done;
15956 +               /* fall back to MSI */
15957 +               igb_clear_interrupt_scheme(adapter);
15958 +               if (!pci_enable_msi(adapter->pdev))
15959 +                       adapter->flags |= IGB_FLAG_HAS_MSI;
15960 +               igb_free_all_tx_resources(adapter);
15961 +               igb_free_all_rx_resources(adapter);
15962 +               adapter->num_tx_queues = 1;
15963 +               adapter->num_rx_queues = 1;
15964 +               adapter->num_q_vectors = 1;
15965 +               err = igb_alloc_q_vectors(adapter);
15966 +               if (err) {
15967 +                       DPRINTK(PROBE, ERR, "Unable to allocate memory for q_vectors\n");
15968 +                       goto request_done;
15969 +               }
15970 +               err = igb_alloc_queues(adapter);
15971 +               if (err) {
15972 +                       DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
15973 +                       igb_free_q_vectors(adapter);
15974 +                       goto request_done;
15975 +               }
15976 +               igb_setup_all_tx_resources(adapter);
15977 +               igb_setup_all_rx_resources(adapter);
15978 +       } else {
15979 +               switch (hw->mac.type) {
15980 +               case e1000_82575:
15981 +                       E1000_WRITE_REG(hw, E1000_MSIXBM(0),
15982 +                                       (E1000_EICR_RX_QUEUE0 |
15983 +                                        E1000_EICR_TX_QUEUE0 |
15984 +                                        E1000_EIMS_OTHER));
15985 +                       break;
15986 +               case e1000_82576:
15987 +                       E1000_WRITE_REG(hw, E1000_IVAR0, E1000_IVAR_VALID);
15988 +                       break;
15989 +               default:
15990 +                       break;
15991 +               }
15992 +       }
15993 +       if (adapter->flags & IGB_FLAG_HAS_MSI) {
15994 +               err = request_irq(adapter->pdev->irq, &igb_intr_msi, 0,
15995 +                                 netdev->name, adapter);
15996 +               if (!err)
15997 +                       goto request_done;
15998 +
15999 +               /* fall back to legacy interrupts */
16000 +               igb_reset_interrupt_capability(adapter);
16001 +               adapter->flags &= ~IGB_FLAG_HAS_MSI;
16002 +       }
16003 +
16004 +       err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED,
16005 +                         netdev->name, adapter);
16006 +
16007 +       if (err) {
16008 +               DPRINTK(PROBE, ERR, "Error %d getting interrupt\n", err);
16009 +               goto request_done;
16010 +       }
16011 +
16012 +request_done:
16013 +       return err;
16014 +}
16015 +
16016 +static void igb_free_irq(struct igb_adapter *adapter)
16017 +{
16018 +       if (adapter->msix_entries) {
16019 +               int vector = 0, i;
16020 +
16021 +               free_irq(adapter->msix_entries[vector++].vector, adapter);
16022 +
16023 +               for (i = 0; i < adapter->num_q_vectors; i++) {
16024 +                       struct igb_q_vector *q_vector = adapter->q_vector[i];
16025 +                       free_irq(adapter->msix_entries[vector++].vector,
16026 +                                q_vector);
16027 +               }
16028 +       } else {
16029 +               free_irq(adapter->pdev->irq, adapter);
16030 +       }
16031 +}
16032 +
16033 +/**
16034 + * igb_irq_disable - Mask off interrupt generation on the NIC
16035 + * @adapter: board private structure
16036 + **/
16037 +static void igb_irq_disable(struct igb_adapter *adapter)
16038 +{
16039 +       struct e1000_hw *hw = &adapter->hw;
16040 +
16041 +       /*
16042 +        * we need to be careful when disabling interrupts.  The VFs are also
16043 +        * mapped into these registers and so clearing the bits can cause
16044 +        * issues on the VF drivers so we only need to clear what we set
16045 +        */
16046 +       if (adapter->msix_entries) {
16047 +               u32 regval = E1000_READ_REG(hw, E1000_EIAM);
16048 +               regval &= ~adapter->eims_enable_mask;
16049 +               E1000_WRITE_REG(hw, E1000_EIAM, regval);
16050 +               E1000_WRITE_REG(hw, E1000_EIMC, adapter->eims_enable_mask);
16051 +               regval = E1000_READ_REG(hw, E1000_EIAC);
16052 +               regval &= ~adapter->eims_enable_mask;
16053 +               E1000_WRITE_REG(hw, E1000_EIAC, regval);
16054 +       }
16055 +
16056 +       E1000_WRITE_REG(hw, E1000_IAM, 0);
16057 +       E1000_WRITE_REG(hw, E1000_IMC, ~0);
16058 +       E1000_WRITE_FLUSH(hw);
16059 +
16060 +       synchronize_irq(adapter->pdev->irq);
16061 +}
16062 +
16063 +/**
16064 + * igb_irq_enable - Enable default interrupt generation settings
16065 + * @adapter: board private structure
16066 + **/
16067 +static void igb_irq_enable(struct igb_adapter *adapter)
16068 +{
16069 +       struct e1000_hw *hw = &adapter->hw;
16070 +
16071 +       if (adapter->msix_entries) {
16072 +               u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC;
16073 +               u32 regval = E1000_READ_REG(hw, E1000_EIAC);
16074 +               E1000_WRITE_REG(hw, E1000_EIAC, 
16075 +                               regval | adapter->eims_enable_mask);
16076 +               regval = E1000_READ_REG(hw, E1000_EIAM);
16077 +               E1000_WRITE_REG(hw, E1000_EIAM,
16078 +                               regval | adapter->eims_enable_mask);
16079 +               E1000_WRITE_REG(hw, E1000_EIMS, adapter->eims_enable_mask);
16080 +               if (adapter->vfs_allocated_count) {
16081 +                       E1000_WRITE_REG(hw, E1000_MBVFIMR, 0xFF);
16082 +                       ims |= E1000_IMS_VMMB;
16083 +               }
16084 +               E1000_WRITE_REG(hw, E1000_IMS, ims);
16085 +       } else {
16086 +               E1000_WRITE_REG(hw, E1000_IMS, IMS_ENABLE_MASK);
16087 +               E1000_WRITE_REG(hw, E1000_IAM, IMS_ENABLE_MASK);
16088 +       }
16089 +}
16090 +
16091 +static void igb_update_mng_vlan(struct igb_adapter *adapter)
16092 +{
16093 +       struct e1000_hw *hw = &adapter->hw;
16094 +       u16 vid = adapter->hw.mng_cookie.vlan_id;
16095 +       u16 old_vid = adapter->mng_vlan_id;
16096 +
16097 +       if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
16098 +               /* add VID to filter table */
16099 +               igb_vfta_set(hw, vid, TRUE);
16100 +               adapter->mng_vlan_id = vid;
16101 +       } else {
16102 +               adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
16103 +       }
16104 +
16105 +       if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
16106 +           (vid != old_vid) &&
16107 +           !vlan_group_get_device(adapter->vlgrp, old_vid)) {
16108 +               /* remove VID from filter table */
16109 +               igb_vfta_set(hw, old_vid, FALSE);
16110 +       }
16111 +}
16112 +
16113 +/**
16114 + * igb_release_hw_control - release control of the h/w to f/w
16115 + * @adapter: address of board private structure
16116 + *
16117 + * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
16118 + * For ASF and Pass Through versions of f/w this means that the
16119 + * driver is no longer loaded.
16120 + *
16121 + **/
16122 +static void igb_release_hw_control(struct igb_adapter *adapter)
16123 +{
16124 +       struct e1000_hw *hw = &adapter->hw;
16125 +       u32 ctrl_ext;
16126 +
16127 +       /* Let firmware take over control of h/w */
16128 +       ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
16129 +       E1000_WRITE_REG(hw, E1000_CTRL_EXT,
16130 +                       ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
16131 +}
16132 +
16133 +/**
16134 + * igb_get_hw_control - get control of the h/w from f/w
16135 + * @adapter: address of board private structure
16136 + *
16137 + * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
16138 + * For ASF and Pass Through versions of f/w this means that
16139 + * the driver is loaded.
16140 + *
16141 + **/
16142 +static void igb_get_hw_control(struct igb_adapter *adapter)
16143 +{
16144 +       struct e1000_hw *hw = &adapter->hw;
16145 +       u32 ctrl_ext;
16146 +
16147 +       /* Let firmware know the driver has taken over */
16148 +       ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
16149 +       E1000_WRITE_REG(hw, E1000_CTRL_EXT,
16150 +                       ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
16151 +}
16152 +
16153 +/**
16154 + * igb_configure - configure the hardware for RX and TX
16155 + * @adapter: private board structure
16156 + **/
16157 +static void igb_configure(struct igb_adapter *adapter)
16158 +{
16159 +       struct net_device *netdev = adapter->netdev;
16160 +       int i;
16161 +
16162 +       igb_get_hw_control(adapter);
16163 +       igb_set_rx_mode(netdev);
16164 +
16165 +       igb_restore_vlan(adapter);
16166 +
16167 +       igb_setup_tctl(adapter);
16168 +       igb_setup_mrqc(adapter);
16169 +       igb_setup_rctl(adapter);
16170 +
16171 +       igb_configure_tx(adapter);
16172 +       igb_configure_rx(adapter);
16173 +
16174 +       e1000_rx_fifo_flush_82575(&adapter->hw);
16175 +#ifdef CONFIG_NETDEVICES_MULTIQUEUE
16176 +       if (adapter->num_tx_queues > 1)
16177 +               netdev->features |= NETIF_F_MULTI_QUEUE;
16178 +       else
16179 +               netdev->features &= ~NETIF_F_MULTI_QUEUE;
16180 +
16181 +#endif
16182 +       /* call IGB_DESC_UNUSED which always leaves
16183 +        * at least 1 descriptor unused to make sure
16184 +        * next_to_use != next_to_clean */
16185 +       for (i = 0; i < adapter->num_rx_queues; i++) {
16186 +               struct igb_ring *ring = &adapter->rx_ring[i];
16187 +               if (igb_alloc_rx_buffers_adv(ring, IGB_DESC_UNUSED(ring)))
16188 +                       adapter->alloc_rx_buff_failed++;
16189 +       }
16190 +
16191 +
16192 +       adapter->tx_queue_len = netdev->tx_queue_len;
16193 +}
16194 +
16195 +
16196 +/**
16197 + * igb_up - Open the interface and prepare it to handle traffic
16198 + * @adapter: board private structure
16199 + **/
16200 +int igb_up(struct igb_adapter *adapter)
16201 +{
16202 +       struct e1000_hw *hw = &adapter->hw;
16203 +       int i;
16204 +
16205 +       /* hardware has been reset, we need to reload some things */
16206 +       igb_configure(adapter);
16207 +
16208 +       clear_bit(__IGB_DOWN, &adapter->state);
16209 +
16210 +       for (i = 0; i < adapter->num_q_vectors; i++) {
16211 +               struct igb_q_vector *q_vector = adapter->q_vector[i];
16212 +               napi_enable(&q_vector->napi);
16213 +       }
16214 +       if (adapter->msix_entries)
16215 +               igb_configure_msix(adapter);
16216 +
16217 +       igb_configure_lli(adapter);
16218 +
16219 +       /* Clear any pending interrupts. */
16220 +       E1000_READ_REG(hw, E1000_ICR);
16221 +       igb_irq_enable(adapter);
16222 +
16223 +       /* notify VFs that reset has been completed */
16224 +       if (adapter->vfs_allocated_count) {
16225 +               u32 reg_data = E1000_READ_REG(hw, E1000_CTRL_EXT);
16226 +               reg_data |= E1000_CTRL_EXT_PFRSTD;
16227 +               E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg_data);
16228 +       }
16229 +
16230 +       /* start the watchdog. */
16231 +       hw->mac.get_link_status = 1;
16232 +       mod_timer(&adapter->watchdog_timer, jiffies + 1);
16233 +
16234 +       return 0;
16235 +}
16236 +
16237 +void igb_down(struct igb_adapter *adapter)
16238 +{
16239 +       struct net_device *netdev = adapter->netdev;
16240 +       struct e1000_hw *hw = &adapter->hw;
16241 +       u32 tctl, rctl;
16242 +       int i;
16243 +
16244 +       /* signal that we're down so the interrupt handler does not
16245 +        * reschedule our watchdog timer */
16246 +       set_bit(__IGB_DOWN, &adapter->state);
16247 +
16248 +       /* disable receives in the hardware */
16249 +       rctl = E1000_READ_REG(hw, E1000_RCTL);
16250 +       E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
16251 +       /* flush and sleep below */
16252 +
16253 +       netif_tx_stop_all_queues(netdev);
16254 +
16255 +       /* disable transmits in the hardware */
16256 +       tctl = E1000_READ_REG(hw, E1000_TCTL);
16257 +       tctl &= ~E1000_TCTL_EN;
16258 +       E1000_WRITE_REG(hw, E1000_TCTL, tctl);
16259 +       /* flush both disables and wait for them to finish */
16260 +       E1000_WRITE_FLUSH(hw);
16261 +       msleep(10);
16262 +
16263 +       for (i = 0; i < adapter->num_q_vectors; i++) {
16264 +               struct igb_q_vector *q_vector = adapter->q_vector[i];
16265 +               napi_disable(&q_vector->napi);
16266 +       }
16267 +
16268 +       igb_irq_disable(adapter);
16269 +
16270 +       del_timer_sync(&adapter->watchdog_timer);
16271 +       del_timer_sync(&adapter->phy_info_timer);
16272 +
16273 +       netdev->tx_queue_len = adapter->tx_queue_len;
16274 +       netif_carrier_off(netdev);
16275 +
16276 +       /* record the stats before reset */
16277 +       igb_update_stats(adapter);
16278 +
16279 +       adapter->link_speed = 0;
16280 +       adapter->link_duplex = 0;
16281 +#ifdef HAVE_PCI_ERS
16282 +       if (!pci_channel_offline(adapter->pdev))
16283 +               igb_reset(adapter);
16284 +#else
16285 +       igb_reset(adapter);
16286 +#endif
16287 +       igb_clean_all_tx_rings(adapter);
16288 +       igb_clean_all_rx_rings(adapter);
16289 +#ifdef IGB_DCA
16290 +
16291 +       /* since we reset the hardware DCA settings were cleared */
16292 +       igb_setup_dca(adapter);
16293 +#endif
16294 +}
16295 +
16296 +void igb_reinit_locked(struct igb_adapter *adapter)
16297 +{
16298 +       WARN_ON(in_interrupt());
16299 +       while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
16300 +               msleep(1);
16301 +       igb_down(adapter);
16302 +       igb_up(adapter);
16303 +       clear_bit(__IGB_RESETTING, &adapter->state);
16304 +}
16305 +
16306 +void igb_reset(struct igb_adapter *adapter)
16307 +{
16308 +       struct e1000_hw *hw = &adapter->hw;
16309 +       struct e1000_mac_info *mac = &hw->mac;
16310 +       struct e1000_fc_info *fc = &hw->fc;
16311 +       u32 pba = 0, tx_space, min_tx_space, min_rx_space;
16312 +       u16 hwm;
16313 +
16314 +       /* Repartition Pba for greater than 9k mtu
16315 +        * To take effect CTRL.RST is required.
16316 +        */
16317 +       switch (mac->type) {
16318 +       case e1000_82576:
16319 +               pba = E1000_READ_REG(hw, E1000_RXPBS);
16320 +               pba &= E1000_RXPBS_SIZE_MASK_82576;
16321 +               break;
16322 +       case e1000_82575:
16323 +       default:
16324 +               pba = E1000_PBA_34K;
16325 +               break;
16326 +       }
16327 +
16328 +       if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
16329 +           (mac->type < e1000_82576)) {
16330 +               /* adjust PBA for jumbo frames */
16331 +               E1000_WRITE_REG(hw, E1000_PBA, pba);
16332 +
16333 +               /* To maintain wire speed transmits, the Tx FIFO should be
16334 +                * large enough to accommodate two full transmit packets,
16335 +                * rounded up to the next 1KB and expressed in KB.  Likewise,
16336 +                * the Rx FIFO should be large enough to accommodate at least
16337 +                * one full receive packet and is similarly rounded up and
16338 +                * expressed in KB. */
16339 +               pba = E1000_READ_REG(hw, E1000_PBA);
16340 +               /* upper 16 bits has Tx packet buffer allocation size in KB */
16341 +               tx_space = pba >> 16;
16342 +               /* lower 16 bits has Rx packet buffer allocation size in KB */
16343 +               pba &= 0xffff;
16344 +               /* the tx fifo also stores 16 bytes of information about the tx
16345 +                * but don't include ethernet FCS because hardware appends it */
16346 +               min_tx_space = (adapter->max_frame_size +
16347 +                               sizeof(struct e1000_tx_desc) -
16348 +                               ETH_FCS_LEN) * 2;
16349 +               min_tx_space = ALIGN(min_tx_space, 1024);
16350 +               min_tx_space >>= 10;
16351 +               /* software strips receive CRC, so leave room for it */
16352 +               min_rx_space = adapter->max_frame_size;
16353 +               min_rx_space = ALIGN(min_rx_space, 1024);
16354 +               min_rx_space >>= 10;
16355 +
16356 +               /* If current Tx allocation is less than the min Tx FIFO size,
16357 +                * and the min Tx FIFO size is less than the current Rx FIFO
16358 +                * allocation, take space away from current Rx allocation */
16359 +               if (tx_space < min_tx_space &&
16360 +                   ((min_tx_space - tx_space) < pba)) {
16361 +                       pba = pba - (min_tx_space - tx_space);
16362 +
16363 +                       /* if short on rx space, rx wins and must trump tx
16364 +                        * adjustment */
16365 +                       if (pba < min_rx_space)
16366 +                               pba = min_rx_space;
16367 +               }
16368 +               E1000_WRITE_REG(hw, E1000_PBA, pba);
16369 +       }
16370 +
16371 +       /* flow control settings */
16372 +       /* The high water mark must be low enough to fit one full frame
16373 +        * (or the size used for early receive) above it in the Rx FIFO.
16374 +        * Set it to the lower of:
16375 +        * - 90% of the Rx FIFO size, or
16376 +        * - the full Rx FIFO size minus one full frame */
16377 +       hwm = min(((pba << 10) * 9 / 10),
16378 +                       ((pba << 10) - 2 * adapter->max_frame_size));
16379 +
16380 +       if (mac->type < e1000_82576) {
16381 +               fc->high_water = hwm & 0xFFF8;  /* 8-byte granularity */
16382 +               fc->low_water = fc->high_water - 8;
16383 +       } else {
16384 +               fc->high_water = hwm & 0xFFF0;  /* 16-byte granularity */
16385 +               fc->low_water = fc->high_water - 16;
16386 +       }
16387 +       fc->pause_time = 0xFFFF;
16388 +       fc->send_xon = 1;
16389 +       fc->current_mode = fc->requested_mode;
16390 +
16391 +       /* disable receive for all VFs and wait one second */
16392 +       if (adapter->vfs_allocated_count) {
16393 +               int i;
16394 +               for (i = 0 ; i < adapter->vfs_allocated_count; i++)
16395 +                       adapter->vf_data[i].flags = 0;
16396 +
16397 +               /* ping all the active vfs to let them know we are going down */
16398 +               igb_ping_all_vfs(adapter);
16399 +
16400 +               /* disable transmits and receives */
16401 +               E1000_WRITE_REG(hw, E1000_VFRE, 0);
16402 +               E1000_WRITE_REG(hw, E1000_VFTE, 0);
16403 +       }
16404 +
16405 +       /* Allow time for pending master requests to run */
16406 +       e1000_reset_hw(hw);
16407 +       E1000_WRITE_REG(hw, E1000_WUC, 0);
16408 +
16409 +       if (e1000_init_hw(hw))
16410 +               DPRINTK(PROBE, ERR, "Hardware Error\n");
16411 +
16412 +       igb_update_mng_vlan(adapter);
16413 +
16414 +       /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
16415 +       E1000_WRITE_REG(hw, E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
16416 +
16417 +       e1000_get_phy_info(hw);
16418 +}
16419 +
16420 +#ifdef HAVE_NET_DEVICE_OPS
16421 +static const struct net_device_ops igb_netdev_ops = {
16422 +       .ndo_open               = igb_open,
16423 +       .ndo_stop               = igb_close,
16424 +       .ndo_start_xmit         = igb_xmit_frame_adv,
16425 +       .ndo_get_stats          = igb_get_stats,
16426 +       .ndo_set_rx_mode        = igb_set_rx_mode,
16427 +       .ndo_set_multicast_list = igb_set_rx_mode,
16428 +       .ndo_set_mac_address    = igb_set_mac,
16429 +       .ndo_change_mtu         = igb_change_mtu,
16430 +       .ndo_do_ioctl           = igb_ioctl,
16431 +       .ndo_tx_timeout         = igb_tx_timeout,
16432 +       .ndo_validate_addr      = eth_validate_addr,
16433 +       .ndo_vlan_rx_register   = igb_vlan_rx_register,
16434 +       .ndo_vlan_rx_add_vid    = igb_vlan_rx_add_vid,
16435 +       .ndo_vlan_rx_kill_vid   = igb_vlan_rx_kill_vid,
16436 +#ifdef CONFIG_NET_POLL_CONTROLLER
16437 +       .ndo_poll_controller    = igb_netpoll,
16438 +#endif
16439 +};
16440 +#endif /* HAVE_NET_DEVICE_OPS */
16441 +
16442 +/**
16443 + * igb_probe - Device Initialization Routine
16444 + * @pdev: PCI device information struct
16445 + * @ent: entry in igb_pci_tbl
16446 + *
16447 + * Returns 0 on success, negative on failure
16448 + *
16449 + * igb_probe initializes an adapter identified by a pci_dev structure.
16450 + * The OS initialization, configuring of the adapter private structure,
16451 + * and a hardware reset occur.
16452 + **/
16453 +static int __devinit igb_probe(struct pci_dev *pdev,
16454 +                               const struct pci_device_id *ent)
16455 +{
16456 +       struct net_device *netdev;
16457 +       struct igb_adapter *adapter;
16458 +       struct e1000_hw *hw;
16459 +       int i, err, pci_using_dac;
16460 +       u16 eeprom_data = 0;
16461 +       static int cards_found;
16462 +       static int global_quad_port_a; /* global quad port a indication */
16463 +
16464 +       err = pci_enable_device_mem(pdev);
16465 +       if (err)
16466 +               return err;
16467 +
16468 +       pci_using_dac = 0;
16469 +       err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
16470 +       if (!err) {
16471 +               err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
16472 +               if (!err)
16473 +                       pci_using_dac = 1;
16474 +       } else {
16475 +               err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
16476 +               if (err) {
16477 +                       err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
16478 +                       if (err) {
16479 +                               IGB_ERR("No usable DMA configuration, "
16480 +                                       "aborting\n");
16481 +                               goto err_dma;
16482 +                       }
16483 +               }
16484 +       }
16485 +
16486 +#ifndef HAVE_ASPM_QUIRKS
16487 +       /* 82575 requires that the pci-e link partner disable the L0s state */
16488 +       switch (pdev->device) {
16489 +       case E1000_DEV_ID_82575EB_COPPER:
16490 +       case E1000_DEV_ID_82575EB_FIBER_SERDES:
16491 +       case E1000_DEV_ID_82575GB_QUAD_COPPER:
16492 +               pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);
16493 +       default:
16494 +               break;
16495 +       }
16496 +
16497 +#endif /* HAVE_ASPM_QUIRKS */
16498 +       err = pci_request_selected_regions(pdev,
16499 +                                          pci_select_bars(pdev,
16500 +                                                           IORESOURCE_MEM),
16501 +                                          igb_driver_name);
16502 +       if (err)
16503 +               goto err_pci_reg;
16504 +
16505 +       pci_enable_pcie_error_reporting(pdev);
16506 +
16507 +       pci_set_master(pdev);
16508 +
16509 +       err = -ENOMEM;
16510 +#ifdef HAVE_TX_MQ
16511 +       netdev = alloc_etherdev_mq(sizeof(struct igb_adapter), IGB_ABS_MAX_TX_QUEUES);
16512 +#else
16513 +       netdev = alloc_etherdev(sizeof(struct igb_adapter));
16514 +#endif /* HAVE_TX_MQ */
16515 +       if (!netdev)
16516 +               goto err_alloc_etherdev;
16517 +
16518 +       SET_MODULE_OWNER(netdev);
16519 +       SET_NETDEV_DEV(netdev, &pdev->dev);
16520 +
16521 +       pci_set_drvdata(pdev, netdev);
16522 +       adapter = netdev_priv(netdev);
16523 +       adapter->netdev = netdev;
16524 +       adapter->pdev = pdev;
16525 +       hw = &adapter->hw;
16526 +       hw->back = adapter;
16527 +       adapter->msg_enable = (1 << debug) - 1;
16528 +
16529 +#ifdef HAVE_PCI_ERS
16530 +       err = pci_save_state(pdev);
16531 +       if (err)
16532 +               goto err_ioremap;
16533 +#endif
16534 +       err = -EIO;
16535 +       hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
16536 +                             pci_resource_len(pdev, 0));
16537 +       if (!hw->hw_addr)
16538 +               goto err_ioremap;
16539 +
16540 +#ifdef HAVE_NET_DEVICE_OPS
16541 +       netdev->netdev_ops = &igb_netdev_ops;
16542 +#else /* HAVE_NET_DEVICE_OPS */
16543 +       netdev->open = &igb_open;
16544 +       netdev->stop = &igb_close;
16545 +       netdev->get_stats = &igb_get_stats;
16546 +#ifdef HAVE_SET_RX_MODE
16547 +       netdev->set_rx_mode = &igb_set_rx_mode;
16548 +#endif
16549 +       netdev->set_multicast_list = &igb_set_rx_mode;
16550 +       netdev->set_mac_address = &igb_set_mac;
16551 +       netdev->change_mtu = &igb_change_mtu;
16552 +       netdev->do_ioctl = &igb_ioctl;
16553 +#ifdef HAVE_TX_TIMEOUT
16554 +       netdev->tx_timeout = &igb_tx_timeout;
16555 +#endif
16556 +       netdev->vlan_rx_register = igb_vlan_rx_register;
16557 +       netdev->vlan_rx_add_vid = igb_vlan_rx_add_vid;
16558 +       netdev->vlan_rx_kill_vid = igb_vlan_rx_kill_vid;
16559 +#ifdef CONFIG_NET_POLL_CONTROLLER
16560 +       netdev->poll_controller = igb_netpoll;
16561 +#endif
16562 +       netdev->hard_start_xmit = &igb_xmit_frame_adv;
16563 +#endif /* HAVE_NET_DEVICE_OPS */
16564 +       igb_set_ethtool_ops(netdev);
16565 +#ifdef HAVE_TX_TIMEOUT
16566 +       netdev->watchdog_timeo = 5 * HZ;
16567 +#endif
16568 +
16569 +       strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
16570 +
16571 +       adapter->bd_number = cards_found;
16572 +
16573 +       /* setup the private structure */
16574 +       err = igb_sw_init(adapter);
16575 +       if (err)
16576 +               goto err_sw_init;
16577 +
16578 +       e1000_get_bus_info(hw);
16579 +
16580 +       hw->phy.autoneg_wait_to_complete = FALSE;
16581 +       hw->mac.adaptive_ifs = FALSE;
16582 +
16583 +       /* Copper options */
16584 +       if (hw->phy.media_type == e1000_media_type_copper) {
16585 +               hw->phy.mdix = AUTO_ALL_MODES;
16586 +               hw->phy.disable_polarity_correction = FALSE;
16587 +               hw->phy.ms_type = e1000_ms_hw_default;
16588 +       }
16589 +
16590 +       if (e1000_check_reset_block(hw))
16591 +               DPRINTK(PROBE, INFO,
16592 +                       "PHY reset is blocked due to SOL/IDER session.\n");
16593 +
16594 +       netdev->features = NETIF_F_SG |
16595 +                          NETIF_F_IP_CSUM |
16596 +                          NETIF_F_HW_VLAN_TX |
16597 +                          NETIF_F_HW_VLAN_RX |
16598 +                          NETIF_F_HW_VLAN_FILTER;
16599 +
16600 +#ifdef NETIF_F_IPV6_CSUM
16601 +       netdev->features |= NETIF_F_IPV6_CSUM;
16602 +#endif
16603 +#ifdef NETIF_F_TSO
16604 +       netdev->features |= NETIF_F_TSO;
16605 +#ifdef NETIF_F_TSO6
16606 +       netdev->features |= NETIF_F_TSO6;
16607 +#endif
16608 +#endif /* NETIF_F_TSO */
16609 +
16610 +#ifdef IGB_LRO
16611 +       netdev->features |= NETIF_F_LRO;
16612 +#endif
16613 +#ifdef NETIF_F_GRO
16614 +       netdev->features |= NETIF_F_GRO;
16615 +#endif
16616 +
16617 +#ifdef HAVE_NETDEV_VLAN_FEATURES
16618 +       netdev->vlan_features |= NETIF_F_TSO;
16619 +       netdev->vlan_features |= NETIF_F_TSO6;
16620 +       netdev->vlan_features |= NETIF_F_IP_CSUM;
16621 +       netdev->vlan_features |= NETIF_F_IPV6_CSUM;
16622 +       netdev->vlan_features |= NETIF_F_SG;
16623 +
16624 +#endif
16625 +       if (pci_using_dac)
16626 +               netdev->features |= NETIF_F_HIGHDMA;
16627 +
16628 +       if (hw->mac.type >= e1000_82576)
16629 +               netdev->features |= NETIF_F_SCTP_CSUM;
16630 +
16631 +       adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
16632 +
16633 +       /* before reading the NVM, reset the controller to put the device in a
16634 +        * known good starting state */
16635 +       e1000_reset_hw(hw);
16636 +
16637 +       /* make sure the NVM is good */
16638 +       if (e1000_validate_nvm_checksum(hw) < 0) {
16639 +               DPRINTK(PROBE, ERR, "The NVM Checksum Is Not Valid\n");
16640 +               err = -EIO;
16641 +               goto err_eeprom;
16642 +       }
16643 +
16644 +       /* copy the MAC address out of the NVM */
16645 +       if (e1000_read_mac_addr(hw))
16646 +               DPRINTK(PROBE, ERR, "NVM Read Error\n");
16647 +       memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
16648 +#ifdef ETHTOOL_GPERMADDR
16649 +       memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);
16650 +
16651 +       if (!is_valid_ether_addr(netdev->perm_addr)) {
16652 +#else
16653 +       if (!is_valid_ether_addr(netdev->dev_addr)) {
16654 +#endif
16655 +               DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
16656 +               err = -EIO;
16657 +               goto err_eeprom;
16658 +       }
16659 +
16660 +       init_timer(&adapter->watchdog_timer);
16661 +       adapter->watchdog_timer.function = &igb_watchdog;
16662 +       adapter->watchdog_timer.data = (unsigned long) adapter;
16663 +
16664 +       init_timer(&adapter->phy_info_timer);
16665 +       adapter->phy_info_timer.function = &igb_update_phy_info;
16666 +       adapter->phy_info_timer.data = (unsigned long) adapter;
16667 +
16668 +       INIT_WORK(&adapter->reset_task, igb_reset_task);
16669 +       INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
16670 +
16671 +       /* Initialize link properties that are user-changeable */
16672 +       adapter->fc_autoneg = true;
16673 +       hw->mac.autoneg = true;
16674 +       hw->phy.autoneg_advertised = 0x2f;
16675 +
16676 +       hw->fc.requested_mode = e1000_fc_default;
16677 +       hw->fc.current_mode = e1000_fc_default;
16678 +
16679 +       e1000_validate_mdi_setting(hw);
16680 +
16681 +       /* Initial Wake on LAN setting If APM wake is enabled in the EEPROM,
16682 +        * enable the ACPI Magic Packet filter
16683 +        */
16684 +
16685 +       if (hw->bus.func == 0)
16686 +               e1000_read_nvm(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
16687 +       else if (hw->bus.func == 1)
16688 +               e1000_read_nvm(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
16689 +
16690 +       if (eeprom_data & IGB_EEPROM_APME)
16691 +               adapter->eeprom_wol |= E1000_WUFC_MAG;
16692 +
16693 +       /* now that we have the eeprom settings, apply the special cases where
16694 +        * the eeprom may be wrong or the board simply won't support wake on
16695 +        * lan on a particular port */
16696 +       switch (pdev->device) {
16697 +       case E1000_DEV_ID_82575GB_QUAD_COPPER:
16698 +               adapter->eeprom_wol = 0;
16699 +               break;
16700 +       case E1000_DEV_ID_82575EB_FIBER_SERDES:
16701 +       case E1000_DEV_ID_82576_FIBER:
16702 +       case E1000_DEV_ID_82576_SERDES:
16703 +               /* Wake events only supported on port A for dual fiber
16704 +                * regardless of eeprom setting */
16705 +               if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FUNC_1)
16706 +                       adapter->eeprom_wol = 0;
16707 +               break;
16708 +       case E1000_DEV_ID_82576_QUAD_COPPER:
16709 +               /* if quad port adapter, disable WoL on all but port A */
16710 +               if (global_quad_port_a != 0)
16711 +                       adapter->eeprom_wol = 0;
16712 +               else
16713 +                       adapter->flags |= IGB_FLAG_QUAD_PORT_A;
16714 +               /* Reset for multiple quad port adapters */
16715 +               if (++global_quad_port_a == 4)
16716 +                       global_quad_port_a = 0;
16717 +               break;
16718 +       }
16719 +
16720 +       /* initialize the wol settings based on the eeprom settings */
16721 +       adapter->wol = adapter->eeprom_wol;
16722 +       device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
16723 +
16724 +       /* reset the hardware with the new settings */
16725 +       igb_reset(adapter);
16726 +
16727 +       /* let the f/w know that the h/w is now under the control of the
16728 +        * driver. */
16729 +       igb_get_hw_control(adapter);
16730 +
16731 +       /* tell the stack to leave us alone until igb_open() is called */
16732 +       netif_carrier_off(netdev);
16733 +       netif_tx_stop_all_queues(netdev);
16734 +
16735 +       strncpy(netdev->name, "eth%d", IFNAMSIZ);
16736 +       err = register_netdev(netdev);
16737 +       if (err)
16738 +               goto err_register;
16739 +
16740 +#ifdef IGB_DCA
16741 +       if (dca_add_requester(&pdev->dev) == E1000_SUCCESS) {
16742 +               adapter->flags |= IGB_FLAG_DCA_ENABLED;
16743 +               DPRINTK(PROBE, INFO, "DCA enabled\n");
16744 +               igb_setup_dca(adapter);
16745 +       }
16746 +
16747 +#endif
16748 +#ifdef SIOCSHWTSTAMP
16749 +       switch (hw->mac.type) {
16750 +       case e1000_82576:
16751 +               /*
16752 +                * Initialize hardware timer: we keep it running just in case
16753 +                * that some program needs it later on.
16754 +                */
16755 +               memset(&adapter->cycles, 0, sizeof(adapter->cycles));
16756 +               adapter->cycles.read = igb_read_clock;
16757 +               adapter->cycles.mask = CLOCKSOURCE_MASK(64);
16758 +               adapter->cycles.mult = 1;
16759 +               /**
16760 +                * Scale the NIC clock cycle by a large factor so that
16761 +                * relatively small clock corrections can be added or
16762 +                * subtracted at each clock tick. The drawbacks of a large
16763 +                * factor are a) that the clock register overflows more quickly
16764 +                * (not such a big deal) and b) that the increment per tick has
16765 +                * to fit into 24 bits.  As a result we need to use a shift of
16766 +                * 19 so we can fit a value of 16 into the TIMINCA register.
16767 +                */
16768 +               adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
16769 +               E1000_WRITE_REG(hw, E1000_TIMINCA,
16770 +                               (1 << E1000_TIMINCA_16NS_SHIFT) |
16771 +                               (16 << IGB_82576_TSYNC_SHIFT));
16772 +
16773 +               /* Set registers so that rollover occurs soon to test this. */
16774 +               E1000_WRITE_REG(hw, E1000_SYSTIML, 0x00000000);
16775 +               E1000_WRITE_REG(hw, E1000_SYSTIMH, 0xFF800000);
16776 +               E1000_WRITE_FLUSH(hw);
16777 +
16778 +               timecounter_init(&adapter->clock,
16779 +                                &adapter->cycles,
16780 +                                ktime_to_ns(ktime_get_real()));
16781 +               /*
16782 +                * Synchronize our NIC clock against system wall clock. NIC
16783 +                * time stamp reading requires ~3us per sample, each sample
16784 +                * was pretty stable even under load => only require 10
16785 +                * samples for each offset comparison.
16786 +                */
16787 +               memset(&adapter->compare, 0, sizeof(adapter->compare));
16788 +               adapter->compare.source = &adapter->clock;
16789 +               adapter->compare.target = ktime_get_real;
16790 +               adapter->compare.num_samples = 10;
16791 +               timecompare_update(&adapter->compare, 0);
16792 +               break;
16793 +       case e1000_82575:
16794 +               /* 82575 does not support timesync */
16795 +       default:
16796 +               break;
16797 +       }
16798 +
16799 +#endif /* SIOCSHWTSTAMP */
16800 +       DPRINTK(PROBE, INFO, "Intel(R) Gigabit Ethernet Network Connection\n");
16801 +       /* print bus type/speed/width info */
16802 +       DPRINTK(PROBE, INFO, "(PCIe:%s:%s) ",
16803 +             ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" : "unknown"),
16804 +             ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
16805 +              (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
16806 +              (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
16807 +               "unknown"));
16808 +
16809 +       for (i = 0; i < 6; i++)
16810 +               printk("%2.2x%c", netdev->dev_addr[i], i == 5 ? '\n' : ':');
16811 +
16812 +       for (i = 0; i < adapter->vfs_allocated_count; i++)
16813 +               igb_vf_configuration(pdev, (i | 0x10000000));
16814 +
16815 +       DPRINTK(PROBE, INFO,
16816 +               "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
16817 +               adapter->msix_entries ? "MSI-X" :
16818 +               adapter->flags & IGB_FLAG_HAS_MSI ? "MSI" :
16819 +               "legacy",
16820 +               adapter->num_rx_queues, adapter->num_tx_queues);
16821 +
16822 +       cards_found++;
16823 +       return 0;
16824 +
16825 +err_register:
16826 +       igb_release_hw_control(adapter);
16827 +err_eeprom:
16828 +       if (!e1000_check_reset_block(hw))
16829 +               e1000_phy_hw_reset(hw);
16830 +
16831 +       if (hw->flash_address)
16832 +               iounmap(hw->flash_address);
16833 +err_sw_init:
16834 +       igb_clear_interrupt_scheme(adapter);
16835 +       iounmap(hw->hw_addr);
16836 +err_ioremap:
16837 +       free_netdev(netdev);
16838 +err_alloc_etherdev:
16839 +       pci_release_selected_regions(pdev,
16840 +                                    pci_select_bars(pdev, IORESOURCE_MEM));
16841 +err_pci_reg:
16842 +err_dma:
16843 +       pci_disable_device(pdev);
16844 +       return err;
16845 +}
16846 +
16847 +/**
16848 + * igb_remove - Device Removal Routine
16849 + * @pdev: PCI device information struct
16850 + *
16851 + * igb_remove is called by the PCI subsystem to alert the driver
16852 + * that it should release a PCI device.  The could be caused by a
16853 + * Hot-Plug event, or because the driver is going to be removed from
16854 + * memory.
16855 + **/
16856 +static void __devexit igb_remove(struct pci_dev *pdev)
16857 +{
16858 +       struct net_device *netdev = pci_get_drvdata(pdev);
16859 +       struct igb_adapter *adapter = netdev_priv(netdev);
16860 +       struct e1000_hw *hw = &adapter->hw;
16861 +
16862 +       /* flush_scheduled_work() may reschedule our watchdog task, so
16863 +        * explicitly disable watchdog tasks from being rescheduled  */
16864 +       set_bit(__IGB_DOWN, &adapter->state);
16865 +       del_timer_sync(&adapter->watchdog_timer);
16866 +       del_timer_sync(&adapter->phy_info_timer);
16867 +
16868 +       flush_scheduled_work();
16869 +
16870 +
16871 +#ifdef IGB_DCA
16872 +       if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
16873 +               DPRINTK(PROBE, INFO, "DCA disabled\n");
16874 +               dca_remove_requester(&pdev->dev);
16875 +               adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
16876 +               E1000_WRITE_REG(hw, E1000_DCA_CTRL, 1);
16877 +       }
16878 +#endif
16879 +
16880 +       /* Release control of h/w to f/w.  If f/w is AMT enabled, this
16881 +        * would have already happened in close and is redundant. */
16882 +       igb_release_hw_control(adapter);
16883 +
16884 +       unregister_netdev(netdev);
16885 +
16886 +       if (!e1000_check_reset_block(hw))
16887 +               e1000_phy_hw_reset(hw);
16888 +
16889 +       igb_clear_interrupt_scheme(adapter);
16890 +
16891 +#ifdef CONFIG_PCI_IOV
16892 +       if (adapter->vf_data) {
16893 +               /* disable iov and allow time for transactions to clear */
16894 +               pci_disable_sriov(pdev);
16895 +               msleep(500);
16896 +
16897 +               kfree(adapter->vf_data);
16898 +               adapter->vf_data = NULL;
16899 +               E1000_WRITE_REG(&adapter->hw, E1000_IOVCTL,
16900 +                               E1000_IOVCTL_REUSE_VFQ);
16901 +               msleep(100);
16902 +               dev_info(&adapter->pdev->dev, "IOV Disabled\n");
16903 +       }
16904 +#endif
16905 +
16906 +       iounmap(hw->hw_addr);
16907 +       if (hw->flash_address)
16908 +               iounmap(adapter->hw.flash_address);
16909 +       pci_release_selected_regions(pdev,
16910 +                                    pci_select_bars(pdev, IORESOURCE_MEM));
16911 +
16912 +       free_netdev(netdev);
16913 +
16914 +       pci_disable_pcie_error_reporting(pdev);
16915 +
16916 +       pci_disable_device(pdev);
16917 +}
16918 +
16919 +/**
16920 + * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
16921 + * @adapter: board private structure to initialize
16922 + *
16923 + * This function initializes the vf specific data storage and then attempts to
16924 + * allocate the VFs.  The reason for ordering it this way is because it is much
16925 + * more expensive time wise to disable SR-IOV than it is to allocate and free
16926 + * the memory for the VFs.
16927 + **/
16928 +static void __devinit igb_probe_vfs(struct igb_adapter *adapter)
16929 +{
16930 +#ifdef CONFIG_PCI_IOV
16931 +       struct pci_dev *pdev = adapter->pdev;
16932 +
16933 +       if (adapter->vfs_allocated_count) {
16934 +               adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
16935 +                                          sizeof(struct vf_data_storage),
16936 +                                          GFP_KERNEL);
16937 +               /* if allocation failed then we do not support SR-IOV */
16938 +               if (!adapter->vf_data) {
16939 +                       adapter->vfs_allocated_count = 0;
16940 +                       dev_err(&pdev->dev, "Unable to allocate memory for VF "
16941 +                               "Data Storage\n");
16942 +               }
16943 +       }
16944 +
16945 +       if (pci_enable_sriov(pdev, adapter->vfs_allocated_count)) {
16946 +               kfree(adapter->vf_data);
16947 +               adapter->vf_data = NULL;
16948 +#endif /* CONFIG_PCI_IOV */
16949 +               adapter->vfs_allocated_count = 0;
16950 +#ifdef CONFIG_PCI_IOV
16951 +       } else {
16952 +               dev_info(&pdev->dev, "IOV1 VFs enabled := %d\n",
16953 +                        adapter->vfs_allocated_count);
16954 +       }
16955 +
16956 +#endif /* CONFIG_PCI_IOV */
16957 +}
16958 +/**
16959 + * igb_sw_init - Initialize general software structures (struct igb_adapter)
16960 + * @adapter: board private structure to initialize
16961 + *
16962 + * igb_sw_init initializes the Adapter private data structure.
16963 + * Fields are initialized based on PCI device information and
16964 + * OS network device settings (MTU size).
16965 + **/
16966 +static int __devinit igb_sw_init(struct igb_adapter *adapter)
16967 +{
16968 +       struct e1000_hw *hw = &adapter->hw;
16969 +       struct net_device *netdev = adapter->netdev;
16970 +       struct pci_dev *pdev = adapter->pdev;
16971 +
16972 +       /* PCI config space info */
16973 +
16974 +       hw->vendor_id = pdev->vendor;
16975 +       hw->device_id = pdev->device;
16976 +       hw->subsystem_vendor_id = pdev->subsystem_vendor;
16977 +       hw->subsystem_device_id = pdev->subsystem_device;
16978 +
16979 +       pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
16980 +
16981 +       pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
16982 +
16983 +       adapter->tx_ring_count = IGB_DEFAULT_TXD;
16984 +       adapter->rx_ring_count = IGB_DEFAULT_RXD;
16985 +       adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
16986 +       adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
16987 +
16988 +       /* Initialize the hardware-specific values */
16989 +       if (e1000_setup_init_funcs(hw, TRUE)) {
16990 +               DPRINTK(PROBE, ERR, "Hardware Initialization Failure\n");
16991 +               return -EIO;
16992 +       }
16993 +
16994 +       igb_check_options(adapter);
16995 +
16996 +       if (igb_init_interrupt_scheme(adapter)) {
16997 +               DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
16998 +               return -ENOMEM;
16999 +       }
17000 +
17001 +       igb_probe_vfs(adapter);
17002 +
17003 +       /* Explicitly disable IRQ since the NIC can be in any state. */
17004 +       igb_irq_disable(adapter);
17005 +
17006 +       set_bit(__IGB_DOWN, &adapter->state);
17007 +       return 0;
17008 +}
17009 +
17010 +/**
17011 + * igb_open - Called when a network interface is made active
17012 + * @netdev: network interface device structure
17013 + *
17014 + * Returns 0 on success, negative value on failure
17015 + *
17016 + * The open entry point is called when a network interface is made
17017 + * active by the system (IFF_UP).  At this point all resources needed
17018 + * for transmit and receive operations are allocated, the interrupt
17019 + * handler is registered with the OS, the watchdog timer is started,
17020 + * and the stack is notified that the interface is ready.
17021 + **/
17022 +static int igb_open(struct net_device *netdev)
17023 +{
17024 +       struct igb_adapter *adapter = netdev_priv(netdev);
17025 +       struct e1000_hw *hw = &adapter->hw;
17026 +       int err;
17027 +       int i;
17028 +
17029 +       /* disallow open during test */
17030 +       if (test_bit(__IGB_TESTING, &adapter->state))
17031 +               return -EBUSY;
17032 +
17033 +       /* allocate transmit descriptors */
17034 +       err = igb_setup_all_tx_resources(adapter);
17035 +       if (err)
17036 +               goto err_setup_tx;
17037 +
17038 +       /* allocate receive descriptors */
17039 +       err = igb_setup_all_rx_resources(adapter);
17040 +       if (err)
17041 +               goto err_setup_rx;
17042 +
17043 +       /* e1000_power_up_phy(adapter); */
17044 +
17045 +       /* before we allocate an interrupt, we must be ready to handle it.
17046 +        * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
17047 +        * as soon as we call pci_request_irq, so we have to setup our
17048 +        * clean_rx handler before we do so.  */
17049 +       igb_configure(adapter);
17050 +
17051 +       err = igb_request_irq(adapter);
17052 +       if (err)
17053 +               goto err_req_irq;
17054 +
17055 +       /* From here on the code is the same as igb_up() */
17056 +       clear_bit(__IGB_DOWN, &adapter->state);
17057 +
17058 +       for (i = 0; i < adapter->num_q_vectors; i++) {
17059 +               struct igb_q_vector *q_vector = adapter->q_vector[i];
17060 +               napi_enable(&q_vector->napi);
17061 +       }
17062 +       igb_configure_lli(adapter);
17063 +
17064 +       /* Clear any pending interrupts. */
17065 +       E1000_READ_REG(hw, E1000_ICR);
17066 +
17067 +       igb_irq_enable(adapter);
17068 +
17069 +       /* notify VFs that reset has been completed */
17070 +       if (adapter->vfs_allocated_count) {
17071 +               u32 reg_data = E1000_READ_REG(hw, E1000_CTRL_EXT);
17072 +               reg_data |= E1000_CTRL_EXT_PFRSTD;
17073 +               E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg_data);
17074 +       }
17075 +
17076 +       netif_tx_start_all_queues(netdev);
17077 +
17078 +       /* start the watchdog. */
17079 +       hw->mac.get_link_status = 1;
17080 +       mod_timer(&adapter->watchdog_timer, jiffies + 1);
17081 +
17082 +       return E1000_SUCCESS;
17083 +
17084 +err_req_irq:
17085 +       igb_release_hw_control(adapter);
17086 +       /* e1000_power_down_phy(adapter); */
17087 +       igb_free_all_rx_resources(adapter);
17088 +err_setup_rx:
17089 +       igb_free_all_tx_resources(adapter);
17090 +err_setup_tx:
17091 +       igb_reset(adapter);
17092 +
17093 +       return err;
17094 +}
17095 +
17096 +/**
17097 + * igb_close - Disables a network interface
17098 + * @netdev: network interface device structure
17099 + *
17100 + * Returns 0, this is not allowed to fail
17101 + *
17102 + * The close entry point is called when an interface is de-activated
17103 + * by the OS.  The hardware is still under the driver's control, but
17104 + * needs to be disabled.  A global MAC reset is issued to stop the
17105 + * hardware, and all transmit and receive resources are freed.
17106 + **/
17107 +static int igb_close(struct net_device *netdev)
17108 +{
17109 +       struct igb_adapter *adapter = netdev_priv(netdev);
17110 +
17111 +       WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
17112 +       igb_down(adapter);
17113 +
17114 +       igb_free_irq(adapter);
17115 +
17116 +       igb_free_all_tx_resources(adapter);
17117 +       igb_free_all_rx_resources(adapter);
17118 +
17119 +       return 0;
17120 +}
17121 +
17122 +/**
17123 + * igb_setup_tx_resources - allocate Tx resources (Descriptors)
17124 + * @tx_ring: tx descriptor ring (for a specific queue) to setup
17125 + *
17126 + * Return 0 on success, negative on failure
17127 + **/
17128 +int igb_setup_tx_resources(struct igb_ring *tx_ring)
17129 +{
17130 +       struct pci_dev *pdev = tx_ring->pdev;
17131 +       int size;
17132 +
17133 +       size = sizeof(struct igb_buffer) * tx_ring->count;
17134 +       tx_ring->buffer_info = vmalloc(size);
17135 +       if (!tx_ring->buffer_info)
17136 +               goto err;
17137 +       memset(tx_ring->buffer_info, 0, size);
17138 +
17139 +       /* round up to nearest 4K */
17140 +       tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
17141 +       tx_ring->size = ALIGN(tx_ring->size, 4096);
17142 +
17143 +       tx_ring->desc = pci_alloc_consistent(pdev,
17144 +                                            tx_ring->size,
17145 +                                            &tx_ring->dma);
17146 +
17147 +       if (!tx_ring->desc)
17148 +               goto err;
17149 +
17150 +       tx_ring->next_to_use = 0;
17151 +       tx_ring->next_to_clean = 0;
17152 +       return 0;
17153 +
17154 +err:
17155 +       vfree(tx_ring->buffer_info);
17156 +       dev_err(&pdev->dev, "Unable to allocate memory for the "
17157 +               "transmit descriptor ring\n");
17158 +       return -ENOMEM;
17159 +}
17160 +
17161 +/**
17162 + * igb_setup_all_tx_resources - wrapper to allocate Tx resources
17163 + *                               (Descriptors) for all queues
17164 + * @adapter: board private structure
17165 + *
17166 + * Return 0 on success, negative on failure
17167 + **/
17168 +static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
17169 +{
17170 +       int i, err = 0;
17171 +
17172 +       for (i = 0; i < adapter->num_tx_queues; i++) {
17173 +               err = igb_setup_tx_resources(&adapter->tx_ring[i]);
17174 +               if (err) {
17175 +                       DPRINTK(PROBE, ERR,
17176 +                               "Allocation for Tx Queue %u failed\n", i);
17177 +                       for (i--; i >= 0; i--)
17178 +                               igb_free_tx_resources(&adapter->tx_ring[i]);
17179 +                       break;
17180 +               }
17181 +       }
17182 +
17183 +#ifdef HAVE_TX_MQ
17184 +       for (i = 0; i < IGB_ABS_MAX_TX_QUEUES; i++) {
17185 +#ifdef CONFIG_NETDEVICES_MULTIQUEUE
17186 +               int r_idx = i % adapter->netdev->egress_subqueue_count;
17187 +#else
17188 +               int r_idx = i % adapter->netdev->real_num_tx_queues;
17189 +#endif
17190 +               adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx];
17191 +       }
17192 +#endif
17193 +       return err;
17194 +}
17195 +
17196 +/**
17197 + * igb_setup_tctl - configure the transmit control registers
17198 + * @adapter: Board private structure
17199 + **/
17200 +void igb_setup_tctl(struct igb_adapter *adapter)
17201 +{
17202 +       struct e1000_hw *hw = &adapter->hw;
17203 +       u32 tctl;
17204 +
17205 +       /* disable queue 0 which is enabled by default on 82575 and 82576 */
17206 +       E1000_WRITE_REG(hw, E1000_TXDCTL(0), 0);
17207 +
17208 +       /* Program the Transmit Control Register */
17209 +       tctl = E1000_READ_REG(hw, E1000_TCTL);
17210 +       tctl &= ~E1000_TCTL_CT;
17211 +       tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
17212 +               (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
17213 +
17214 +       e1000_config_collision_dist(hw);
17215 +
17216 +       /* Enable transmits */
17217 +       tctl |= E1000_TCTL_EN;
17218 +
17219 +       E1000_WRITE_REG(hw, E1000_TCTL, tctl);
17220 +}
17221 +
17222 +/**
17223 + * igb_configure_tx_ring - Configure transmit ring after Reset
17224 + * @adapter: board private structure
17225 + * @ring: tx ring to configure
17226 + *
17227 + * Configure a transmit ring after a reset.
17228 + **/
17229 +void igb_configure_tx_ring(struct igb_adapter *adapter,
17230 +                           struct igb_ring *ring)
17231 +{
17232 +       struct e1000_hw *hw = &adapter->hw;
17233 +       u32 txdctl;
17234 +       u64 tdba = ring->dma;
17235 +       int reg_idx = ring->reg_idx;
17236 +
17237 +       /* disable the queue */
17238 +       txdctl = E1000_READ_REG(hw, E1000_TXDCTL(reg_idx));
17239 +       E1000_WRITE_REG(hw, E1000_TXDCTL(reg_idx),
17240 +                       txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
17241 +       E1000_WRITE_FLUSH(hw);
17242 +       mdelay(10);
17243 +
17244 +       E1000_WRITE_REG(hw, E1000_TDLEN(reg_idx),
17245 +                       ring->count * sizeof(struct e1000_tx_desc));
17246 +       E1000_WRITE_REG(hw, E1000_TDBAL(reg_idx),
17247 +                       tdba & 0x00000000ffffffffULL);
17248 +       E1000_WRITE_REG(hw, E1000_TDBAH(reg_idx), tdba >> 32);
17249 +
17250 +       ring->head = hw->hw_addr + E1000_TDH(reg_idx);
17251 +       ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
17252 +       writel(0, ring->head);
17253 +       writel(0, ring->tail);
17254 +
17255 +       txdctl |= IGB_TX_PTHRESH;
17256 +       txdctl |= IGB_TX_HTHRESH << 8;
17257 +       txdctl |= IGB_TX_WTHRESH << 16;
17258 +
17259 +       txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
17260 +       E1000_WRITE_REG(hw, E1000_TXDCTL(reg_idx), txdctl);
17261 +}
17262 +
17263 +/**
17264 + * igb_configure_tx - Configure transmit Unit after Reset
17265 + * @adapter: board private structure
17266 + *
17267 + * Configure the Tx unit of the MAC after a reset.
17268 + **/
17269 +static void igb_configure_tx(struct igb_adapter *adapter)
17270 +{
17271 +       int i;
17272 +
17273 +       for (i = 0; i < adapter->num_tx_queues; i++)
17274 +               igb_configure_tx_ring(adapter, &adapter->tx_ring[i]);
17275 +
17276 +}
17277 +
17278 +/**
17279 + * igb_setup_rx_resources - allocate Rx resources (Descriptors)
17280 + * @rx_ring:    rx descriptor ring (for a specific queue) to setup
17281 + *
17282 + * Returns 0 on success, negative on failure
17283 + **/
17284 +int igb_setup_rx_resources(struct igb_ring *rx_ring)
17285 +{
17286 +       struct pci_dev *pdev = rx_ring->pdev;
17287 +       int size, desc_len;
17288 +
17289 +#ifdef IGB_LRO
17290 +       size = sizeof(struct net_lro_desc) * MAX_LRO_DESCRIPTORS;
17291 +       rx_ring->lro_mgr.lro_arr = vmalloc(size);
17292 +       if (!rx_ring->lro_mgr.lro_arr)
17293 +               goto err;
17294 +       memset(rx_ring->lro_mgr.lro_arr, 0, size);
17295 +#endif /* IGB_LRO */
17296 +
17297 +       size = sizeof(struct igb_buffer) * rx_ring->count;
17298 +       rx_ring->buffer_info = vmalloc(size);
17299 +       if (!rx_ring->buffer_info)
17300 +               goto err;
17301 +       memset(rx_ring->buffer_info, 0, size);
17302 +
17303 +       desc_len = sizeof(union e1000_adv_rx_desc);
17304 +
17305 +       /* Round up to nearest 4K */
17306 +       rx_ring->size = rx_ring->count * desc_len;
17307 +       rx_ring->size = ALIGN(rx_ring->size, 4096);
17308 +
17309 +       rx_ring->desc = pci_alloc_consistent(pdev,
17310 +                                            rx_ring->size,
17311 +                                            &rx_ring->dma);
17312 +
17313 +       if (!rx_ring->desc)
17314 +               goto err;
17315 +
17316 +       rx_ring->next_to_clean = 0;
17317 +       rx_ring->next_to_use = 0;
17318 +
17319 +
17320 +       return 0;
17321 +
17322 +err:
17323 +#ifdef IGB_LRO
17324 +       vfree(rx_ring->lro_mgr.lro_arr);
17325 +       rx_ring->lro_mgr.lro_arr = NULL;
17326 +#endif
17327 +       vfree(rx_ring->buffer_info);
17328 +       rx_ring->buffer_info = NULL;
17329 +       dev_err(&pdev->dev, "Unable to allocate memory for the "
17330 +               "receive descriptor ring\n");
17331 +       return -ENOMEM;
17332 +}
17333 +
17334 +/**
17335 + * igb_setup_all_rx_resources - wrapper to allocate Rx resources
17336 + *                               (Descriptors) for all queues
17337 + * @adapter: board private structure
17338 + *
17339 + * Return 0 on success, negative on failure
17340 + **/
17341 +static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
17342 +{
17343 +       int i, err = 0;
17344 +
17345 +       for (i = 0; i < adapter->num_rx_queues; i++) {
17346 +               err = igb_setup_rx_resources(&adapter->rx_ring[i]);
17347 +               if (err) {
17348 +                       DPRINTK(PROBE, ERR,
17349 +                               "Allocation for Rx Queue %u failed\n", i);
17350 +                       for (i--; i >= 0; i--)
17351 +                               igb_free_rx_resources(&adapter->rx_ring[i]);
17352 +                       break;
17353 +               }
17354 +       }
17355 +
17356 +       return err;
17357 +}
17358 +
17359 +/**
17360 + * igb_setup_mrqc - configure the multiple receive queue control registers
17361 + * @adapter: Board private structure
17362 + **/
17363 +static void igb_setup_mrqc(struct igb_adapter *adapter)
17364 +{
17365 +       struct e1000_hw *hw = &adapter->hw;
17366 +       u32 mrqc, rxcsum;
17367 +       u32 j, num_rx_queues, shift = 0, shift2 = 0;
17368 +       union e1000_reta {
17369 +               u32 dword;
17370 +               u8  bytes[4];
17371 +       } reta;
17372 +       static const u8 rsshash[40] = {
17373 +               0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
17374 +               0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
17375 +               0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
17376 +               0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
17377 +
17378 +       /* Fill out hash function seeds */
17379 +       for (j = 0; j < 10; j++) {
17380 +               u32 rsskey = rsshash[(j * 4)];
17381 +               rsskey |= rsshash[(j * 4) + 1] << 8;
17382 +               rsskey |= rsshash[(j * 4) + 2] << 16;
17383 +               rsskey |= rsshash[(j * 4) + 3] << 24;
17384 +               E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), j, rsskey);
17385 +       }
17386 +
17387 +       num_rx_queues = adapter->RSS_queues;
17388 +
17389 +       if (adapter->vfs_allocated_count || adapter->VMDQ_queues) {
17390 +               /* 82575 and 82576 support 2 RSS queues for VMDq */
17391 +               switch (hw->mac.type) {
17392 +               case e1000_82576:
17393 +                       shift = 3;
17394 +                       num_rx_queues = 2;
17395 +                       break;
17396 +               case e1000_82575:
17397 +                       shift = 2;
17398 +                       shift2 = 6;
17399 +               default:
17400 +                       break;
17401 +               }
17402 +       } else {
17403 +               if (hw->mac.type == e1000_82575)
17404 +                       shift = 6;
17405 +       }
17406 +
17407 +       for (j = 0; j < (32 * 4); j++) {
17408 +               reta.bytes[j & 3] = (j % num_rx_queues) << shift;
17409 +               if (shift2)
17410 +                       reta.bytes[j & 3] |= num_rx_queues << shift2;
17411 +               if ((j & 3) == 3)
17412 +                       E1000_WRITE_REG(hw, E1000_RETA(j >> 2), reta.dword);
17413 +       }
17414 +
17415 +       /*
17416 +        * Disable raw packet checksumming so that RSS hash is placed in
17417 +        * descriptor on writeback.  No need to enable TCP/UDP/IP checksum
17418 +        * offloads as they are enabled by default
17419 +        */
17420 +       rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
17421 +       rxcsum |= E1000_RXCSUM_PCSD;
17422 +
17423 +       if (adapter->hw.mac.type >= e1000_82576)
17424 +               /* Enable Receive Checksum Offload for SCTP */
17425 +               rxcsum |= E1000_RXCSUM_CRCOFL;
17426 +
17427 +       /* Don't need to set TUOFL or IPOFL, they default to 1 */
17428 +       E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
17429 +
17430 +       /* If VMDq is enabled then we set the appropriate mode for that, else
17431 +        * we default to RSS so that an RSS hash is calculated per packet even
17432 +        * if we are only using one queue */
17433 +       if (adapter->vfs_allocated_count || adapter->VMDQ_queues) {
17434 +               if (hw->mac.type > e1000_82575) {
17435 +                       /* Set the default pool for the PF's first queue */
17436 +                       u32 vtctl = E1000_READ_REG(hw, E1000_VT_CTL);
17437 +                       vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
17438 +                                  E1000_VT_CTL_DISABLE_DEF_POOL);
17439 +                       vtctl |= adapter->vfs_allocated_count <<
17440 +                               E1000_VT_CTL_DEFAULT_POOL_SHIFT;
17441 +                       E1000_WRITE_REG(hw, E1000_VT_CTL, vtctl);
17442 +               } else if (adapter->RSS_queues > 1) {
17443 +                       /* set default queue for pool 1 to queue 2 */
17444 +                       E1000_WRITE_REG(hw, E1000_VT_CTL,
17445 +                                       adapter->RSS_queues << 7);
17446 +               }
17447 +               if (adapter->RSS_queues > 1)
17448 +                       mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
17449 +               else
17450 +                       mrqc = E1000_MRQC_ENABLE_VMDQ;
17451 +       } else {
17452 +               mrqc = E1000_MRQC_ENABLE_RSS_4Q;
17453 +       }
17454 +       igb_vmm_control(adapter);
17455 +
17456 +       mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
17457 +                E1000_MRQC_RSS_FIELD_IPV4_TCP);
17458 +       mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
17459 +                E1000_MRQC_RSS_FIELD_IPV6_TCP);
17460 +       mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
17461 +                E1000_MRQC_RSS_FIELD_IPV6_UDP);
17462 +       mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
17463 +                E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
17464 +
17465 +       E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
17466 +}
17467 +
17468 +/**
17469 + * igb_setup_rctl - configure the receive control registers
17470 + * @adapter: Board private structure
17471 + **/
17472 +void igb_setup_rctl(struct igb_adapter *adapter)
17473 +{
17474 +       struct e1000_hw *hw = &adapter->hw;
17475 +       u32 rctl;
17476 +
17477 +       rctl = E1000_READ_REG(hw, E1000_RCTL);
17478 +
17479 +       rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
17480 +       rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
17481 +
17482 +       rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
17483 +               (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
17484 +
17485 +       /*
17486 +        * enable stripping of CRC. It's unlikely this will break BMC
17487 +        * redirection as it did with e1000. Newer features require
17488 +        * that the HW strips the CRC.
17489 +        */
17490 +       rctl |= E1000_RCTL_SECRC;
17491 +
17492 +
17493 +       /* disable store bad packets and clear size bits. */
17494 +       rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
17495 +
17496 +       /* enable LPE to prevent packets larger than max_frame_size */
17497 +       rctl |= E1000_RCTL_LPE;
17498 +
17499 +       /* disable rx queue 0 which is enabled by default on 82575 and 82576 */
17500 +       E1000_WRITE_REG(hw, E1000_RXDCTL(0), 0);
17501 +
17502 +       /* Attention!!!  For SR-IOV PF driver operations you must enable
17503 +        * queue drop for all VF and PF queues to prevent head of line blocking
17504 +        * if an un-trusted VF does not provide descriptors to hardware.
17505 +        */
17506 +       if (adapter->vfs_allocated_count) {
17507 +               /* set all queue drop enable bits */
17508 +               E1000_WRITE_REG(hw, E1000_QDE, 0xFF);
17509 +
17510 +       }
17511 +
17512 +       E1000_WRITE_REG(hw, E1000_RCTL, rctl);
17513 +}
17514 +
17515 +static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size, int vfn)
17516 +{
17517 +       struct e1000_hw *hw = &adapter->hw;
17518 +       u32 vmolr;
17519 +
17520 +       /* if it isn't the PF check to see if VFs are enabled and
17521 +        * increase the size to support vlan tags */
17522 +       if (vfn < adapter->vfs_allocated_count &&
17523 +           adapter->vf_data[vfn].vlans_enabled)
17524 +               size += VLAN_TAG_SIZE;
17525 +
17526 +       vmolr = E1000_READ_REG(hw, E1000_VMOLR(vfn));
17527 +       vmolr &= ~E1000_VMOLR_RLPML_MASK;
17528 +       vmolr |= size | E1000_VMOLR_LPE;
17529 +       E1000_WRITE_REG(hw, E1000_VMOLR(vfn), vmolr);
17530 +
17531 +       return 0;
17532 +}
17533 +
17534 +
17535 +/**
17536 + * igb_set_rlpml - set receive large packet maximum length
17537 + * @adapter: board private structure
17538 + *
17539 + * Configure the maximum size of packets that will be received
17540 + */
17541 +static void igb_set_rlpml(struct igb_adapter *adapter)
17542 +{
17543 +       int max_frame_size = adapter->max_frame_size;
17544 +       struct e1000_hw *hw = &adapter->hw;
17545 +       u16 pf_id = adapter->vfs_allocated_count;
17546 +
17547 +       if (adapter->vlgrp)
17548 +               max_frame_size += VLAN_TAG_SIZE;
17549 +       if (adapter->VMDQ_queues) {
17550 +               int i;
17551 +               for (i = 0; i < adapter->VMDQ_queues; i++)
17552 +                       igb_set_vf_rlpml(adapter, max_frame_size, pf_id + i);
17553 +               max_frame_size = MAX_JUMBO_FRAME_SIZE;
17554 +       }
17555 +       E1000_WRITE_REG(hw, E1000_RLPML, max_frame_size);
17556 +
17557 +}
17558 +
17559 +static inline void igb_set_vmolr(struct igb_adapter *adapter, int vfn)
17560 +{
17561 +       struct e1000_hw *hw = &adapter->hw;
17562 +       u32 vmolr;
17563 +
17564 +       /*
17565 +        * This register exists only on 82576 and newer so if we are older then
17566 +        * we should exit and do nothing
17567 +        */
17568 +       if (hw->mac.type < e1000_82576)
17569 +               return;
17570 +
17571 +       vmolr = E1000_READ_REG(hw, E1000_VMOLR(vfn));
17572 +       vmolr |= E1000_VMOLR_AUPE |        /* Accept untagged packets */
17573 +                E1000_VMOLR_STRVLAN;      /* Strip vlan tags */
17574 +
17575 +       /* clear all bits that might not be set */
17576 +       vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
17577 +
17578 +       if (adapter->RSS_queues > 1 && vfn == adapter->vfs_allocated_count)
17579 +               vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
17580 +       /*
17581 +        * for VMDq only allow the VFs and pool 0 to accept broadcast and
17582 +        * multicast packets
17583 +        */
17584 +       if (vfn <= adapter->vfs_allocated_count)
17585 +               vmolr |= E1000_VMOLR_BAM;          /* Accept broadcast */
17586 +
17587 +       E1000_WRITE_REG(hw, E1000_VMOLR(vfn), vmolr);
17588 +}
17589 +
17590 +/**
17591 + * igb_configure_rx_ring - Configure a receive ring after Reset
17592 + * @adapter: board private structure
17593 + * @ring: receive ring to be configured
17594 + *
17595 + * Configure the Rx unit of the MAC after a reset.
17596 + **/
17597 +void igb_configure_rx_ring(struct igb_adapter *adapter,
17598 +                           struct igb_ring *ring)
17599 +{
17600 +       struct e1000_hw *hw = &adapter->hw;
17601 +       u64 rdba = ring->dma;
17602 +       int reg_idx = ring->reg_idx;
17603 +       u32 srrctl, rxdctl;
17604 +
17605 +       /* disable the queue */
17606 +       rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(reg_idx));
17607 +       E1000_WRITE_REG(hw, E1000_RXDCTL(reg_idx),
17608 +                       rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
17609 +
17610 +       /* Set DMA base address registers */
17611 +       E1000_WRITE_REG(hw, E1000_RDBAL(reg_idx),
17612 +                       rdba & 0x00000000ffffffffULL);
17613 +       E1000_WRITE_REG(hw, E1000_RDBAH(reg_idx), rdba >> 32);
17614 +       E1000_WRITE_REG(hw, E1000_RDLEN(reg_idx),
17615 +                      ring->count * sizeof(union e1000_adv_rx_desc));
17616 +
17617 +       /* initialize head and tail */
17618 +       ring->head = hw->hw_addr + E1000_RDH(reg_idx);
17619 +       ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
17620 +       writel(0, ring->head);
17621 +       writel(0, ring->tail);
17622 +
17623 +       /* set descriptor configuration */
17624 +       srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
17625 +                E1000_SRRCTL_BSIZEPKT_SHIFT;
17626 +#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
17627 +       srrctl |= ALIGN(ring->rx_ps_hdr_size, 64) <<
17628 +                 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
17629 +       if (ring->rx_ps_hdr_size)
17630 +               srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
17631 +       else
17632 +#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
17633 +               srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
17634 +
17635 +       E1000_WRITE_REG(hw, E1000_SRRCTL(reg_idx), srrctl);
17636 +
17637 +       /* set filtering for VMDQ pools */
17638 +       igb_set_vmolr(adapter, reg_idx & 0x7);
17639 +
17640 +       /* enable receive descriptor fetching */
17641 +       rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(reg_idx));
17642 +       rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
17643 +       rxdctl &= 0xFFF00000;
17644 +       rxdctl |= IGB_RX_PTHRESH;
17645 +       rxdctl |= IGB_RX_HTHRESH << 8;
17646 +       rxdctl |= IGB_RX_WTHRESH << 16;
17647 +       E1000_WRITE_REG(hw, E1000_RXDCTL(reg_idx), rxdctl);
17648 +}
17649 +
17650 +static inline void igb_set_vlan_stripping(struct igb_adapter *adapter)
17651 +{
17652 +       struct e1000_hw *hw = &adapter->hw;
17653 +       u32 reg;
17654 +
17655 +       /* enable replication vlan tag stripping */
17656 +       reg = E1000_READ_REG(hw, E1000_RPLOLR);
17657 +       reg |= E1000_RPLOLR_STRVLAN;
17658 +       E1000_WRITE_REG(hw, E1000_RPLOLR, reg);
17659 +
17660 +       /* notify HW that the MAC is adding vlan tags */
17661 +       reg = E1000_READ_REG(hw, E1000_DTXCTL);
17662 +       reg |= E1000_DTXCTL_VLAN_ADDED;
17663 +       E1000_WRITE_REG(hw, E1000_DTXCTL, reg);
17664 +}
17665 +
17666 +/**
17667 + * igb_configure_rx - Configure receive Unit after Reset
17668 + * @adapter: board private structure
17669 + *
17670 + * Configure the Rx unit of the MAC after a reset.
17671 + **/
17672 +static void igb_configure_rx(struct igb_adapter *adapter)
17673 +{
17674 +       int i;
17675 +
17676 +       /* enable vlan tag stripping for replicated packets */
17677 +       igb_set_vlan_stripping(adapter);
17678 +
17679 +       /* set UTA to appropriate mode */
17680 +       igb_set_uta(adapter);
17681 +
17682 +       /* set the correct pool for the PF default MAC address in entry 0 */
17683 +       igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
17684 +                        adapter->vfs_allocated_count);
17685 +
17686 +       /* Setup the HW Rx Head and Tail Descriptor Pointers and
17687 +        * the Base and Length of the Rx Descriptor Ring */
17688 +       for (i = 0; i < adapter->num_rx_queues; i++)
17689 +               igb_configure_rx_ring(adapter, &adapter->rx_ring[i]);
17690 +}
17691 +
17692 +/**
17693 + * igb_free_tx_resources - Free Tx Resources per Queue
17694 + * @tx_ring: Tx descriptor ring for a specific queue
17695 + *
17696 + * Free all transmit software resources
17697 + **/
17698 +void igb_free_tx_resources(struct igb_ring *tx_ring)
17699 +{
17700 +       igb_clean_tx_ring(tx_ring);
17701 +
17702 +       vfree(tx_ring->buffer_info);
17703 +       tx_ring->buffer_info = NULL;
17704 +
17705 +       /* if not set, then don't free */
17706 +       if (!tx_ring->desc)
17707 +               return;
17708 +
17709 +       pci_free_consistent(tx_ring->pdev, tx_ring->size,
17710 +                           tx_ring->desc, tx_ring->dma);
17711 +
17712 +       tx_ring->desc = NULL;
17713 +}
17714 +
17715 +/**
17716 + * igb_free_all_tx_resources - Free Tx Resources for All Queues
17717 + * @adapter: board private structure
17718 + *
17719 + * Free all transmit software resources
17720 + **/
17721 +static void igb_free_all_tx_resources(struct igb_adapter *adapter)
17722 +{
17723 +       int i;
17724 +
17725 +       for (i = 0; i < adapter->num_tx_queues; i++)
17726 +               igb_free_tx_resources(&adapter->tx_ring[i]);
17727 +}
17728 +
17729 +static void igb_unmap_and_free_tx_resource(struct pci_dev *pdev,
17730 +                                           struct igb_buffer *buffer_info)
17731 +{
17732 +       if (buffer_info->page_dma) {
17733 +               pci_unmap_page(pdev,
17734 +                               buffer_info->page_dma,
17735 +                               buffer_info->length,
17736 +                               PCI_DMA_TODEVICE);
17737 +               buffer_info->page_dma = 0;
17738 +       }
17739 +       if (buffer_info->dma) {
17740 +               pci_unmap_single(pdev,
17741 +                               buffer_info->dma,
17742 +                               buffer_info->length,
17743 +                               PCI_DMA_TODEVICE);
17744 +               buffer_info->dma = 0;
17745 +       }
17746 +       if (buffer_info->skb) {
17747 +               dev_kfree_skb_any(buffer_info->skb);
17748 +               buffer_info->skb = NULL;
17749 +       }
17750 +       buffer_info->time_stamp = 0;
17751 +       buffer_info->next_to_watch = 0;
17752 +       /* buffer_info must be completely set up in the transmit path */
17753 +}
17754 +
17755 +/**
17756 + * igb_clean_tx_ring - Free Tx Buffers
17757 + * @tx_ring: ring to be cleaned
17758 + **/
17759 +static void igb_clean_tx_ring(struct igb_ring *tx_ring)
17760 +{
17761 +       struct igb_buffer *buffer_info;
17762 +       unsigned long size;
17763 +       unsigned int i;
17764 +
17765 +       if (!tx_ring->buffer_info)
17766 +               return;
17767 +       /* Free all the Tx ring sk_buffs */
17768 +
17769 +       for (i = 0; i < tx_ring->count; i++) {
17770 +               buffer_info = &tx_ring->buffer_info[i];
17771 +               igb_unmap_and_free_tx_resource(tx_ring->pdev, buffer_info);
17772 +       }
17773 +
17774 +       size = sizeof(struct igb_buffer) * tx_ring->count;
17775 +       memset(tx_ring->buffer_info, 0, size);
17776 +
17777 +       /* Zero out the descriptor ring */
17778 +       memset(tx_ring->desc, 0, tx_ring->size);
17779 +
17780 +       tx_ring->next_to_use = 0;
17781 +       tx_ring->next_to_clean = 0;
17782 +}
17783 +
17784 +/**
17785 + * igb_clean_all_tx_rings - Free Tx Buffers for all queues
17786 + * @adapter: board private structure
17787 + **/
17788 +static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
17789 +{
17790 +       int i;
17791 +
17792 +       for (i = 0; i < adapter->num_tx_queues; i++)
17793 +               igb_clean_tx_ring(&adapter->tx_ring[i]);
17794 +}
17795 +
17796 +/**
17797 + * igb_free_rx_resources - Free Rx Resources
17798 + * @rx_ring: ring to clean the resources from
17799 + *
17800 + * Free all receive software resources
17801 + **/
17802 +void igb_free_rx_resources(struct igb_ring *rx_ring)
17803 +{
17804 +       igb_clean_rx_ring(rx_ring);
17805 +
17806 +       vfree(rx_ring->buffer_info);
17807 +       rx_ring->buffer_info = NULL;
17808 +
17809 +#ifdef IGB_LRO
17810 +       vfree(rx_ring->lro_mgr.lro_arr);
17811 +       rx_ring->lro_mgr.lro_arr = NULL;
17812 +#endif /* IGB_LRO */
17813 +
17814 +       /* if not set, then don't free */
17815 +       if (!rx_ring->desc)
17816 +               return;
17817 +
17818 +       pci_free_consistent(rx_ring->pdev, rx_ring->size,
17819 +                           rx_ring->desc, rx_ring->dma);
17820 +
17821 +       rx_ring->desc = NULL;
17822 +}
17823 +
17824 +/**
17825 + * igb_free_all_rx_resources - Free Rx Resources for All Queues
17826 + * @adapter: board private structure
17827 + *
17828 + * Free all receive software resources
17829 + **/
17830 +static void igb_free_all_rx_resources(struct igb_adapter *adapter)
17831 +{
17832 +       int i;
17833 +
17834 +       for (i = 0; i < adapter->num_rx_queues; i++)
17835 +               igb_free_rx_resources(&adapter->rx_ring[i]);
17836 +}
17837 +
17838 +/**
17839 + * igb_clean_rx_ring - Free Rx Buffers per Queue
17840 + * @rx_ring: ring to free buffers from
17841 + **/
17842 +static void igb_clean_rx_ring(struct igb_ring *rx_ring)
17843 +{
17844 +       struct igb_buffer *buffer_info;
17845 +       unsigned long size;
17846 +       unsigned int i;
17847 +
17848 +       if (!rx_ring->buffer_info)
17849 +               return;
17850 +
17851 +       /* Free all the Rx ring sk_buffs */
17852 +       for (i = 0; i < rx_ring->count; i++) {
17853 +               buffer_info = &rx_ring->buffer_info[i];
17854 +               if (buffer_info->dma) {
17855 +                       if (rx_ring->rx_ps_hdr_size)
17856 +                               pci_unmap_single(rx_ring->pdev,
17857 +                                                buffer_info->dma,
17858 +                                                rx_ring->rx_ps_hdr_size,
17859 +                                                PCI_DMA_FROMDEVICE);
17860 +                       else
17861 +                               pci_unmap_single(rx_ring->pdev,
17862 +                                                buffer_info->dma,
17863 +                                                rx_ring->rx_buffer_len,
17864 +                                                PCI_DMA_FROMDEVICE);
17865 +                       buffer_info->dma = 0;
17866 +               }
17867 +
17868 +               if (buffer_info->skb) {
17869 +                       dev_kfree_skb(buffer_info->skb);
17870 +                       buffer_info->skb = NULL;
17871 +               }
17872 +#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
17873 +               if (buffer_info->page) {
17874 +                       if (buffer_info->page_dma)
17875 +                               pci_unmap_page(rx_ring->pdev,
17876 +                                              buffer_info->page_dma,
17877 +                                              rx_ring->rx_buffer_len,
17878 +                                              PCI_DMA_FROMDEVICE);
17879 +                       put_page(buffer_info->page);
17880 +                       buffer_info->page = NULL;
17881 +                       buffer_info->page_dma = 0;
17882 +                       buffer_info->page_offset = 0;
17883 +               }
17884 +#endif
17885 +       }
17886 +
17887 +       size = sizeof(struct igb_buffer) * rx_ring->count;
17888 +       memset(rx_ring->buffer_info, 0, size);
17889 +
17890 +       /* Zero out the descriptor ring */
17891 +       memset(rx_ring->desc, 0, rx_ring->size);
17892 +
17893 +       rx_ring->next_to_clean = 0;
17894 +       rx_ring->next_to_use = 0;
17895 +}
17896 +
17897 +/**
17898 + * igb_clean_all_rx_rings - Free Rx Buffers for all queues
17899 + * @adapter: board private structure
17900 + **/
17901 +static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
17902 +{
17903 +       int i;
17904 +
17905 +       for (i = 0; i < adapter->num_rx_queues; i++)
17906 +               igb_clean_rx_ring(&adapter->rx_ring[i]);
17907 +}
17908 +
17909 +/**
17910 + * igb_set_mac - Change the Ethernet Address of the NIC
17911 + * @netdev: network interface device structure
17912 + * @p: pointer to an address structure
17913 + *
17914 + * Returns 0 on success, negative on failure
17915 + **/
17916 +static int igb_set_mac(struct net_device *netdev, void *p)
17917 +{
17918 +       struct igb_adapter *adapter = netdev_priv(netdev);
17919 +       struct e1000_hw *hw = &adapter->hw;
17920 +       struct sockaddr *addr = p;
17921 +
17922 +       if (!is_valid_ether_addr(addr->sa_data))
17923 +               return -EADDRNOTAVAIL;
17924 +
17925 +       memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
17926 +       memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
17927 +
17928 +       /* set the correct pool for the new PF MAC address in entry 0 */
17929 +       igb_rar_set_qsel(adapter, hw->mac.addr, 0,
17930 +                        adapter->vfs_allocated_count);
17931 +
17932 +       return 0;
17933 +}
17934 +
17935 +/**
17936 + * igb_write_mc_addr_list - write multicast addresses to MTA
17937 + * @netdev: network interface device structure
17938 + *
17939 + * Writes multicast address list to the MTA hash table.
17940 + * Returns: -ENOMEM on failure
17941 + *                0 on no addresses written
17942 + *                X on writing X addresses to MTA
17943 + **/
17944 +static int igb_write_mc_addr_list(struct net_device *netdev)
17945 +{
17946 +       struct igb_adapter *adapter = netdev_priv(netdev);
17947 +       struct e1000_hw *hw = &adapter->hw;
17948 +       struct dev_mc_list *mc_ptr = netdev->mc_list;
17949 +       u8  *mta_list;
17950 +       u32 vmolr = 0;
17951 +       int i;
17952 +
17953 +       if (!netdev->mc_count) {
17954 +               /* nothing to program, so clear mc list */
17955 +               e1000_update_mc_addr_list(hw, NULL, 0);
17956 +               igb_restore_vf_multicasts(adapter);
17957 +               return 0;
17958 +       }
17959 +
17960 +       mta_list = kzalloc(netdev->mc_count * 6, GFP_ATOMIC);
17961 +       if (!mta_list)
17962 +               return -ENOMEM;
17963 +
17964 +       /* set vmolr receive overflow multicast bit */
17965 +       vmolr |= E1000_VMOLR_ROMPE;
17966 +
17967 +       /* The shared function expects a packed array of only addresses. */
17968 +       mc_ptr = netdev->mc_list;
17969 +
17970 +       for (i = 0; i < netdev->mc_count; i++) {
17971 +               if (!mc_ptr)
17972 +                       break;
17973 +               memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
17974 +               mc_ptr = mc_ptr->next;
17975 +       }
17976 +       e1000_update_mc_addr_list(hw, mta_list, i);
17977 +       kfree(mta_list);
17978 +
17979 +       return netdev->mc_count;
17980 +}
17981 +
17982 +#ifdef HAVE_SET_RX_MODE
17983 +/**
17984 + * igb_write_uc_addr_list - write unicast addresses to RAR table
17985 + * @netdev: network interface device structure
17986 + *
17987 + * Writes unicast address list to the RAR table.
17988 + * Returns: -ENOMEM on failure/insufficient address space
17989 + *                0 on no addresses written
17990 + *                X on writing X addresses to the RAR table
17991 + **/
17992 +static int igb_write_uc_addr_list(struct net_device *netdev)
17993 +{
17994 +       struct igb_adapter *adapter = netdev_priv(netdev);
17995 +       struct e1000_hw *hw = &adapter->hw;
17996 +       unsigned int vfn = adapter->vfs_allocated_count;
17997 +       unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
17998 +#ifndef HAVE_NETDEV_HW_ADDR
17999 +       struct dev_mc_list *uc_ptr = netdev->uc_list;
18000 +#endif
18001 +       int count = 0;
18002 +
18003 +       /* return ENOMEM indicating insufficient memory for addresses */
18004 +#ifndef HAVE_NETDEV_HW_ADDR
18005 +       if (netdev->uc_count > rar_entries)
18006 +#else
18007 +       if (netdev->uc.count > rar_entries)
18008 +#endif
18009 +               return -ENOMEM;
18010 +
18011 +#ifdef HAVE_NETDEV_HW_ADDR
18012 +       if (netdev->uc.count && rar_entries) {
18013 +               struct netdev_hw_addr *ha;
18014 +               list_for_each_entry(ha, &netdev->uc.list, list) {
18015 +                       if (!rar_entries)
18016 +                               break;
18017 +                       igb_rar_set_qsel(adapter, ha->addr,
18018 +                                        rar_entries--, 
18019 +                                        vfn);
18020 +                       count++;
18021 +               }
18022 +       }
18023 +#else
18024 +       while (uc_ptr) {
18025 +               igb_rar_set_qsel(adapter, uc_ptr->da_addr,
18026 +                                rar_entries--, vfn);
18027 +               uc_ptr = uc_ptr->next;
18028 +               count++;
18029 +       }
18030 +#endif
18031 +       /* write the addresses in reverse order to avoid write combining */
18032 +       for (; rar_entries > 0 ; rar_entries--) {
18033 +               E1000_WRITE_REG(hw, E1000_RAH(rar_entries), 0);
18034 +               E1000_WRITE_REG(hw, E1000_RAL(rar_entries), 0);
18035 +       }                       
18036 +       E1000_WRITE_FLUSH(hw);
18037 +
18038 +       return count;
18039 +}
18040 +
18041 +#endif
18042 +/**
18043 + * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
18044 + * @netdev: network interface device structure
18045 + *
18046 + * The set_rx_mode entry point is called whenever the unicast or multicast
18047 + * address lists or the network interface flags are updated.  This routine is
18048 + * responsible for configuring the hardware for proper unicast, multicast,
18049 + * promiscuous mode, and all-multi behavior.
18050 + **/
18051 +static void igb_set_rx_mode(struct net_device *netdev)
18052 +{
18053 +       struct igb_adapter *adapter = netdev_priv(netdev);
18054 +       struct e1000_hw *hw = &adapter->hw;
18055 +       unsigned int vfn = adapter->vfs_allocated_count;
18056 +       u32 rctl, vmolr = 0;
18057 +       int count;
18058 +
18059 +       /* Check for Promiscuous and All Multicast modes */
18060 +       rctl = E1000_READ_REG(hw, E1000_RCTL);
18061 +
18062 +       /* clear the affected bits */
18063 +       rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
18064 +
18065 +       if (netdev->flags & IFF_PROMISC) {
18066 +               rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
18067 +               vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
18068 +       } else {
18069 +               if (netdev->flags & IFF_ALLMULTI) {
18070 +                       rctl |= E1000_RCTL_MPE;
18071 +                       vmolr |= E1000_VMOLR_MPME;
18072 +               } else {
18073 +                       /*
18074 +                        * Write addresses to the MTA, if the attempt fails
18075 +                        * then we should just turn on promiscuous mode so
18076 +                        * that we can at least receive multicast traffic
18077 +                        */
18078 +                       count = igb_write_mc_addr_list(netdev);
18079 +                       if (count < 0) {
18080 +                               rctl |= E1000_RCTL_MPE;
18081 +                               vmolr |= E1000_VMOLR_MPME;
18082 +                       } else if (count) {
18083 +                               vmolr |= E1000_VMOLR_ROMPE;
18084 +                       }
18085 +               }
18086 +#ifdef HAVE_SET_RX_MODE
18087 +               /*
18088 +                * Write addresses to available RAR registers, if there is not
18089 +                * sufficient space to store all the addresses then enable
18090 +                * unicast promiscuous mode
18091 +                */
18092 +               count = igb_write_uc_addr_list(netdev);
18093 +               if (count < 0) {
18094 +                       rctl |= E1000_RCTL_UPE;
18095 +                       vmolr |= E1000_VMOLR_ROPE;
18096 +               }
18097 +#endif
18098 +               rctl |= E1000_RCTL_VFE;
18099 +       }
18100 +       E1000_WRITE_REG(hw, E1000_RCTL, rctl);
18101 +
18102 +       /*
18103 +        * In order to support SR-IOV and eventually VMDq it is necessary to set
18104 +        * the VMOLR to enable the appropriate modes.  Without this workaround
18105 +        * we will have issues with VLAN tag stripping not being done for frames
18106 +        * that are only arriving because we are the default pool
18107 +        */
18108 +       if (hw->mac.type < e1000_82576)
18109 +               return;
18110 +
18111 +       vmolr |= E1000_READ_REG(hw, E1000_VMOLR(vfn)) &
18112 +                ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
18113 +       E1000_WRITE_REG(hw, E1000_VMOLR(vfn), vmolr);
18114 +       igb_restore_vf_multicasts(adapter);
18115 +}
18116 +
18117 +/* Need to wait a few seconds after link up to get diagnostic information from
18118 + * the phy */
18119 +static void igb_update_phy_info(unsigned long data)
18120 +{
18121 +       struct igb_adapter *adapter = (struct igb_adapter *) data;
18122 +       e1000_get_phy_info(&adapter->hw);
18123 +}
18124 +
18125 +/**
18126 + * igb_has_link - check shared code for link and determine up/down
18127 + * @adapter: pointer to driver private info
18128 + **/
18129 +static bool igb_has_link(struct igb_adapter *adapter)
18130 +{
18131 +       struct e1000_hw *hw = &adapter->hw;
18132 +       bool link_active = FALSE;
18133 +       s32 ret_val = 0;
18134 +
18135 +       /* get_link_status is set on LSC (link status) interrupt or
18136 +        * rx sequence error interrupt.  get_link_status will stay
18137 +        * false until the e1000_check_for_link establishes link
18138 +        * for copper adapters ONLY
18139 +        */
18140 +       switch (hw->phy.media_type) {
18141 +       case e1000_media_type_copper:
18142 +               if (hw->mac.get_link_status) {
18143 +                       ret_val = e1000_check_for_link(hw);
18144 +                       link_active = !hw->mac.get_link_status;
18145 +               } else {
18146 +                       link_active = TRUE;
18147 +               }
18148 +               break;
18149 +       case e1000_media_type_internal_serdes:
18150 +               ret_val = e1000_check_for_link(hw);
18151 +               link_active = hw->mac.serdes_has_link;
18152 +               break;
18153 +       default:
18154 +       case e1000_media_type_unknown:
18155 +               break;
18156 +       }
18157 +
18158 +       return link_active;
18159 +}
18160 +
18161 +/**
18162 + * igb_watchdog - Timer Call-back
18163 + * @data: pointer to adapter cast into an unsigned long
18164 + **/
18165 +static void igb_watchdog(unsigned long data)
18166 +{
18167 +       struct igb_adapter *adapter = (struct igb_adapter *)data;
18168 +       /* Do the rest outside of interrupt context */
18169 +       schedule_work(&adapter->watchdog_task);
18170 +}
18171 +
18172 +static void igb_watchdog_task(struct work_struct *work)
18173 +{
18174 +       struct igb_adapter *adapter = container_of(work,
18175 +                                       struct igb_adapter, watchdog_task);
18176 +       struct e1000_hw *hw = &adapter->hw;
18177 +       struct net_device *netdev = adapter->netdev;
18178 +       struct igb_ring *tx_ring = adapter->tx_ring;
18179 +       u32 link;
18180 +       int i;
18181 +
18182 +       link = igb_has_link(adapter);
18183 +
18184 +       if (link) {
18185 +               if (!netif_carrier_ok(netdev)) {
18186 +                       u32 ctrl;
18187 +                       e1000_get_speed_and_duplex(hw, &adapter->link_speed,
18188 +                                                  &adapter->link_duplex);
18189 +
18190 +                       ctrl = E1000_READ_REG(hw, E1000_CTRL);
18191 +                       DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s, "
18192 +                               "Flow Control: %s\n",
18193 +                               adapter->link_speed,
18194 +                               adapter->link_duplex == FULL_DUPLEX ?
18195 +                               "Full Duplex" : "Half Duplex",
18196 +                               ((ctrl & E1000_CTRL_TFCE) && (ctrl &
18197 +                               E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
18198 +                               E1000_CTRL_RFCE) ? "RX" : ((ctrl &
18199 +                               E1000_CTRL_TFCE) ? "TX" : "None")));
18200 +
18201 +                       /* tweak tx_queue_len according to speed/duplex and
18202 +                        * adjust the timeout factor */
18203 +                       netdev->tx_queue_len = adapter->tx_queue_len;
18204 +                       adapter->tx_timeout_factor = 1;
18205 +                       switch (adapter->link_speed) {
18206 +                       case SPEED_10:
18207 +                               netdev->tx_queue_len = 10;
18208 +                               adapter->tx_timeout_factor = 14;
18209 +                               break;
18210 +                       case SPEED_100:
18211 +                               netdev->tx_queue_len = 100;
18212 +                               /* maybe add some timeout factor ? */
18213 +                               break;
18214 +                       }
18215 +
18216 +                       netif_carrier_on(netdev);
18217 +                       netif_tx_wake_all_queues(netdev);
18218 +
18219 +                       igb_ping_all_vfs(adapter);
18220 +
18221 +                       /* link state has changed, schedule phy info update */
18222 +                       if (!test_bit(__IGB_DOWN, &adapter->state))
18223 +                               mod_timer(&adapter->phy_info_timer,
18224 +                                         round_jiffies(jiffies + 2 * HZ));
18225 +               }
18226 +       } else {
18227 +               if (netif_carrier_ok(netdev)) {
18228 +                       adapter->link_speed = 0;
18229 +                       adapter->link_duplex = 0;
18230 +                       DPRINTK(LINK, INFO, "NIC Link is Down\n");
18231 +                       netif_carrier_off(netdev);
18232 +                       netif_tx_stop_all_queues(netdev);
18233 +
18234 +                       igb_ping_all_vfs(adapter);
18235 +
18236 +                       /* link state has changed, schedule phy info update */
18237 +                       if (!test_bit(__IGB_DOWN, &adapter->state))
18238 +                               mod_timer(&adapter->phy_info_timer,
18239 +                                         round_jiffies(jiffies + 2 * HZ));
18240 +               }
18241 +       }
18242 +
18243 +       igb_update_stats(adapter);
18244 +
18245 +       if (!netif_carrier_ok(netdev)) {
18246 +               if (IGB_DESC_UNUSED(tx_ring) + 1 < tx_ring->count) {
18247 +                       /* We've lost link, so the controller stops DMA,
18248 +                        * but we've got queued Tx work that's never going
18249 +                        * to get done, so reset controller to flush Tx.
18250 +                        * (Do the reset outside of interrupt context). */
18251 +                       adapter->tx_timeout_count++;
18252 +                       schedule_work(&adapter->reset_task);
18253 +               }
18254 +       }
18255 +
18256 +       /* Force detection of hung controller every watchdog period */
18257 +       for (i = 0; i < adapter->num_tx_queues; i++)
18258 +               adapter->tx_ring[i].detect_tx_hung = TRUE;
18259 +
18260 +       /* Cause software interrupt to ensure rx ring is cleaned */
18261 +       if (adapter->msix_entries) {
18262 +               u32 eics = 0;
18263 +               for (i = 0; i < adapter->num_q_vectors; i++) {
18264 +                       struct igb_q_vector *q_vector = adapter->q_vector[i];
18265 +                       eics |= q_vector->eims_value;
18266 +               }
18267 +               E1000_WRITE_REG(hw, E1000_EICS, eics);
18268 +       } else {
18269 +               E1000_WRITE_REG(hw, E1000_ICS, E1000_ICS_RXDMT0);
18270 +       }
18271 +
18272 +       /* Reset the timer */
18273 +       if (!test_bit(__IGB_DOWN, &adapter->state))
18274 +               mod_timer(&adapter->watchdog_timer,
18275 +                         round_jiffies(jiffies + 2 * HZ));
18276 +}
18277 +
18278 +enum latency_range {
18279 +       lowest_latency = 0,
18280 +       low_latency = 1,
18281 +       bulk_latency = 2,
18282 +       latency_invalid = 255
18283 +};
18284 +
18285 +
18286 +/**
18287 + * igb_update_ring_itr - update the dynamic ITR value based on packet size
18288 + *
18289 + *      Stores a new ITR value based on strictly on packet size.  This
18290 + *      Stores a new ITR value based strictly on packet size.  This
18291 + *      due to the difficulty of synchronizing statistics across multiple
18292 + *      receive rings.  The divisors and thresholds used by this fuction
18293 + *      receive rings.  The divisors and thresholds used by this function
18294 + *      data, in order to minimize response time while increasing bulk
18295 + *      throughput.
18296 + *      This functionality is controlled by the InterruptThrottleRate module
18297 + *      parameter (see igb_param.c)
18298 + *      NOTE:  This function is called only when operating in a multiqueue
18299 + *             receive environment.
18300 + * @q_vector: pointer to q_vector
18301 + **/
18302 +static void igb_update_ring_itr(struct igb_q_vector *q_vector)
18303 +{
18304 +       int new_val = q_vector->itr_val;
18305 +       int avg_wire_size = 0;
18306 +       struct igb_adapter *adapter = q_vector->adapter;
18307 +
18308 +       /* For non-gigabit speeds, just fix the interrupt rate at 4000
18309 +        * ints/sec - ITR timer value of 120 ticks.
18310 +        */
18311 +       if (adapter->link_speed != SPEED_1000) {
18312 +               new_val = 976;
18313 +               goto set_itr_val;
18314 +       }
18315 +
18316 +       if (q_vector->rx_ring && q_vector->rx_ring->total_packets) {
18317 +               struct igb_ring *ring = q_vector->rx_ring;
18318 +               avg_wire_size = ring->total_bytes / ring->total_packets;
18319 +       }
18320 +
18321 +       if (q_vector->tx_ring && q_vector->tx_ring->total_packets) {
18322 +               struct igb_ring *ring = q_vector->tx_ring;
18323 +               avg_wire_size = max_t(u32, avg_wire_size,
18324 +                                     (ring->total_bytes /
18325 +                                      ring->total_packets));
18326 +       }
18327 +
18328 +       /* if avg_wire_size isn't set no work was done */
18329 +       if (!avg_wire_size)
18330 +               goto clear_counts;
18331 +
18332 +       /* Add 24 bytes to size to account for CRC, preamble, and gap */
18333 +       avg_wire_size += 24;
18334 +
18335 +       /* Don't starve jumbo frames */
18336 +       avg_wire_size = min(avg_wire_size, 3000);
18337 +
18338 +       /* Give a little boost to mid-size frames */
18339 +       if ((avg_wire_size > 300) && (avg_wire_size < 1200))
18340 +               new_val = avg_wire_size / 3;
18341 +       else
18342 +               new_val = avg_wire_size / 2;
18343 +
18344 +set_itr_val:
18345 +       if (new_val != q_vector->itr_val) {
18346 +               q_vector->itr_val = new_val;
18347 +               q_vector->set_itr = 1;
18348 +       }
18349 +clear_counts:
18350 +       if (q_vector->rx_ring) {
18351 +               q_vector->rx_ring->total_bytes = 0;
18352 +               q_vector->rx_ring->total_packets = 0;
18353 +       }
18354 +       if (q_vector->tx_ring) {
18355 +               q_vector->tx_ring->total_bytes = 0;
18356 +               q_vector->tx_ring->total_packets = 0;
18357 +       }
18358 +}
18359 +
18360 +/**
18361 + * igb_update_itr - update the dynamic ITR value based on statistics
18362 + *      Stores a new ITR value based on packets and byte
18363 + *      counts during the last interrupt.  The advantage of per interrupt
18364 + *      computation is faster updates and more accurate ITR for the current
18365 + *      traffic pattern.  Constants in this function were computed
18366 + *      based on theoretical maximum wire speed and thresholds were set based
18367 + *      on testing data as well as attempting to minimize response time
18368 + *      while increasing bulk throughput.
18369 + *      this functionality is controlled by the InterruptThrottleRate module
18370 + *      parameter (see igb_param.c)
18371 + *      NOTE:  These calculations are only valid when operating in a single-
18372 + *             queue environment.
18373 + * @adapter: pointer to adapter
18374 + * @itr_setting: current adapter->itr
18375 + * @packets: the number of packets during this measurement interval
18376 + * @bytes: the number of bytes during this measurement interval
18377 + **/
18378 +static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting,
18379 +                                   int packets, int bytes)
18380 +{
18381 +       unsigned int retval = itr_setting;
18382 +
18383 +       if (packets == 0)
18384 +               goto update_itr_done;
18385 +
18386 +       switch (itr_setting) {
18387 +       case lowest_latency:
18388 +               /* handle TSO and jumbo frames */
18389 +               if (bytes/packets > 8000)
18390 +                       retval = bulk_latency;
18391 +               else if ((packets < 5) && (bytes > 512))
18392 +                       retval = low_latency;
18393 +               break;
18394 +       case low_latency:  /* 50 usec aka 20000 ints/s */
18395 +               if (bytes > 10000) {
18396 +                       /* this if handles the TSO accounting */
18397 +                       if (bytes/packets > 8000) {
18398 +                               retval = bulk_latency;
18399 +                       } else if ((packets < 10) || ((bytes/packets) > 1200)) {
18400 +                               retval = bulk_latency;
18401 +                       } else if ((packets > 35)) {
18402 +                               retval = lowest_latency;
18403 +                       }
18404 +               } else if (bytes/packets > 2000) {
18405 +                       retval = bulk_latency;
18406 +               } else if (packets <= 2 && bytes < 512) {
18407 +                       retval = lowest_latency;
18408 +               }
18409 +               break;
18410 +       case bulk_latency: /* 250 usec aka 4000 ints/s */
18411 +               if (bytes > 25000) {
18412 +                       if (packets > 35)
18413 +                               retval = low_latency;
18414 +               } else if (bytes < 1500) {
18415 +                       retval = low_latency;
18416 +               }
18417 +               break;
18418 +       }
18419 +
18420 +update_itr_done:
18421 +       return retval;
18422 +}
18423 +static void igb_set_itr(struct igb_adapter *adapter)
18424 +{
18425 +       u16 current_itr;
18426 +       u32 new_itr = adapter->itr;
18427 +
18428 +       /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
18429 +       if (adapter->link_speed != SPEED_1000) {
18430 +               current_itr = 0;
18431 +               new_itr = 4000;
18432 +               goto set_itr_now;
18433 +       }
18434 +
18435 +       adapter->rx_itr = igb_update_itr(adapter,
18436 +                                   adapter->rx_itr,
18437 +                                   adapter->rx_ring->total_packets,
18438 +                                   adapter->rx_ring->total_bytes);
18439 +
18440 +       adapter->tx_itr = igb_update_itr(adapter,
18441 +                                   adapter->tx_itr,
18442 +                                   adapter->tx_ring->total_packets,
18443 +                                    adapter->tx_ring->total_bytes);
18444 +       current_itr = max(adapter->rx_itr, adapter->tx_itr);
18445 +
18446 +       /* conservative mode (itr 3) eliminates the lowest_latency setting */
18447 +       if (adapter->itr_setting == 3 && current_itr == lowest_latency)
18448 +               current_itr = low_latency;
18449 +
18450 +       switch (current_itr) {
18451 +       /* counts and packets in update_itr are dependent on these numbers */
18452 +       case lowest_latency:
18453 +               new_itr = 56;  /* aka 70,000 ints/sec */
18454 +               break;
18455 +       case low_latency:
18456 +               new_itr = 196; /* aka 20,000 ints/sec */
18457 +               break;
18458 +       case bulk_latency:
18459 +               new_itr = 980; /* aka 4,000 ints/sec */
18460 +               break;
18461 +       default:
18462 +               break;
18463 +       }
18464 +
18465 +set_itr_now:
18466 +       adapter->rx_ring->total_bytes = 0;
18467 +       adapter->rx_ring->total_packets = 0;
18468 +       adapter->tx_ring->total_bytes = 0;
18469 +       adapter->tx_ring->total_packets = 0;
18470 +
18471 +       if (new_itr != adapter->itr) {
18472 +               struct igb_q_vector *q_vector = adapter->q_vector[0];
18473 +               /* this attempts to bias the interrupt rate towards Bulk
18474 +                * by adding intermediate steps when interrupt rate is
18475 +                * increasing */
18476 +               new_itr = new_itr > adapter->itr ?
18477 +                            max((new_itr * adapter->itr) /
18478 +                                (new_itr + (adapter->itr >> 2)), new_itr) :
18479 +                            new_itr;
18480 +               /* Don't write the value here; it resets the adapter's
18481 +                * internal timer, and causes us to delay far longer than
18482 +                * we should between interrupts.  Instead, we write the ITR
18483 +                * value at the beginning of the next interrupt so the timing
18484 +                * ends up being correct.
18485 +                */
18486 +               adapter->itr = new_itr;
18487 +               q_vector->itr_val = new_itr;
18488 +               q_vector->set_itr = 1;
18489 +       }
18490 +
18491 +       return;
18492 +}
18493 +
18494 +#define IGB_TX_FLAGS_CSUM              0x00000001
18495 +#define IGB_TX_FLAGS_VLAN              0x00000002
18496 +#define IGB_TX_FLAGS_TSO               0x00000004
18497 +#define IGB_TX_FLAGS_IPV4              0x00000008
18498 +#define IGB_TX_FLAGS_TSTAMP             0x00000010
18499 +#define IGB_TX_FLAGS_VLAN_MASK         0xffff0000
18500 +#define IGB_TX_FLAGS_VLAN_SHIFT                16
18501 +
18502 +static inline int igb_tso_adv(struct igb_ring *tx_ring,
18503 +                              struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
18504 +{
18505 +#ifdef NETIF_F_TSO
18506 +       struct e1000_adv_tx_context_desc *context_desc;
18507 +       unsigned int i;
18508 +       int err;
18509 +       struct igb_buffer *buffer_info;
18510 +       u32 info = 0, tu_cmd = 0;
18511 +       u32 mss_l4len_idx, l4len;
18512 +       *hdr_len = 0;
18513 +
18514 +       if (skb_header_cloned(skb)) {
18515 +               err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
18516 +               if (err)
18517 +                       return err;
18518 +       }
18519 +
18520 +       l4len = tcp_hdrlen(skb);
18521 +       *hdr_len += l4len;
18522 +
18523 +       if (skb->protocol == htons(ETH_P_IP)) {
18524 +               struct iphdr *iph = ip_hdr(skb);
18525 +               iph->tot_len = 0;
18526 +               iph->check = 0;
18527 +               tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
18528 +                                                        iph->daddr, 0,
18529 +                                                        IPPROTO_TCP,
18530 +                                                        0);
18531 +#ifdef NETIF_F_TSO6
18532 +       } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
18533 +               ipv6_hdr(skb)->payload_len = 0;
18534 +               tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
18535 +                                                      &ipv6_hdr(skb)->daddr,
18536 +                                                      0, IPPROTO_TCP, 0);
18537 +#endif
18538 +       }
18539 +
18540 +       i = tx_ring->next_to_use;
18541 +
18542 +       buffer_info = &tx_ring->buffer_info[i];
18543 +       context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
18544 +       /* VLAN MACLEN IPLEN */
18545 +       if (tx_flags & IGB_TX_FLAGS_VLAN)
18546 +               info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
18547 +       info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
18548 +       *hdr_len += skb_network_offset(skb);
18549 +       info |= skb_network_header_len(skb);
18550 +       *hdr_len += skb_network_header_len(skb);
18551 +       context_desc->vlan_macip_lens = cpu_to_le32(info);
18552 +
18553 +       /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
18554 +       tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
18555 +
18556 +       if (skb->protocol == htons(ETH_P_IP))
18557 +               tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
18558 +       tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
18559 +
18560 +       context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
18561 +
18562 +       /* MSS L4LEN IDX */
18563 +       mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
18564 +       mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
18565 +       mss_l4len_idx |= tx_ring->ctx_idx;
18566 +
18567 +       context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
18568 +       context_desc->seqnum_seed = 0;
18569 +
18570 +       buffer_info->time_stamp = jiffies;
18571 +       buffer_info->next_to_watch = i;
18572 +       buffer_info->dma = 0;
18573 +       i++;
18574 +       if (i == tx_ring->count)
18575 +               i = 0;
18576 +
18577 +       tx_ring->next_to_use = i;
18578 +
18579 +       return TRUE;
18580 +#else
18581 +       return FALSE;
18582 +#endif  /* NETIF_F_TSO */
18583 +}
18584 +
18585 +static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
18586 +                                   struct sk_buff *skb, u32 tx_flags)
18587 +{
18588 +       struct e1000_adv_tx_context_desc *context_desc;
18589 +       struct pci_dev *pdev = tx_ring->pdev;
18590 +       struct igb_buffer *buffer_info;
18591 +       u32 info = 0, tu_cmd = 0;
18592 +       unsigned int i;
18593 +
18594 +       if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
18595 +           (tx_flags & IGB_TX_FLAGS_VLAN)) {
18596 +               i = tx_ring->next_to_use;
18597 +               buffer_info = &tx_ring->buffer_info[i];
18598 +               context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
18599 +
18600 +               if (tx_flags & IGB_TX_FLAGS_VLAN)
18601 +                       info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
18602 +
18603 +               info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
18604 +               if (skb->ip_summed == CHECKSUM_PARTIAL)
18605 +                       info |= skb_network_header_len(skb);
18606 +
18607 +               context_desc->vlan_macip_lens = cpu_to_le32(info);
18608 +
18609 +               tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
18610 +
18611 +               if (skb->ip_summed == CHECKSUM_PARTIAL) {
18612 +                       __be16 protocol;
18613 +
18614 +                       if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
18615 +                               const struct vlan_ethhdr *vhdr =
18616 +                                         (const struct vlan_ethhdr*)skb->data;
18617 +
18618 +                               protocol = vhdr->h_vlan_encapsulated_proto;
18619 +                       } else {
18620 +                               protocol = skb->protocol;
18621 +                       }
18622 +
18623 +                       switch (protocol) {
18624 +                       case __constant_htons(ETH_P_IP):
18625 +                               tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
18626 +                               if (ip_hdr(skb)->protocol == IPPROTO_TCP)
18627 +                                       tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
18628 +                               else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
18629 +                                       tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
18630 +                               break;
18631 +#ifdef NETIF_F_IPV6_CSUM
18632 +                       case __constant_htons(ETH_P_IPV6):
18633 +                               /* XXX what about other V6 headers?? */
18634 +                               if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
18635 +                                       tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
18636 +                               else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
18637 +                                       tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
18638 +                               break;
18639 +#endif
18640 +                       default:
18641 +                               if (unlikely(net_ratelimit())) {
18642 +                                       dev_warn(&pdev->dev,
18643 +                                        "partial checksum but proto=%x!\n",
18644 +                                        skb->protocol);
18645 +                               }
18646 +                               break;
18647 +                       }
18648 +               }
18649 +
18650 +               context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
18651 +               context_desc->seqnum_seed = 0;
18652 +               context_desc->mss_l4len_idx = cpu_to_le32(tx_ring->ctx_idx);
18653 +
18654 +               buffer_info->time_stamp = jiffies;
18655 +               buffer_info->next_to_watch = i;
18656 +               buffer_info->dma = 0;
18657 +
18658 +               i++;
18659 +               if (i == tx_ring->count)
18660 +                       i = 0;
18661 +               tx_ring->next_to_use = i;
18662 +
18663 +               return TRUE;
18664 +       }
18665 +       return FALSE;
18666 +}
18667 +
18668 +#define IGB_MAX_TXD_PWR        16
18669 +#define IGB_MAX_DATA_PER_TXD   (1<<IGB_MAX_TXD_PWR)
18670 +
18671 +static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
18672 +                                 unsigned int first)
18673 +{
18674 +       struct igb_buffer *buffer_info;
18675 +       unsigned int len = skb_headlen(skb);
18676 +       unsigned int count = 0, i;
18677 +       unsigned int f;
18678 +
18679 +       i = tx_ring->next_to_use;
18680 +
18681 +       buffer_info = &tx_ring->buffer_info[i];
18682 +       BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
18683 +       buffer_info->length = len;
18684 +       /* set time_stamp *before* dma to help avoid a possible race */
18685 +       buffer_info->time_stamp = jiffies;
18686 +       buffer_info->next_to_watch = i;
18687 +       buffer_info->dma = pci_map_single(tx_ring->pdev, skb->data, len,
18688 +                                         PCI_DMA_TODEVICE);
18689 +       count++;
18690 +
18691 +       for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
18692 +               struct skb_frag_struct *frag;
18693 +
18694 +               frag = &skb_shinfo(skb)->frags[f];
18695 +               len = frag->size;
18696 +
18697 +               i++;
18698 +               if (i == tx_ring->count)
18699 +                       i = 0;
18700 +
18701 +               buffer_info = &tx_ring->buffer_info[i];
18702 +               BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
18703 +               buffer_info->length = len;
18704 +               buffer_info->time_stamp = jiffies;
18705 +               buffer_info->next_to_watch = i;
18706 +               buffer_info->page_dma = pci_map_page(tx_ring->pdev,
18707 +                                                    frag->page,
18708 +                                                    frag->page_offset,
18709 +                                                    len,
18710 +                                                    PCI_DMA_TODEVICE);
18711 +
18712 +               count++;
18713 +       }
18714 +
18715 +       tx_ring->buffer_info[i].skb = skb;
18716 +       tx_ring->buffer_info[first].next_to_watch = i;
18717 +
18718 +       return count;
18719 +}
18720 +
18721 +static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
18722 +                                    int tx_flags, int count, u32 paylen,
18723 +                                    u8 hdr_len)
18724 +{
18725 +       union e1000_adv_tx_desc *tx_desc;
18726 +       struct igb_buffer *buffer_info;
18727 +       u32 olinfo_status = 0, cmd_type_len;
18728 +       unsigned int i = tx_ring->next_to_use;
18729 +
18730 +       cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
18731 +                       E1000_ADVTXD_DCMD_DEXT);
18732 +
18733 +       if (tx_flags & IGB_TX_FLAGS_VLAN)
18734 +               cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
18735 +
18736 +       if (tx_flags & IGB_TX_FLAGS_TSTAMP)
18737 +               cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
18738 +
18739 +       if (tx_flags & IGB_TX_FLAGS_TSO) {
18740 +               cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
18741 +
18742 +               /* insert tcp checksum */
18743 +               olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
18744 +
18745 +               /* insert ip checksum */
18746 +               if (tx_flags & IGB_TX_FLAGS_IPV4)
18747 +                       olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
18748 +
18749 +       } else if (tx_flags & IGB_TX_FLAGS_CSUM) {
18750 +               olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
18751 +       }
18752 +
18753 +       if (tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_TSO |
18754 +                        IGB_TX_FLAGS_VLAN))
18755 +               olinfo_status |= tx_ring->ctx_idx;
18756 +
18757 +       olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
18758 +
18759 +       do {
18760 +               buffer_info = &tx_ring->buffer_info[i];
18761 +               tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
18762 +               tx_desc->read.buffer_addr = buffer_info->dma ?
18763 +                                           cpu_to_le64(buffer_info->dma) :
18764 +                                           cpu_to_le64(buffer_info->page_dma);
18765 +               tx_desc->read.cmd_type_len =
18766 +                       cpu_to_le32(cmd_type_len | buffer_info->length);
18767 +               tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
18768 +               count--;
18769 +               i++;
18770 +               if (i == tx_ring->count)
18771 +                       i = 0;
18772 +       } while (count > 0);
18773 +
18774 +       tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD);
18775 +       /* Force memory writes to complete before letting h/w
18776 +        * know there are new descriptors to fetch.  (Only
18777 +        * applicable for weak-ordered memory model archs,
18778 +        * such as IA-64). */
18779 +       wmb();
18780 +
18781 +       tx_ring->next_to_use = i;
18782 +       writel(i, tx_ring->tail);
18783 +       /* we need this if more than one processor can write to our tail
18784 +        * at a time, it synchronizes IO on IA64/Altix systems */
18785 +       mmiowb();
18786 +}
18787 +
18788 +static int __igb_maybe_stop_tx(struct net_device *netdev,
18789 +                               struct igb_ring *tx_ring, int size)
18790 +{
18791 +       if (netif_is_multiqueue(netdev))
18792 +               netif_stop_subqueue(netdev, tx_ring->queue_index);
18793 +       else
18794 +               netif_stop_queue(netdev);
18795 +
18796 +       /* Herbert's original patch had:
18797 +        *  smp_mb__after_netif_stop_queue();
18798 +        * but since that doesn't exist yet, just open code it. */
18799 +       smp_mb();
18800 +
18801 +       /* We need to check again in a case another CPU has just
18802 +        * made room available. */
18803 +       if (IGB_DESC_UNUSED(tx_ring) < size)
18804 +               return -EBUSY;
18805 +
18806 +       /* A reprieve! */
18807 +       if (netif_is_multiqueue(netdev))
18808 +               netif_wake_subqueue(netdev, tx_ring->queue_index);
18809 +       else
18810 +               netif_wake_queue(netdev);
18811 +       ++tx_ring->restart_queue;
18812 +       return 0;
18813 +}
18814 +
18815 +static int igb_maybe_stop_tx(struct net_device *netdev,
18816 +                             struct igb_ring *tx_ring, int size)
18817 +{
18818 +       if (IGB_DESC_UNUSED(tx_ring) >= size)
18819 +               return 0;
18820 +       return __igb_maybe_stop_tx(netdev, tx_ring, size);
18821 +}
18822 +
18823 +#define TXD_USE_COUNT(S) (((S) >> (IGB_MAX_TXD_PWR)) + 1)
18824 +
18825 +static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
18826 +                                           struct net_device *netdev,
18827 +                                           struct igb_ring *tx_ring)
18828 +{
18829 +       struct igb_adapter *adapter = netdev_priv(netdev);
18830 +       unsigned int first;
18831 +       unsigned int tx_flags = 0;
18832 +       u8 hdr_len = 0;
18833 +       int tso = 0;
18834 +#ifdef SIOCSHWTSTAMP
18835 +       union skb_shared_tx *shtx = skb_tx(skb);
18836 +#endif
18837 +
18838 +       if (test_bit(__IGB_DOWN, &adapter->state)) {
18839 +               dev_kfree_skb_any(skb);
18840 +               return NETDEV_TX_OK;
18841 +       }
18842 +
18843 +       if (skb->len <= 0) {
18844 +               dev_kfree_skb_any(skb);
18845 +               return NETDEV_TX_OK;
18846 +       }
18847 +
18848 +       /* need: 1 descriptor per page,
18849 +        *       + 2 desc gap to keep tail from touching head,
18850 +        *       + 1 desc for skb->data,
18851 +        *       + 1 desc for context descriptor,
18852 +        * otherwise try next time */
18853 +       if (igb_maybe_stop_tx(netdev, tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
18854 +               /* this is a hard error */
18855 +               return NETDEV_TX_BUSY;
18856 +       }
18857 +
18858 +#ifdef SIOCSHWTSTAMP
18859 +       if (unlikely(shtx->hardware)) {
18860 +               shtx->in_progress = 1;
18861 +               tx_flags |= IGB_TX_FLAGS_TSTAMP;
18862 +       }
18863 +
18864 +#endif
18865 +       if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
18866 +               tx_flags |= IGB_TX_FLAGS_VLAN;
18867 +               tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
18868 +       }
18869 +
18870 +       if (skb->protocol == htons(ETH_P_IP))
18871 +               tx_flags |= IGB_TX_FLAGS_IPV4;
18872 +
18873 +       first = tx_ring->next_to_use;
18874 +#ifdef NETIF_F_TSO
18875 +       if (skb_is_gso(skb)) {
18876 +               tso = igb_tso_adv(tx_ring, skb, tx_flags, &hdr_len);
18877 +
18878 +               if (tso < 0) {
18879 +                       dev_kfree_skb_any(skb);
18880 +                       return NETDEV_TX_OK;
18881 +               }
18882 +       }
18883 +
18884 +#endif
18885 +       if (tso)
18886 +               tx_flags |= IGB_TX_FLAGS_TSO;
18887 +       else if (igb_tx_csum_adv(tx_ring, skb, tx_flags) &&
18888 +                (skb->ip_summed == CHECKSUM_PARTIAL))
18889 +               tx_flags |= IGB_TX_FLAGS_CSUM;
18890 +
18891 +       igb_tx_queue_adv(tx_ring, tx_flags,
18892 +                        igb_tx_map_adv(tx_ring, skb, first),
18893 +                        skb->len, hdr_len);
18894 +
18895 +       netdev->trans_start = jiffies;
18896 +
18897 +       /* Make sure there is space in the ring for the next send. */
18898 +       igb_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 4);
18899 +
18900 +       return NETDEV_TX_OK;
18901 +}
18902 +
18903 +static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *netdev)
18904 +{
18905 +       struct igb_adapter *adapter = netdev_priv(netdev);
18906 +       struct igb_ring *tx_ring;
18907 +
18908 +#ifdef HAVE_TX_MQ
18909 +       int r_idx = 0;
18910 +       r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1);
18911 +       tx_ring = adapter->multi_tx_table[r_idx];
18912 +#else
18913 +       tx_ring = &adapter->tx_ring[0];
18914 +#endif
18915 +
18916 +       /* This goes back to the question of how to logically map a tx queue
18917 +        * to a flow.  Right now, performance is impacted slightly negatively
18918 +        * if using multiple tx queues.  If the stack breaks away from a
18919 +        * single qdisc implementation, we can look at this again. */
18920 +       return igb_xmit_frame_ring_adv(skb, netdev, tx_ring);
18921 +}
18922 +
18923 +/**
18924 + * igb_tx_timeout - Respond to a Tx Hang
18925 + * @netdev: network interface device structure
18926 + **/
18927 +static void igb_tx_timeout(struct net_device *netdev)
18928 +{
18929 +       struct igb_adapter *adapter = netdev_priv(netdev);
18930 +       struct e1000_hw *hw = &adapter->hw;
18931 +
18932 +       /* Do the reset outside of interrupt context */
18933 +       adapter->tx_timeout_count++;
18934 +
18935 +       schedule_work(&adapter->reset_task);
18936 +       E1000_WRITE_REG(hw, E1000_EICS,
18937 +                       (adapter->eims_enable_mask & ~adapter->eims_other));
18938 +}
18939 +
18940 +static void igb_reset_task(struct work_struct *work)
18941 +{
18942 +       struct igb_adapter *adapter;
18943 +       adapter = container_of(work, struct igb_adapter, reset_task);
18944 +
18945 +       igb_reinit_locked(adapter);
18946 +}
18947 +
18948 +/**
18949 + * igb_get_stats - Get System Network Statistics
18950 + * @netdev: network interface device structure
18951 + *
18952 + * Returns the address of the device statistics structure.
18953 + * The statistics are actually updated from the timer callback.
18954 + **/
18955 +static struct net_device_stats *igb_get_stats(struct net_device *netdev)
18956 +{
18957 +       struct igb_adapter *adapter = netdev_priv(netdev);
18958 +
18959 +       /* only return the current stats */
18960 +       return &adapter->net_stats;
18961 +}
18962 +
18963 +/**
18964 + * igb_change_mtu - Change the Maximum Transfer Unit
18965 + * @netdev: network interface device structure
18966 + * @new_mtu: new value for maximum frame size
18967 + *
18968 + * Returns 0 on success, negative on failure
18969 + **/
18970 +static int igb_change_mtu(struct net_device *netdev, int new_mtu)
18971 +{
18972 +       struct igb_adapter *adapter = netdev_priv(netdev);
18973 +       int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
18974 +       u32 rx_buffer_len, i;
18975 +       u16 rx_ps_hdr_size = 0;
18976 +
18977 +       if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
18978 +               DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
18979 +               return -EINVAL;
18980 +       }
18981 +
18982 +#define MAX_STD_JUMBO_FRAME_SIZE 9234
18983 +       if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
18984 +               DPRINTK(PROBE, ERR, "MTU > 9216 not supported.\n");
18985 +               return -EINVAL;
18986 +       }
18987 +
18988 +       while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
18989 +               msleep(1);
18990 +
18991 +       /* NOTE: dev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
18992 +        * means we reserve 2 more, this pushes us to allocate from the next
18993 +        * larger slab size.
18994 +        * i.e. RXBUFFER_2048 --> size-4096 slab
18995 +        */
18996 +
18997 +       /* igb_down has a dependency on max_frame_size */
18998 +       adapter->max_frame_size = max_frame;
18999 +
19000 +       if (max_frame <= IGB_RXBUFFER_1024)
19001 +               rx_buffer_len = IGB_RXBUFFER_1024;
19002 +       else if (max_frame <= IGB_RXBUFFER_2048)
19003 +               rx_buffer_len = IGB_RXBUFFER_2048;
19004 +#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
19005 +       else
19006 +#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
19007 +               rx_buffer_len = IGB_RXBUFFER_16384;
19008 +#else
19009 +               rx_buffer_len = PAGE_SIZE / 2;
19010 +#endif
19011 +#else
19012 +       else if (max_frame <= IGB_RXBUFFER_4096)
19013 +               rx_buffer_len = IGB_RXBUFFER_4096;
19014 +       else if (max_frame <= IGB_RXBUFFER_8192)
19015 +               rx_buffer_len = IGB_RXBUFFER_8192;
19016 +       else
19017 +               rx_buffer_len = IGB_RXBUFFER_16384;
19018 +#endif
19019 +
19020 +       /* adjust allocation if LPE protects us, and we aren't using SBP */
19021 +       if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
19022 +            (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))
19023 +               rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
19024 +
19025 +       if (netif_running(netdev))
19026 +               igb_down(adapter);
19027 +
19028 +       DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n",
19029 +               netdev->mtu, new_mtu);
19030 +       netdev->mtu = new_mtu;
19031 +
19032 +#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
19033 +       /* 82575 and greater support packet-split where the protocol
19034 +        * header is placed in skb->data and the packet data is
19035 +        * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
19036 +        * In the case of a non-split, skb->data is linearly filled,
19037 +        * followed by the page buffers.  Therefore, skb->data is
19038 +        * sized to hold the largest protocol header.
19039 +        */
19040 +       /* allocations using alloc_page take too long for regular MTU
19041 +        * so only enable packet split for jumbo frames */
19042 +       if (new_mtu > ETH_DATA_LEN)
19043 +               rx_ps_hdr_size = IGB_RXBUFFER_128;
19044 +#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
19045 +
19046 +       for (i = 0; i < adapter->num_rx_queues; i++) {
19047 +               struct igb_ring *rx_ring = &adapter->rx_ring[i];
19048 +               rx_ring->rx_buffer_len = rx_buffer_len;
19049 +               rx_ring->rx_ps_hdr_size = rx_ps_hdr_size;
19050 +       }
19051 +
19052 +       if (netif_running(netdev))
19053 +               igb_up(adapter);
19054 +       else
19055 +               igb_reset(adapter);
19056 +
19057 +       clear_bit(__IGB_RESETTING, &adapter->state);
19058 +
19059 +       return 0;
19060 +}
19061 +
19062 +/**
19063 + * igb_update_stats - Update the board statistics counters
19064 + * @adapter: board private structure
19065 + **/
19066 +
19067 +void igb_update_stats(struct igb_adapter *adapter)
19068 +{
19069 +       struct e1000_hw *hw = &adapter->hw;
19070 +#ifdef HAVE_PCI_ERS
19071 +       struct pci_dev *pdev = adapter->pdev;
19072 +#endif
19073 +       u16 phy_tmp;
19074 +
19075 +#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
19076 +
19077 +       /*
19078 +        * Prevent stats update while adapter is being reset, or if the pci
19079 +        * connection is down.
19080 +        */
19081 +       if (adapter->link_speed == 0)
19082 +               return;
19083 +#ifdef HAVE_PCI_ERS
19084 +       if (pci_channel_offline(pdev))
19085 +               return;
19086 +#endif
19087 +
19088 +       /* read stats registers */
19089 +       adapter->stats.crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
19090 +       adapter->stats.gprc += E1000_READ_REG(hw, E1000_GPRC);
19091 +       adapter->stats.gorc += E1000_READ_REG(hw, E1000_GORCL);
19092 +       E1000_READ_REG(hw, E1000_GORCH); /* clear GORCL */
19093 +       adapter->stats.bprc += E1000_READ_REG(hw, E1000_BPRC);
19094 +       adapter->stats.mprc += E1000_READ_REG(hw, E1000_MPRC);
19095 +       adapter->stats.roc += E1000_READ_REG(hw, E1000_ROC);
19096 +
19097 +       adapter->stats.prc64 += E1000_READ_REG(hw, E1000_PRC64);
19098 +       adapter->stats.prc127 += E1000_READ_REG(hw, E1000_PRC127);
19099 +       adapter->stats.prc255 += E1000_READ_REG(hw, E1000_PRC255);
19100 +       adapter->stats.prc511 += E1000_READ_REG(hw, E1000_PRC511);
19101 +       adapter->stats.prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
19102 +       adapter->stats.prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
19103 +       adapter->stats.symerrs += E1000_READ_REG(hw, E1000_SYMERRS);
19104 +       adapter->stats.sec += E1000_READ_REG(hw, E1000_SEC);
19105 +
19106 +       adapter->stats.mpc += E1000_READ_REG(hw, E1000_MPC);
19107 +       adapter->stats.scc += E1000_READ_REG(hw, E1000_SCC);
19108 +       adapter->stats.ecol += E1000_READ_REG(hw, E1000_ECOL);
19109 +       adapter->stats.mcc += E1000_READ_REG(hw, E1000_MCC);
19110 +       adapter->stats.latecol += E1000_READ_REG(hw, E1000_LATECOL);
19111 +       adapter->stats.dc += E1000_READ_REG(hw, E1000_DC);
19112 +       adapter->stats.rlec += E1000_READ_REG(hw, E1000_RLEC);
19113 +       adapter->stats.xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
19114 +       adapter->stats.xontxc += E1000_READ_REG(hw, E1000_XONTXC);
19115 +       adapter->stats.xoffrxc += E1000_READ_REG(hw, E1000_XOFFRXC);
19116 +       adapter->stats.xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
19117 +       adapter->stats.fcruc += E1000_READ_REG(hw, E1000_FCRUC);
19118 +       adapter->stats.gptc += E1000_READ_REG(hw, E1000_GPTC);
19119 +       adapter->stats.gotc += E1000_READ_REG(hw, E1000_GOTCL);
19120 +       E1000_READ_REG(hw, E1000_GOTCH); /* clear GOTCL */
19121 +       adapter->stats.rnbc += E1000_READ_REG(hw, E1000_RNBC);
19122 +       adapter->stats.ruc += E1000_READ_REG(hw, E1000_RUC);
19123 +       adapter->stats.rfc += E1000_READ_REG(hw, E1000_RFC);
19124 +       adapter->stats.rjc += E1000_READ_REG(hw, E1000_RJC);
19125 +       adapter->stats.tor += E1000_READ_REG(hw, E1000_TORH);
19126 +       adapter->stats.tot += E1000_READ_REG(hw, E1000_TOTH);
19127 +       adapter->stats.tpr += E1000_READ_REG(hw, E1000_TPR);
19128 +
19129 +       adapter->stats.ptc64 += E1000_READ_REG(hw, E1000_PTC64);
19130 +       adapter->stats.ptc127 += E1000_READ_REG(hw, E1000_PTC127);
19131 +       adapter->stats.ptc255 += E1000_READ_REG(hw, E1000_PTC255);
19132 +       adapter->stats.ptc511 += E1000_READ_REG(hw, E1000_PTC511);
19133 +       adapter->stats.ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
19134 +       adapter->stats.ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
19135 +
19136 +       adapter->stats.mptc += E1000_READ_REG(hw, E1000_MPTC);
19137 +       adapter->stats.bptc += E1000_READ_REG(hw, E1000_BPTC);
19138 +
19139 +       adapter->stats.tpt += E1000_READ_REG(hw, E1000_TPT);
19140 +       adapter->stats.colc += E1000_READ_REG(hw, E1000_COLC);
19141 +
19142 +       adapter->stats.algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
19143 +       adapter->stats.rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
19144 +       adapter->stats.tncrs += E1000_READ_REG(hw, E1000_TNCRS);
19145 +       adapter->stats.tsctc += E1000_READ_REG(hw, E1000_TSCTC);
19146 +       adapter->stats.tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);
19147 +
19148 +       adapter->stats.iac += E1000_READ_REG(hw, E1000_IAC);
19149 +       adapter->stats.icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);
19150 +       adapter->stats.icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
19151 +       adapter->stats.icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
19152 +       adapter->stats.ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
19153 +       adapter->stats.ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
19154 +       adapter->stats.ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
19155 +       adapter->stats.ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
19156 +       adapter->stats.icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
19157 +
19158 +       /* Fill out the OS statistics structure */
19159 +       adapter->net_stats.multicast = adapter->stats.mprc;
19160 +       adapter->net_stats.collisions = adapter->stats.colc;
19161 +
19162 +       /* Rx Errors */
19163 +
19164 +       /* RLEC on some newer hardware can be incorrect so build
19165 +        * our own version based on RUC and ROC */
19166 +       adapter->net_stats.rx_errors = adapter->stats.rxerrc +
19167 +               adapter->stats.crcerrs + adapter->stats.algnerrc +
19168 +               adapter->stats.ruc + adapter->stats.roc +
19169 +               adapter->stats.cexterr;
19170 +       adapter->net_stats.rx_length_errors = adapter->stats.ruc +
19171 +                                             adapter->stats.roc;
19172 +       adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
19173 +       adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
19174 +       adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
19175 +
19176 +       /* Tx Errors */
19177 +       adapter->net_stats.tx_errors = adapter->stats.ecol +
19178 +                                      adapter->stats.latecol;
19179 +       adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
19180 +       adapter->net_stats.tx_window_errors = adapter->stats.latecol;
19181 +       adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;
19182 +
19183 +       /* Tx Dropped needs to be maintained elsewhere */
19184 +
19185 +       /* Phy Stats */
19186 +       if (hw->phy.media_type == e1000_media_type_copper) {
19187 +               if ((adapter->link_speed == SPEED_1000) &&
19188 +                  (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
19189 +                       phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
19190 +                       adapter->phy_stats.idle_errors += phy_tmp;
19191 +               }
19192 +       }
19193 +
19194 +       /* Management Stats */
19195 +       adapter->stats.mgptc += E1000_READ_REG(hw, E1000_MGTPTC);
19196 +       adapter->stats.mgprc += E1000_READ_REG(hw, E1000_MGTPRC);
19197 +       adapter->stats.mgpdc += E1000_READ_REG(hw, E1000_MGTPDC);
19198 +}
19199 +
/**
 * igb_msix_other - MSI-X handler for the "other" (non-queue) cause vector
 * @irq:  interrupt number (unused)
 * @data: pointer to the adapter private structure
 *
 * Handles DMA-out-of-sync accounting, VF mailbox events (when SR-IOV VFs
 * are allocated), and link-state changes, then re-arms the cause bits in
 * IMS and this vector's bit in EIMS.
 **/
static irqreturn_t igb_msix_other(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = E1000_READ_REG(hw, E1000_ICR);
	/* reading ICR causes bit 31 of EICR to be cleared */

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	/* Check for a mailbox event */
	if (icr & E1000_ICR_VMMB)
		igb_msg_task(adapter);

	if (!(icr & E1000_ICR_LSC))
		goto no_link_interrupt;
	hw->mac.get_link_status = 1;
	/* guard against interrupt when we're going down */
	if (!test_bit(__IGB_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer, jiffies + 1);

no_link_interrupt:
	/* re-enable the causes this vector services; the VF mailbox cause
	 * is only armed when VFs are actually allocated */
	if (adapter->vfs_allocated_count)
		E1000_WRITE_REG(hw, E1000_IMS,
				E1000_IMS_LSC |
				E1000_IMS_VMMB |
				E1000_IMS_DOUTSYNC);
	else
		E1000_WRITE_REG(hw, E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC);
	E1000_WRITE_REG(hw, E1000_EIMS, adapter->eims_other);

	return IRQ_HANDLED;
}
19235 +
19236 +
19237 +static void igb_write_itr(struct igb_q_vector *q_vector)
19238 +{
19239 +       u32 itr_val = q_vector->itr_val & 0x7FFC;
19240 +
19241 +       if (!q_vector->set_itr)
19242 +               return;
19243 +
19244 +       if (!itr_val)
19245 +               itr_val = 0x4;
19246 +
19247 +       if (q_vector->itr_shift)
19248 +               itr_val |= itr_val << q_vector->itr_shift;
19249 +       else
19250 +               itr_val |= 0x8000000;
19251 +
19252 +       writel(itr_val, q_vector->itr_register);
19253 +       q_vector->set_itr = 0;
19254 +}
19255 +
19256 +static irqreturn_t igb_msix_ring(int irq, void *data)
19257 +{
19258 +       struct igb_q_vector *q_vector = data;
19259 +
19260 +       /* Write the ITR value calculated from the previous interrupt. */
19261 +       igb_write_itr(q_vector);
19262 +
19263 +       napi_schedule(&q_vector->napi);
19264 +
19265 +       return IRQ_HANDLED;
19266 +}
19267 +
19268 +#ifdef IGB_DCA
/**
 * igb_update_dca - retarget a vector's DCA hints at the current CPU
 * @q_vector: vector whose Tx/Rx DCA control registers should be updated
 *
 * Programs the descriptor (and for Rx, header/payload) direct-cache-access
 * hints to the CPU currently running this vector.  Cheap no-op when the
 * CPU has not changed.  82575 encodes the CPU tag in-place while 82576+
 * uses a shifted field, hence the two register layouts.
 **/
static void igb_update_dca(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int cpu = get_cpu();	/* disables preemption until put_cpu() */

	if (q_vector->cpu == cpu)
		goto out_no_update;

	if (q_vector->tx_ring) {
		int q = q_vector->tx_ring->reg_idx;
		u32 dca_txctrl = E1000_READ_REG(hw, E1000_DCA_TXCTRL(q));
		if (hw->mac.type == e1000_82575) {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else {
			/* 82576+: CPU id lives in a shifted field */
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
				      E1000_DCA_TXCTRL_CPUID_SHIFT_82576;
		}
		dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
		E1000_WRITE_REG(hw, E1000_DCA_TXCTRL(q), dca_txctrl);
	}
	if (q_vector->rx_ring) {
		int q = q_vector->rx_ring->reg_idx;
		u32 dca_rxctrl = E1000_READ_REG(hw, E1000_DCA_RXCTRL(q));
		if (hw->mac.type == e1000_82575) {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
				      E1000_DCA_RXCTRL_CPUID_SHIFT_82576;
		}
		/* Rx additionally prefetches header and payload data */
		dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
		E1000_WRITE_REG(hw, E1000_DCA_RXCTRL(q), dca_rxctrl);
	}
	q_vector->cpu = cpu;
out_no_update:
	put_cpu();
}
19312 +
19313 +static void igb_setup_dca(struct igb_adapter *adapter)
19314 +{
19315 +       struct e1000_hw *hw = &adapter->hw;
19316 +       int i;
19317 +
19318 +       if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
19319 +               return;
19320 +
19321 +       /* Always use CB2 mode, difference is masked in the CB driver. */
19322 +       E1000_WRITE_REG(hw, E1000_DCA_CTRL, 2);
19323 +
19324 +       for (i = 0; i < adapter->num_q_vectors; i++) {
19325 +               struct igb_q_vector *q_vector = adapter->q_vector[i];
19326 +               q_vector->cpu = -1;
19327 +               igb_update_dca(q_vector);
19328 +       }
19329 +}
19330 +
/*
 * __igb_notify_dca - per-device callback for a DCA provider event
 * @dev:  driver-model device; its drvdata is our net_device
 * @data: pointer to the event code (unsigned long)
 *
 * DCA_PROVIDER_ADD enables DCA for the adapter (if not already enabled).
 * If the add fails, control deliberately falls through to the REMOVE case
 * so any stale enabled state is torn down.  Always returns E1000_SUCCESS
 * so driver_for_each_device() keeps iterating.
 */
static int __igb_notify_dca(struct device *dev, void *data)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if already enabled, don't do it again */
		if (adapter->flags & IGB_FLAG_DCA_ENABLED)
			break;
		if (dca_add_requester(dev) == E1000_SUCCESS) {
			adapter->flags |= IGB_FLAG_DCA_ENABLED;
			DPRINTK(PROBE, INFO, "DCA enabled\n");
			igb_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
			/* without this a class_device is left
			 * hanging around in the sysfs model */
			dca_remove_requester(dev);
			DPRINTK(PROBE, INFO, "DCA disabled\n");
			adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
			/* 1 = DCA-disabled mode */
			E1000_WRITE_REG(hw, E1000_DCA_CTRL, 1);
		}
		break;
	}

	return E1000_SUCCESS;
}
19364 +
19365 +static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
19366 +                          void *p)
19367 +{
19368 +       int ret_val;
19369 +
19370 +       ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
19371 +                                        __igb_notify_dca);
19372 +
19373 +       return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
19374 +}
19375 +#endif /* IGB_DCA */
19376 +
19377 +static void igb_ping_all_vfs(struct igb_adapter *adapter)
19378 +{
19379 +       struct e1000_hw *hw = &adapter->hw;
19380 +       u32 ping;
19381 +       int i;
19382 +
19383 +       for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
19384 +               ping = E1000_PF_CONTROL_MSG;
19385 +               if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
19386 +                       ping |= E1000_VT_MSGTYPE_CTS;
19387 +               e1000_write_mbx(hw, &ping, 1, i);
19388 +       }
19389 +}
19390 +
19391 +static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
19392 +{
19393 +       
19394 +       struct e1000_hw *hw = &adapter->hw;
19395 +       u32 vmolr = E1000_READ_REG(hw, E1000_VMOLR(vf));
19396 +       struct vf_data_storage *vf_data = &adapter->vf_data[vf];
19397 +
19398 +       vf_data->flags |= ~(IGB_VF_FLAG_UNI_PROMISC |
19399 +                           IGB_VF_FLAG_MULTI_PROMISC);
19400 +       vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
19401 +
19402 +#ifdef IGB_ENABLE_VF_PROMISC
19403 +       if (*msgbuf & E1000_VF_SET_PROMISC_UNICAST) {
19404 +               vmolr |= E1000_VMOLR_ROPE;
19405 +               vf_data->flags |= IGB_VF_FLAG_UNI_PROMISC;
19406 +               *msgbuf &= ~E1000_VF_SET_PROMISC_UNICAST;
19407 +       }
19408 +#endif
19409 +       if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
19410 +               vmolr |= E1000_VMOLR_MPME;
19411 +               *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
19412 +       } else {
19413 +               /*
19414 +                * if we have hashes and we are clearing a multicast promisc
19415 +                * flag we need to write the hashes to the MTA as this step
19416 +                * was previously skipped
19417 +                */
19418 +               if (vf_data->num_vf_mc_hashes > 30) {
19419 +                       vmolr |= E1000_VMOLR_MPME;
19420 +               } else if (vf_data->num_vf_mc_hashes) {
19421 +                       int j;
19422 +                       vmolr |= E1000_VMOLR_ROMPE;
19423 +                       for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
19424 +                               hw->mac.ops.mta_set(hw,
19425 +                                                   vf_data->vf_mc_hashes[j]);
19426 +               }
19427 +       }
19428 +
19429 +       E1000_WRITE_REG(hw, E1000_VMOLR(vf), vmolr);
19430 +
19431 +       /* there are flags left unprocessed, likely not supported */
19432 +       if (*msgbuf & E1000_VT_MSGINFO_MASK)
19433 +               return -EINVAL;
19434 +
19435 +       return 0;
19436 +
19437 +}
19438 +
19439 +static int igb_set_vf_multicasts(struct igb_adapter *adapter,
19440 +                                 u32 *msgbuf, u32 vf)
19441 +{
19442 +       int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
19443 +       u16 *hash_list = (u16 *)&msgbuf[1];
19444 +       struct vf_data_storage *vf_data = &adapter->vf_data[vf];
19445 +       int i;
19446 +
19447 +       /* salt away the number of multicast addresses assigned
19448 +        * to this VF for later use to restore when the PF multi cast
19449 +        * list changes
19450 +        */
19451 +       vf_data->num_vf_mc_hashes = n;
19452 +
19453 +       /* only up to 30 hash values supported */
19454 +       if (n > 30)
19455 +               n = 30;
19456 +
19457 +       /* store the hashes for later use */
19458 +       for (i = 0; i < n; i++)
19459 +               vf_data->vf_mc_hashes[i] = hash_list[i];
19460 +
19461 +       /* Flush and reset the mta with the new values */
19462 +       igb_set_rx_mode(adapter->netdev);
19463 +
19464 +       return 0;
19465 +}
19466 +
19467 +static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
19468 +{
19469 +       struct e1000_hw *hw = &adapter->hw;
19470 +       struct vf_data_storage *vf_data;
19471 +       int i, j;
19472 +
19473 +       for (i = 0; i < adapter->vfs_allocated_count; i++) {
19474 +               u32 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
19475 +               vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
19476 +
19477 +               vf_data = &adapter->vf_data[i];
19478 +
19479 +               if ((vf_data->num_vf_mc_hashes > 30) ||
19480 +                   (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
19481 +                       vmolr |= E1000_VMOLR_MPME;
19482 +               } else if (vf_data->num_vf_mc_hashes) {
19483 +                       vmolr |= E1000_VMOLR_ROMPE;
19484 +                       for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
19485 +                               hw->mac.ops.mta_set(hw,
19486 +                                                   vf_data->vf_mc_hashes[j]);
19487 +               }
19488 +               E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
19489 +       }
19490 +}
19491 +
19492 +static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
19493 +{
19494 +       struct e1000_hw *hw = &adapter->hw;
19495 +       u32 pool_mask, reg, vid;
19496 +       u16 vlan_default;
19497 +       int i;
19498 +
19499 +       pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
19500 +
19501 +       /* Find the vlan filter for this id */
19502 +       for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
19503 +               reg = E1000_READ_REG(hw, E1000_VLVF(i));
19504 +
19505 +               /* remove the vf from the pool */
19506 +               reg &= ~pool_mask;
19507 +
19508 +               /* if pool is empty then remove entry from vfta */
19509 +               if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
19510 +                   (reg & E1000_VLVF_VLANID_ENABLE)) {
19511 +                       reg = 0;
19512 +                       vid = reg & E1000_VLVF_VLANID_MASK;
19513 +                       igb_vfta_set(hw, vid, FALSE);
19514 +               }
19515 +
19516 +               E1000_WRITE_REG(hw, E1000_VLVF(i), reg);
19517 +       }
19518 +
19519 +       adapter->vf_data[vf].vlans_enabled = 0;
19520 +
19521 +       vlan_default = adapter->vf_data[vf].default_vf_vlan_id;
19522 +       if (vlan_default)
19523 +               igb_vlvf_set(adapter, vlan_default, true, vf);
19524 +}
19525 +
/**
 * igb_vlvf_set - add or remove a pool (VF or PF) from a VLVF VLAN filter
 * @adapter: board private structure
 * @vid: VLAN id to add/remove
 * @add: true to join the pool to the filter, false to leave it
 * @vf: pool index — a VF number, or >= vfs_allocated_count for the PF
 *
 * Returns E1000_SUCCESS on success; -1 when the hardware has no VLVF
 * table (pre-82576), VMDq is not in use, or no matching/free filter slot
 * exists.  For VF pools the VF's receive max frame size (RLPML) is grown
 * by 4 bytes when its first VLAN is enabled and shrunk back when its last
 * VLAN is removed, to account for the VLAN tag.
 **/
s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg, i;

	/* The vlvf table only exists on 82576 hardware and newer */
	if (hw->mac.type < e1000_82576)
		return -1;

	/* we only need to do this if VMDq is enabled */
	if (!adapter->VMDQ_queues)
		return -1;

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = E1000_READ_REG(hw, E1000_VLVF(i));
		if ((reg & E1000_VLVF_VLANID_ENABLE) &&
		    vid == (reg & E1000_VLVF_VLANID_MASK))
			break;
	}

	if (add) {
		if (i == E1000_VLVF_ARRAY_SIZE) {
			/* Did not find a matching VLAN ID entry that was
			 * enabled.  Search for a free filter entry, i.e.
			 * one without the enable bit set
			 */
			for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
				reg = E1000_READ_REG(hw, E1000_VLVF(i));
				if (!(reg & E1000_VLVF_VLANID_ENABLE))
					break;
			}
		}
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* Found an enabled/available entry */
			reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

			/* if !enabled we need to set this up in vfta */
			if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
				/* add VID to filter table */
				igb_vfta_set(hw, vid, TRUE);
				reg |= E1000_VLVF_VLANID_ENABLE;
			}
			reg &= ~E1000_VLVF_VLANID_MASK;
			reg |= vid;
			E1000_WRITE_REG(hw, E1000_VLVF(i), reg);

			printk(KERN_INFO "VLAN Enabled for vf %d\n", vf);
			/* do not modify RLPML for PF devices */
			if (vf >= adapter->vfs_allocated_count)
				return E1000_SUCCESS;

			/* first VLAN for this VF: grow RLPML by 4 bytes to
			 * leave room for the VLAN tag */
			if (!adapter->vf_data[vf].vlans_enabled) {
				u32 size;
				reg = E1000_READ_REG(hw, E1000_VMOLR(vf));
				size = reg & E1000_VMOLR_RLPML_MASK;
				size += 4;
				reg &= ~E1000_VMOLR_RLPML_MASK;
				reg |= size;
				E1000_WRITE_REG(hw, E1000_VMOLR(vf), reg);
			}

			adapter->vf_data[vf].vlans_enabled++;
			return E1000_SUCCESS;
		}
	} else {
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* remove vf from the pool */
			reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
			/* if pool is empty then remove entry from vfta */
			if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
				reg = 0;
				igb_vfta_set(hw, vid, FALSE);
			}
			E1000_WRITE_REG(hw, E1000_VLVF(i), reg);

			/* do not modify RLPML for PF devices */
			if (vf >= adapter->vfs_allocated_count)
				return E1000_SUCCESS;

			adapter->vf_data[vf].vlans_enabled--;
			/* last VLAN removed: shrink RLPML back by 4 bytes */
			if (!adapter->vf_data[vf].vlans_enabled) {
				u32 size;
				reg = E1000_READ_REG(hw, E1000_VMOLR(vf));
				size = reg & E1000_VMOLR_RLPML_MASK;
				size -= 4;
				reg &= ~E1000_VMOLR_RLPML_MASK;
				reg |= size;
				E1000_WRITE_REG(hw, E1000_VMOLR(vf), reg);
			}
			return E1000_SUCCESS;
		}
	}
	/* no matching (or free) filter entry found */
	return -1;
}
19621 +
19622 +static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
19623 +{
19624 +       int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
19625 +       int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
19626 +
19627 +       return igb_vlvf_set(adapter, vid, add, vf);
19628 +}
19629 +
/* Return a VF to its post-reset default state: clear handshake flags,
 * restore default offloads and VLANs, and drop its cached multicast list.
 * Called for both hardware-detected VF resets and mailbox reset requests. */
static inline void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
{
	/* clear all flags (including clear-to-send) */
	adapter->vf_data[vf].flags = 0;
	adapter->vf_data[vf].last_nack = jiffies;

	/* reset offloads to defaults */
	igb_set_vmolr(adapter, vf);

	/* reset vlans for device */
	igb_clear_vf_vfta(adapter, vf);

	/* reset multicast table array for vf */
	adapter->vf_data[vf].num_vf_mc_hashes = 0;

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);
}
19648 +
/**
 * igb_vf_reset_msg - complete a VF-initiated reset handshake
 * @adapter: board private structure
 * @vf: VF index that sent E1000_VF_RESET
 *
 * Performs the software-side reset (flags/VLANs/multicast), programs the
 * VF's MAC into its receive-address (RAR) slot, re-enables Tx/Rx for the
 * VF, then replies with ACK plus the VF's MAC address and marks the VF
 * clear-to-send.
 **/
static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
	/* VF RAR entries are allocated from the top of the table down */
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);
	u32 reg, msgbuf[3];
	u8 *addr = (u8 *)(&msgbuf[1]);

	/* process all the same items cleared in a function level reset */
	igb_vf_reset_event(adapter, vf);

	/* set vf mac address */
	igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);

	/* enable transmit and receive for vf */
	reg = E1000_READ_REG(hw, E1000_VFTE);
	E1000_WRITE_REG(hw, E1000_VFTE, reg | (1 << vf));
	reg = E1000_READ_REG(hw, E1000_VFRE);
	E1000_WRITE_REG(hw, E1000_VFRE, reg | (1 << vf));

	adapter->vf_data[vf].flags = IGB_VF_FLAG_CTS;

	/* reply to reset with ack and vf mac address */
	msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
	memcpy(addr, vf_mac, 6);
	e1000_write_mbx(hw, msgbuf, 3, vf);
}
19676 +
19677 +static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
19678 +{
19679 +       unsigned char *addr = (char *)&msg[1];
19680 +       int err = -1;
19681 +
19682 +       if (is_valid_ether_addr(addr))
19683 +               err = igb_set_vf_mac(adapter, vf, addr);
19684 +
19685 +       return err;
19686 +}
19687 +
19688 +static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
19689 +{
19690 +       struct e1000_hw *hw = &adapter->hw;
19691 +       struct vf_data_storage *vf_data = &adapter->vf_data[vf];
19692 +       u32 msg = E1000_VT_MSGTYPE_NACK;
19693 +
19694 +       /* if device isn't clear to send it shouldn't be reading either */
19695 +       if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
19696 +           time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
19697 +               e1000_write_mbx(hw, &msg, 1, vf);
19698 +               vf_data->last_nack = jiffies;
19699 +       }
19700 +}
19701 +
/**
 * igb_rcv_msg_from_vf - read and dispatch a mailbox message from a VF
 * @adapter: board private structure
 * @vf: VF index the message came from
 *
 * Reads the message; handles VF reset requests immediately, NACKs
 * configuration attempts from VFs that have not completed reset
 * (rate-limited), otherwise dispatches on the message type and replies
 * with ACK or NACK plus the clear-to-send bit.
 **/
static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
{
	u32 msgbuf[E1000_VFMAILBOX_SIZE];
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	s32 retval;

	retval = e1000_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);

	if (retval)
		printk(KERN_ERR "Error receiving message from VF\n");

	/* this is a message we already processed, do nothing */
	if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
		return;

	/*
	 * until the vf completes a virtual function reset it should not be
	 * allowed to start any configuration.
	 */

	if (msgbuf[0] == E1000_VF_RESET) {
		igb_vf_reset_msg(adapter, vf);
		return;
	}

	/* not clear-to-send yet: NACK, at most once every two seconds */
	if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
		msgbuf[0] = E1000_VT_MSGTYPE_NACK;
		if (time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
			e1000_write_mbx(hw, msgbuf, 1, vf);
			vf_data->last_nack = jiffies;
		}
		return;
	}

	/* low 16 bits carry the message type; high bits carry arguments */
	switch ((msgbuf[0] & 0xFFFF)) {
	case E1000_VF_SET_MAC_ADDR:
#ifndef IGB_DISABLE_VF_MAC_SET
		retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
#else
		retval = -EINVAL;
#endif
		break;
	case E1000_VF_SET_PROMISC:
		retval = igb_set_vf_promisc(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_MULTICAST:
		retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_LPE:
		retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
		break;
	case E1000_VF_SET_VLAN:
		retval = igb_set_vf_vlan(adapter, msgbuf, vf);
		break;
	default:
		printk(KERN_ERR "Unhandled Msg %8.8x\n", msgbuf[0]);
		retval = -E1000_ERR_MBX;
		break;
	}

	/* notify the VF of the results of what it sent us */
	if (retval)
		msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
	else
		msgbuf[0] |= E1000_VT_MSGTYPE_ACK;

	msgbuf[0] |= E1000_VT_MSGTYPE_CTS;

	e1000_write_mbx(hw, msgbuf, 1, vf);
}
19773 +
19774 +static void igb_msg_task(struct igb_adapter *adapter)
19775 +{
19776 +       struct e1000_hw *hw = &adapter->hw;
19777 +       u32 vf;
19778 +
19779 +       for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
19780 +               /* process any reset requests */
19781 +               if (!e1000_check_for_rst(hw, vf))
19782 +                       igb_vf_reset_event(adapter, vf);
19783 +
19784 +               /* process any messages pending */
19785 +               if (!e1000_check_for_msg(hw, vf))
19786 +                       igb_rcv_msg_from_vf(adapter, vf);
19787 +
19788 +               /* process any acks */
19789 +               if (!e1000_check_for_ack(hw, vf))
19790 +                       igb_rcv_ack_from_vf(adapter, vf);
19791 +       }
19792 +}
19793 +
/**
 *  igb_set_uta - Set unicast filter table address
 *  @adapter: board private structure
 *
 *  The unicast table address is a register array of 32-bit registers.
 *  The table is meant to be used in a way similar to how the MTA is used
 *  however due to certain limitations in the hardware it is necessary to
 *  set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
 *  enable bit to allow vlan tag stripping when promiscuous mode is enabled
 **/
static void igb_set_uta(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* The UTA table only exists on 82576 hardware and newer */
	if (hw->mac.type < e1000_82576)
		return;

	/* we only need to do this if VMDq is enabled */
	if (!adapter->VMDQ_queues)
		return;

	/* set every hash bit so the per-pool ROPE bit alone gates unicast */
	for (i = 0; i < hw->mac.uta_reg_count; i++)
		E1000_WRITE_REG_ARRAY(hw, E1000_UTA, i, ~0);
}
19820 +
/**
 * igb_intr_msi - Interrupt Handler (MSI, single shared vector)
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr_msi(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* read ICR disables interrupts using IAM */
	u32 icr = E1000_READ_REG(hw, E1000_ICR);

	igb_write_itr(q_vector);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	/* link status change (or rx sequence error): kick the watchdog */
	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	/* defer all Tx/Rx work to NAPI context */
	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}
19851 +
/**
 * igb_intr - Legacy Interrupt Handler (shared INTx line)
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
	 * need for the IMC write */
	u32 icr = E1000_READ_REG(hw, E1000_ICR);
	if (!icr)
		return IRQ_NONE;  /* Not our interrupt */

	igb_write_itr(q_vector);

	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	/* link status change (or rx sequence error): kick the watchdog */
	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	/* defer all Tx/Rx work to NAPI context */
	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}
19891 +
19892 +static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector)
19893 +{
19894 +       struct igb_adapter *adapter = q_vector->adapter;
19895 +       struct e1000_hw *hw = &adapter->hw;
19896 +
19897 +       if (adapter->itr_setting & 3) {
19898 +               if (!adapter->msix_entries)
19899 +                       igb_set_itr(adapter);
19900 +               else
19901 +                       igb_update_ring_itr(q_vector);
19902 +       }
19903 +
19904 +       if (!test_bit(__IGB_DOWN, &adapter->state)) {
19905 +               if (adapter->msix_entries)
19906 +                       E1000_WRITE_REG(hw, E1000_EIMS, q_vector->eims_value);
19907 +               else
19908 +                       igb_irq_enable(adapter);
19909 +       }
19910 +}
19911 +
/**
 * igb_poll - NAPI Rx polling callback
 * @napi: napi polling structure
 * @budget: count of how many packets we should handle
 *
 * Cleans this vector's Tx and Rx rings.  While Tx cleanup is incomplete
 * the full budget is claimed so NAPI keeps polling; otherwise, once Rx
 * work falls under budget, polling ends and the interrupt is re-enabled.
 **/
static int igb_poll(struct napi_struct *napi, int budget)
{
	struct igb_q_vector *q_vector = container_of(napi, struct igb_q_vector, napi);
	int tx_clean_complete = 1, work_done = 0;

#ifdef IGB_DCA
	if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
		igb_update_dca(q_vector);
#endif
	if (q_vector->tx_ring)
		tx_clean_complete = igb_clean_tx_irq(q_vector);

	if (q_vector->rx_ring)
		igb_clean_rx_irq_adv(q_vector, &work_done, budget);

	/* Tx not finished: claim the whole budget to stay in polling mode */
	if (!tx_clean_complete)
		work_done = budget;

#ifndef HAVE_NETDEV_NAPI_LIST
	/* if netdev is disabled we need to stop polling */
	if (!netif_running(q_vector->adapter->netdev))
		work_done = 0;

#endif
	/* If not enough Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		igb_ring_irq_enable(q_vector);
	}

	return work_done;
}
19949 +
19950 +#ifdef SIOCSHWTSTAMP
/**
 * igb_systim_to_hwtstamp - convert system time value to hw timestamp
 * @adapter: board private structure
 * @shhwtstamps: timestamp structure to update
 * @regval: unsigned 64bit system time value.
 *
 * We need to convert the system time value stored in the RX/TXSTMP registers
 * into a hwtstamp which can be used by the upper level timestamping functions
 */
static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
                                   struct skb_shared_hwtstamps *shhwtstamps,
                                   u64 regval)
{
	u64 ns;

	/* cycle counter -> nanoseconds, then keep the hw/sys comparison
	 * up to date so syststamp below stays accurate */
	ns = timecounter_cyc2time(&adapter->clock, regval);
	timecompare_update(&adapter->compare, ns);
	memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
	shhwtstamps->hwtstamp = ns_to_ktime(ns);
	shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
}
19972 +
/**
 * igb_tx_hwtstamp - utility function which checks for TX time stamp
 * @adapter: board private structure
 * @skb: packet that was just sent
 *
 * If we were asked to do hardware stamping and such a time stamp is
 * available, then it must have been for this skb here because we only
 * allow only one such packet into the queue.
 */
static void igb_tx_hwtstamp(struct igb_adapter *adapter, struct sk_buff *skb)
{
	union skb_shared_tx *shtx = skb_tx(skb);
	struct e1000_hw *hw = &adapter->hw;
	struct skb_shared_hwtstamps shhwtstamps;
	u64 regval;

	/* if skb does not support hw timestamp or TX stamp not valid exit */
	if (likely(!shtx->hardware) ||
	    !(E1000_READ_REG(hw, E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
		return;

	/* assemble the 64-bit raw timestamp from the two 32-bit registers */
	regval = E1000_READ_REG(hw, E1000_TXSTMPL);
	regval |= (u64)E1000_READ_REG(hw, E1000_TXSTMPH) << 32;

	igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
	skb_tstamp_tx(skb, &shhwtstamps);
}
20000 +
20001 +#endif
20002 +/**
20003 + * igb_clean_tx_irq - Reclaim resources after transmit completes
20004 + * @q_vector: pointer to q_vector containing needed info
20005 + * returns TRUE if ring is completely cleaned
20006 + **/
20007 +static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
20008 +{
20009 +       struct igb_adapter *adapter = q_vector->adapter;
20010 +       struct igb_ring *tx_ring = q_vector->tx_ring;
20011 +       struct net_device *netdev = adapter->netdev;
20012 +       struct e1000_hw *hw = &adapter->hw;
20013 +       struct igb_buffer *buffer_info;
20014 +       struct sk_buff *skb;
20015 +       union e1000_adv_tx_desc *tx_desc, *eop_desc;
20016 +       unsigned int total_bytes = 0, total_packets = 0;
20017 +       unsigned int i, eop, count = 0;
20018 +       bool cleaned = false;
20019 +
20020 +       i = tx_ring->next_to_clean;
20021 +       eop = tx_ring->buffer_info[i].next_to_watch;
20022 +       eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
20023 +
20024 +       while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
20025 +              (count < tx_ring->count)) {
20026 +               for (cleaned = false; !cleaned; count++) {
20027 +                       tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
20028 +                       buffer_info = &tx_ring->buffer_info[i];
20029 +                       cleaned = (i == eop);
20030 +                       skb = buffer_info->skb;
20031 +
20032 +                       if (skb) {
20033 +#ifdef NETIF_F_TSO
20034 +                               unsigned int segs, bytecount;
20035 +                               /* gso_segs is currently only valid for tcp */
20036 +                               segs = skb_shinfo(skb)->gso_segs ?: 1;
20037 +                               /* multiply data chunks by size of headers */
20038 +                               bytecount = ((segs - 1) * skb_headlen(skb)) +
20039 +                                           skb->len;
20040 +                               total_packets += segs;
20041 +                               total_bytes += bytecount;
20042 +#else
20043 +                               total_packets++;
20044 +                               total_bytes += skb->len;
20045 +#endif
20046 +#ifdef SIOCSHWTSTAMP
20047 +                               igb_tx_hwtstamp(adapter, skb);
20048 +#endif
20049 +                       }
20050 +
20051 +                       igb_unmap_and_free_tx_resource(tx_ring->pdev,
20052 +                                                      buffer_info);
20053 +                       tx_desc->wb.status = 0;
20054 +
20055 +                       i++;
20056 +                       if (i == tx_ring->count)
20057 +                               i = 0;
20058 +               }
20059 +               eop = tx_ring->buffer_info[i].next_to_watch;
20060 +               eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
20061 +       }
20062 +
20063 +       tx_ring->next_to_clean = i;
20064 +
20065 +       if (unlikely(count &&
20066 +                    netif_carrier_ok(netdev) &&
20067 +                    IGB_DESC_UNUSED(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
20068 +               /* Make sure that anybody stopping the queue after this
20069 +                * sees the new next_to_clean.
20070 +                */
20071 +               smp_mb();
20072 +               if (netif_is_multiqueue(netdev)) {
20073 +                       if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
20074 +                           !(test_bit(__IGB_DOWN, &adapter->state))) {
20075 +                               netif_wake_subqueue(netdev, tx_ring->queue_index);
20076 +                               ++tx_ring->restart_queue;
20077 +                       }
20078 +               } else {
20079 +                       if (netif_queue_stopped(netdev) &&
20080 +                           !(test_bit(__IGB_DOWN, &adapter->state))) {
20081 +                               netif_wake_queue(netdev);
20082 +                               ++tx_ring->restart_queue;
20083 +                       }
20084 +               }
20085 +       }
20086 +
20087 +       if (tx_ring->detect_tx_hung) {
20088 +               /* Detect a transmit hang in hardware, this serializes the
20089 +                * check with the clearing of time_stamp and movement of i */
20090 +               tx_ring->detect_tx_hung = FALSE;
20091 +               if (tx_ring->buffer_info[i].time_stamp &&
20092 +                   time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
20093 +                              (adapter->tx_timeout_factor * HZ))
20094 +                   && !(E1000_READ_REG(hw, E1000_STATUS) &
20095 +                        E1000_STATUS_TXOFF)) {
20096 +
20097 +                       /* detected Tx unit hang */
20098 +                       DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
20099 +                                       "  Tx Queue             <%d>\n"
20100 +                                       "  TDH                  <%x>\n"
20101 +                                       "  TDT                  <%x>\n"
20102 +                                       "  next_to_use          <%x>\n"
20103 +                                       "  next_to_clean        <%x>\n"
20104 +                                       "buffer_info[next_to_clean]\n"
20105 +                                       "  time_stamp           <%lx>\n"
20106 +                                       "  next_to_watch        <%x>\n"
20107 +                                       "  jiffies              <%lx>\n"
20108 +                                       "  desc.status          <%x>\n",
20109 +                               tx_ring->queue_index,
20110 +                               readl(tx_ring->head),
20111 +                               readl(tx_ring->tail),
20112 +                               tx_ring->next_to_use,
20113 +                               tx_ring->next_to_clean,
20114 +                               tx_ring->buffer_info[eop].time_stamp,
20115 +                               eop,
20116 +                               jiffies,
20117 +                               eop_desc->wb.status);
20118 +                       if (netif_is_multiqueue(netdev))
20119 +                               netif_stop_subqueue(netdev,
20120 +                                                   tx_ring->queue_index);
20121 +                       else
20122 +                               netif_stop_queue(netdev);
20123 +               }
20124 +       }
20125 +       tx_ring->total_bytes += total_bytes;
20126 +       tx_ring->total_packets += total_packets;
20127 +       tx_ring->stats.bytes += total_bytes;
20128 +       tx_ring->stats.packets += total_packets;
20129 +       adapter->net_stats.tx_bytes += total_bytes;
20130 +       adapter->net_stats.tx_packets += total_packets;
20131 +       return (count < tx_ring->count);
20132 +}
20133 +
20134 +#ifdef IGB_LRO
20135 +/**
20136 + * igb_get_skb_hdr - helper function for LRO header processing
20137 + * @skb: pointer to sk_buff to be added to LRO packet
20138 + * @iphdr: pointer to ip header structure
20139 + * @tcph: pointer to tcp header structure
20140 + * @hdr_flags: pointer to header flags
20141 + * @priv: pointer to the receive descriptor for the current sk_buff
20142 + **/
20143 +static int igb_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph,
20144 +                           u64 *hdr_flags, void *priv)
20145 +{
20146 +       union e1000_adv_rx_desc *rx_desc = priv;
20147 +       u16 pkt_type = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info &
20148 +                      (E1000_RXDADV_PKTTYPE_IPV4 | E1000_RXDADV_PKTTYPE_TCP);
20149 +
20150 +       /* Verify that this is a valid IPv4 TCP packet */
20151 +       if (pkt_type != (E1000_RXDADV_PKTTYPE_IPV4 |
20152 +                         E1000_RXDADV_PKTTYPE_TCP))
20153 +               return -1;
20154 +
20155 +       /* Set network headers */
20156 +       skb_reset_network_header(skb);
20157 +       skb_set_transport_header(skb, ip_hdrlen(skb));
20158 +       *iphdr = ip_hdr(skb);
20159 +       *tcph = tcp_hdr(skb);
20160 +       *hdr_flags = LRO_IPV4 | LRO_TCP;
20161 +
20162 +       return 0;
20163 +
20164 +}
20165 +
20166 +#endif /* IGB_LRO */
20167 +/**
20168 + * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
20169 + * @adapter: address of board private structure
20170 + **/
20171 +int igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
20172 +{
20173 +       struct net_device *netdev = pci_get_drvdata(rx_ring->pdev);
20174 +       union e1000_adv_rx_desc *rx_desc;
20175 +       struct igb_buffer *buffer_info;
20176 +       struct sk_buff *skb;
20177 +       unsigned int i;
20178 +       int bufsz, err = 0;
20179 +
20180 +       i = rx_ring->next_to_use;
20181 +       buffer_info = &rx_ring->buffer_info[i];
20182 +
20183 +#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
20184 +       if (rx_ring->rx_ps_hdr_size)
20185 +               bufsz = rx_ring->rx_ps_hdr_size;
20186 +       else
20187 +               bufsz = rx_ring->rx_buffer_len;
20188 +#else
20189 +       bufsz = rx_ring->rx_buffer_len;
20190 +#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
20191 +
20192 +       while (cleaned_count--) {
20193 +               rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
20194 +
20195 +#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
20196 +               if (rx_ring->rx_ps_hdr_size && !buffer_info->page_dma) {
20197 +                       if (!buffer_info->page) {
20198 +                               buffer_info->page = netdev_alloc_page(netdev);
20199 +                               if (!buffer_info->page) {
20200 +                                       err = -ENOMEM;
20201 +                                       goto no_buffers;
20202 +                               }
20203 +                               buffer_info->page_offset = 0;
20204 +                       } else {
20205 +                               buffer_info->page_offset ^= PAGE_SIZE / 2;
20206 +                       }
20207 +                       buffer_info->page_dma =
20208 +                               pci_map_page(rx_ring->pdev, buffer_info->page,
20209 +                                            buffer_info->page_offset,
20210 +                                            rx_ring->rx_buffer_len,
20211 +                                            PCI_DMA_FROMDEVICE);
20212 +               }
20213 +#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
20214 +
20215 +               if (!buffer_info->skb) {
20216 +                       skb = netdev_alloc_skb(netdev, bufsz + NET_IP_ALIGN);
20217 +                       if (!skb) {
20218 +                               err = -ENOMEM;
20219 +                               goto no_buffers;
20220 +                       }
20221 +
20222 +                       /* Make buffer alignment 2 beyond a 16 byte boundary
20223 +                        * this will result in a 16 byte aligned IP header after
20224 +                        * the 14 byte MAC header is removed
20225 +                        */
20226 +                       skb_reserve(skb, NET_IP_ALIGN);
20227 +
20228 +                       buffer_info->skb = skb;
20229 +               }
20230 +               if (!buffer_info->dma)
20231 +                       buffer_info->dma = pci_map_single(rx_ring->pdev,
20232 +                                                         buffer_info->skb->data,
20233 +                                                         bufsz,
20234 +                                                         PCI_DMA_FROMDEVICE);
20235 +               /* Refresh the desc even if buffer_addrs didn't change because
20236 +                * each write-back erases this info. */
20237 +#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
20238 +               if (rx_ring->rx_ps_hdr_size) {
20239 +                       rx_desc->read.pkt_addr =
20240 +                            cpu_to_le64(buffer_info->page_dma);
20241 +                       rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
20242 +               } else {
20243 +                       rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
20244 +                       rx_desc->read.hdr_addr = 0;
20245 +               }
20246 +#else
20247 +               rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
20248 +               rx_desc->read.hdr_addr = 0;
20249 +#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
20250 +
20251 +               i++;
20252 +               if (i == rx_ring->count)
20253 +                       i = 0;
20254 +               buffer_info = &rx_ring->buffer_info[i];
20255 +       }
20256 +
20257 +no_buffers:
20258 +       if (rx_ring->next_to_use != i) {
20259 +               rx_ring->next_to_use = i;
20260 +               if (i == 0)
20261 +                       i = (rx_ring->count - 1);
20262 +               else
20263 +                       i--;
20264 +
20265 +               /* Force memory writes to complete before letting h/w
20266 +                * know there are new descriptors to fetch.  (Only
20267 +                * applicable for weak-ordered memory model archs,
20268 +                * such as IA-64). */
20269 +               wmb();
20270 +               writel(i, rx_ring->tail);
20271 +       }
20272 +
20273 +       return err;
20274 +}
20275 +
20276 +/**
20277 + * igb_receive_skb - helper function to handle rx indications
20278 + * @ring: pointer to receive ring receiving this packet
20279 + * @status: descriptor status field as written by hardware
20280 + * @rx_desc: receive descriptor containing vlan and type information.
20281 + * @skb: pointer to sk_buff to be indicated to stack
20282 + **/
20283 +static void igb_receive_skb(struct igb_ring *ring, u8 status,
20284 +                            union e1000_adv_rx_desc *rx_desc,
20285 +                            struct sk_buff *skb)
20286 +{
20287 +       struct igb_q_vector *q_vector = ring->q_vector;
20288 +       struct igb_adapter *adapter = q_vector->adapter;
20289 +       bool vlan_extracted = (adapter->vlgrp && (status & E1000_RXD_STAT_VP));
20290 +
20291 +#ifdef IGB_LRO
20292 +       if (adapter->netdev->features & NETIF_F_LRO &&
20293 +           skb->ip_summed == CHECKSUM_UNNECESSARY) {
20294 +               if (vlan_extracted)
20295 +                       lro_vlan_hwaccel_receive_skb(&ring->lro_mgr, skb,
20296 +                                                    adapter->vlgrp,
20297 +                                                    le16_to_cpu(rx_desc->wb.upper.vlan),
20298 +                                                    rx_desc);
20299 +               else
20300 +                       lro_receive_skb(&ring->lro_mgr, skb, rx_desc);
20301 +               ring->lro_used = TRUE;
20302 +       } else {
20303 +#endif
20304 +               if (vlan_extracted)
20305 +                       vlan_gro_receive(&q_vector->napi, adapter->vlgrp,
20306 +                                        le16_to_cpu(rx_desc->wb.upper.vlan),
20307 +                                        skb);
20308 +               else
20309 +
20310 +                       napi_gro_receive(&q_vector->napi, skb);
20311 +#ifdef IGB_LRO
20312 +       }
20313 +#endif
20314 +}
20315 +
20316 +static inline void igb_rx_checksum_adv(struct igb_ring *ring,
20317 +                                       u32 status_err, struct sk_buff *skb)
20318 +{
20319 +       struct igb_adapter *adapter = ring->q_vector->adapter;
20320 +       skb->ip_summed = CHECKSUM_NONE;
20321 +
20322 +       /* Ignore Checksum bit is set or checksum is disabled through ethtool */
20323 +       if (!ring->rx_csum || (status_err & E1000_RXD_STAT_IXSM))
20324 +               return;
20325 +
20326 +       /* TCP/UDP checksum error bit is set */
20327 +       if (status_err &
20328 +           (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
20329 +               /*
20330 +                * work around errata with sctp packets where the TCPE aka
20331 +                * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
20332 +                * packets, (aka let the stack check the crc32c)
20333 +                */
20334 +               if (!((adapter->hw.mac.type >= e1000_82576) &&
20335 +                       (skb->len == 60)))
20336 +                       ring->hw_csum_err++;
20337 +
20338 +               /* let the stack verify checksum errors */
20339 +               return;
20340 +       }
20341 +       /* It must be a TCP or UDP packet with a valid checksum */
20342 +       if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
20343 +               skb->ip_summed = CHECKSUM_UNNECESSARY;
20344 +
20345 +       ring->hw_csum_good++;
20346 +}
20347 +
20348 +#ifdef SIOCSHWTSTAMP
20349 +static inline void igb_rx_hwtstamp(struct igb_adapter *adapter, u32 staterr,
20350 +                                   struct sk_buff *skb)
20351 +{
20352 +       struct e1000_hw *hw = &adapter->hw;
20353 +       u64 regval;
20354 +
20355 +       /*
20356 +        * If this bit is set, then the RX registers contain the time stamp. No
20357 +        * other packet will be time stamped until we read these registers, so
20358 +        * read the registers to make them available again. Because only one
20359 +        * packet can be time stamped at a time, we know that the register
20360 +        * values must belong to this one here and therefore we don't need to
20361 +        * compare any of the additional attributes stored for it.
20362 +        *
20363 +        * If nothing went wrong, then it should have a skb_shared_tx that we
20364 +        * can turn into a skb_shared_hwtstamps.
20365 +        */
20366 +       if (likely(!(staterr & E1000_RXDADV_STAT_TS)))
20367 +               return;
20368 +       if(!(E1000_READ_REG(hw, E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
20369 +               return;
20370 +
20371 +       regval = E1000_READ_REG(hw, E1000_RXSTMPL);
20372 +       regval |= (u64)E1000_READ_REG(hw, E1000_RXSTMPH) << 32;
20373 +
20374 +       igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
20375 +}
20376 +#endif
20377 +#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
20378 +static inline u16 igb_get_hlen(struct igb_ring *rx_ring,
20379 +                               union e1000_adv_rx_desc *rx_desc)
20380 +{
20381 +       /* HW will not DMA in data larger than the given buffer, even if it
20382 +        * parses the (NFS, of course) header to be larger.  In that case, it
20383 +        * fills the header buffer and spills the rest into the page.
20384 +        */
20385 +       u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info) &
20386 +                  E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
20387 +       if (hlen > rx_ring->rx_ps_hdr_size)
20388 +               hlen = rx_ring->rx_ps_hdr_size;
20389 +       return hlen;
20390 +}
20391 +
20392 +#endif
20393 +static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
20394 +                                 int *work_done, int budget)
20395 +{
20396 +       struct igb_adapter *adapter = q_vector->adapter;
20397 +       struct net_device *netdev = adapter->netdev;
20398 +       struct igb_ring *rx_ring = q_vector->rx_ring;
20399 +       struct pci_dev *pdev = rx_ring->pdev;
20400 +       union e1000_adv_rx_desc *rx_desc , *next_rxd;
20401 +       struct igb_buffer *buffer_info , *next_buffer;
20402 +       struct sk_buff *skb;
20403 +       bool cleaned = FALSE;
20404 +       int cleaned_count = 0;
20405 +       unsigned int total_bytes = 0, total_packets = 0;
20406 +       unsigned int i;
20407 +       u32 staterr;
20408 +       u16 length;
20409 +
20410 +       i = rx_ring->next_to_clean;
20411 +       buffer_info = &rx_ring->buffer_info[i];
20412 +       rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
20413 +       staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
20414 +
20415 +       while (staterr & E1000_RXD_STAT_DD) {
20416 +               if (*work_done >= budget)
20417 +                       break;
20418 +               (*work_done)++;
20419 +
20420 +               skb = buffer_info->skb;
20421 +               prefetch(skb->data - NET_IP_ALIGN);
20422 +               buffer_info->skb = NULL;
20423 +
20424 +               i++;
20425 +               if (i == rx_ring->count)
20426 +                       i = 0;
20427 +
20428 +               next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
20429 +               prefetch(next_rxd);
20430 +               next_buffer = &rx_ring->buffer_info[i];
20431 +
20432 +               length = le16_to_cpu(rx_desc->wb.upper.length);
20433 +               cleaned = TRUE;
20434 +               cleaned_count++;
20435 +
20436 +#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
20437 +               pci_unmap_single(pdev, buffer_info->dma,
20438 +                                rx_ring->rx_buffer_len,
20439 +                                PCI_DMA_FROMDEVICE);
20440 +               buffer_info->dma = 0;
20441 +               skb_put(skb, length);
20442 +
20443 +#else
20444 +               if (!rx_ring->rx_ps_hdr_size) {
20445 +                       pci_unmap_single(pdev, buffer_info->dma,
20446 +                                        rx_ring->rx_buffer_len,
20447 +                                        PCI_DMA_FROMDEVICE);
20448 +                       buffer_info->dma = 0;
20449 +                       skb_put(skb, length);
20450 +                       goto send_up;
20451 +               }
20452 +
20453 +               if (buffer_info->dma) {
20454 +                       u16 hlen = igb_get_hlen(rx_ring, rx_desc);
20455 +                       pci_unmap_single(pdev, buffer_info->dma,
20456 +                                        rx_ring->rx_ps_hdr_size,
20457 +                                        PCI_DMA_FROMDEVICE);
20458 +                       buffer_info->dma = 0;
20459 +                       skb_put(skb, hlen);
20460 +               }
20461 +
20462 +               if (length) {
20463 +                       pci_unmap_page(pdev, buffer_info->page_dma,
20464 +                                      rx_ring->rx_buffer_len,
20465 +                                      PCI_DMA_FROMDEVICE);
20466 +                       buffer_info->page_dma = 0;
20467 +
20468 +                       skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
20469 +                                          buffer_info->page,
20470 +                                          buffer_info->page_offset,
20471 +                                          length);
20472 +
20473 +                       if (page_count(buffer_info->page) != 1)
20474 +                               buffer_info->page = NULL;
20475 +                       else
20476 +                               get_page(buffer_info->page);
20477 +
20478 +                       skb->len += length;
20479 +                       skb->data_len += length;
20480 +                       skb->truesize += length;
20481 +               }
20482 +
20483 +               if (!(staterr & E1000_RXD_STAT_EOP)) {
20484 +                       buffer_info->skb = next_buffer->skb;
20485 +                       buffer_info->dma = next_buffer->dma;
20486 +                       next_buffer->skb = skb;
20487 +                       next_buffer->dma = 0;
20488 +                       goto next_desc;
20489 +               }
20490 +send_up:
20491 +#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
20492 +               if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
20493 +                       dev_kfree_skb_irq(skb);
20494 +                       goto next_desc;
20495 +               }
20496 +
20497 +#ifdef SIOCSHWTSTAMP
20498 +               igb_rx_hwtstamp(adapter, staterr, skb);
20499 +#endif
20500 +               total_bytes += skb->len;
20501 +               total_packets++;
20502 +
20503 +               igb_rx_checksum_adv(rx_ring, staterr, skb);
20504 +
20505 +#ifndef ETH_TYPE_TRANS_SETS_DEV
20506 +               skb->dev = netdev;
20507 +#endif
20508 +               skb->protocol = eth_type_trans(skb, netdev);
20509 +
20510 +               igb_receive_skb(rx_ring, staterr, rx_desc, skb);
20511 +
20512 +               netdev->last_rx = jiffies;
20513 +
20514 +next_desc:
20515 +               rx_desc->wb.upper.status_error = 0;
20516 +
20517 +               /* return some buffers to hardware, one at a time is too slow */
20518 +               if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
20519 +                       if (igb_alloc_rx_buffers_adv(rx_ring, cleaned_count))
20520 +                               adapter->alloc_rx_buff_failed++;
20521 +                       cleaned_count = 0;
20522 +               }
20523 +
20524 +               /* use prefetched values */
20525 +               rx_desc = next_rxd;
20526 +               buffer_info = next_buffer;
20527 +               staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
20528 +       }
20529 +
20530 +       rx_ring->next_to_clean = i;
20531 +       cleaned_count = IGB_DESC_UNUSED(rx_ring);
20532 +
20533 +#ifdef IGB_LRO
20534 +       if (rx_ring->lro_used) {
20535 +               lro_flush_all(&rx_ring->lro_mgr);
20536 +               rx_ring->lro_used = FALSE;
20537 +       }
20538 +#endif
20539 +
20540 +       if (cleaned_count)
20541 +               if (igb_alloc_rx_buffers_adv(rx_ring, cleaned_count))
20542 +                               adapter->alloc_rx_buff_failed++;
20543 +
20544 +       rx_ring->total_packets += total_packets;
20545 +       rx_ring->total_bytes += total_bytes;
20546 +       rx_ring->stats.packets += total_packets;
20547 +       rx_ring->stats.bytes += total_bytes;
20548 +       adapter->net_stats.rx_bytes += total_bytes;
20549 +       adapter->net_stats.rx_packets += total_packets;
20550 +       return cleaned;
20551 +}
20552 +
20553 +#ifdef SIOCGMIIPHY
20554 +/**
20555 + * igb_mii_ioctl -
20556 + * @netdev:
20557 + * @ifreq:
20558 + * @cmd:
20559 + **/
20560 +static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
20561 +{
20562 +       struct igb_adapter *adapter = netdev_priv(netdev);
20563 +       struct mii_ioctl_data *data = if_mii(ifr);
20564 +
20565 +       if (adapter->hw.phy.media_type != e1000_media_type_copper)
20566 +               return -EOPNOTSUPP;
20567 +
20568 +       switch (cmd) {
20569 +       case SIOCGMIIPHY:
20570 +               data->phy_id = adapter->hw.phy.addr;
20571 +               break;
20572 +       case SIOCGMIIREG:
20573 +               if (!capable(CAP_NET_ADMIN))
20574 +                       return -EPERM;
20575 +               if (e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
20576 +                                  &data->val_out))
20577 +                       return -EIO;
20578 +               break;
20579 +       case SIOCSMIIREG:
20580 +       default:
20581 +               return -EOPNOTSUPP;
20582 +       }
20583 +       return E1000_SUCCESS;
20584 +}
20585 +
20586 +#endif
20587 +#ifdef SIOCSHWTSTAMP
20588 +/**
20589 + * igb_hwtstamp_ioctl - control hardware time stamping
20590 + * @netdev:
20591 + * @ifreq:
20592 + * @cmd:
20593 + *
20594 + * Outgoing time stamping can be enabled and disabled. Play nice and
20595 + * disable it when requested, although it shouldn't cause any overhead
20596 + * when no packet needs it. At most one packet in the queue may be
20597 + * marked for time stamping, otherwise it would be impossible to tell
20598 + * for sure to which packet the hardware time stamp belongs.
20599 + *
20600 + * Incoming time stamping has to be configured via the hardware
20601 + * filters. Not all combinations are supported, in particular event
20602 + * type has to be specified. Matching the kind of event packet is
20603 + * not supported, with the exception of "all V2 events regardless of
20604 + * level 2 or 4".
20605 + *
20606 + **/
20607 +static int igb_hwtstamp_ioctl(struct net_device *netdev,
20608 +                             struct ifreq *ifr, int cmd)
20609 +{
20610 +       struct igb_adapter *adapter = netdev_priv(netdev);
20611 +       struct e1000_hw *hw = &adapter->hw;
20612 +       struct hwtstamp_config config;
20613 +       u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
20614 +       u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
20615 +       u32 tsync_rx_cfg = 0;
20616 +       bool is_l4 = false;
20617 +       bool is_l2 = false;
20618 +       u32 regval;
20619 +
20620 +       if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
20621 +               return -EFAULT;
20622 +
20623 +       /* reserved for future extensions */
20624 +       if (config.flags)
20625 +               return -EINVAL;
20626 +
20627 +       switch (config.tx_type) {
20628 +       case HWTSTAMP_TX_OFF:
20629 +               tsync_tx_ctl = 0;
20630 +       case HWTSTAMP_TX_ON:
20631 +               break;
20632 +       default:
20633 +               return -ERANGE;
20634 +       }
20635 +
20636 +       switch (config.rx_filter) {
20637 +       case HWTSTAMP_FILTER_NONE:
20638 +               tsync_rx_ctl = 0;
20639 +               break;
20640 +       case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
20641 +       case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
20642 +       case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
20643 +       case HWTSTAMP_FILTER_ALL:
20644 +               /*
20645 +                * register TSYNCRXCFG must be set, therefore it is not
20646 +                * possible to time stamp both Sync and Delay_Req messages
20647 +                * => fall back to time stamping all packets
20648 +                */
20649 +               tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
20650 +               config.rx_filter = HWTSTAMP_FILTER_ALL;
20651 +               break;
20652 +       case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
20653 +               tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
20654 +               tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
20655 +               is_l4 = true;
20656 +               break;
20657 +       case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
20658 +               tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
20659 +               tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
20660 +               is_l4 = true;
20661 +               break;
20662 +       case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
20663 +       case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
20664 +               tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
20665 +               tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
20666 +               is_l2 = true;
20667 +               is_l4 = true;
20668 +               config.rx_filter = HWTSTAMP_FILTER_SOME;
20669 +               break;
20670 +       case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
20671 +       case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
20672 +               tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
20673 +               tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
20674 +               is_l2 = true;
20675 +               is_l4 = true;
20676 +               config.rx_filter = HWTSTAMP_FILTER_SOME;
20677 +               break;
20678 +       case HWTSTAMP_FILTER_PTP_V2_EVENT:
20679 +       case HWTSTAMP_FILTER_PTP_V2_SYNC:
20680 +       case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
20681 +               tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
20682 +               config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
20683 +               is_l2 = true;
20684 +               break;
20685 +       default:
20686 +               return -ERANGE;
20687 +       }
20688 +
20689 +       if (hw->mac.type == e1000_82575) {
20690 +               if (tsync_rx_ctl | tsync_tx_ctl)
20691 +                       return -EINVAL;
20692 +               return 0;
20693 +       }
20694 +
20695 +       /* enable/disable TX */
20696 +       regval = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
20697 +       regval &= ~E1000_TSYNCTXCTL_ENABLED;
20698 +       regval |= tsync_tx_ctl;
20699 +       E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, regval);
20700 +
20701 +       /* enable/disable RX */
20702 +       regval = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
20703 +       regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
20704 +       regval |= tsync_rx_ctl;
20705 +       E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, regval);
20706 +
20707 +       /* define which PTP packets are time stamped */
20708 +       E1000_WRITE_REG(hw, E1000_TSYNCRXCFG, tsync_rx_cfg);
20709 +
20710 +       /* define ethertype filter for timestamped packets */
20711 +       if (is_l2)
20712 +               E1000_WRITE_REG(hw, E1000_ETQF(3),
20713 +                               (E1000_ETQF_FILTER_ENABLE | /* enable filter */
20714 +                                E1000_ETQF_1588 | /* enable timestamping */
20715 +                                ETH_P_1588));     /* 1588 eth protocol type */
20716 +       else
20717 +               E1000_WRITE_REG(hw, E1000_ETQF(3), 0);
20718 +
20719 +#define PTP_PORT 319
20720 +       /* L4 Queue Filter[3]: filter by destination port and protocol */
20721 +       if (is_l4) {
20722 +               u32 ftqf = (IPPROTO_UDP /* UDP */
20723 +                       | E1000_FTQF_VF_BP /* VF not compared */
20724 +                       | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
20725 +                       | E1000_FTQF_MASK); /* mask all inputs */
20726 +               ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */
20727 +
20728 +               E1000_WRITE_REG(hw, E1000_IMIR(3), htons(PTP_PORT));
20729 +               E1000_WRITE_REG(hw, E1000_IMIREXT(3),
20730 +                               (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
20731 +               if (hw->mac.type == e1000_82576) {
20732 +                       /* enable source port check */
20733 +                       E1000_WRITE_REG(hw, E1000_SPQF(3), htons(PTP_PORT));
20734 +                       ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
20735 +               }
20736 +               E1000_WRITE_REG(hw, E1000_FTQF(3), ftqf);
20737 +       } else {
20738 +               E1000_WRITE_REG(hw, E1000_FTQF(3), E1000_FTQF_MASK);
20739 +       }
20740 +       E1000_WRITE_FLUSH(hw);
20741 +
20742 +       adapter->hwtstamp_config = config;
20743 +
20744 +       /* clear TX/RX time stamp registers, just to be sure */
20745 +       regval = E1000_READ_REG(hw, E1000_TXSTMPH);
20746 +       regval = E1000_READ_REG(hw, E1000_RXSTMPH);
20747 +
20748 +       return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
20749 +               -EFAULT : 0;
20750 +}
20751 +
20752 +#endif
20753 +/**
20754 + * igb_ioctl - handle device-specific ioctl requests
20755 + * @netdev: pointer to the network interface device structure
20756 + * @ifr: pointer to the interface request structure
20757 + * @cmd: ioctl command to execute
20758 + **/
20759 +static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
20760 +{
20761 +       switch (cmd) {
20762 +#ifdef SIOCGMIIPHY
20763 +       case SIOCGMIIPHY:
20764 +       case SIOCGMIIREG:
20765 +       case SIOCSMIIREG:
20766 +               return igb_mii_ioctl(netdev, ifr, cmd);
20767 +#endif
20768 +#ifdef SIOCSHWTSTAMP
20769 +       case SIOCSHWTSTAMP:
20770 +               return igb_hwtstamp_ioctl(netdev, ifr, cmd);
20771 +#endif
20772 +#ifdef ETHTOOL_OPS_COMPAT
20773 +       case SIOCETHTOOL:
20774 +               return ethtool_ioctl(ifr);
20775 +#endif
20776 +       default:
20777 +               return -EOPNOTSUPP;
20778 +       }
20779 +}
20780 +
20781 +s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
20782 +{
20783 +       struct igb_adapter *adapter = hw->back;
20784 +       u16 cap_offset;
20785 +
20786 +       cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
20787 +       if (!cap_offset)
20788 +               return -E1000_ERR_CONFIG;
20789 +
20790 +       pci_read_config_word(adapter->pdev, cap_offset + reg, value);
20791 +
20792 +       return E1000_SUCCESS;
20793 +}
20794 +
20795 +s32 e1000_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
20796 +{
20797 +       struct igb_adapter *adapter = hw->back;
20798 +       u16 cap_offset;
20799 +
20800 +       cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
20801 +       if (!cap_offset)
20802 +               return -E1000_ERR_CONFIG;
20803 +
20804 +       pci_write_config_word(adapter->pdev, cap_offset + reg, *value);
20805 +
20806 +       return E1000_SUCCESS;
20807 +}
20808 +
20809 +static void igb_vlan_rx_register(struct net_device *netdev,
20810 +                                 struct vlan_group *grp)
20811 +{
20812 +       struct igb_adapter *adapter = netdev_priv(netdev);
20813 +       struct e1000_hw *hw = &adapter->hw;
20814 +       u32 ctrl, rctl;
20815 +
20816 +       igb_irq_disable(adapter);
20817 +       adapter->vlgrp = grp;
20818 +
20819 +       if (grp) {
20820 +               /* enable VLAN tag insert/strip */
20821 +               ctrl = E1000_READ_REG(hw, E1000_CTRL);
20822 +               ctrl |= E1000_CTRL_VME;
20823 +               E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
20824 +
20825 +               /* Disable CFI check */
20826 +               rctl = E1000_READ_REG(hw, E1000_RCTL);
20827 +               rctl &= ~E1000_RCTL_CFIEN;
20828 +               E1000_WRITE_REG(hw, E1000_RCTL, rctl);
20829 +       } else {
20830 +               /* disable VLAN tag insert/strip */
20831 +               ctrl = E1000_READ_REG(hw, E1000_CTRL);
20832 +               ctrl &= ~E1000_CTRL_VME;
20833 +               E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
20834 +       }
20835 +
20836 +       igb_set_rlpml(adapter);
20837 +
20838 +       if (!test_bit(__IGB_DOWN, &adapter->state))
20839 +               igb_irq_enable(adapter);
20840 +}
20841 +
20842 +static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
20843 +{
20844 +       struct igb_adapter *adapter = netdev_priv(netdev);
20845 +       struct e1000_hw *hw = &adapter->hw;
20846 +       int pf_id = adapter->vfs_allocated_count;
20847 +#ifndef HAVE_NETDEV_VLAN_FEATURES
20848 +       struct net_device *v_netdev;
20849 +#endif
20850 +
20851 +       /* attempt to add filter to vlvf array */
20852 +       igb_vlvf_set(adapter, vid, TRUE, pf_id);
20853 +
20854 +       /* add the filter since PF can receive vlans w/o entry in vlvf */
20855 +       igb_vfta_set(hw, vid, TRUE);
20856 +#ifndef HAVE_NETDEV_VLAN_FEATURES
20857 +       /* Copy feature flags from netdev to the vlan netdev for this vid.
20858 +        * This allows things like TSO to bubble down to our vlan device.
20859 +        */
20860 +       v_netdev = vlan_group_get_device(adapter->vlgrp, vid);
20861 +       v_netdev->features |= adapter->netdev->features;
20862 +       vlan_group_set_device(adapter->vlgrp, vid, v_netdev);
20863 +#endif
20864 +}
20865 +
20866 +static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
20867 +{
20868 +       struct igb_adapter *adapter = netdev_priv(netdev);
20869 +       struct e1000_hw *hw = &adapter->hw;
20870 +       int pf_id = adapter->vfs_allocated_count;
20871 +       s32 err;
20872 +
20873 +       igb_irq_disable(adapter);
20874 +       vlan_group_set_device(adapter->vlgrp, vid, NULL);
20875 +
20876 +       if (!test_bit(__IGB_DOWN, &adapter->state))
20877 +               igb_irq_enable(adapter);
20878 +
20879 +       /* remove vlan from VLVF table array */
20880 +       err = igb_vlvf_set(adapter, vid, FALSE, pf_id);
20881 +
20882 +       /* if vid was not present in VLVF just remove it from table */
20883 +       if (err)
20884 +               igb_vfta_set(hw, vid, FALSE);
20885 +}
20886 +
20887 +static void igb_restore_vlan(struct igb_adapter *adapter)
20888 +{
20889 +       igb_vlan_rx_register(adapter->netdev, adapter->vlgrp);
20890 +
20891 +       if (adapter->vlgrp) {
20892 +               u16 vid;
20893 +               for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
20894 +                       if (!vlan_group_get_device(adapter->vlgrp, vid))
20895 +                               continue;
20896 +                       igb_vlan_rx_add_vid(adapter->netdev, vid);
20897 +               }
20898 +       }
20899 +}
20900 +
20901 +int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
20902 +{
20903 +       struct e1000_mac_info *mac = &adapter->hw.mac;
20904 +
20905 +       mac->autoneg = 0;
20906 +
20907 +       switch (spddplx) {
20908 +       case SPEED_10 + DUPLEX_HALF:
20909 +               mac->forced_speed_duplex = ADVERTISE_10_HALF;
20910 +               break;
20911 +       case SPEED_10 + DUPLEX_FULL:
20912 +               mac->forced_speed_duplex = ADVERTISE_10_FULL;
20913 +               break;
20914 +       case SPEED_100 + DUPLEX_HALF:
20915 +               mac->forced_speed_duplex = ADVERTISE_100_HALF;
20916 +               break;
20917 +       case SPEED_100 + DUPLEX_FULL:
20918 +               mac->forced_speed_duplex = ADVERTISE_100_FULL;
20919 +               break;
20920 +       case SPEED_1000 + DUPLEX_FULL:
20921 +               mac->autoneg = 1;
20922 +               adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
20923 +               break;
20924 +       case SPEED_1000 + DUPLEX_HALF: /* not supported */
20925 +       default:
20926 +               DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
20927 +               return -EINVAL;
20928 +       }
20929 +       return 0;
20930 +}
20931 +
20932 +#ifdef USE_REBOOT_NOTIFIER
20933 +/* only want to do this for 2.4 kernels? */
20934 +static int igb_notify_reboot(struct notifier_block *nb, unsigned long event,
20935 +                             void *p)
20936 +{
20937 +       struct pci_dev *pdev = NULL;
20938 +
20939 +       switch (event) {
20940 +       case SYS_DOWN:
20941 +       case SYS_HALT:
20942 +       case SYS_POWER_OFF:
20943 +               while ((pdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
20944 +                       if (pci_dev_driver(pdev) == &igb_driver)
20945 +                               igb_suspend(pdev, PMSG_SUSPEND);
20946 +               }
20947 +       }
20948 +       return NOTIFY_DONE;
20949 +}
20950 +
20951 +#endif
20952 +static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
20953 +{
20954 +       struct net_device *netdev = pci_get_drvdata(pdev);
20955 +       struct igb_adapter *adapter = netdev_priv(netdev);
20956 +       struct e1000_hw *hw = &adapter->hw;
20957 +       u32 ctrl, rctl, status;
20958 +       u32 wufc = adapter->wol;
20959 +#ifdef CONFIG_PM
20960 +       int retval = 0;
20961 +#endif
20962 +
20963 +       netif_device_detach(netdev);
20964 +
20965 +       if (netif_running(netdev))
20966 +               igb_close(netdev);
20967 +
20968 +       igb_clear_interrupt_scheme(adapter);
20969 +
20970 +#ifdef CONFIG_PM
20971 +       retval = pci_save_state(pdev);
20972 +       if (retval)
20973 +               return retval;
20974 +#endif
20975 +
20976 +       status = E1000_READ_REG(hw, E1000_STATUS);
20977 +       if (status & E1000_STATUS_LU)
20978 +               wufc &= ~E1000_WUFC_LNKC;
20979 +
20980 +       if (wufc) {
20981 +               igb_setup_rctl(adapter);
20982 +               igb_set_rx_mode(netdev);
20983 +
20984 +               /* turn on all-multi mode if wake on multicast is enabled */
20985 +               if (wufc & E1000_WUFC_MC) {
20986 +                       rctl = E1000_READ_REG(hw, E1000_RCTL);
20987 +                       rctl |= E1000_RCTL_MPE;
20988 +                       E1000_WRITE_REG(hw, E1000_RCTL, rctl);
20989 +               }
20990 +
20991 +               ctrl = E1000_READ_REG(hw, E1000_CTRL);
20992 +               /* phy power management enable */
20993 +               #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
20994 +               ctrl |= E1000_CTRL_ADVD3WUC;
20995 +               E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
20996 +
20997 +               /* Allow time for pending master requests to run */
20998 +               e1000_disable_pcie_master(hw);
20999 +
21000 +               E1000_WRITE_REG(hw, E1000_WUC, E1000_WUC_PME_EN);
21001 +               E1000_WRITE_REG(hw, E1000_WUFC, wufc);
21002 +       } else {
21003 +               E1000_WRITE_REG(hw, E1000_WUC, 0);
21004 +               E1000_WRITE_REG(hw, E1000_WUFC, 0);
21005 +       }
21006 +
21007 +       *enable_wake = wufc || adapter->en_mng_pt;
21008 +       if (!*enable_wake)
21009 +               e1000_shutdown_fiber_serdes_link(hw);
21010 +
21011 +       /* Release control of h/w to f/w.  If f/w is AMT enabled, this
21012 +        * would have already happened in close and is redundant. */
21013 +       igb_release_hw_control(adapter);
21014 +
21015 +       pci_disable_device(pdev);
21016 +
21017 +       return 0;
21018 +}
21019 +
21020 +#ifdef CONFIG_PM
21021 +static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
21022 +{
21023 +       int retval;
21024 +       bool wake;
21025 +
21026 +       retval = __igb_shutdown(pdev, &wake);
21027 +       if (retval)
21028 +               return retval;
21029 +
21030 +       if (wake) {
21031 +               pci_prepare_to_sleep(pdev);
21032 +       } else {
21033 +               pci_wake_from_d3(pdev, false);
21034 +               pci_set_power_state(pdev, PCI_D3hot);
21035 +       }
21036 +
21037 +       return 0;
21038 +}
21039 +
21040 +static int igb_resume(struct pci_dev *pdev)
21041 +{
21042 +       struct net_device *netdev = pci_get_drvdata(pdev);
21043 +       struct igb_adapter *adapter = netdev_priv(netdev);
21044 +       struct e1000_hw *hw = &adapter->hw;
21045 +       u32 err;
21046 +
21047 +       pci_set_power_state(pdev, PCI_D0);
21048 +       pci_restore_state(pdev);
21049 +       err = pci_enable_device_mem(pdev);
21050 +       if (err) {
21051 +               dev_err(&pdev->dev, "igb: Cannot enable PCI device "
21052 +                       "from suspend\n");
21053 +               return err;
21054 +       }
21055 +       pci_set_master(pdev);
21056 +
21057 +       pci_enable_wake(pdev, PCI_D3hot, 0);
21058 +       pci_enable_wake(pdev, PCI_D3cold, 0);
21059 +
21060 +       if (igb_init_interrupt_scheme(adapter)) {
21061 +               DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
21062 +               return -ENOMEM;
21063 +       }
21064 +
21065 +       /* e1000_power_up_phy(adapter); */
21066 +
21067 +       igb_reset(adapter);
21068 +
21069 +       /* let the f/w know that the h/w is now under the control of the
21070 +        * driver. */
21071 +       igb_get_hw_control(adapter);
21072 +
21073 +       E1000_WRITE_REG(hw, E1000_WUS, ~0);
21074 +
21075 +       if (netif_running(netdev)) {
21076 +               err = igb_open(netdev);
21077 +               if (err)
21078 +                       return err;
21079 +       }
21080 +
21081 +       netif_device_attach(netdev);
21082 +
21083 +       return 0;
21084 +}
21085 +#endif
21086 +
21087 +#ifndef USE_REBOOT_NOTIFIER
21088 +static void igb_shutdown(struct pci_dev *pdev)
21089 +{
21090 +       bool wake;
21091 +
21092 +       __igb_shutdown(pdev, &wake);
21093 +
21094 +       if (system_state == SYSTEM_POWER_OFF) {
21095 +               pci_wake_from_d3(pdev, wake);
21096 +               pci_set_power_state(pdev, PCI_D3hot);
21097 +       }
21098 +}
21099 +
21100 +#endif
21101 +#ifdef CONFIG_NET_POLL_CONTROLLER
21102 +/*
21103 + * Polling 'interrupt' - used by things like netconsole to send skbs
21104 + * without having to re-enable interrupts. It's not called while
21105 + * the interrupt routine is executing.
21106 + */
21107 +static void igb_netpoll(struct net_device *netdev)
21108 +{
21109 +       struct igb_adapter *adapter = netdev_priv(netdev);
21110 +       struct e1000_hw *hw = &adapter->hw;
21111 +       int i;
21112 +
21113 +       if (!adapter->msix_entries) {
21114 +               struct igb_q_vector *q_vector = adapter->q_vector[0];
21115 +               igb_irq_disable(adapter);
21116 +               napi_schedule(&q_vector->napi);
21117 +               return;
21118 +       }
21119 +
21120 +       for (i = 0; i < adapter->num_q_vectors; i++) {
21121 +               struct igb_q_vector *q_vector = adapter->q_vector[i];
21122 +               E1000_WRITE_REG(hw, E1000_EIMC, q_vector->eims_value);
21123 +               napi_schedule(&q_vector->napi);
21124 +       }
21125 +}
21126 +#endif /* CONFIG_NET_POLL_CONTROLLER */
21127 +
21128 +#ifdef HAVE_PCI_ERS
21129 +/**
21130 + * igb_io_error_detected - called when PCI error is detected
21131 + * @pdev: Pointer to PCI device
21132 + * @state: The current pci connection state
21133 + *
21134 + * This function is called after a PCI bus error affecting
21135 + * this device has been detected.
21136 + */
21137 +static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
21138 +                                              pci_channel_state_t state)
21139 +{
21140 +       struct net_device *netdev = pci_get_drvdata(pdev);
21141 +       struct igb_adapter *adapter = netdev_priv(netdev);
21142 +
21143 +       netif_device_detach(netdev);
21144 +
21145 +       if (state == pci_channel_io_perm_failure)
21146 +               return PCI_ERS_RESULT_DISCONNECT;
21147 +
21148 +       if (netif_running(netdev))
21149 +               igb_down(adapter);
21150 +       pci_disable_device(pdev);
21151 +
21152 +       /* Request a slot reset. */
21153 +       return PCI_ERS_RESULT_NEED_RESET;
21154 +}
21155 +
21156 +/**
21157 + * igb_io_slot_reset - called after the pci bus has been reset.
21158 + * @pdev: Pointer to PCI device
21159 + *
21160 + * Restart the card from scratch, as if from a cold-boot. Implementation
21161 + * resembles the first-half of the igb_resume routine.
21162 + */
21163 +static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
21164 +{
21165 +       struct net_device *netdev = pci_get_drvdata(pdev);
21166 +       struct igb_adapter *adapter = netdev_priv(netdev);
21167 +       struct e1000_hw *hw = &adapter->hw;
21168 +       pci_ers_result_t result;
21169 +
21170 +       if (pci_enable_device_mem(pdev)) {
21171 +               dev_err(&pdev->dev,
21172 +                       "Cannot re-enable PCI device after reset.\n");
21173 +               result = PCI_ERS_RESULT_DISCONNECT;
21174 +       } else {
21175 +               pci_set_master(pdev);
21176 +               pci_restore_state(pdev);
21177 +
21178 +               pci_enable_wake(pdev, PCI_D3hot, 0);
21179 +               pci_enable_wake(pdev, PCI_D3cold, 0);
21180 +
21181 +               igb_reset(adapter);
21182 +               E1000_WRITE_REG(hw, E1000_WUS, ~0);
21183 +               result = PCI_ERS_RESULT_RECOVERED;
21184 +       }
21185 +
21186 +       pci_cleanup_aer_uncorrect_error_status(pdev);
21187 +
21188 +       return result;
21189 +}
21190 +
21191 +/**
21192 + * igb_io_resume - called when traffic can start flowing again.
21193 + * @pdev: Pointer to PCI device
21194 + *
21195 + * This callback is called when the error recovery driver tells us that
21196 + * it's OK to resume normal operation. Implementation resembles the
21197 + * second-half of the igb_resume routine.
21198 + */
21199 +static void igb_io_resume(struct pci_dev *pdev)
21200 +{
21201 +       struct net_device *netdev = pci_get_drvdata(pdev);
21202 +       struct igb_adapter *adapter = netdev_priv(netdev);
21203 +
21204 +       if (netif_running(netdev)) {
21205 +               if (igb_up(adapter)) {
21206 +                       dev_err(&pdev->dev, "igb_up failed after reset\n");
21207 +                       return;
21208 +               }
21209 +       }
21210 +
21211 +       netif_device_attach(netdev);
21212 +
21213 +       /* let the f/w know that the h/w is now under the control of the
21214 +        * driver. */
21215 +       igb_get_hw_control(adapter);
21216 +}
21217 +
21218 +#endif /* HAVE_PCI_ERS */
21219 +static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
21220 +                             u8 qsel)
21221 +{
21222 +       u32 rar_low, rar_high;
21223 +       struct e1000_hw *hw = &adapter->hw;
21224 +
21225 +       /* HW expects these in little endian so we reverse the byte order
21226 +        * from network order (big endian) to little endian
21227 +        */
21228 +       rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
21229 +                 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
21230 +       rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
21231 +
21232 +       /* Indicate to hardware the Address is Valid. */
21233 +       rar_high |= E1000_RAH_AV;
21234 +
21235 +       if (hw->mac.type == e1000_82575)
21236 +               rar_high |= E1000_RAH_POOL_1 * qsel;
21237 +       else
21238 +               rar_high |= E1000_RAH_POOL_1 << qsel;
21239 +
21240 +       E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
21241 +       E1000_WRITE_FLUSH(hw);
21242 +       E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
21243 +       E1000_WRITE_FLUSH(hw);
21244 +}
21245 +
21246 +int igb_set_vf_mac(struct igb_adapter *adapter,
21247 +                          int vf, unsigned char *mac_addr)
21248 +{
21249 +       struct e1000_hw *hw = &adapter->hw;
21250 +       /* VF MAC addresses start at the end of receive addresses and move
21251 +        * towards the first; as a result a collision should not be possible */
21252 +       int rar_entry = hw->mac.rar_entry_count - (vf + 1);
21253 +
21254 +       memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, 6);
21255 +
21256 +       igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);
21257 +
21258 +       return 0;
21259 +}
21260 +
21261 +static void igb_vmm_control(struct igb_adapter *adapter)
21262 +{
21263 +       struct e1000_hw *hw = &adapter->hw;
21264 +
21265 +       /* replication is not supported for 82575 */
21266 +       if (hw->mac.type == e1000_82575)
21267 +               return;
21268 +
21269 +       if (adapter->vfs_allocated_count || adapter->VMDQ_queues) {
21270 +               e1000_vmdq_set_loopback_pf(hw, true);
21271 +               e1000_vmdq_set_replication_pf(hw, true);
21272 +       } else {
21273 +               e1000_vmdq_set_loopback_pf(hw, false);
21274 +               e1000_vmdq_set_replication_pf(hw, false);
21275 +       }
21276 +}
21277 +
21278 +static void igb_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
21279 +{
21280 +       unsigned char my_mac_addr[6];
21281 +       unsigned char oui[OUI_LEN] = {0x02, 0xAA, 0x00};
21282 +       struct net_device *netdev = pci_get_drvdata(pdev);
21283 +       struct igb_adapter *adapter = netdev_priv(netdev);
21284 +       unsigned int vfn = (event_mask & 7);
21285 +
21286 +       bool enable = ((event_mask & 0x10000000U) != 0);
21287 +
21288 +       if (enable) {
21289 +               random_ether_addr(my_mac_addr);
21290 +               memcpy(my_mac_addr, oui, OUI_LEN);
21291 +               printk(KERN_INFO "IOV1: VF %d is enabled\n", vfn);
21292 +               printk(KERN_INFO "Assigned MAC: "
21293 +                      "%2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
21294 +                      my_mac_addr[0], my_mac_addr[1], my_mac_addr[2],
21295 +                      my_mac_addr[3], my_mac_addr[4], my_mac_addr[5]);
21296 +               igb_set_vf_mac(adapter, vfn, my_mac_addr);
21297 +       } else {
21298 +               printk(KERN_INFO "IOV1: VF %d is disabled\n", vfn);
21299 +       }
21300 +}
21301 +
21302 +/* igb_main.c */
21303 Index: linux-2.6.22/drivers/net/igb/igb_param.c
21304 ===================================================================
21305 --- /dev/null   1970-01-01 00:00:00.000000000 +0000
21306 +++ linux-2.6.22/drivers/net/igb/igb_param.c    2009-12-18 12:39:22.000000000 -0500
21307 @@ -0,0 +1,599 @@
21308 +/*******************************************************************************
21309 +
21310 +  Intel(R) Gigabit Ethernet Linux driver
21311 +  Copyright(c) 2007-2009 Intel Corporation.
21312 +
21313 +  This program is free software; you can redistribute it and/or modify it
21314 +  under the terms and conditions of the GNU General Public License,
21315 +  version 2, as published by the Free Software Foundation.
21316 +
21317 +  This program is distributed in the hope it will be useful, but WITHOUT
21318 +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
21319 +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
21320 +  more details.
21321 +
21322 +  You should have received a copy of the GNU General Public License along with
21323 +  this program; if not, write to the Free Software Foundation, Inc.,
21324 +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21325 +
21326 +  The full GNU General Public License is included in this distribution in
21327 +  the file called "COPYING".
21328 +
21329 +  Contact Information:
21330 +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
21331 +  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
21332 +
21333 +*******************************************************************************/
21334 +
21335 +
21336 +#include <linux/netdevice.h>
21337 +
21338 +#include "igb.h"
21339 +
21340 +/* This is the only thing that needs to be changed to adjust the
21341 + * maximum number of ports that the driver can manage.
21342 + */
21343 +
21344 +#define IGB_MAX_NIC 32
21345 +
21346 +#define OPTION_UNSET   -1
21347 +#define OPTION_DISABLED 0
21348 +#define OPTION_ENABLED  1
21349 +
21350 +/* All parameters are treated the same, as an integer array of values.
21351 + * This macro just reduces the need to repeat the same declaration code
21352 + * over and over (plus this helps to avoid typo bugs).
21353 + */
21354 +
21355 +#define IGB_PARAM_INIT { [0 ... IGB_MAX_NIC] = OPTION_UNSET }
21356 +#ifndef module_param_array
21357 +/* Module Parameters are always initialized to -1, so that the driver
21358 + * can tell the difference between no user specified value or the
21359 + * user asking for the default value.
21360 + * The true default values are loaded in when igb_check_options is called.
21361 + *
21362 + * This is a GCC extension to ANSI C.
21363 + * See the item "Labeled Elements in Initializers" in the section
21364 + * "Extensions to the C Language Family" of the GCC documentation.
21365 + */
21366 +
21367 +#define IGB_PARAM(X, desc) \
21368 +       static const int __devinitdata X[IGB_MAX_NIC+1] = IGB_PARAM_INIT; \
21369 +       MODULE_PARM(X, "1-" __MODULE_STRING(IGB_MAX_NIC) "i"); \
21370 +       MODULE_PARM_DESC(X, desc);
21371 +#else
21372 +#define IGB_PARAM(X, desc) \
21373 +       static int __devinitdata X[IGB_MAX_NIC+1] = IGB_PARAM_INIT; \
21374 +       static unsigned int num_##X; \
21375 +       module_param_array_named(X, X, int, &num_##X, 0); \
21376 +       MODULE_PARM_DESC(X, desc);
21377 +#endif
21378 +
21379 +/* Interrupt Throttle Rate (interrupts/sec)
21380 + *
21381 + * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative)
21382 + */
21383 +IGB_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
21384 +#define DEFAULT_ITR                    3
21385 +#define MAX_ITR                   100000
21386 +#define MIN_ITR                      120
21387 +/* IntMode (Interrupt Mode)
21388 + *
21389 + * Valid Range: 0 - 2
21390 + *
21391 + * Default Value: 2 (MSI-X)
21392 + */
21393 +IGB_PARAM(IntMode, "Interrupt Mode");
21394 +#define MAX_INTMODE                    IGB_INT_MODE_MSIX
21395 +#define MIN_INTMODE                    IGB_INT_MODE_LEGACY
21396 +
21397 +/* LLIPort (Low Latency Interrupt TCP Port)
21398 + *
21399 + * Valid Range: 0 - 65535
21400 + *
21401 + * Default Value: 0 (disabled)
21402 + */
21403 +IGB_PARAM(LLIPort, "Low Latency Interrupt TCP Port");
21404 +
21405 +#define DEFAULT_LLIPORT                0
21406 +#define MAX_LLIPORT               0xFFFF
21407 +#define MIN_LLIPORT                    0
21408 +
21409 +/* LLIPush (Low Latency Interrupt on TCP Push flag)
21410 + *
21411 + * Valid Range: 0, 1
21412 + *
21413 + * Default Value: 0 (disabled)
21414 + */
21415 +IGB_PARAM(LLIPush, "Low Latency Interrupt on TCP Push flag");
21416 +
21417 +#define DEFAULT_LLIPUSH                0
21418 +#define MAX_LLIPUSH                    1
21419 +#define MIN_LLIPUSH                    0
21420 +
21421 +/* LLISize (Low Latency Interrupt on Packet Size)
21422 + *
21423 + * Valid Range: 0 - 1500
21424 + *
21425 + * Default Value: 0 (disabled)
21426 + */
21427 +IGB_PARAM(LLISize, "Low Latency Interrupt on Packet Size");
21428 +
21429 +#define DEFAULT_LLISIZE                0
21430 +#define MAX_LLISIZE                 1500
21431 +#define MIN_LLISIZE                    0
21432 +
21433 +#ifdef IGB_LRO
21434 +/* LROAggr (Large Receive Offload)
21435 + *
21436 + * Valid Range: 2 - 44
21437 + *
21438 + * Default Value:  32
21439 + */
21440 +IGB_PARAM(LROAggr, "LRO - Maximum packets to aggregate");
21441 +
21442 +#define DEFAULT_LRO_AGGR              32
21443 +#define MAX_LRO_AGGR                  44
21444 +#define MIN_LRO_AGGR                   2
21445 +#endif
21446 +
21447 +/* RSS (Enable RSS multiqueue receive)
21448 + *
21449 + * Valid Range: 0 - 8
21450 + *
21451 + * Default Value:  1
21452 + */
21453 +IGB_PARAM(RSS, "RSS - multiqueue receive count");
21454 +
21455 +#define DEFAULT_RSS       1
21456 +#define MAX_RSS          ((adapter->hw.mac.type == e1000_82575) ? 4 : 8)
21457 +#define MIN_RSS           0 
21458 +
21459 +/* VMDQ (Enable VMDq multiqueue receive)
21460 + *
21461 + * Valid Range: 0 - 8
21462 + *
21463 + * Default Value:  0
21464 + */
21465 +IGB_PARAM(VMDQ, "VMDQ - VMDq multiqueue receive");
21466 +
21467 +#define DEFAULT_VMDQ      0
21468 +#define MAX_VMDQ          MAX_RSS
21469 +#define MIN_VMDQ          0
21470 +
21471 +#ifdef CONFIG_PCI_IOV
21472 +/* max_vfs (Enable SR-IOV VF devices)
21473 + *
21474 + * Valid Range: 0 - 7
21475 + *
21476 + * Default Value:  0
21477 + */
21478 +IGB_PARAM(max_vfs, "max_vfs - SR-IOV VF devices");
21479 +
21480 +#define DEFAULT_SRIOV     0
21481 +#define MAX_SRIOV         7
21482 +#define MIN_SRIOV         0
21483 +
21484 +#endif /* CONFIG_PCI_IOV */
21485 +
21486 +/* QueuePairs (Enable TX/RX queue pairs for interrupt handling)
21487 + *
21488 + * Valid Range: 0 - 1
21489 + *
21490 + * Default Value:  1
21491 + */
21492 +IGB_PARAM(QueuePairs, "QueuePairs - TX/RX queue pairs for interrupt handling");
21493 +
21494 +#define DEFAULT_QUEUE_PAIRS           1
21495 +#define MAX_QUEUE_PAIRS               1
21496 +#define MIN_QUEUE_PAIRS               0
21497 +
21498 +struct igb_option {
21499 +       enum { enable_option, range_option, list_option } type;
21500 +       const char *name;
21501 +       const char *err;
21502 +       int def;
21503 +       union {
21504 +               struct { /* range_option info */
21505 +                       int min;
21506 +                       int max;
21507 +               } r;
21508 +               struct { /* list_option info */
21509 +                       int nr;
21510 +                       struct igb_opt_list { int i; char *str; } *p;
21511 +               } l;
21512 +       } arg;
21513 +};
21514 +
21515 +static int __devinit igb_validate_option(unsigned int *value,
21516 +                                         struct igb_option *opt,
21517 +                                         struct igb_adapter *adapter)
21518 +{
21519 +       if (*value == OPTION_UNSET) {
21520 +               *value = opt->def;
21521 +               return 0;
21522 +       }
21523 +
21524 +       switch (opt->type) {
21525 +       case enable_option:
21526 +               switch (*value) {
21527 +               case OPTION_ENABLED:
21528 +                       DPRINTK(PROBE, INFO, "%s Enabled\n", opt->name);
21529 +                       return 0;
21530 +               case OPTION_DISABLED:
21531 +                       DPRINTK(PROBE, INFO, "%s Disabled\n", opt->name);
21532 +                       return 0;
21533 +               }
21534 +               break;
21535 +       case range_option:
21536 +               if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
21537 +                       DPRINTK(PROBE, INFO,
21538 +                                       "%s set to %d\n", opt->name, *value);
21539 +                       return 0;
21540 +               }
21541 +               break;
21542 +       case list_option: {
21543 +               int i;
21544 +               struct igb_opt_list *ent;
21545 +
21546 +               for (i = 0; i < opt->arg.l.nr; i++) {
21547 +                       ent = &opt->arg.l.p[i];
21548 +                       if (*value == ent->i) {
21549 +                               if (ent->str[0] != '\0')
21550 +                                       DPRINTK(PROBE, INFO, "%s\n", ent->str);
21551 +                               return 0;
21552 +                       }
21553 +               }
21554 +       }
21555 +               break;
21556 +       default:
21557 +               BUG();
21558 +       }
21559 +
21560 +       DPRINTK(PROBE, INFO, "Invalid %s value specified (%d) %s\n",
21561 +              opt->name, *value, opt->err);
21562 +       *value = opt->def;
21563 +       return -1;
21564 +}
21565 +
21566 +/**
21567 + * igb_check_options - Range Checking for Command Line Parameters
21568 + * @adapter: board private structure
21569 + *
21570 + * This routine checks all command line parameters for valid user
21571 + * input.  If an invalid value is given, or if no user specified
21572 + * value exists, a default value is used.  The final value is stored
21573 + * in a variable in the adapter structure.
21574 + **/
21575 +
21576 +void __devinit igb_check_options(struct igb_adapter *adapter)
21577 +{
21578 +       int bd = adapter->bd_number;
21579 +
21580 +       if (bd >= IGB_MAX_NIC) {
21581 +               DPRINTK(PROBE, NOTICE,
21582 +                      "Warning: no configuration for board #%d\n", bd);
21583 +               DPRINTK(PROBE, NOTICE, "Using defaults for all values\n");
21584 +#ifndef module_param_array
21585 +               bd = IGB_MAX_NIC;
21586 +#endif
21587 +       }
21588 +
21589 +       { /* Interrupt Throttling Rate */
21590 +               struct igb_option opt = {
21591 +                       .type = range_option,
21592 +                       .name = "Interrupt Throttling Rate (ints/sec)",
21593 +                       .err  = "using default of " __MODULE_STRING(DEFAULT_ITR),
21594 +                       .def  = DEFAULT_ITR,
21595 +                       .arg  = { .r = { .min = MIN_ITR,
21596 +                                        .max = MAX_ITR } }
21597 +               };
21598 +
21599 +#ifdef module_param_array
21600 +               if (num_InterruptThrottleRate > bd) {
21601 +#endif
21602 +                       adapter->itr = InterruptThrottleRate[bd];
21603 +                       switch (adapter->itr) {
21604 +                       case 0:
21605 +                               DPRINTK(PROBE, INFO, "%s turned off\n",
21606 +                                       opt.name);
21607 +                               break;
21608 +                       case 1:
21609 +                               DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
21610 +                                       opt.name);
21611 +                               adapter->itr_setting = adapter->itr;
21612 +                               adapter->itr = IGB_START_ITR;
21613 +                               break;
21614 +                       case 3:
21615 +                               DPRINTK(PROBE, INFO,
21616 +                                       "%s set to dynamic conservative mode\n",
21617 +                                       opt.name);
21618 +                               adapter->itr_setting = adapter->itr;
21619 +                               adapter->itr = IGB_START_ITR;
21620 +                               break;
21621 +                       default:
21622 +                               igb_validate_option(&adapter->itr, &opt,
21623 +                                       adapter);
21624 +                               /* Save the setting, because the dynamic bits
21625 +                                * change itr.  In case of invalid user value,
21626 +                                * default to conservative mode, else need to
21627 +                                * clear the lower two bits because they are
21628 +                                * used as control */
21629 +                               if (adapter->itr == 3) {
21630 +                                       adapter->itr_setting = adapter->itr;
21631 +                                       adapter->itr = IGB_START_ITR;
21632 +                               } else {
21633 +                                       adapter->itr = 1000000000 / (adapter->itr * 256);
21634 +                                       adapter->itr_setting = adapter->itr & ~3;
21635 +                               }
21636 +                               break;
21637 +                       }
21638 +#ifdef module_param_array
21639 +               } else {
21640 +                       adapter->itr_setting = opt.def;
21641 +                       adapter->itr = 8000;
21642 +               }
21643 +#endif
21644 +       }
21645 +       { /* Interrupt Mode */
21646 +               struct igb_option opt = {
21647 +                       .type = range_option,
21648 +                       .name = "Interrupt Mode",
21649 +                       .err  = "defaulting to 2 (MSI-X)",
21650 +                       .def  = IGB_INT_MODE_MSIX,
21651 +                       .arg  = { .r = { .min = MIN_INTMODE,
21652 +                                        .max = MAX_INTMODE } }
21653 +               };
21654 +
21655 +#ifdef module_param_array
21656 +               if (num_IntMode > bd) {
21657 +#endif
21658 +                       unsigned int int_mode = IntMode[bd];
21659 +                       igb_validate_option(&int_mode, &opt, adapter);
21660 +                       adapter->int_mode = int_mode;
21661 +#ifdef module_param_array
21662 +               } else {
21663 +                       adapter->int_mode = opt.def;
21664 +               }
21665 +#endif
21666 +       }
21667 +       { /* Low Latency Interrupt TCP Port */
21668 +               struct igb_option opt = {
21669 +                       .type = range_option,
21670 +                       .name = "Low Latency Interrupt TCP Port",
21671 +                       .err  = "using default of " __MODULE_STRING(DEFAULT_LLIPORT),
21672 +                       .def  = DEFAULT_LLIPORT,
21673 +                       .arg  = { .r = { .min = MIN_LLIPORT,
21674 +                                        .max = MAX_LLIPORT } }
21675 +               };
21676 +
21677 +#ifdef module_param_array
21678 +               if (num_LLIPort > bd) {
21679 +#endif
21680 +                       adapter->lli_port = LLIPort[bd];
21681 +                       if (adapter->lli_port) {
21682 +                               igb_validate_option(&adapter->lli_port, &opt,
21683 +                                       adapter);
21684 +                       } else {
21685 +                               DPRINTK(PROBE, INFO, "%s turned off\n",
21686 +                                       opt.name);
21687 +                       }
21688 +#ifdef module_param_array
21689 +               } else {
21690 +                       adapter->lli_port = opt.def;
21691 +               }
21692 +#endif
21693 +       }
21694 +       { /* Low Latency Interrupt on Packet Size */
21695 +               struct igb_option opt = {
21696 +                       .type = range_option,
21697 +                       .name = "Low Latency Interrupt on Packet Size",
21698 +                       .err  = "using default of " __MODULE_STRING(DEFAULT_LLISIZE),
21699 +                       .def  = DEFAULT_LLISIZE,
21700 +                       .arg  = { .r = { .min = MIN_LLISIZE,
21701 +                                        .max = MAX_LLISIZE } }
21702 +               };
21703 +
21704 +#ifdef module_param_array
21705 +               if (num_LLISize > bd) {
21706 +#endif
21707 +                       adapter->lli_size = LLISize[bd];
21708 +                       if (adapter->lli_size) {
21709 +                               igb_validate_option(&adapter->lli_size, &opt,
21710 +                                       adapter);
21711 +                       } else {
21712 +                               DPRINTK(PROBE, INFO, "%s turned off\n",
21713 +                                       opt.name);
21714 +                       }
21715 +#ifdef module_param_array
21716 +               } else {
21717 +                       adapter->lli_size = opt.def;
21718 +               }
21719 +#endif
21720 +       }
21721 +       { /* Low Latency Interrupt on TCP Push flag */
21722 +               struct igb_option opt = {
21723 +                       .type = enable_option,
21724 +                       .name = "Low Latency Interrupt on TCP Push flag",
21725 +                       .err  = "defaulting to Disabled",
21726 +                       .def  = OPTION_DISABLED
21727 +               };
21728 +
21729 +#ifdef module_param_array
21730 +               if (num_LLIPush > bd) {
21731 +#endif
21732 +                       unsigned int lli_push = LLIPush[bd];
21733 +                       igb_validate_option(&lli_push, &opt, adapter);
21734 +                       adapter->flags |= lli_push ? IGB_FLAG_LLI_PUSH : 0;
21735 +#ifdef module_param_array
21736 +               } else {
21737 +                       adapter->flags |= opt.def ? IGB_FLAG_LLI_PUSH : 0;
21738 +               }
21739 +#endif
21740 +       }
21741 +#ifdef IGB_LRO
21742 +       { /* Large Receive Offload - Maximum packets to aggregate */
21743 +               struct igb_option opt = {
21744 +                       .type = range_option,
21745 +                       .name = "LRO - Maximum packets to aggregate",
21746 +                       .err  = "using default of " __MODULE_STRING(DEFAULT_LRO_AGGR),
21747 +                       .def  = DEFAULT_LRO_AGGR,
21748 +                       .arg  = { .r = { .min = MIN_LRO_AGGR,
21749 +                                        .max = MAX_LRO_AGGR } }
21750 +               };
21751 +
21752 +#ifdef module_param_array
21753 +               if (num_LROAggr > bd) {
21754 +#endif
21755 +                       adapter->lro_max_aggr = LROAggr[bd];
21756 +                       igb_validate_option(&adapter->lro_max_aggr, &opt, adapter);
21757 +
21758 +#ifdef module_param_array
21759 +               } else {
21760 +                       adapter->lro_max_aggr = opt.def;
21761 +               }
21762 +#endif
21763 +       }
21764 +#endif /* IGB_LRO */
21765 +#ifdef CONFIG_PCI_IOV
21766 +       { /* SRIOV - Enable SR-IOV VF devices */
21767 +               struct igb_option opt = {
21768 +                       .type = range_option,
21769 +                       .name = "max_vfs - SR-IOV VF devices",
21770 +                       .err  = "using default of " __MODULE_STRING(DEFAULT_SRIOV),
21771 +                       .def  = DEFAULT_SRIOV,
21772 +                       .arg  = { .r = { .min = MIN_SRIOV,
21773 +                                        .max = MAX_SRIOV } }
21774 +               };
21775 +
21776 +#ifdef module_param_array
21777 +               if (num_max_vfs > bd) {
21778 +#endif
21779 +                       adapter->vfs_allocated_count = max_vfs[bd];
21780 +                       igb_validate_option(&adapter->vfs_allocated_count, &opt, adapter);
21781 +
21782 +#ifdef module_param_array
21783 +               } else {
21784 +                       adapter->vfs_allocated_count = opt.def;
21785 +               }
21786 +#endif
21787 +               if (adapter->hw.mac.type != e1000_82576 && adapter->vfs_allocated_count) {
21788 +                       adapter->vfs_allocated_count = 0;
21789 +                       DPRINTK(PROBE, INFO, "SR-IOV option max_vfs only supported on 82576.\n");
21790 +               }
21791 +       }
21792 +#endif /* CONFIG_PCI_IOV */
21793 +       { /* VMDQ - Enable VMDq multiqueue receive */
21794 +               struct igb_option opt = {
21795 +                       .type = range_option,
21796 +                       .name = "VMDQ - VMDq multiqueue receive count",
21797 +                       .err  = "using default of " __MODULE_STRING(DEFAULT_VMDQ),
21798 +                       .def  = DEFAULT_VMDQ,
21799 +                       .arg  = { .r = { .min = MIN_VMDQ,
21800 +                                        .max = (MAX_VMDQ - adapter->vfs_allocated_count) } }
21801 +               };
21802 +#ifdef module_param_array
21803 +               if (num_VMDQ > bd) {
21804 +#endif
21805 +                       adapter->VMDQ_queues = VMDQ[bd];
21806 +                       if (adapter->vfs_allocated_count && !adapter->VMDQ_queues) {
21807 +                               DPRINTK(PROBE, INFO, "Enabling SR-IOV requires VMDq be set to at least 1\n");
21808 +                               adapter->VMDQ_queues = 1;
21809 +                       }
21810 +                       igb_validate_option(&adapter->VMDQ_queues, &opt, adapter);
21811 +
21812 +#ifdef module_param_array
21813 +               } else {
21814 +                       if (!adapter->vfs_allocated_count)
21815 +                               adapter->VMDQ_queues = opt.def;
21816 +                       else
21817 +                               adapter->VMDQ_queues = 1;
21818 +               }
21819 +#endif
21820 +       }
21821 +       { /* RSS - Enable RSS multiqueue receives */
21822 +               struct igb_option opt = {
21823 +                       .type = range_option,
21824 +                       .name = "RSS - RSS multiqueue receive count",
21825 +                       .err  = "using default of " __MODULE_STRING(DEFAULT_RSS),
21826 +                       .def  = DEFAULT_RSS,
21827 +                       .arg  = { .r = { .min = MIN_RSS,
21828 +                                        .max = MAX_RSS } }
21829 +               };
21830 +
21831 +               if (adapter->VMDQ_queues) {
21832 +                       switch (adapter->hw.mac.type) {
21833 +                       case e1000_82576:
21834 +                               opt.arg.r.max = 2;
21835 +                               break;
21836 +                       case e1000_82575:
21837 +                               if (adapter->VMDQ_queues == 2)
21838 +                                       opt.arg.r.max = 3;
21839 +                               if (adapter->VMDQ_queues <= 2)
21840 +                                       break;
21841 +                       default:
21842 +                               opt.arg.r.max = 1;
21843 +                               break;
21844 +                       }
21845 +               }
21846 +
21847 +#ifdef module_param_array
21848 +               if (num_RSS > bd) {
21849 +#endif
21850 +                       adapter->RSS_queues = RSS[bd];
21851 +                       switch (adapter->RSS_queues) {
21852 +                       case 1:
21853 +                               break;
21854 +                       default:
21855 +                               igb_validate_option(&adapter->RSS_queues, &opt, adapter);
21856 +                               if (adapter->RSS_queues)
21857 +                                       break;
21858 +                       case 0:
21859 +                               adapter->RSS_queues = min_t(u32, opt.arg.r.max, num_online_cpus());
21860 +                               break;
21861 +                       }
21862 +#ifdef module_param_array
21863 +               } else {
21864 +                       adapter->RSS_queues = opt.def;
21865 +               }
21866 +#endif
21867 +       }
21868 +       { /* QueuePairs - Enable TX/RX queue pairs for interrupt handling */
21869 +               struct igb_option opt = {
21870 +                       .type = enable_option,
21871 +                       .name = "QueuePairs - TX/RX queue pairs for interrupt handling",
21872 +                       .err  = "defaulting to Enabled",
21873 +                       .def  = OPTION_ENABLED
21874 +               };
21875 +
21876 +#ifdef module_param_array
21877 +               if (num_QueuePairs > bd) {
21878 +#endif
21879 +                       unsigned int qp = QueuePairs[bd];
21880 +                       /*
21881 +                        * we must enable queue pairs if the number of queues
21882 +                        * exceeds the number of available interrupts.  We are
21883 +                        * limited to 10, or 3 per unallocated vf.
21884 +                        */
21885 +                       if ((adapter->RSS_queues > 4) ||
21886 +                           (adapter->VMDQ_queues > 4) ||
21887 +                           ((adapter->RSS_queues > 1) &&
21888 +                            ((adapter->VMDQ_queues > 3) ||
21889 +                             (adapter->vfs_allocated_count > 6)))) {
21890 +                               if (qp == OPTION_DISABLED) {
21891 +                                       qp = OPTION_ENABLED;
21892 +                                       DPRINTK(PROBE, INFO,
21893 +                                               "Number of queues exceeds available interrupts, %s\n",opt.err);
21894 +                               }
21895 +                       }
21896 +                       igb_validate_option(&qp, &opt, adapter);
21897 +                       adapter->flags |= qp ? IGB_FLAG_QUEUE_PAIRS : 0;
21898 +                           
21899 +#ifdef module_param_array
21900 +               } else {
21901 +                       adapter->flags |= opt.def ? IGB_FLAG_QUEUE_PAIRS : 0;
21902 +               }
21903 +#endif
21904 +       }
21905 +}
21906 +
21907 Index: linux-2.6.22/drivers/net/igb/igb_regtest.h
21908 ===================================================================
21909 --- /dev/null   1970-01-01 00:00:00.000000000 +0000
21910 +++ linux-2.6.22/drivers/net/igb/igb_regtest.h  2009-12-18 12:39:22.000000000 -0500
21911 @@ -0,0 +1,135 @@
21912 +/*******************************************************************************
21913 +
21914 +  Intel(R) Gigabit Ethernet Linux driver
21915 +  Copyright(c) 2007-2009 Intel Corporation.
21916 +
21917 +  This program is free software; you can redistribute it and/or modify it
21918 +  under the terms and conditions of the GNU General Public License,
21919 +  version 2, as published by the Free Software Foundation.
21920 +
21921 +  This program is distributed in the hope it will be useful, but WITHOUT
21922 +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
21923 +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
21924 +  more details.
21925 +
21926 +  You should have received a copy of the GNU General Public License along with
21927 +  this program; if not, write to the Free Software Foundation, Inc.,
21928 +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21929 +
21930 +  The full GNU General Public License is included in this distribution in
21931 +  the file called "COPYING".
21932 +
21933 +  Contact Information:
21934 +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
21935 +  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
21936 +
21937 +*******************************************************************************/
21938 +
21939 +/* ethtool register test data */
21940 +struct igb_reg_test {
21941 +       u16 reg;
21942 +       u16 reg_offset;
21943 +       u16 array_len;
21944 +       u16 test_type;
21945 +       u32 mask;
21946 +       u32 write;
21947 +};
21948 +
21949 +/* In the hardware, registers are laid out either singly, in arrays
21950 + * spaced 0x100 bytes apart, or in contiguous tables.  We assume
21951 + * most tests take place on arrays or single registers (handled
21952 + * as a single-element array) and special-case the tables.
21953 + * Table tests are always pattern tests.
21954 + *
21955 + * We also make provision for some required setup steps by specifying
21956 + * registers to be written without any read-back testing.
21957 + */
21958 +
21959 +#define PATTERN_TEST   1
21960 +#define SET_READ_TEST  2
21961 +#define WRITE_NO_TEST  3
21962 +#define TABLE32_TEST   4
21963 +#define TABLE64_TEST_LO        5
21964 +#define TABLE64_TEST_HI        6
21965 +
21966 +/* 82576 reg test */
21967 +static struct igb_reg_test reg_test_82576[] = {
21968 +       { E1000_FCAL,      0x100, 1,  PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
21969 +       { E1000_FCAH,      0x100, 1,  PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
21970 +       { E1000_FCT,       0x100, 1,  PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
21971 +       { E1000_VET,       0x100, 1,  PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
21972 +       { E1000_RDBAL(0),  0x100, 4,  PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
21973 +       { E1000_RDBAH(0),  0x100, 4,  PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
21974 +       { E1000_RDLEN(0),  0x100, 4,  PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
21975 +       { E1000_RDBAL(4),  0x40,  12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
21976 +       { E1000_RDBAH(4),  0x40,  12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
21977 +       { E1000_RDLEN(4),  0x40,  12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
21978 +       /* Enable all queues before testing. */
21979 +       { E1000_RXDCTL(0), 0x100, 4,  WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
21980 +       { E1000_RXDCTL(4), 0x40,  12, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
21981 +       /* RDH is read-only for 82576, only test RDT. */
21982 +       { E1000_RDT(0),    0x100, 4,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
21983 +       { E1000_RDT(4),    0x40,  12, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
21984 +       { E1000_RXDCTL(0), 0x100, 4,  WRITE_NO_TEST, 0, 0 },
21985 +       { E1000_RXDCTL(4), 0x40,  12, WRITE_NO_TEST, 0, 0 },
21986 +       { E1000_FCRTH,     0x100, 1,  PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
21987 +       { E1000_FCTTV,     0x100, 1,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
21988 +       { E1000_TIPG,      0x100, 1,  PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
21989 +       { E1000_TDBAL(0),  0x100, 4,  PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
21990 +       { E1000_TDBAH(0),  0x100, 4,  PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
21991 +       { E1000_TDLEN(0),  0x100, 4,  PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
21992 +       { E1000_TDBAL(4),  0x40,  12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
21993 +       { E1000_TDBAH(4),  0x40,  12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
21994 +       { E1000_TDLEN(4),  0x40,  12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
21995 +       { E1000_RCTL,      0x100, 1,  SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
21996 +       { E1000_RCTL,      0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
21997 +       { E1000_RCTL,      0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
21998 +       { E1000_TCTL,      0x100, 1,  SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
21999 +       { E1000_RA,        0, 16, TABLE64_TEST_LO,
22000 +                                               0xFFFFFFFF, 0xFFFFFFFF },
22001 +       { E1000_RA,        0, 16, TABLE64_TEST_HI,
22002 +                                               0x83FFFFFF, 0xFFFFFFFF },
22003 +       { E1000_RA2,       0, 8, TABLE64_TEST_LO,
22004 +                                               0xFFFFFFFF, 0xFFFFFFFF },
22005 +       { E1000_RA2,       0, 8, TABLE64_TEST_HI,
22006 +                                               0x83FFFFFF, 0xFFFFFFFF },
22007 +       { E1000_MTA,       0, 128, TABLE32_TEST,
22008 +                                               0xFFFFFFFF, 0xFFFFFFFF },
22009 +       { 0, 0, 0, 0 }
22010 +};
22011 +
22012 +/* 82575 register test */
22013 +static struct igb_reg_test reg_test_82575[] = {
22014 +       { E1000_FCAL,   0x100,  1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
22015 +       { E1000_FCAH,   0x100,  1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
22016 +       { E1000_FCT,    0x100,  1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
22017 +       { E1000_VET,    0x100,  1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
22018 +       { E1000_RDBAL(0),       0x100,  4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
22019 +       { E1000_RDBAH(0),       0x100,  4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
22020 +       { E1000_RDLEN(0),       0x100,  4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
22021 +       /* Enable all four RX queues before testing. */
22022 +       { E1000_RXDCTL(0),      0x100,  4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
22023 +       /* RDH is read-only for 82575, only test RDT. */
22024 +       { E1000_RDT(0), 0x100,  4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
22025 +       { E1000_RXDCTL(0),      0x100,  4, WRITE_NO_TEST, 0, 0 },
22026 +       { E1000_FCRTH,  0x100,  1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
22027 +       { E1000_FCTTV,  0x100,  1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
22028 +       { E1000_TIPG,   0x100,  1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
22029 +       { E1000_TDBAL(0),       0x100,  4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
22030 +       { E1000_TDBAH(0),       0x100,  4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
22031 +       { E1000_TDLEN(0),       0x100,  4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
22032 +       { E1000_RCTL,   0x100,  1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
22033 +       { E1000_RCTL,   0x100,  1, SET_READ_TEST, 0x04CFB3FE, 0x003FFFFB },
22034 +       { E1000_RCTL,   0x100,  1, SET_READ_TEST, 0x04CFB3FE, 0xFFFFFFFF },
22035 +       { E1000_TCTL,   0x100,  1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
22036 +       { E1000_TXCW,   0x100,  1, PATTERN_TEST, 0xC000FFFF, 0x0000FFFF },
22037 +       { E1000_RA,     0,      16, TABLE64_TEST_LO,
22038 +                                               0xFFFFFFFF, 0xFFFFFFFF },
22039 +       { E1000_RA,     0,      16, TABLE64_TEST_HI,
22040 +                                               0x800FFFFF, 0xFFFFFFFF },
22041 +       { E1000_MTA,    0,      128, TABLE32_TEST,
22042 +                                               0xFFFFFFFF, 0xFFFFFFFF },
22043 +       { 0, 0, 0, 0 }
22044 +};
22045 +
22046 +
22047 Index: linux-2.6.22/drivers/net/igb/kcompat.c
22048 ===================================================================
22049 --- /dev/null   1970-01-01 00:00:00.000000000 +0000
22050 +++ linux-2.6.22/drivers/net/igb/kcompat.c      2009-12-18 12:39:22.000000000 -0500
22051 @@ -0,0 +1,552 @@
22052 +/*******************************************************************************
22053 +
22054 +  Intel(R) Gigabit Ethernet Linux driver
22055 +  Copyright(c) 2007-2009 Intel Corporation.
22056 +
22057 +  This program is free software; you can redistribute it and/or modify it
22058 +  under the terms and conditions of the GNU General Public License,
22059 +  version 2, as published by the Free Software Foundation.
22060 +
22061 +  This program is distributed in the hope it will be useful, but WITHOUT
22062 +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
22063 +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
22064 +  more details.
22065 +
22066 +  You should have received a copy of the GNU General Public License along with
22067 +  this program; if not, write to the Free Software Foundation, Inc.,
22068 +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
22069 +
22070 +  The full GNU General Public License is included in this distribution in
22071 +  the file called "COPYING".
22072 +
22073 +  Contact Information:
22074 +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
22075 +  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
22076 +
22077 +*******************************************************************************/
22078 +
22079 +#include "igb.h"
22080 +#include "kcompat.h"
22081 +
22082 +/*****************************************************************************/
22083 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,21) )
22084 +struct sk_buff *
22085 +_kc_skb_pad(struct sk_buff *skb, int pad)
22086 +{
22087 +        struct sk_buff *nskb;
22088 +        
22089 +        /* If the skbuff is non-linear tailroom is always zero.. */
22090 +        if(skb_tailroom(skb) >= pad)
22091 +        {
22092 +                memset(skb->data+skb->len, 0, pad);
22093 +                return skb;
22094 +        }
22095 +        
22096 +        nskb = skb_copy_expand(skb, skb_headroom(skb), skb_tailroom(skb) + pad, GFP_ATOMIC);
22097 +        kfree_skb(skb);
22098 +        if(nskb)
22099 +                memset(nskb->data+nskb->len, 0, pad);
22100 +        return nskb;
22101 +} 
22102 +#endif /* < 2.4.21 */
22103 +
22104 +/*****************************************************************************/
22105 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) )
22106 +
22107 +/**************************************/
22108 +/* PCI DMA MAPPING */
22109 +
22110 +#if defined(CONFIG_HIGHMEM)
22111 +
22112 +#ifndef PCI_DRAM_OFFSET
22113 +#define PCI_DRAM_OFFSET 0
22114 +#endif
22115 +
22116 +u64
22117 +_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset,
22118 +                 size_t size, int direction)
22119 +{
22120 +       return (((u64) (page - mem_map) << PAGE_SHIFT) + offset +
22121 +               PCI_DRAM_OFFSET);
22122 +}
22123 +
22124 +#else /* CONFIG_HIGHMEM */
22125 +
22126 +u64
22127 +_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset,
22128 +                 size_t size, int direction)
22129 +{
22130 +       return pci_map_single(dev, (void *)page_address(page) + offset, size,
22131 +                             direction);
22132 +}
22133 +
22134 +#endif /* CONFIG_HIGHMEM */
22135 +
22136 +void
22137 +_kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size,
22138 +                   int direction)
22139 +{
22140 +       return pci_unmap_single(dev, dma_addr, size, direction);
22141 +}
22142 +
22143 +#endif /* 2.4.13 => 2.4.3 */
22144 +
22145 +/*****************************************************************************/
22146 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) )
22147 +
22148 +/**************************************/
22149 +/* PCI DRIVER API */
22150 +
22151 +int
22152 +_kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask)
22153 +{
22154 +       if (!pci_dma_supported(dev, mask))
22155 +               return -EIO;
22156 +       dev->dma_mask = mask;
22157 +       return 0;
22158 +}
22159 +
22160 +int
22161 +_kc_pci_request_regions(struct pci_dev *dev, char *res_name)
22162 +{
22163 +       int i;
22164 +
22165 +       for (i = 0; i < 6; i++) {
22166 +               if (pci_resource_len(dev, i) == 0)
22167 +                       continue;
22168 +
22169 +               if (pci_resource_flags(dev, i) & IORESOURCE_IO) {
22170 +                       if (!request_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) {
22171 +                               pci_release_regions(dev);
22172 +                               return -EBUSY;
22173 +                       }
22174 +               } else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
22175 +                       if (!request_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) {
22176 +                               pci_release_regions(dev);
22177 +                               return -EBUSY;
22178 +                       }
22179 +               }
22180 +       }
22181 +       return 0;
22182 +}
22183 +
22184 +void
22185 +_kc_pci_release_regions(struct pci_dev *dev)
22186 +{
22187 +       int i;
22188 +
22189 +       for (i = 0; i < 6; i++) {
22190 +               if (pci_resource_len(dev, i) == 0)
22191 +                       continue;
22192 +
22193 +               if (pci_resource_flags(dev, i) & IORESOURCE_IO)
22194 +                       release_region(pci_resource_start(dev, i), pci_resource_len(dev, i));
22195 +
22196 +               else if (pci_resource_flags(dev, i) & IORESOURCE_MEM)
22197 +                       release_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i));
22198 +       }
22199 +}
22200 +
22201 +/**************************************/
22202 +/* NETWORK DRIVER API */
22203 +
22204 +struct net_device *
22205 +_kc_alloc_etherdev(int sizeof_priv)
22206 +{
22207 +       struct net_device *dev;
22208 +       int alloc_size;
22209 +
22210 +       alloc_size = sizeof(*dev) + sizeof_priv + IFNAMSIZ + 31;
22211 +       dev = kmalloc(alloc_size, GFP_KERNEL);
22212 +       if (!dev)
22213 +               return NULL;
22214 +       memset(dev, 0, alloc_size);
22215 +
22216 +       if (sizeof_priv)
22217 +               dev->priv = (void *) (((unsigned long)(dev + 1) + 31) & ~31);
22218 +       dev->name[0] = '\0';
22219 +       ether_setup(dev);
22220 +
22221 +       return dev;
22222 +}
22223 +
22224 +int
22225 +_kc_is_valid_ether_addr(u8 *addr)
22226 +{
22227 +       const char zaddr[6] = { 0, };
22228 +
22229 +       return !(addr[0] & 1) && memcmp(addr, zaddr, 6);
22230 +}
22231 +
22232 +#endif /* 2.4.3 => 2.4.0 */
22233 +
22234 +/*****************************************************************************/
22235 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) )
22236 +
22237 +int
22238 +_kc_pci_set_power_state(struct pci_dev *dev, int state)
22239 +{
22240 +       return 0;
22241 +}
22242 +
22243 +int
22244 +_kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable)
22245 +{
22246 +       return 0;
22247 +}
22248 +
22249 +#endif /* 2.4.6 => 2.4.3 */
22250 +
22251 +/*****************************************************************************/
22252 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
22253 +void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page,
22254 +                            int off, int size)
22255 +{
22256 +       skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
22257 +       frag->page = page;
22258 +       frag->page_offset = off;
22259 +       frag->size = size;
22260 +       skb_shinfo(skb)->nr_frags = i + 1;
22261 +}
22262 +
22263 +/*
22264 + * Original Copyright:
22265 + * find_next_bit.c: fallback find next bit implementation
22266 + *
22267 + * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
22268 + * Written by David Howells (dhowells@redhat.com)
22269 + */
22270 +
22271 +/**
22272 + * find_next_bit - find the next set bit in a memory region
22273 + * @addr: The address to base the search on
22274 + * @offset: The bitnumber to start searching at
22275 + * @size: The maximum size to search
22276 + */
22277 +unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
22278 +                            unsigned long offset)
22279 +{
22280 +       const unsigned long *p = addr + BITOP_WORD(offset);
22281 +       unsigned long result = offset & ~(BITS_PER_LONG-1);
22282 +       unsigned long tmp;
22283 +
22284 +       if (offset >= size)
22285 +               return size;
22286 +       size -= result;
22287 +       offset %= BITS_PER_LONG;
22288 +       if (offset) {
22289 +               tmp = *(p++);
22290 +               tmp &= (~0UL << offset);
22291 +               if (size < BITS_PER_LONG)
22292 +                       goto found_first;
22293 +               if (tmp)
22294 +                       goto found_middle;
22295 +               size -= BITS_PER_LONG;
22296 +               result += BITS_PER_LONG;
22297 +       }
22298 +       while (size & ~(BITS_PER_LONG-1)) {
22299 +               if ((tmp = *(p++)))
22300 +                       goto found_middle;
22301 +               result += BITS_PER_LONG;
22302 +               size -= BITS_PER_LONG;
22303 +       }
22304 +       if (!size)
22305 +               return result;
22306 +       tmp = *p;
22307 +
22308 +found_first:
22309 +       tmp &= (~0UL >> (BITS_PER_LONG - size));
22310 +       if (tmp == 0UL)         /* Are any bits set? */
22311 +               return result + size;   /* Nope. */
22312 +found_middle:
22313 +       return result + ffs(tmp);
22314 +}
22315 +
22316 +#endif /* 2.6.0 => 2.4.6 */
22317 +
22318 +/*****************************************************************************/
22319 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) )
22320 +void *_kc_kzalloc(size_t size, int flags)
22321 +{
22322 +       void *ret = kmalloc(size, flags);
22323 +       if (ret)
22324 +               memset(ret, 0, size);
22325 +       return ret;
22326 +}
22327 +#endif /* <= 2.6.13 */
22328 +
22329 +/*****************************************************************************/
22330 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) )
22331 +struct sk_buff *_kc_netdev_alloc_skb(struct net_device *dev,
22332 +                                     unsigned int length)
22333 +{
22334 +       /* 16 == NET_PAD_SKB */
22335 +       struct sk_buff *skb;
22336 +       skb = alloc_skb(length + 16, GFP_ATOMIC);
22337 +       if (likely(skb != NULL)) {
22338 +               skb_reserve(skb, 16);
22339 +               skb->dev = dev;
22340 +       }
22341 +       return skb;
22342 +}
22343 +#endif /* <= 2.6.17 */
22344 +
22345 +/*****************************************************************************/
22346 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) )
22347 +int _kc_pci_save_state(struct pci_dev *pdev)
22348 +{
22349 +       struct net_device *netdev = pci_get_drvdata(pdev);
22350 +       struct adapter_struct *adapter = netdev_priv(netdev);
22351 +       int size = PCI_CONFIG_SPACE_LEN, i;
22352 +       u16 pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP);
22353 +       u16 pcie_link_status;
22354 +
22355 +       if (pcie_cap_offset) {
22356 +               if (!pci_read_config_word(pdev,
22357 +                                         pcie_cap_offset + PCIE_LINK_STATUS,
22358 +                                         &pcie_link_status))
22359 +               size = PCIE_CONFIG_SPACE_LEN;
22360 +       }
22361 +       pci_config_space_ich8lan();
22362 +#ifdef HAVE_PCI_ERS
22363 +       if (adapter->config_space == NULL)
22364 +#else
22365 +       WARN_ON(adapter->config_space != NULL);
22366 +#endif
22367 +               adapter->config_space = kmalloc(size, GFP_KERNEL);
22368 +       if (!adapter->config_space) {
22369 +               printk(KERN_ERR "Out of memory in pci_save_state\n");
22370 +               return -ENOMEM;
22371 +       }
22372 +       for (i = 0; i < (size / 4); i++)
22373 +               pci_read_config_dword(pdev, i * 4, &adapter->config_space[i]);
22374 +       return 0;
22375 +}
22376 +
22377 +void _kc_pci_restore_state(struct pci_dev * pdev)
22378 +{
22379 +       struct net_device *netdev = pci_get_drvdata(pdev);
22380 +       struct adapter_struct *adapter = netdev_priv(netdev);
22381 +       int size = PCI_CONFIG_SPACE_LEN, i;
22382 +       u16 pcie_cap_offset;
22383 +       u16 pcie_link_status;
22384 +
22385 +       if (adapter->config_space != NULL) {
22386 +               pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP);
22387 +               if (pcie_cap_offset &&
22388 +                   !pci_read_config_word(pdev,
22389 +                                         pcie_cap_offset + PCIE_LINK_STATUS,
22390 +                                         &pcie_link_status))
22391 +                       size = PCIE_CONFIG_SPACE_LEN;
22392 +
22393 +               pci_config_space_ich8lan();
22394 +               for (i = 0; i < (size / 4); i++)
22395 +               pci_write_config_dword(pdev, i * 4, adapter->config_space[i]);
22396 +#ifndef HAVE_PCI_ERS
22397 +               kfree(adapter->config_space);
22398 +               adapter->config_space = NULL;
22399 +#endif
22400 +       }
22401 +}
22402 +
22403 +#ifdef HAVE_PCI_ERS
22404 +void _kc_free_netdev(struct net_device *netdev)
22405 +{
22406 +       struct adapter_struct *adapter = netdev_priv(netdev);
22407 +
22408 +       if (adapter->config_space != NULL)
22409 +               kfree(adapter->config_space);
22410 +#ifdef CONFIG_SYSFS
22411 +       if (netdev->reg_state == NETREG_UNINITIALIZED) {
22412 +               kfree((char *)netdev - netdev->padded);
22413 +       } else {
22414 +               BUG_ON(netdev->reg_state != NETREG_UNREGISTERED);
22415 +               netdev->reg_state = NETREG_RELEASED;
22416 +               class_device_put(&netdev->class_dev);
22417 +       }
22418 +#else
22419 +       kfree((char *)netdev - netdev->padded);
22420 +#endif
22421 +}
22422 +#endif
22423 +#endif /* <= 2.6.18 */
22424 +
22425 +/*****************************************************************************/
22426 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
22427 +#ifdef NAPI
22428 +struct net_device *napi_to_poll_dev(struct napi_struct *napi)
22429 +{
22430 +       struct adapter_q_vector *q_vector = container_of(napi,
22431 +                                                       struct adapter_q_vector,
22432 +                                                       napi);
22433 +       return &q_vector->poll_dev;
22434 +}
22435 +
22436 +int __kc_adapter_clean(struct net_device *netdev, int *budget)
22437 +{
22438 +       int work_done;
22439 +       int work_to_do = min(*budget, netdev->quota);
22440 +       /* kcompat.h netif_napi_add puts napi struct in "fake netdev->priv" */
22441 +       struct napi_struct *napi = netdev->priv;
22442 +       work_done = napi->poll(napi, work_to_do);
22443 +       *budget -= work_done;
22444 +       netdev->quota -= work_done;
22445 +       return (work_done >= work_to_do) ? 1 : 0;
22446 +}
22447 +#endif /* NAPI */
22448 +#endif /* <= 2.6.23 */
22449 +
22450 +/*****************************************************************************/
22451 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) )
22452 +#ifdef HAVE_TX_MQ
22453 +void _kc_netif_tx_stop_all_queues(struct net_device *netdev)
22454 +{
22455 +       struct adapter_struct *adapter = netdev_priv(netdev);
22456 +       int i;
22457 +
22458 +       netif_stop_queue(netdev);
22459 +       if (netif_is_multiqueue(netdev))
22460 +               for (i = 0; i < adapter->num_tx_queues; i++)
22461 +                       netif_stop_subqueue(netdev, i);
22462 +}
22463 +void _kc_netif_tx_wake_all_queues(struct net_device *netdev)
22464 +{
22465 +       struct adapter_struct *adapter = netdev_priv(netdev);
22466 +       int i;
22467 +
22468 +       netif_wake_queue(netdev);
22469 +       if (netif_is_multiqueue(netdev))
22470 +               for (i = 0; i < adapter->num_tx_queues; i++)
22471 +                       netif_wake_subqueue(netdev, i);
22472 +}
22473 +void _kc_netif_tx_start_all_queues(struct net_device *netdev)
22474 +{
22475 +       struct adapter_struct *adapter = netdev_priv(netdev);
22476 +       int i;
22477 +
22478 +       netif_start_queue(netdev);
22479 +       if (netif_is_multiqueue(netdev))
22480 +               for (i = 0; i < adapter->num_tx_queues; i++)
22481 +                       netif_start_subqueue(netdev, i);
22482 +}
22483 +#endif /* HAVE_TX_MQ */
22484 +#endif /* < 2.6.27 */
22485 +
22486 +/*****************************************************************************/
22487 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) )
22488 +
22489 +int
22490 +_kc_pci_prepare_to_sleep(struct pci_dev *dev)
22491 +{
22492 +       pci_power_t target_state;
22493 +       int error;
22494 +
22495 +       target_state = pci_choose_state(dev, PMSG_SUSPEND);
22496 +
22497 +       pci_enable_wake(dev, target_state, true);
22498 +
22499 +       error = pci_set_power_state(dev, target_state);
22500 +
22501 +       if (error)
22502 +               pci_enable_wake(dev, target_state, false);
22503 +
22504 +       return error;
22505 +}
22506 +
22507 +int
22508 +_kc_pci_wake_from_d3(struct pci_dev *dev, bool enable)
22509 +{
22510 +       int err;
22511 +
22512 +       err = pci_enable_wake(dev, PCI_D3cold, enable);
22513 +       if (err)
22514 +               goto out;
22515 +
22516 +       err = pci_enable_wake(dev, PCI_D3hot, enable);
22517 +
22518 +out:
22519 +       return err;
22520 +}
22521 +#endif /* < 2.6.28 */
22522 +
22523 +/*****************************************************************************/
22524 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) )
22525 +void _kc_pci_disable_link_state(struct pci_dev *pdev, int state)
22526 +{
22527 +       struct pci_dev *parent = pdev->bus->self;
22528 +       u16 link_state;
22529 +       int pos;
22530 +
22531 +       if (!parent)
22532 +               return;
22533 +
22534 +       pos = pci_find_capability(parent, PCI_CAP_ID_EXP);
22535 +       if (pos) {
22536 +               pci_read_config_word(parent, pos + PCI_EXP_LNKCTL, &link_state);
22537 +               link_state &= ~state;
22538 +               pci_write_config_word(parent, pos + PCI_EXP_LNKCTL, link_state);
22539 +       }
22540 +}
22541 +#endif /* < 2.6.29 */
22542 +
22543 +/*****************************************************************************/
22544 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) )
22545 +#ifdef HAVE_NETDEV_SELECT_QUEUE
22546 +#include <net/ip.h>
22547 +static u32 _kc_simple_tx_hashrnd;
22548 +static u32 _kc_simple_tx_hashrnd_initialized;
22549 +
22550 +u16 _kc_skb_tx_hash(struct net_device *dev, struct sk_buff *skb)
22551 +{
22552 +       u32 addr1, addr2, ports;
22553 +       u32 hash, ihl;
22554 +       u8 ip_proto = 0;
22555 +
22556 +       if (unlikely(!_kc_simple_tx_hashrnd_initialized)) {
22557 +               get_random_bytes(&_kc_simple_tx_hashrnd, 4);
22558 +               _kc_simple_tx_hashrnd_initialized = 1;
22559 +       }
22560 +
22561 +       switch (skb->protocol) {
22562 +       case htons(ETH_P_IP):
22563 +               if (!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)))
22564 +                       ip_proto = ip_hdr(skb)->protocol;
22565 +               addr1 = ip_hdr(skb)->saddr;
22566 +               addr2 = ip_hdr(skb)->daddr;
22567 +               ihl = ip_hdr(skb)->ihl;
22568 +               break;
22569 +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
22570 +       case htons(ETH_P_IPV6):
22571 +               ip_proto = ipv6_hdr(skb)->nexthdr;
22572 +               addr1 = ipv6_hdr(skb)->saddr.s6_addr32[3];
22573 +               addr2 = ipv6_hdr(skb)->daddr.s6_addr32[3];
22574 +               ihl = (40 >> 2);
22575 +               break;
22576 +#endif
22577 +       default:
22578 +               return 0;
22579 +       }
22580 +
22581 +
22582 +       switch (ip_proto) {
22583 +       case IPPROTO_TCP:
22584 +       case IPPROTO_UDP:
22585 +       case IPPROTO_DCCP:
22586 +       case IPPROTO_ESP:
22587 +       case IPPROTO_AH:
22588 +       case IPPROTO_SCTP:
22589 +       case IPPROTO_UDPLITE:
22590 +               ports = *((u32 *) (skb_network_header(skb) + (ihl * 4)));
22591 +               break;
22592 +
22593 +       default:
22594 +               ports = 0;
22595 +               break;
22596 +       }
22597 +
22598 +       hash = jhash_3words(addr1, addr2, ports, _kc_simple_tx_hashrnd);
22599 +
22600 +       return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
22601 +}
22602 +#endif /* HAVE_NETDEV_SELECT_QUEUE */
22603 +#endif /* < 2.6.30 */
22604 Index: linux-2.6.22/drivers/net/igb/kcompat.h
22605 ===================================================================
22606 --- /dev/null   1970-01-01 00:00:00.000000000 +0000
22607 +++ linux-2.6.22/drivers/net/igb/kcompat.h      2009-12-18 12:39:22.000000000 -0500
22608 @@ -0,0 +1,1793 @@
22609 +/*******************************************************************************
22610 +
22611 +  Intel(R) Gigabit Ethernet Linux driver
22612 +  Copyright(c) 2007-2009 Intel Corporation.
22613 +
22614 +  This program is free software; you can redistribute it and/or modify it
22615 +  under the terms and conditions of the GNU General Public License,
22616 +  version 2, as published by the Free Software Foundation.
22617 +
22618 +  This program is distributed in the hope it will be useful, but WITHOUT
22619 +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
22620 +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
22621 +  more details.
22622 +
22623 +  You should have received a copy of the GNU General Public License along with
22624 +  this program; if not, write to the Free Software Foundation, Inc.,
22625 +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
22626 +
22627 +  The full GNU General Public License is included in this distribution in
22628 +  the file called "COPYING".
22629 +
22630 +  Contact Information:
22631 +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
22632 +  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
22633 +
22634 +*******************************************************************************/
22635 +
22636 +#ifndef _KCOMPAT_H_
22637 +#define _KCOMPAT_H_
22638 +
22639 +#include <linux/version.h>
22640 +#include <linux/init.h>
22641 +#include <linux/types.h>
22642 +#include <linux/errno.h>
22643 +#include <linux/module.h>
22644 +#include <linux/pci.h>
22645 +#include <linux/netdevice.h>
22646 +#include <linux/etherdevice.h>
22647 +#include <linux/skbuff.h>
22648 +#include <linux/ioport.h>
22649 +#include <linux/slab.h>
22650 +#include <linux/list.h>
22651 +#include <linux/delay.h>
22652 +#include <linux/sched.h>
22653 +#include <linux/in.h>
22654 +#include <linux/ip.h>
22655 +#include <linux/udp.h>
22656 +#include <linux/mii.h>
22657 +#include <asm/io.h>
22658 +
22659 +/* NAPI enable/disable flags here */
22660 +#define NAPI
22661 +
22662 +#define adapter_struct igb_adapter
22663 +#define adapter_q_vector igb_q_vector
22664 +#define NAPI
22665 +
22666 +/* and finally set defines so that the code sees the changes */
22667 +#ifdef NAPI
22668 +#else
22669 +#endif /* NAPI */
22670 +
22671 +/* packet split disable/enable */
22672 +#ifdef DISABLE_PACKET_SPLIT
22673 +#undef CONFIG_E1000_DISABLE_PACKET_SPLIT
22674 +#define CONFIG_E1000_DISABLE_PACKET_SPLIT
22675 +#undef CONFIG_IGB_DISABLE_PACKET_SPLIT
22676 +#define CONFIG_IGB_DISABLE_PACKET_SPLIT
22677 +#endif
22678 +
22679 +/* MSI compatibility code for all kernels and drivers */
22680 +#ifdef DISABLE_PCI_MSI
22681 +#undef CONFIG_PCI_MSI
22682 +#endif
22683 +#ifndef CONFIG_PCI_MSI
22684 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) )
22685 +struct msix_entry {
22686 +       u16 vector; /* kernel uses to write allocated vector */
22687 +       u16 entry;  /* driver uses to specify entry, OS writes */
22688 +};
22689 +#endif
22690 +#define pci_enable_msi(a) -ENOTSUPP
22691 +#define pci_disable_msi(a) do {} while (0)
22692 +#define pci_enable_msix(a, b, c) -ENOTSUPP
22693 +#define pci_disable_msix(a) do {} while (0)
22694 +#define msi_remove_pci_irq_vectors(a) do {} while (0)
22695 +#endif /* CONFIG_PCI_MSI */
22696 +#ifdef DISABLE_PM
22697 +#undef CONFIG_PM
22698 +#endif
22699 +
22700 +#ifdef DISABLE_NET_POLL_CONTROLLER
22701 +#undef CONFIG_NET_POLL_CONTROLLER
22702 +#endif
22703 +
22704 +#ifndef PMSG_SUSPEND
22705 +#define PMSG_SUSPEND 3
22706 +#endif
22707 +
22708 +/* generic boolean compatibility */
22709 +#undef TRUE
22710 +#undef FALSE
22711 +#define TRUE true
22712 +#define FALSE false
22713 +#ifdef GCC_VERSION
22714 +#if ( GCC_VERSION < 3000 )
22715 +#define _Bool char
22716 +#endif
22717 +#else
22718 +#define _Bool char
22719 +#endif
22720 +#ifndef bool
22721 +#define bool _Bool
22722 +#define true 1
22723 +#define false 0
22724 +#endif
22725 +
22726 +
22727 +#ifndef module_param
22728 +#define module_param(v,t,p) MODULE_PARM(v, "i");
22729 +#endif
22730 +
22731 +#ifndef DMA_64BIT_MASK
22732 +#define DMA_64BIT_MASK  0xffffffffffffffffULL
22733 +#endif
22734 +
22735 +#ifndef DMA_32BIT_MASK
22736 +#define DMA_32BIT_MASK  0x00000000ffffffffULL
22737 +#endif
22738 +
22739 +#ifndef PCI_CAP_ID_EXP
22740 +#define PCI_CAP_ID_EXP 0x10
22741 +#endif
22742 +
22743 +#ifndef PCIE_LINK_STATE_L0S
22744 +#define PCIE_LINK_STATE_L0S 1
22745 +#endif
22746 +
22747 +#ifndef mmiowb
22748 +#ifdef CONFIG_IA64
22749 +#define mmiowb() asm volatile ("mf.a" ::: "memory")
22750 +#else
22751 +#define mmiowb()
22752 +#endif
22753 +#endif
22754 +
22755 +#ifndef SET_NETDEV_DEV
22756 +#define SET_NETDEV_DEV(net, pdev)
22757 +#endif
22758 +
22759 +#ifndef HAVE_FREE_NETDEV
22760 +#define free_netdev(x) kfree(x)
22761 +#endif
22762 +
22763 +#ifdef HAVE_POLL_CONTROLLER
22764 +#define CONFIG_NET_POLL_CONTROLLER
22765 +#endif
22766 +
22767 +#ifndef NETDEV_TX_OK
22768 +#define NETDEV_TX_OK 0
22769 +#endif
22770 +
22771 +#ifndef NETDEV_TX_BUSY
22772 +#define NETDEV_TX_BUSY 1
22773 +#endif
22774 +
22775 +#ifndef NETDEV_TX_LOCKED
22776 +#define NETDEV_TX_LOCKED -1
22777 +#endif
22778 +
22779 +#ifdef CONFIG_PCI_IOV
22780 +#define VMDQ_P(p)   ((p) + adapter->num_vfs)
22781 +#else
22782 +#define VMDQ_P(p)   (p)
22783 +#endif
22784 +
22785 +#ifndef SKB_DATAREF_SHIFT
22786 +/* if we do not have the infrastructure to detect if skb_header is cloned
22787 +   just return false in all cases */
22788 +#define skb_header_cloned(x) 0
22789 +#endif
22790 +
22791 +#ifndef NETIF_F_GSO
22792 +#define gso_size tso_size
22793 +#define gso_segs tso_segs
22794 +#endif
22795 +
22796 +#ifndef NETIF_F_GRO
22797 +#define vlan_gro_receive(_napi, _vlgrp, _vlan, _skb) \
22798 +               vlan_hwaccel_receive_skb(_skb, _vlgrp, _vlan)
22799 +#define napi_gro_receive(_napi, _skb) netif_receive_skb(_skb)
22800 +#endif
22801 +
22802 +#ifndef NETIF_F_SCTP_CSUM
22803 +#define NETIF_F_SCTP_CSUM 0
22804 +#endif
22805 +
22806 +#ifndef IPPROTO_SCTP
22807 +#define IPPROTO_SCTP 132
22808 +#endif
22809 +
22810 +#ifndef CHECKSUM_PARTIAL
22811 +#define CHECKSUM_PARTIAL CHECKSUM_HW
22812 +#define CHECKSUM_COMPLETE CHECKSUM_HW
22813 +#endif
22814 +
22815 +#ifndef __read_mostly
22816 +#define __read_mostly
22817 +#endif
22818 +
22819 +#ifndef HAVE_NETIF_MSG
22820 +#define HAVE_NETIF_MSG 1
22821 +enum {
22822 +       NETIF_MSG_DRV           = 0x0001,
22823 +       NETIF_MSG_PROBE         = 0x0002,
22824 +       NETIF_MSG_LINK          = 0x0004,
22825 +       NETIF_MSG_TIMER         = 0x0008,
22826 +       NETIF_MSG_IFDOWN        = 0x0010,
22827 +       NETIF_MSG_IFUP          = 0x0020,
22828 +       NETIF_MSG_RX_ERR        = 0x0040,
22829 +       NETIF_MSG_TX_ERR        = 0x0080,
22830 +       NETIF_MSG_TX_QUEUED     = 0x0100,
22831 +       NETIF_MSG_INTR          = 0x0200,
22832 +       NETIF_MSG_TX_DONE       = 0x0400,
22833 +       NETIF_MSG_RX_STATUS     = 0x0800,
22834 +       NETIF_MSG_PKTDATA       = 0x1000,
22835 +       NETIF_MSG_HW            = 0x2000,
22836 +       NETIF_MSG_WOL           = 0x4000,
22837 +};
22838 +
22839 +#else
22840 +#define NETIF_MSG_HW   0x2000
22841 +#define NETIF_MSG_WOL  0x4000
22842 +#endif /* HAVE_NETIF_MSG */
22843 +
22844 +#ifndef MII_RESV1
22845 +#define MII_RESV1              0x17            /* Reserved...          */
22846 +#endif
22847 +
22848 +#ifndef unlikely
22849 +#define unlikely(_x) _x
22850 +#define likely(_x) _x
22851 +#endif
22852 +
22853 +#ifndef WARN_ON
22854 +#define WARN_ON(x)
22855 +#endif
22856 +
22857 +#ifndef PCI_DEVICE
22858 +#define PCI_DEVICE(vend,dev) \
22859 +       .vendor = (vend), .device = (dev), \
22860 +       .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
22861 +#endif
22862 +
22863 +#ifndef num_online_cpus
22864 +#define num_online_cpus() smp_num_cpus
22865 +#endif
22866 +
22867 +
22868 +#ifndef _LINUX_RANDOM_H
22869 +#include <linux/random.h>
22870 +#endif
22871 +
22872 +#ifndef DECLARE_BITMAP
22873 +#ifndef BITS_TO_LONGS
22874 +#define BITS_TO_LONGS(bits) (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG)
22875 +#endif
22876 +#define DECLARE_BITMAP(name,bits) long name[BITS_TO_LONGS(bits)]
22877 +#endif
22878 +
22879 +#ifndef VLAN_HLEN
22880 +#define VLAN_HLEN 4
22881 +#endif
22882 +
22883 +#ifndef VLAN_ETH_HLEN
22884 +#define VLAN_ETH_HLEN 18
22885 +#endif
22886 +
22887 +#ifndef VLAN_ETH_FRAME_LEN
22888 +#define VLAN_ETH_FRAME_LEN 1518
22889 +#endif
22890 +
22891 +#ifndef DCA_GET_TAG_TWO_ARGS
22892 +#define dca3_get_tag(a,b) dca_get_tag(b)
22893 +#endif
22894 +
22895 +/*****************************************************************************/
22896 +/* Installations with ethtool version without eeprom, adapter id, or statistics
22897 + * support */
22898 +
22899 +#ifndef ETH_GSTRING_LEN
22900 +#define ETH_GSTRING_LEN 32
22901 +#endif
22902 +
22903 +#ifndef ETHTOOL_GSTATS
22904 +#define ETHTOOL_GSTATS 0x1d
22905 +#undef ethtool_drvinfo
22906 +#define ethtool_drvinfo k_ethtool_drvinfo
22907 +struct k_ethtool_drvinfo {
22908 +       u32 cmd;
22909 +       char driver[32];
22910 +       char version[32];
22911 +       char fw_version[32];
22912 +       char bus_info[32];
22913 +       char reserved1[32];
22914 +       char reserved2[16];
22915 +       u32 n_stats;
22916 +       u32 testinfo_len;
22917 +       u32 eedump_len;
22918 +       u32 regdump_len;
22919 +};
22920 +
22921 +struct ethtool_stats {
22922 +       u32 cmd;
22923 +       u32 n_stats;
22924 +       u64 data[0];
22925 +};
22926 +#endif /* ETHTOOL_GSTATS */
22927 +
22928 +#ifndef ETHTOOL_PHYS_ID
22929 +#define ETHTOOL_PHYS_ID 0x1c
22930 +#endif /* ETHTOOL_PHYS_ID */
22931 +
22932 +#ifndef ETHTOOL_GSTRINGS
22933 +#define ETHTOOL_GSTRINGS 0x1b
22934 +enum ethtool_stringset {
22935 +       ETH_SS_TEST             = 0,
22936 +       ETH_SS_STATS,
22937 +};
22938 +struct ethtool_gstrings {
22939 +       u32 cmd;            /* ETHTOOL_GSTRINGS */
22940 +       u32 string_set;     /* string set id e.c. ETH_SS_TEST, etc*/
22941 +       u32 len;            /* number of strings in the string set */
22942 +       u8 data[0];
22943 +};
22944 +#endif /* ETHTOOL_GSTRINGS */
22945 +
22946 +#ifndef ETHTOOL_TEST
22947 +#define ETHTOOL_TEST 0x1a
22948 +enum ethtool_test_flags {
22949 +       ETH_TEST_FL_OFFLINE     = (1 << 0),
22950 +       ETH_TEST_FL_FAILED      = (1 << 1),
22951 +};
22952 +struct ethtool_test {
22953 +       u32 cmd;
22954 +       u32 flags;
22955 +       u32 reserved;
22956 +       u32 len;
22957 +       u64 data[0];
22958 +};
22959 +#endif /* ETHTOOL_TEST */
22960 +
22961 +#ifndef ETHTOOL_GEEPROM
22962 +#define ETHTOOL_GEEPROM 0xb
22963 +#undef ETHTOOL_GREGS
22964 +struct ethtool_eeprom {
22965 +       u32 cmd;
22966 +       u32 magic;
22967 +       u32 offset;
22968 +       u32 len;
22969 +       u8 data[0];
22970 +};
22971 +
22972 +struct ethtool_value {
22973 +       u32 cmd;
22974 +       u32 data;
22975 +};
22976 +#endif /* ETHTOOL_GEEPROM */
22977 +
22978 +#ifndef ETHTOOL_GLINK
22979 +#define ETHTOOL_GLINK 0xa
22980 +#endif /* ETHTOOL_GLINK */
22981 +
22982 +#ifndef ETHTOOL_GREGS
22983 +#define ETHTOOL_GREGS          0x00000004 /* Get NIC registers */
22984 +#define ethtool_regs _kc_ethtool_regs
22985 +/* for passing big chunks of data */
22986 +struct _kc_ethtool_regs {
22987 +       u32 cmd;
22988 +       u32 version; /* driver-specific, indicates different chips/revs */
22989 +       u32 len; /* bytes */
22990 +       u8 data[0];
22991 +};
22992 +#endif /* ETHTOOL_GREGS */
22993 +
22994 +#ifndef ETHTOOL_GMSGLVL
22995 +#define ETHTOOL_GMSGLVL                0x00000007 /* Get driver message level */
22996 +#endif
22997 +#ifndef ETHTOOL_SMSGLVL
22998 +#define ETHTOOL_SMSGLVL                0x00000008 /* Set driver msg level, priv. */
22999 +#endif
23000 +#ifndef ETHTOOL_NWAY_RST
23001 +#define ETHTOOL_NWAY_RST       0x00000009 /* Restart autonegotiation, priv */
23002 +#endif
23003 +#ifndef ETHTOOL_GLINK
23004 +#define ETHTOOL_GLINK          0x0000000a /* Get link status */
23005 +#endif
23006 +#ifndef ETHTOOL_GEEPROM
23007 +#define ETHTOOL_GEEPROM                0x0000000b /* Get EEPROM data */
23008 +#endif
23009 +#ifndef ETHTOOL_SEEPROM
23010 +#define ETHTOOL_SEEPROM                0x0000000c /* Set EEPROM data */
23011 +#endif
23012 +#ifndef ETHTOOL_GCOALESCE
23013 +#define ETHTOOL_GCOALESCE      0x0000000e /* Get coalesce config */
23014 +/* for configuring coalescing parameters of chip */
23015 +#define ethtool_coalesce _kc_ethtool_coalesce
23016 +struct _kc_ethtool_coalesce {
23017 +       u32     cmd;    /* ETHTOOL_{G,S}COALESCE */
23018 +
23019 +       /* How many usecs to delay an RX interrupt after
23020 +        * a packet arrives.  If 0, only rx_max_coalesced_frames
23021 +        * is used.
23022 +        */
23023 +       u32     rx_coalesce_usecs;
23024 +
23025 +       /* How many packets to delay an RX interrupt after
23026 +        * a packet arrives.  If 0, only rx_coalesce_usecs is
23027 +        * used.  It is illegal to set both usecs and max frames
23028 +        * to zero as this would cause RX interrupts to never be
23029 +        * generated.
23030 +        */
23031 +       u32     rx_max_coalesced_frames;
23032 +
23033 +       /* Same as above two parameters, except that these values
23034 +        * apply while an IRQ is being serviced by the host.  Not
23035 +        * all cards support this feature and the values are ignored
23036 +        * in that case.
23037 +        */
23038 +       u32     rx_coalesce_usecs_irq;
23039 +       u32     rx_max_coalesced_frames_irq;
23040 +
23041 +       /* How many usecs to delay a TX interrupt after
23042 +        * a packet is sent.  If 0, only tx_max_coalesced_frames
23043 +        * is used.
23044 +        */
23045 +       u32     tx_coalesce_usecs;
23046 +
23047 +       /* How many packets to delay a TX interrupt after
23048 +        * a packet is sent.  If 0, only tx_coalesce_usecs is
23049 +        * used.  It is illegal to set both usecs and max frames
23050 +        * to zero as this would cause TX interrupts to never be
23051 +        * generated.
23052 +        */
23053 +       u32     tx_max_coalesced_frames;
23054 +
23055 +       /* Same as above two parameters, except that these values
23056 +        * apply while an IRQ is being serviced by the host.  Not
23057 +        * all cards support this feature and the values are ignored
23058 +        * in that case.
23059 +        */
23060 +       u32     tx_coalesce_usecs_irq;
23061 +       u32     tx_max_coalesced_frames_irq;
23062 +
23063 +       /* How many usecs to delay in-memory statistics
23064 +        * block updates.  Some drivers do not have an in-memory
23065 +        * statistic block, and in such cases this value is ignored.
23066 +        * This value must not be zero.
23067 +        */
23068 +       u32     stats_block_coalesce_usecs;
23069 +
23070 +       /* Adaptive RX/TX coalescing is an algorithm implemented by
23071 +        * some drivers to improve latency under low packet rates and
23072 +        * improve throughput under high packet rates.  Some drivers
23073 +        * only implement one of RX or TX adaptive coalescing.  Anything
23074 +        * not implemented by the driver causes these values to be
23075 +        * silently ignored.
23076 +        */
23077 +       u32     use_adaptive_rx_coalesce;
23078 +       u32     use_adaptive_tx_coalesce;
23079 +
23080 +       /* When the packet rate (measured in packets per second)
23081 +        * is below pkt_rate_low, the {rx,tx}_*_low parameters are
23082 +        * used.
23083 +        */
23084 +       u32     pkt_rate_low;
23085 +       u32     rx_coalesce_usecs_low;
23086 +       u32     rx_max_coalesced_frames_low;
23087 +       u32     tx_coalesce_usecs_low;
23088 +       u32     tx_max_coalesced_frames_low;
23089 +
23090 +       /* When the packet rate is below pkt_rate_high but above
23091 +        * pkt_rate_low (both measured in packets per second) the
23092 +        * normal {rx,tx}_* coalescing parameters are used.
23093 +        */
23094 +
23095 +       /* When the packet rate is (measured in packets per second)
23096 +        * is above pkt_rate_high, the {rx,tx}_*_high parameters are
23097 +        * used.
23098 +        */
23099 +       u32     pkt_rate_high;
23100 +       u32     rx_coalesce_usecs_high;
23101 +       u32     rx_max_coalesced_frames_high;
23102 +       u32     tx_coalesce_usecs_high;
23103 +       u32     tx_max_coalesced_frames_high;
23104 +
23105 +       /* How often to do adaptive coalescing packet rate sampling,
23106 +        * measured in seconds.  Must not be zero.
23107 +        */
23108 +       u32     rate_sample_interval;
23109 +};
23110 +#endif /* ETHTOOL_GCOALESCE */
23111 +
23112 +#ifndef ETHTOOL_SCOALESCE
23113 +#define ETHTOOL_SCOALESCE      0x0000000f /* Set coalesce config. */
23114 +#endif
23115 +#ifndef ETHTOOL_GRINGPARAM
23116 +#define ETHTOOL_GRINGPARAM     0x00000010 /* Get ring parameters */
23117 +/* for configuring RX/TX ring parameters */
23118 +#define ethtool_ringparam _kc_ethtool_ringparam
23119 +struct _kc_ethtool_ringparam {
23120 +       u32     cmd;    /* ETHTOOL_{G,S}RINGPARAM */
23121 +
23122 +       /* Read only attributes.  These indicate the maximum number
23123 +        * of pending RX/TX ring entries the driver will allow the
23124 +        * user to set.
23125 +        */
23126 +       u32     rx_max_pending;
23127 +       u32     rx_mini_max_pending;
23128 +       u32     rx_jumbo_max_pending;
23129 +       u32     tx_max_pending;
23130 +
23131 +       /* Values changeable by the user.  The valid values are
23132 +        * in the range 1 to the "*_max_pending" counterpart above.
23133 +        */
23134 +       u32     rx_pending;
23135 +       u32     rx_mini_pending;
23136 +       u32     rx_jumbo_pending;
23137 +       u32     tx_pending;
23138 +};
23139 +#endif /* ETHTOOL_GRINGPARAM */
23140 +
23141 +#ifndef ETHTOOL_SRINGPARAM
23142 +#define ETHTOOL_SRINGPARAM     0x00000011 /* Set ring parameters, priv. */
23143 +#endif
23144 +#ifndef ETHTOOL_GPAUSEPARAM
23145 +#define ETHTOOL_GPAUSEPARAM    0x00000012 /* Get pause parameters */
23146 +/* for configuring link flow control parameters */
23147 +#define ethtool_pauseparam _kc_ethtool_pauseparam
23148 +struct _kc_ethtool_pauseparam {
23149 +       u32     cmd;    /* ETHTOOL_{G,S}PAUSEPARAM */
23150 +
23151 +       /* If the link is being auto-negotiated (via ethtool_cmd.autoneg
23152 +        * being true) the user may set 'autoneg' here non-zero to have the
23153 +        * pause parameters be auto-negotiated too.  In such a case, the
23154 +        * {rx,tx}_pause values below determine what capabilities are
23155 +        * advertised.
23156 +        *
23157 +        * If 'autoneg' is zero or the link is not being auto-negotiated,
23158 +        * then {rx,tx}_pause force the driver to use/not-use pause
23159 +        * flow control.
23160 +        */
23161 +       u32     autoneg;
23162 +       u32     rx_pause;
23163 +       u32     tx_pause;
23164 +};
23165 +#endif /* ETHTOOL_GPAUSEPARAM */
23166 +
23167 +#ifndef ETHTOOL_SPAUSEPARAM
23168 +#define ETHTOOL_SPAUSEPARAM    0x00000013 /* Set pause parameters. */
23169 +#endif
23170 +#ifndef ETHTOOL_GRXCSUM
23171 +#define ETHTOOL_GRXCSUM                0x00000014 /* Get RX hw csum enable (ethtool_value) */
23172 +#endif
23173 +#ifndef ETHTOOL_SRXCSUM
23174 +#define ETHTOOL_SRXCSUM                0x00000015 /* Set RX hw csum enable (ethtool_value) */
23175 +#endif
23176 +#ifndef ETHTOOL_GTXCSUM
23177 +#define ETHTOOL_GTXCSUM                0x00000016 /* Get TX hw csum enable (ethtool_value) */
23178 +#endif
23179 +#ifndef ETHTOOL_STXCSUM
23180 +#define ETHTOOL_STXCSUM                0x00000017 /* Set TX hw csum enable (ethtool_value) */
23181 +#endif
23182 +#ifndef ETHTOOL_GSG
23183 +#define ETHTOOL_GSG            0x00000018 /* Get scatter-gather enable
23184 +                                           * (ethtool_value) */
23185 +#endif
23186 +#ifndef ETHTOOL_SSG
23187 +#define ETHTOOL_SSG            0x00000019 /* Set scatter-gather enable
23188 +                                           * (ethtool_value). */
23189 +#endif
23190 +#ifndef ETHTOOL_TEST
23191 +#define ETHTOOL_TEST           0x0000001a /* execute NIC self-test, priv. */
23192 +#endif
23193 +#ifndef ETHTOOL_GSTRINGS
23194 +#define ETHTOOL_GSTRINGS       0x0000001b /* get specified string set */
23195 +#endif
23196 +#ifndef ETHTOOL_PHYS_ID
23197 +#define ETHTOOL_PHYS_ID                0x0000001c /* identify the NIC */
23198 +#endif
23199 +#ifndef ETHTOOL_GSTATS
23200 +#define ETHTOOL_GSTATS         0x0000001d /* get NIC-specific statistics */
23201 +#endif
23202 +#ifndef ETHTOOL_GTSO
23203 +#define ETHTOOL_GTSO           0x0000001e /* Get TSO enable (ethtool_value) */
23204 +#endif
23205 +#ifndef ETHTOOL_STSO
23206 +#define ETHTOOL_STSO           0x0000001f /* Set TSO enable (ethtool_value) */
23207 +#endif
23208 +
23209 +#ifndef ETHTOOL_BUSINFO_LEN
23210 +#define ETHTOOL_BUSINFO_LEN    32
23211 +#endif
23212 +
23213 +/*****************************************************************************/
23214 +/* 2.4.3 => 2.4.0 */
23215 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) )
23216 +
23217 +/**************************************/
23218 +/* PCI DRIVER API */
23219 +
23220 +#ifndef pci_set_dma_mask
23221 +#define pci_set_dma_mask _kc_pci_set_dma_mask
23222 +extern int _kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask);
23223 +#endif
23224 +
23225 +#ifndef pci_request_regions
23226 +#define pci_request_regions _kc_pci_request_regions
23227 +extern int _kc_pci_request_regions(struct pci_dev *pdev, char *res_name);
23228 +#endif
23229 +
23230 +#ifndef pci_release_regions
23231 +#define pci_release_regions _kc_pci_release_regions
23232 +extern void _kc_pci_release_regions(struct pci_dev *pdev);
23233 +#endif
23234 +
23235 +/**************************************/
23236 +/* NETWORK DRIVER API */
23237 +
23238 +#ifndef alloc_etherdev
23239 +#define alloc_etherdev _kc_alloc_etherdev
23240 +extern struct net_device * _kc_alloc_etherdev(int sizeof_priv);
23241 +#endif
23242 +
23243 +#ifndef is_valid_ether_addr
23244 +#define is_valid_ether_addr _kc_is_valid_ether_addr
23245 +extern int _kc_is_valid_ether_addr(u8 *addr);
23246 +#endif
23247 +
23248 +/**************************************/
23249 +/* MISCELLANEOUS */
23250 +
23251 +#ifndef INIT_TQUEUE
23252 +#define INIT_TQUEUE(_tq, _routine, _data)              \
23253 +       do {                                            \
23254 +               INIT_LIST_HEAD(&(_tq)->list);           \
23255 +               (_tq)->sync = 0;                        \
23256 +               (_tq)->routine = _routine;              \
23257 +               (_tq)->data = _data;                    \
23258 +       } while (0)
23259 +#endif
23260 +
23261 +#endif /* 2.4.3 => 2.4.0 */
23262 +
23263 +/*****************************************************************************/
23264 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,5) )
23265 +/* Generic MII registers. */
23266 +#define MII_BMCR            0x00        /* Basic mode control register */
23267 +#define MII_BMSR            0x01        /* Basic mode status register  */
23268 +#define MII_PHYSID1         0x02        /* PHYS ID 1                   */
23269 +#define MII_PHYSID2         0x03        /* PHYS ID 2                   */
23270 +#define MII_ADVERTISE       0x04        /* Advertisement control reg   */
23271 +#define MII_LPA             0x05        /* Link partner ability reg    */
23272 +#define MII_EXPANSION       0x06        /* Expansion register          */
23273 +/* Basic mode control register. */
23274 +#define BMCR_FULLDPLX           0x0100  /* Full duplex                 */
23275 +#define BMCR_ANENABLE           0x1000  /* Enable auto negotiation     */
23276 +/* Basic mode status register. */
23277 +#define BMSR_ERCAP              0x0001  /* Ext-reg capability          */
23278 +#define BMSR_ANEGCAPABLE        0x0008  /* Able to do auto-negotiation */
23279 +#define BMSR_10HALF             0x0800  /* Can do 10mbps, half-duplex  */
23280 +#define BMSR_10FULL             0x1000  /* Can do 10mbps, full-duplex  */
23281 +#define BMSR_100HALF            0x2000  /* Can do 100mbps, half-duplex */
23282 +#define BMSR_100FULL            0x4000  /* Can do 100mbps, full-duplex */
23283 +/* Advertisement control register. */
23284 +#define ADVERTISE_CSMA          0x0001  /* Only selector supported     */
23285 +#define ADVERTISE_10HALF        0x0020  /* Try for 10mbps half-duplex  */
23286 +#define ADVERTISE_10FULL        0x0040  /* Try for 10mbps full-duplex  */
23287 +#define ADVERTISE_100HALF       0x0080  /* Try for 100mbps half-duplex */
23288 +#define ADVERTISE_100FULL       0x0100  /* Try for 100mbps full-duplex */
23289 +#define ADVERTISE_ALL (ADVERTISE_10HALF | ADVERTISE_10FULL | \
23290 +                       ADVERTISE_100HALF | ADVERTISE_100FULL)
23291 +/* Expansion register for auto-negotiation. */
23292 +#define EXPANSION_ENABLENPAGE   0x0004  /* This enables npage words    */
23293 +#endif
23294 +
23295 +/*****************************************************************************/
23296 +/* 2.4.6 => 2.4.3 */
23297 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) )
23298 +
23299 +#ifndef pci_set_power_state
23300 +#define pci_set_power_state _kc_pci_set_power_state
23301 +extern int _kc_pci_set_power_state(struct pci_dev *dev, int state);
23302 +#endif
23303 +
23304 +#ifndef pci_enable_wake
23305 +#define pci_enable_wake _kc_pci_enable_wake
23306 +extern int _kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable);
23307 +#endif
23308 +
23309 +#ifndef pci_disable_device
23310 +#define pci_disable_device _kc_pci_disable_device
23311 +extern void _kc_pci_disable_device(struct pci_dev *pdev);
23312 +#endif
23313 +
23314 +/* PCI PM entry point syntax changed, so don't support suspend/resume */
23315 +#undef CONFIG_PM
23316 +
23317 +#endif /* 2.4.6 => 2.4.3 */
23318 +
23319 +#ifndef HAVE_PCI_SET_MWI
23320 +#define pci_set_mwi(X) pci_write_config_word(X, \
23321 +                              PCI_COMMAND, adapter->hw.bus.pci_cmd_word | \
23322 +                              PCI_COMMAND_INVALIDATE);
23323 +#define pci_clear_mwi(X) pci_write_config_word(X, \
23324 +                              PCI_COMMAND, adapter->hw.bus.pci_cmd_word & \
23325 +                              ~PCI_COMMAND_INVALIDATE);
23326 +#endif
23327 +
23328 +/*****************************************************************************/
23329 +/* 2.4.10 => 2.4.9 */
23330 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,10) )
23331 +
23332 +/**************************************/
23333 +/* MODULE API */
23334 +
23335 +#ifndef MODULE_LICENSE
23336 +       #define MODULE_LICENSE(X)
23337 +#endif
23338 +
23339 +/**************************************/
23340 +/* OTHER */
23341 +
23342 +#undef min
23343 +#define min(x,y) ({ \
23344 +       const typeof(x) _x = (x);       \
23345 +       const typeof(y) _y = (y);       \
23346 +       (void) (&_x == &_y);            \
23347 +       _x < _y ? _x : _y; })
23348 +
23349 +#undef max
23350 +#define max(x,y) ({ \
23351 +       const typeof(x) _x = (x);       \
23352 +       const typeof(y) _y = (y);       \
23353 +       (void) (&_x == &_y);            \
23354 +       _x > _y ? _x : _y; })
23355 +
23356 +#define min_t(type,x,y) ({ \
23357 +       type _x = (x); \
23358 +       type _y = (y); \
23359 +       _x < _y ? _x : _y; })
23360 +
23361 +#define max_t(type,x,y) ({ \
23362 +       type _x = (x); \
23363 +       type _y = (y); \
23364 +       _x > _y ? _x : _y; })
23365 +
23366 +#ifndef list_for_each_safe
23367 +#define list_for_each_safe(pos, n, head) \
23368 +       for (pos = (head)->next, n = pos->next; pos != (head); \
23369 +               pos = n, n = pos->next)
23370 +#endif
23371 +
23372 +#endif /* 2.4.10 => 2.4.9 */
23373 +
23374 +
23375 +/*****************************************************************************/
23376 +/* 2.4.13 => 2.4.10 */
23377 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) )
23378 +
23379 +/**************************************/
23380 +/* PCI DMA MAPPING */
23381 +
23382 +#ifndef virt_to_page
23383 +       #define virt_to_page(v) (mem_map + (virt_to_phys(v) >> PAGE_SHIFT))
23384 +#endif
23385 +
23386 +#ifndef pci_map_page
23387 +#define pci_map_page _kc_pci_map_page
23388 +extern u64 _kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, size_t size, int direction);
23389 +#endif
23390 +
23391 +#ifndef pci_unmap_page
23392 +#define pci_unmap_page _kc_pci_unmap_page
23393 +extern void _kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, int direction);
23394 +#endif
23395 +
23396 +/* pci_set_dma_mask takes dma_addr_t, which is only 32-bits prior to 2.4.13 */
23397 +
23398 +#undef DMA_32BIT_MASK
23399 +#define DMA_32BIT_MASK 0xffffffff
23400 +#undef DMA_64BIT_MASK
23401 +#define DMA_64BIT_MASK 0xffffffff
23402 +
23403 +/**************************************/
23404 +/* OTHER */
23405 +
23406 +#ifndef cpu_relax
23407 +#define cpu_relax()    rep_nop()
23408 +#endif
23409 +
23410 +struct vlan_ethhdr {
23411 +       unsigned char h_dest[ETH_ALEN];
23412 +       unsigned char h_source[ETH_ALEN];
23413 +       unsigned short h_vlan_proto;
23414 +       unsigned short h_vlan_TCI;
23415 +       unsigned short h_vlan_encapsulated_proto;
23416 +};
23417 +#endif /* 2.4.13 => 2.4.10 */
23418 +
23419 +/*****************************************************************************/
23420 +/* 2.4.17 => 2.4.12 */
23421 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,17) )
23422 +
23423 +#ifndef __devexit_p
23424 +       #define __devexit_p(x) &(x)
23425 +#endif
23426 +
23427 +#endif /* 2.4.17 => 2.4.12 */
23428 +
23429 +/*****************************************************************************/
23430 +/* 2.4.20 => 2.4.19 */
23431 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,20) )
23432 +
23433 +/* we won't support NAPI on less than 2.4.20 */
23434 +#ifdef NAPI
23435 +#undef NAPI
23436 +#endif
23437 +
23438 +#endif /* 2.4.20 => 2.4.19 */
23439 +
23440 +/*****************************************************************************/
23441 +/* < 2.4.21 */
23442 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,21) )
23443 +#define skb_pad(x,y) _kc_skb_pad(x, y)
23444 +struct sk_buff * _kc_skb_pad(struct sk_buff *skb, int pad);
23445 +#endif  /* < 2.4.21 */
23446 +
23447 +/*****************************************************************************/
23448 +/* 2.4.22 => 2.4.17 */
23449 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) )
23450 +#define pci_name(x)    ((x)->slot_name)
23451 +#endif
23452 +
23453 +/*****************************************************************************/
23454 +/*****************************************************************************/
23455 +/* 2.4.23 => 2.4.22 */
23456 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23) )
23457 +/*****************************************************************************/
23458 +#ifdef NAPI
23459 +#ifndef netif_poll_disable
23460 +#define netif_poll_disable(x) _kc_netif_poll_disable(x)
23461 +static inline void _kc_netif_poll_disable(struct net_device *netdev)
23462 +{
23463 +       while (test_and_set_bit(__LINK_STATE_RX_SCHED, &netdev->state)) {
23464 +               /* No hurry */
23465 +               current->state = TASK_INTERRUPTIBLE;
23466 +               schedule_timeout(1);
23467 +       }
23468 +}
23469 +#endif
23470 +#ifndef netif_poll_enable
23471 +#define netif_poll_enable(x) _kc_netif_poll_enable(x)
23472 +static inline void _kc_netif_poll_enable(struct net_device *netdev)
23473 +{
23474 +       clear_bit(__LINK_STATE_RX_SCHED, &netdev->state);
23475 +}
23476 +#endif
23477 +#endif /* NAPI */
23478 +#ifndef netif_tx_disable
23479 +#define netif_tx_disable(x) _kc_netif_tx_disable(x)
23480 +static inline void _kc_netif_tx_disable(struct net_device *dev)
23481 +{
23482 +       spin_lock_bh(&dev->xmit_lock);
23483 +       netif_stop_queue(dev);
23484 +       spin_unlock_bh(&dev->xmit_lock);
23485 +}
23486 +#endif
23487 +#endif /* 2.4.23 => 2.4.22 */
23488 +
23489 +/*****************************************************************************/
23490 +/* 2.6.4 => 2.6.0 */
23491 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,25) || \
23492 +    ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \
23493 +      LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) )
23494 +#define ETHTOOL_OPS_COMPAT
23495 +#endif /* 2.6.4 => 2.6.0 */
23496 +
23497 +/*****************************************************************************/
23498 +/* 2.5.71 => 2.4.x */
23499 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,71) )
23500 +#define sk_protocol protocol
23501 +#define pci_get_device pci_find_device
23502 +#endif /* 2.5.71 => 2.4.x */
23503 +
23504 +/*****************************************************************************/
23505 +/* < 2.4.27 or 2.6.0 <= 2.6.5 */
23506 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) || \
23507 +    ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \
23508 +      LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) )
23509 +
23510 +#ifndef netif_msg_init
23511 +#define netif_msg_init _kc_netif_msg_init
23512 +static inline u32 _kc_netif_msg_init(int debug_value, int default_msg_enable_bits)
23513 +{
23514 +       /* use default */
23515 +       if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
23516 +               return default_msg_enable_bits;
23517 +       if (debug_value == 0) /* no output */
23518 +               return 0;
23519 +       /* set low N bits */
23520 +       return (1 << debug_value) -1;
23521 +}
23522 +#endif
23523 +
23524 +#endif /* < 2.4.27 or 2.6.0 <= 2.6.5 */
23525 +/*****************************************************************************/
23526 +#if (( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) || \
23527 +     (( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) && \
23528 +      ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) )))
23529 +#define netdev_priv(x) x->priv
23530 +#endif
23531 +
23532 +/*****************************************************************************/
23533 +/* <= 2.5.0 */
23534 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) )
23535 +#undef pci_register_driver
23536 +#define pci_register_driver pci_module_init
23537 +
23538 +#define dev_err(__unused_dev, format, arg...)            \
23539 +       printk(KERN_ERR "%s: " format, pci_name(pdev) , ## arg)
23540 +#define dev_warn(__unused_dev, format, arg...)            \
23541 +       printk(KERN_WARNING "%s: " format, pci_name(pdev) , ## arg)
23542 +
23543 +/* hlist_* code - double linked lists */
23544 +struct hlist_head {
23545 +       struct hlist_node *first;
23546 +};
23547 +
23548 +struct hlist_node {
23549 +       struct hlist_node *next, **pprev;
23550 +};
23551 +
23552 +static inline void __hlist_del(struct hlist_node *n)
23553 +{
23554 +       struct hlist_node *next = n->next;
23555 +       struct hlist_node **pprev = n->pprev;
23556 +       *pprev = next;
23557 +       if (next)
23558 +       next->pprev = pprev;
23559 +}
23560 +
23561 +static inline void hlist_del(struct hlist_node *n)
23562 +{
23563 +       __hlist_del(n);
23564 +       n->next = NULL;
23565 +       n->pprev = NULL;
23566 +}
23567 +
23568 +static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
23569 +{
23570 +       struct hlist_node *first = h->first;
23571 +       n->next = first;
23572 +       if (first)
23573 +               first->pprev = &n->next;
23574 +       h->first = n;
23575 +       n->pprev = &h->first;
23576 +}
23577 +
23578 +static inline int hlist_empty(const struct hlist_head *h)
23579 +{
23580 +       return !h->first;
23581 +}
23582 +#define HLIST_HEAD_INIT { .first = NULL }
23583 +#define HLIST_HEAD(name) struct hlist_head name = {  .first = NULL }
23584 +#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
23585 +static inline void INIT_HLIST_NODE(struct hlist_node *h)
23586 +{
23587 +       h->next = NULL;
23588 +       h->pprev = NULL;
23589 +}
23590 +#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
23591 +
23592 +#define hlist_for_each_entry(tpos, pos, head, member)                    \
23593 +       for (pos = (head)->first;                                        \
23594 +            pos && ({ prefetch(pos->next); 1;}) &&                      \
23595 +               ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
23596 +            pos = pos->next)
23597 +
23598 +#define hlist_for_each_entry_safe(tpos, pos, n, head, member)            \
23599 +       for (pos = (head)->first;                                        \
23600 +            pos && ({ n = pos->next; 1; }) &&                           \
23601 +               ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
23602 +            pos = n)
23603 +
23604 +/* we ignore GFP here */
23605 +#define dma_alloc_coherent(dv, sz, dma, gfp) \
23606 +       pci_alloc_consistent(pdev, (sz), (dma))
23607 +#define dma_free_coherent(dv, sz, addr, dma_addr) \
23608 +       pci_free_consistent(pdev, (sz), (addr), (dma_addr))
23609 +
23610 +#ifndef might_sleep
23611 +#define might_sleep()
23612 +#endif
23613 +
23614 +#endif /* <= 2.5.0 */
23615 +
23616 +/*****************************************************************************/
23617 +/* 2.5.28 => 2.4.23 */
23618 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) )
23619 +
23620 +static inline void _kc_synchronize_irq(void)
23621 +{
23622 +       synchronize_irq();
23623 +}
23624 +#undef synchronize_irq
23625 +#define synchronize_irq(X) _kc_synchronize_irq()
23626 +
23627 +#include <linux/tqueue.h>
23628 +#define work_struct tq_struct
23629 +#undef INIT_WORK
23630 +#define INIT_WORK(a,b) INIT_TQUEUE(a,(void (*)(void *))b,a)
23631 +#undef container_of
23632 +#define container_of list_entry
23633 +#define schedule_work schedule_task
23634 +#define flush_scheduled_work flush_scheduled_tasks
23635 +#define cancel_work_sync(x) flush_scheduled_work()
23636 +
23637 +#endif /* 2.5.28 => 2.4.23 */
23638 +
23639 +/*****************************************************************************/
23640 +/* 2.6.0 => 2.5.28 */
23641 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
23642 +#define MODULE_INFO(version, _version)
23643 +#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
23644 +#define CONFIG_E1000_DISABLE_PACKET_SPLIT 1
23645 +#endif
23646 +#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
23647 +#define CONFIG_IGB_DISABLE_PACKET_SPLIT 1
23648 +#endif
23649 +
23650 +#define pci_set_consistent_dma_mask(dev,mask) 1
23651 +
23652 +#undef dev_put
23653 +#define dev_put(dev) __dev_put(dev)
23654 +
23655 +#ifndef skb_fill_page_desc
23656 +#define skb_fill_page_desc _kc_skb_fill_page_desc
23657 +extern void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size);
23658 +#endif
23659 +
23660 +#undef ALIGN
23661 +#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1))
23662 +
23663 +#ifndef page_count
23664 +#define page_count(p) atomic_read(&(p)->count)
23665 +#endif
23666 +
23667 +/* find_first_bit and find_next bit are not defined for most
23668 + * 2.4 kernels (except for the redhat 2.4.21 kernels
23669 + */
23670 +#include <linux/bitops.h>
23671 +#define BITOP_WORD(nr)          ((nr) / BITS_PER_LONG)
23672 +#undef find_next_bit
23673 +#define find_next_bit _kc_find_next_bit
23674 +extern unsigned long _kc_find_next_bit(const unsigned long *addr,
23675 +                                       unsigned long size,
23676 +                                       unsigned long offset);
23677 +#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
23678 +
23679 +#endif /* 2.6.0 => 2.5.28 */
23680 +
23681 +/*****************************************************************************/
23682 +/* 2.6.4 => 2.6.0 */
23683 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
23684 +#define MODULE_VERSION(_version) MODULE_INFO(version, _version)
23685 +#endif /* 2.6.4 => 2.6.0 */
23686 +
23687 +/*****************************************************************************/
23688 +/* 2.6.5 => 2.6.0 */
23689 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) )
23690 +#define pci_dma_sync_single_for_cpu    pci_dma_sync_single
23691 +#define pci_dma_sync_single_for_device pci_dma_sync_single_for_cpu
23692 +#endif /* 2.6.5 => 2.6.0 */
23693 +
23694 +/*****************************************************************************/
23695 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,6) )
23696 +/* taken from 2.6 include/linux/bitmap.h */
23697 +#undef bitmap_zero
23698 +#define bitmap_zero _kc_bitmap_zero
23699 +static inline void _kc_bitmap_zero(unsigned long *dst, int nbits)
23700 +{
23701 +        if (nbits <= BITS_PER_LONG)
23702 +                *dst = 0UL;
23703 +        else {
23704 +                int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
23705 +                memset(dst, 0, len);
23706 +        }
23707 +}
23708 +#define random_ether_addr _kc_random_ether_addr
23709 +static inline void _kc_random_ether_addr(u8 *addr)
23710 +{
23711 +        get_random_bytes(addr, ETH_ALEN);
23712 +        addr[0] &= 0xfe; /* clear multicast */
23713 +        addr[0] |= 0x02; /* set local assignment */
23714 +}
23715 +#endif /* < 2.6.6 */
23716 +
23717 +/*****************************************************************************/
23718 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) )
23719 +#undef if_mii
23720 +#define if_mii _kc_if_mii
23721 +static inline struct mii_ioctl_data *_kc_if_mii(struct ifreq *rq)
23722 +{
23723 +       return (struct mii_ioctl_data *) &rq->ifr_ifru;
23724 +}
23725 +#endif /* < 2.6.7 */
23726 +
23727 +/*****************************************************************************/
23728 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) )
23729 +#ifndef PCI_EXP_DEVCTL
23730 +#define PCI_EXP_DEVCTL 8
23731 +#endif
23732 +#ifndef PCI_EXP_DEVCTL_CERE
23733 +#define PCI_EXP_DEVCTL_CERE 0x0001
23734 +#endif
23735 +#define msleep(x)      do { set_current_state(TASK_UNINTERRUPTIBLE); \
23736 +                               schedule_timeout((x * HZ)/1000 + 2); \
23737 +                       } while (0)
23738 +
23739 +#endif /* < 2.6.8 */
23740 +
23741 +/*****************************************************************************/
23742 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9))
23743 +#include <net/dsfield.h>
23744 +#define __iomem
23745 +
23746 +#ifndef kcalloc
23747 +#define kcalloc(n, size, flags) _kc_kzalloc(((n) * (size)), flags)
23748 +extern void *_kc_kzalloc(size_t size, int flags);
23749 +#endif
23750 +#define MSEC_PER_SEC    1000L
23751 +static inline unsigned int _kc_jiffies_to_msecs(const unsigned long j)
23752 +{
23753 +#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
23754 +       return (MSEC_PER_SEC / HZ) * j;
23755 +#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
23756 +       return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
23757 +#else
23758 +       return (j * MSEC_PER_SEC) / HZ;
23759 +#endif
23760 +}
23761 +static inline unsigned long _kc_msecs_to_jiffies(const unsigned int m)
23762 +{
23763 +       if (m > _kc_jiffies_to_msecs(MAX_JIFFY_OFFSET))
23764 +               return MAX_JIFFY_OFFSET;
23765 +#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
23766 +       return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
23767 +#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
23768 +       return m * (HZ / MSEC_PER_SEC);
23769 +#else
23770 +       return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC;
23771 +#endif
23772 +}
23773 +
23774 +#define msleep_interruptible _kc_msleep_interruptible
23775 +static inline unsigned long _kc_msleep_interruptible(unsigned int msecs)
23776 +{
23777 +       unsigned long timeout = _kc_msecs_to_jiffies(msecs) + 1;
23778 +
23779 +       while (timeout && !signal_pending(current)) {
23780 +               __set_current_state(TASK_INTERRUPTIBLE);
23781 +               timeout = schedule_timeout(timeout);
23782 +       }
23783 +       return _kc_jiffies_to_msecs(timeout);
23784 +}
23785 +
23786 +/* Basic mode control register. */
23787 +#define BMCR_SPEED1000         0x0040  /* MSB of Speed (1000)         */
23788 +
23789 +#ifndef __le16
23790 +#define __le16 u16
23791 +#endif
23792 +#ifndef __le32
23793 +#define __le32 u32
23794 +#endif
23795 +#ifndef __le64
23796 +#define __le64 u64
23797 +#endif
23798 +#ifndef __be16
23799 +#define __be16 u16
23800 +#endif
23801 +
23802 +#ifdef pci_dma_mapping_error
23803 +#undef pci_dma_mapping_error
23804 +#endif
23805 +#define pci_dma_mapping_error _kc_pci_dma_mapping_error
23806 +static inline int _kc_pci_dma_mapping_error(struct pci_dev *pdev,
23807 +                                            dma_addr_t dma_addr)
23808 +{
23809 +       return dma_addr == 0;
23810 +}
23811 +#endif /* < 2.6.9 */
23812 +
23813 +/*****************************************************************************/
23814 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) )
23815 +#ifdef module_param_array_named
23816 +#undef module_param_array_named
23817 +#define module_param_array_named(name, array, type, nump, perm)          \
23818 +       static struct kparam_array __param_arr_##name                    \
23819 +       = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type, \
23820 +           sizeof(array[0]), array };                                   \
23821 +       module_param_call(name, param_array_set, param_array_get,        \
23822 +                         &__param_arr_##name, perm)
23823 +#endif /* module_param_array_named */
23824 +#endif /* < 2.6.10 */
23825 +
23826 +/*****************************************************************************/
23827 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) )
23828 +#define PCI_D0      0
23829 +#define PCI_D1      1
23830 +#define PCI_D2      2
23831 +#define PCI_D3hot   3
23832 +#define PCI_D3cold  4
23833 +typedef int pci_power_t;
23834 +#define pci_choose_state(pdev,state) state
23835 +#define PMSG_SUSPEND 3
23836 +#define PCI_EXP_LNKCTL 16
23837 +
23838 +#undef NETIF_F_LLTX
23839 +
23840 +#ifndef ARCH_HAS_PREFETCH
23841 +#define prefetch(X)
23842 +#endif
23843 +
23844 +#ifndef NET_IP_ALIGN
23845 +#define NET_IP_ALIGN 2
23846 +#endif
23847 +
23848 +#define KC_USEC_PER_SEC        1000000L
23849 +#define usecs_to_jiffies _kc_usecs_to_jiffies
23850 +static inline unsigned int _kc_jiffies_to_usecs(const unsigned long j)
23851 +{
23852 +#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ)
23853 +       return (KC_USEC_PER_SEC / HZ) * j;
23854 +#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC)
23855 +       return (j + (HZ / KC_USEC_PER_SEC) - 1)/(HZ / KC_USEC_PER_SEC);
23856 +#else
23857 +       return (j * KC_USEC_PER_SEC) / HZ;
23858 +#endif
23859 +}
23860 +static inline unsigned long _kc_usecs_to_jiffies(const unsigned int m)
23861 +{
23862 +       if (m > _kc_jiffies_to_usecs(MAX_JIFFY_OFFSET))
23863 +               return MAX_JIFFY_OFFSET;
23864 +#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ)
23865 +       return (m + (KC_USEC_PER_SEC / HZ) - 1) / (KC_USEC_PER_SEC / HZ);
23866 +#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC)
23867 +       return m * (HZ / KC_USEC_PER_SEC);
23868 +#else
23869 +       return (m * HZ + KC_USEC_PER_SEC - 1) / KC_USEC_PER_SEC;
23870 +#endif
23871 +}
23872 +#endif /* < 2.6.11 */
23873 +
23874 +/*****************************************************************************/
23875 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) )
23876 +#include <linux/reboot.h>
23877 +#define USE_REBOOT_NOTIFIER
23878 +
23879 +/* Generic MII registers. */
23880 +#define MII_CTRL1000        0x09        /* 1000BASE-T control          */
23881 +#define MII_STAT1000        0x0a        /* 1000BASE-T status           */
23882 +/* Advertisement control register. */
23883 +#define ADVERTISE_PAUSE_CAP     0x0400  /* Try for pause               */
23884 +#define ADVERTISE_PAUSE_ASYM    0x0800  /* Try for asymmetric pause     */
23885 +/* 1000BASE-T Control register */
23886 +#define ADVERTISE_1000FULL      0x0200  /* Advertise 1000BASE-T full duplex */
23887 +#endif /* < 2.6.12 */
23888 +
23889 +/*****************************************************************************/
23890 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) )
23891 +#define pm_message_t u32
23892 +#ifndef kzalloc
23893 +#define kzalloc _kc_kzalloc
23894 +extern void *_kc_kzalloc(size_t size, int flags);
23895 +#endif
23896 +
23897 +/* Generic MII registers. */
23898 +#define MII_ESTATUS        0x0f        /* Extended Status */
23899 +/* Basic mode status register. */
23900 +#define BMSR_ESTATEN           0x0100  /* Extended Status in R15 */
23901 +/* Extended status register. */
23902 +#define ESTATUS_1000_TFULL     0x2000  /* Can do 1000BT Full */
23903 +#define ESTATUS_1000_THALF     0x1000  /* Can do 1000BT Half */
23904 +#endif /* < 2.6.14 */
23905 +
23906 +/*****************************************************************************/
23907 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15) )
23908 +#ifndef device_can_wakeup
23909 +#define device_can_wakeup(dev) (1)
23910 +#endif
23911 +#ifndef device_set_wakeup_enable
23912 +#define device_set_wakeup_enable(dev, val)     do{}while(0)
23913 +#endif
23914 +#ifndef device_init_wakeup
23915 +#define device_init_wakeup(dev,val) do {} while (0)
23916 +#endif
23917 +#endif /* < 2.6.15 */
23918 +
23919 +/*****************************************************************************/
23920 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) )
23921 +#undef DEFINE_MUTEX
23922 +#define DEFINE_MUTEX(x)        DECLARE_MUTEX(x)
23923 +#define mutex_lock(x)  down_interruptible(x)
23924 +#define mutex_unlock(x)        up(x)
23925 +
23926 +#undef HAVE_PCI_ERS
23927 +#else /* 2.6.16 and above */
23928 +#undef HAVE_PCI_ERS
23929 +#define HAVE_PCI_ERS
23930 +#endif /* < 2.6.16 */
23931 +
23932 +/*****************************************************************************/
23933 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) )
23934 +
23935 +#ifndef IRQ_HANDLED
23936 +#define irqreturn_t void
23937 +#define IRQ_HANDLED
23938 +#define IRQ_NONE
23939 +#endif
23940 +
23941 +#ifndef IRQF_PROBE_SHARED
23942 +#ifdef SA_PROBEIRQ
23943 +#define IRQF_PROBE_SHARED SA_PROBEIRQ
23944 +#else
23945 +#define IRQF_PROBE_SHARED 0
23946 +#endif
23947 +#endif
23948 +
23949 +#ifndef IRQF_SHARED
23950 +#define IRQF_SHARED SA_SHIRQ
23951 +#endif
23952 +
23953 +#ifndef ARRAY_SIZE
23954 +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
23955 +#endif
23956 +
23957 +#ifndef netdev_alloc_skb
23958 +#define netdev_alloc_skb _kc_netdev_alloc_skb
23959 +extern struct sk_buff *_kc_netdev_alloc_skb(struct net_device *dev,
23960 +                                            unsigned int length);
23961 +#endif
23962 +
23963 +#ifndef skb_is_gso
23964 +#ifdef NETIF_F_TSO
23965 +#define skb_is_gso _kc_skb_is_gso
23966 +static inline int _kc_skb_is_gso(const struct sk_buff *skb)
23967 +{
23968 +       return skb_shinfo(skb)->gso_size;
23969 +}
23970 +#else
23971 +#define skb_is_gso(a) 0
23972 +#endif
23973 +#endif
23974 +
23975 +#endif /* < 2.6.18 */
23976 +
23977 +/*****************************************************************************/
23978 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) )
23979 +
23980 +#ifndef DIV_ROUND_UP
23981 +#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
23982 +#endif
23983 +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) )
23984 +#ifndef RHEL_RELEASE_CODE
23985 +#define RHEL_RELEASE_CODE 0
23986 +#endif
23987 +#ifndef RHEL_RELEASE_VERSION
23988 +#define RHEL_RELEASE_VERSION(a,b) 0
23989 +#endif
23990 +#ifndef AX_RELEASE_CODE
23991 +#define AX_RELEASE_CODE 0
23992 +#endif
23993 +#ifndef AX_RELEASE_VERSION
23994 +#define AX_RELEASE_VERSION(a,b) 0
23995 +#endif
23996 +#if (!(( RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,4) ) && ( RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0) ) || ( RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,0) ) || (AX_RELEASE_CODE > AX_RELEASE_VERSION(3,0))))
23997 +typedef irqreturn_t (*irq_handler_t)(int, void*, struct pt_regs *);
23998 +#endif
23999 +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))
24000 +#undef CONFIG_INET_LRO
24001 +#undef CONFIG_INET_LRO_MODULE
24002 +#ifdef IXGBE_FCOE
24003 +#undef CONFIG_FCOE
24004 +#undef CONFIG_FCOE_MODULE
24005 +#endif /* IXGBE_FCOE */
24006 +#endif
24007 +typedef irqreturn_t (*new_handler_t)(int, void*);
24008 +static inline irqreturn_t _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id)
24009 +#else /* 2.4.x */
24010 +typedef void (*irq_handler_t)(int, void*, struct pt_regs *);
24011 +typedef void (*new_handler_t)(int, void*);
24012 +static inline int _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id)
24013 +#endif /* >= 2.5.x */
24014 +{
24015 +       irq_handler_t new_handler = (irq_handler_t) handler;
24016 +       return request_irq(irq, new_handler, flags, devname, dev_id);
24017 +}
24018 +
24019 +#undef request_irq
24020 +#define request_irq(irq, handler, flags, devname, dev_id) _kc_request_irq((irq), (handler), (flags), (devname), (dev_id))
24021 +
24022 +#define irq_handler_t new_handler_t
24023 +/* pci_restore_state and pci_save_state handles MSI/PCIE from 2.6.19 */
24024 +#define PCIE_CONFIG_SPACE_LEN 256
24025 +#define PCI_CONFIG_SPACE_LEN 64
24026 +#define PCIE_LINK_STATUS 0x12
24027 +#define pci_config_space_ich8lan() do {} while(0)
24028 +#undef pci_save_state
24029 +extern int _kc_pci_save_state(struct pci_dev *);
24030 +#define pci_save_state(pdev) _kc_pci_save_state(pdev)
24031 +#undef pci_restore_state
24032 +extern void _kc_pci_restore_state(struct pci_dev *);
24033 +#define pci_restore_state(pdev) _kc_pci_restore_state(pdev)
24034 +#ifdef HAVE_PCI_ERS
24035 +#undef free_netdev
24036 +extern void _kc_free_netdev(struct net_device *);
24037 +#define free_netdev(netdev) _kc_free_netdev(netdev)
24038 +#endif
24039 +static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev)
24040 +{
24041 +       return 0;
24042 +}
24043 +#define pci_disable_pcie_error_reporting(dev) do {} while (0)
24044 +#define pci_cleanup_aer_uncorrect_error_status(dev) do {} while (0)
24045 +#else /* 2.6.19 */
24046 +#include <linux/aer.h>
24047 +#endif /* < 2.6.19 */
24048 +
24049 +/*****************************************************************************/
24050 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) )
24051 +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,28) )
24052 +#undef INIT_WORK
24053 +#define INIT_WORK(_work, _func) \
24054 +do { \
24055 +       INIT_LIST_HEAD(&(_work)->entry); \
24056 +       (_work)->pending = 0; \
24057 +       (_work)->func = (void (*)(void *))_func; \
24058 +       (_work)->data = _work; \
24059 +       init_timer(&(_work)->timer); \
24060 +} while (0)
24061 +#endif
24062 +
24063 +#ifndef PCI_VDEVICE
24064 +#define PCI_VDEVICE(ven, dev)        \
24065 +       PCI_VENDOR_ID_##ven, (dev),  \
24066 +       PCI_ANY_ID, PCI_ANY_ID, 0, 0
24067 +#endif
24068 +
24069 +#ifndef round_jiffies
24070 +#define round_jiffies(x) x
24071 +#endif
24072 +
24073 +#define csum_offset csum
24074 +
24075 +#endif /* < 2.6.20 */
24076 +
24077 +/*****************************************************************************/
24078 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) )
24079 +#define to_net_dev(class) container_of(class, struct net_device, class_dev)
24080 +#define NETDEV_CLASS_DEV
24081 +#define vlan_group_get_device(vg, id) (vg->vlan_devices[id])
24082 +#define vlan_group_set_device(vg, id, dev) if (vg) vg->vlan_devices[id] = dev;
24083 +#define pci_channel_offline(pdev) (pdev->error_state && \
24084 +       pdev->error_state != pci_channel_io_normal)
24085 +#define pci_request_selected_regions(pdev, bars, name) \
24086 +        pci_request_regions(pdev, name)
24087 +#define pci_release_selected_regions(pdev, bars) pci_release_regions(pdev);
24088 +#endif /* < 2.6.21 */
24089 +
24090 +/*****************************************************************************/
24091 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) )
24092 +#define tcp_hdr(skb) (skb->h.th)
24093 +#define tcp_hdrlen(skb) (skb->h.th->doff << 2)
24094 +#define skb_transport_offset(skb) (skb->h.raw - skb->data)
24095 +#define skb_transport_header(skb) (skb->h.raw)
24096 +#define ipv6_hdr(skb) (skb->nh.ipv6h)
24097 +#define ip_hdr(skb) (skb->nh.iph)
24098 +#define skb_network_offset(skb) (skb->nh.raw - skb->data)
24099 +#define skb_network_header(skb) (skb->nh.raw)
24100 +#define skb_tail_pointer(skb) skb->tail
24101 +#define skb_copy_to_linear_data_offset(skb, offset, from, len) \
24102 +                                 memcpy(skb->data + offset, from, len)
24103 +#define skb_network_header_len(skb) (skb->h.raw - skb->nh.raw)
24104 +#define pci_register_driver pci_module_init
24105 +#define skb_mac_header(skb) skb->mac.raw
24106 +
24107 +#ifdef NETIF_F_MULTI_QUEUE
24108 +#ifndef alloc_etherdev_mq
24109 +#define alloc_etherdev_mq(_a, _b) alloc_etherdev(_a)
24110 +#endif
24111 +#endif /* NETIF_F_MULTI_QUEUE */
24112 +
24113 +#ifndef ETH_FCS_LEN
24114 +#define ETH_FCS_LEN 4
24115 +#endif
24116 +#define cancel_work_sync(x) flush_scheduled_work()
24117 +#ifndef udp_hdr
24118 +#define udp_hdr _udp_hdr
24119 +static inline struct udphdr *_udp_hdr(const struct sk_buff *skb)
24120 +{
24121 +       return (struct udphdr *)skb_transport_header(skb);
24122 +}
24123 +#endif
24124 +#else /* 2.6.22 */
24125 +#define ETH_TYPE_TRANS_SETS_DEV
24126 +#endif /* < 2.6.22 */
24127 +
24128 +/*****************************************************************************/
24129 +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22) )
24130 +#undef ETHTOOL_GPERMADDR
24131 +#undef SET_MODULE_OWNER
24132 +#define SET_MODULE_OWNER(dev) do { } while (0)
24133 +#endif /* > 2.6.22 */
24134 +
24135 +/*****************************************************************************/
24136 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) )
24137 +#define netif_subqueue_stopped(_a, _b) 0
24138 +#endif /* < 2.6.23 */
24139 +
24140 +/*****************************************************************************/
24141 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
24142 +/* if GRO is supported then the napi struct must already exist */
24143 +#ifndef NETIF_F_GRO
24144 +/* NAPI API changes in 2.6.24 break everything */
24145 +struct napi_struct {
24146 +       /* used to look up the real NAPI polling routine */
24147 +       int (*poll)(struct napi_struct *, int);
24148 +       struct net_device *dev;
24149 +       int weight;
24150 +};
24151 +#endif
24152 +
24153 +#ifdef NAPI
24154 +extern int __kc_adapter_clean(struct net_device *, int *);
24155 +extern struct net_device *napi_to_poll_dev(struct napi_struct *napi);
24156 +#define napi_enable(napi) do { \
24157 +       struct napi_struct *_napi = (napi); \
24158 +       /* abuse if_port as a counter */ \
24159 +       if (!_napi->dev->if_port) { \
24160 +               netif_poll_enable(_napi->dev); \
24161 +       } \
24162 +       ++_napi->dev->if_port; \
24163 +       netif_poll_enable(napi_to_poll_dev(_napi)); \
24164 +       } while (0)
24165 +#define napi_disable(napi) do { \
24166 +       struct napi_struct *_napi = (napi); \
24167 +       netif_poll_disable(napi_to_poll_dev(_napi)); \
24168 +       --_napi->dev->if_port; \
24169 +       if (!_napi->dev->if_port) \
24170 +               netif_poll_disable(_napi->dev); \
24171 +       } while (0)
24172 +#define netif_napi_add(_netdev, _napi, _poll, _weight) \
24173 +       do { \
24174 +               struct napi_struct *__napi = (_napi); \
24175 +               struct net_device *poll_dev = napi_to_poll_dev(__napi); \
24176 +               poll_dev->poll = &(__kc_adapter_clean); \
24177 +               poll_dev->priv = (_napi); \
24178 +               poll_dev->weight = (_weight); \
24179 +               set_bit(__LINK_STATE_RX_SCHED, &poll_dev->state); \
24180 +               set_bit(__LINK_STATE_START, &poll_dev->state);\
24181 +               dev_hold(poll_dev); \
24182 +               _netdev->poll = &(__kc_adapter_clean); \
24183 +               _netdev->weight = (_weight); \
24184 +               __napi->poll = &(_poll); \
24185 +               __napi->weight = (_weight); \
24186 +               __napi->dev = (_netdev); \
24187 +               set_bit(__LINK_STATE_RX_SCHED, &(_netdev)->state); \
24188 +       } while (0)
24189 +#define netif_napi_del(_napi) \
24190 +       do { \
24191 +               struct net_device *poll_dev = napi_to_poll_dev(_napi); \
24192 +               WARN_ON(!test_bit(__LINK_STATE_RX_SCHED, &poll_dev->state)); \
24193 +               dev_put(poll_dev); \
24194 +               memset(poll_dev, 0, sizeof(struct net_device));\
24195 +       } while (0)
24196 +#define napi_schedule_prep(_napi) \
24197 +       (netif_running((_napi)->dev) && netif_rx_schedule_prep(napi_to_poll_dev(_napi)))
24198 +#define napi_schedule(_napi) netif_rx_schedule(napi_to_poll_dev(_napi))
24199 +#define __napi_schedule(_napi) __netif_rx_schedule(napi_to_poll_dev(_napi))
24200 +#ifndef NETIF_F_GRO
24201 +#define napi_complete(_napi) netif_rx_complete(napi_to_poll_dev(_napi))
24202 +#else
24203 +#define napi_complete(_napi) \
24204 +       do { \
24205 +               napi_gro_flush(_napi); \
24206 +               netif_rx_complete(napi_to_poll_dev(_napi)); \
24207 +       } while (0)
24208 +#endif /* NETIF_F_GRO */
24209 +#else /* NAPI */
24210 +#define netif_napi_add(_netdev, _napi, _poll, _weight) \
24211 +       do { \
24212 +               struct napi_struct *__napi = _napi; \
24213 +               _netdev->poll = &(_poll); \
24214 +               _netdev->weight = (_weight); \
24215 +               __napi->poll = &(_poll); \
24216 +               __napi->weight = (_weight); \
24217 +               __napi->dev = (_netdev); \
24218 +       } while (0)
24219 +#define netif_napi_del(_a) do {} while (0)
24220 +#endif /* NAPI */
24221 +
24222 +#undef dev_get_by_name
24223 +#define dev_get_by_name(_a, _b) dev_get_by_name(_b)
24224 +#define __netif_subqueue_stopped(_a, _b) netif_subqueue_stopped(_a, _b)
24225 +#define DMA_BIT_MASK(n)        (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
24226 +#else /* < 2.6.24 */
24227 +#define HAVE_ETHTOOL_GET_SSET_COUNT
24228 +#define HAVE_NETDEV_NAPI_LIST
24229 +#endif /* < 2.6.24 */
24230 +
24231 +/*****************************************************************************/
24232 +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,24) )
24233 +#include <linux/pm_qos_params.h>
24234 +#endif /* > 2.6.24 */
24235 +
24236 +/*****************************************************************************/
24237 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) )
24238 +#define PM_QOS_CPU_DMA_LATENCY 1
24239 +
24240 +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) )
24241 +#include <linux/latency.h>
24242 +#define PM_QOS_DEFAULT_VALUE   INFINITE_LATENCY
24243 +#define pm_qos_add_requirement(pm_qos_class, name, value) \
24244 +               set_acceptable_latency(name, value)
24245 +#define pm_qos_remove_requirement(pm_qos_class, name) \
24246 +               remove_acceptable_latency(name)
24247 +#define pm_qos_update_requirement(pm_qos_class, name, value) \
24248 +               modify_acceptable_latency(name, value)
24249 +#else
24250 +#define PM_QOS_DEFAULT_VALUE   -1
24251 +#define pm_qos_add_requirement(pm_qos_class, name, value)
24252 +#define pm_qos_remove_requirement(pm_qos_class, name)
24253 +#define pm_qos_update_requirement(pm_qos_class, name, value) { \
24254 +       if (value != PM_QOS_DEFAULT_VALUE) { \
24255 +               printk(KERN_WARNING "%s: unable to set PM QoS requirement\n", \
24256 +                       pci_name(adapter->pdev)); \
24257 +       } \
24258 +}
24259 +#endif /* > 2.6.18 */
24260 +
24261 +#define pci_enable_device_mem(pdev) pci_enable_device(pdev)
24262 +
24263 +#endif /* < 2.6.25 */
24264 +
24265 +/*****************************************************************************/
24266 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) )
24267 +#else /* < 2.6.26 */
24268 +#include <linux/pci-aspm.h>
24269 +#define HAVE_NETDEV_VLAN_FEATURES
24270 +#endif /* < 2.6.26 */
24271 +/*****************************************************************************/
24272 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) )
24273 +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15) )
24274 +#if (((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) && defined(CONFIG_PM)) || ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)) && defined(CONFIG_PM_SLEEP)))
24275 +#undef device_set_wakeup_enable
24276 +#define device_set_wakeup_enable(dev, val) \
24277 +       do { \
24278 +               u16 pmc = 0; \
24279 +               int pm = pci_find_capability(adapter->pdev, PCI_CAP_ID_PM); \
24280 +               if (pm) { \
24281 +                       pci_read_config_word(adapter->pdev, pm + PCI_PM_PMC, \
24282 +                               &pmc); \
24283 +               } \
24284 +               (dev)->power.can_wakeup = !!(pmc >> 11); \
24285 +               (dev)->power.should_wakeup = (val && (pmc >> 11)); \
24286 +       } while (0)
24287 +#endif /* 2.6.15-2.6.22 and CONFIG_PM or 2.6.23-2.6.25 and CONFIG_PM_SLEEP */
24288 +#endif /* 2.6.15 through 2.6.27 */
24289 +#ifndef netif_napi_del
24290 +#define netif_napi_del(_a) do {} while (0)
24291 +#ifdef NAPI
24292 +#ifdef CONFIG_NETPOLL
24293 +#undef netif_napi_del
24294 +#define netif_napi_del(_a) list_del(&(_a)->dev_list);
24295 +#endif
24296 +#endif
24297 +#endif /* netif_napi_del */
24298 +#ifndef pci_dma_mapping_error
24299 +#define pci_dma_mapping_error(pdev, dma_addr) pci_dma_mapping_error(dma_addr)
24300 +#endif
24301 +
24302 +#ifdef CONFIG_NETDEVICES_MULTIQUEUE
24303 +#define HAVE_TX_MQ
24304 +#endif
24305 +
24306 +#ifdef HAVE_TX_MQ
24307 +extern void _kc_netif_tx_stop_all_queues(struct net_device *);
24308 +extern void _kc_netif_tx_wake_all_queues(struct net_device *);
24309 +extern void _kc_netif_tx_start_all_queues(struct net_device *);
24310 +#define netif_tx_stop_all_queues(a) _kc_netif_tx_stop_all_queues(a)
24311 +#define netif_tx_wake_all_queues(a) _kc_netif_tx_wake_all_queues(a)
24312 +#define netif_tx_start_all_queues(a) _kc_netif_tx_start_all_queues(a)
24313 +#undef netif_stop_subqueue
24314 +#define netif_stop_subqueue(_ndev,_qi) do { \
24315 +       if (netif_is_multiqueue((_ndev))) \
24316 +               netif_stop_subqueue((_ndev), (_qi)); \
24317 +       else \
24318 +               netif_stop_queue((_ndev)); \
24319 +       } while (0)
24320 +#undef netif_start_subqueue
24321 +#define netif_start_subqueue(_ndev,_qi) do { \
24322 +       if (netif_is_multiqueue((_ndev))) \
24323 +               netif_start_subqueue((_ndev), (_qi)); \
24324 +       else \
24325 +               netif_start_queue((_ndev)); \
24326 +       } while (0)
24327 +#else /* HAVE_TX_MQ */
24328 +#define netif_tx_stop_all_queues(a) netif_stop_queue(a)
24329 +#define netif_tx_wake_all_queues(a) netif_wake_queue(a)
24330 +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) )
24331 +#define netif_tx_start_all_queues(a) netif_start_queue(a)
24332 +#else
24333 +#define netif_tx_start_all_queues(a) do {} while (0)
24334 +#endif
24335 +#define netif_stop_subqueue(_ndev,_qi) netif_stop_queue((_ndev))
24336 +#define netif_start_subqueue(_ndev,_qi) netif_start_queue((_ndev))
24337 +#endif /* HAVE_TX_MQ */
24338 +#ifndef NETIF_F_MULTI_QUEUE
24339 +#define NETIF_F_MULTI_QUEUE 0
24340 +#define netif_is_multiqueue(a) 0
24341 +#define netif_wake_subqueue(a, b)
24342 +#endif /* NETIF_F_MULTI_QUEUE */
24343 +#else /* < 2.6.27 */
24344 +#define HAVE_TX_MQ
24345 +#define HAVE_NETDEV_SELECT_QUEUE
24346 +#endif /* < 2.6.27 */
24347 +
24348 +/*****************************************************************************/
24349 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) )
24350 +#define pci_ioremap_bar(pdev, bar)     ioremap(pci_resource_start(pdev, bar), \
24351 +                                               pci_resource_len(pdev, bar))
24352 +#define pci_wake_from_d3 _kc_pci_wake_from_d3
24353 +#define pci_prepare_to_sleep _kc_pci_prepare_to_sleep
24354 +extern int _kc_pci_wake_from_d3(struct pci_dev *dev, bool enable);
24355 +extern int _kc_pci_prepare_to_sleep(struct pci_dev *dev);
24356 +#define netdev_alloc_page(a) alloc_page(GFP_ATOMIC)
24357 +#endif /* < 2.6.28 */
24358 +
24359 +/*****************************************************************************/
24360 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) )
24361 +#define pci_request_selected_regions_exclusive(pdev, bars, name) \
24362 +               pci_request_selected_regions(pdev, bars, name)
24363 +extern void _kc_pci_disable_link_state(struct pci_dev *dev, int state);
24364 +#define pci_disable_link_state(p, s) _kc_pci_disable_link_state(p, s)
24365 +#else /* < 2.6.29 */
24366 +#ifdef CONFIG_DCB
24367 +#define HAVE_PFC_MODE_ENABLE
24368 +#endif /* CONFIG_DCB */
24369 +#endif /* < 2.6.29 */
24370 +
24371 +/*****************************************************************************/
24372 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) )
24373 +#ifdef IXGBE_FCOE
24374 +#undef CONFIG_FCOE
24375 +#undef CONFIG_FCOE_MODULE
24376 +#endif /* IXGBE_FCOE */
24377 +extern u16 _kc_skb_tx_hash(struct net_device *dev, struct sk_buff *skb);
24378 +#define skb_tx_hash(n, s) _kc_skb_tx_hash(n, s)
24379 +#define skb_record_rx_queue(a, b) do {} while (0)
24380 +#else
24381 +#define HAVE_ASPM_QUIRKS
24382 +#endif /* < 2.6.30 */
24383 +
24384 +/*****************************************************************************/
24385 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31) )
24386 +#define ETH_P_1588 0x88F7
24387 +#else
24388 +#ifndef HAVE_NETDEV_STORAGE_ADDRESS
24389 +#define HAVE_NETDEV_STORAGE_ADDRESS
24390 +#endif
24391 +#ifndef HAVE_NETDEV_HW_ADDR
24392 +#define HAVE_NETDEV_HW_ADDR
24393 +#endif
24394 +#endif /* < 2.6.31 */
24395 +
24396 +/*****************************************************************************/
24397 +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32) )
24398 +#undef netdev_tx_t
24399 +#define netdev_tx_t int
24400 +#endif /* < 2.6.32 */
24401 +#endif /* _KCOMPAT_H_ */
24402 Index: linux-2.6.22/drivers/net/igb/kcompat_ethtool.c
24403 ===================================================================
24404 --- /dev/null   1970-01-01 00:00:00.000000000 +0000
24405 +++ linux-2.6.22/drivers/net/igb/kcompat_ethtool.c      2009-12-18 12:39:22.000000000 -0500
24406 @@ -0,0 +1,1168 @@
24407 +/*******************************************************************************
24408 +
24409 +  Intel(R) Gigabit Ethernet Linux driver
24410 +  Copyright(c) 2007-2009 Intel Corporation.
24411 +
24412 +  This program is free software; you can redistribute it and/or modify it
24413 +  under the terms and conditions of the GNU General Public License,
24414 +  version 2, as published by the Free Software Foundation.
24415 +
24416 +  This program is distributed in the hope it will be useful, but WITHOUT
24417 +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
24418 +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
24419 +  more details.
24420 +
24421 +  You should have received a copy of the GNU General Public License along with
24422 +  this program; if not, write to the Free Software Foundation, Inc.,
24423 +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
24424 +
24425 +  The full GNU General Public License is included in this distribution in
24426 +  the file called "COPYING".
24427 +
24428 +  Contact Information:
24429 +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24430 +  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24431 +
24432 +*******************************************************************************/
24433 +
24434 +/*
24435 + * net/core/ethtool.c - Ethtool ioctl handler
24436 + * Copyright (c) 2003 Matthew Wilcox <matthew@wil.cx>
24437 + *
24438 + * This file is where we call all the ethtool_ops commands to get
24439 + * the information ethtool needs.  We fall back to calling do_ioctl()
24440 + * for drivers which haven't been converted to ethtool_ops yet.
24441 + *
24442 + * It's GPL, stupid.
24443 + *
24444 + * Modification by sfeldma@pobox.com to work as backward compat
24445 + * solution for pre-ethtool_ops kernels.
24446 + *     - copied struct ethtool_ops from ethtool.h
24447 + *     - defined SET_ETHTOOL_OPS
24448 + *     - put in some #ifndef NETIF_F_xxx wrappers
24449 + *     - changes refs to dev->ethtool_ops to ethtool_ops
24450 + *     - changed dev_ethtool to ethtool_ioctl
24451 + *      - remove EXPORT_SYMBOL()s
24452 + *      - added _kc_ prefix in built-in ethtool_op_xxx ops.
24453 + */
24454 +
24455 +#include <linux/module.h>
24456 +#include <linux/types.h>
24457 +#include <linux/errno.h>
24458 +#include <linux/mii.h>
24459 +#include <linux/ethtool.h>
24460 +#include <linux/netdevice.h>
24461 +#include <asm/uaccess.h>
24462 +
24463 +#include "kcompat.h"
24464 +
24465 +#undef SUPPORTED_10000baseT_Full
24466 +#define SUPPORTED_10000baseT_Full      (1 << 12)
24467 +#undef ADVERTISED_10000baseT_Full
24468 +#define ADVERTISED_10000baseT_Full     (1 << 12)
24469 +#undef SPEED_10000
24470 +#define SPEED_10000            10000
24471 +
24472 +#undef ethtool_ops
24473 +#define ethtool_ops _kc_ethtool_ops
24474 +
24475 +struct _kc_ethtool_ops {
24476 +       int  (*get_settings)(struct net_device *, struct ethtool_cmd *);
24477 +       int  (*set_settings)(struct net_device *, struct ethtool_cmd *);
24478 +       void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *);
24479 +       int  (*get_regs_len)(struct net_device *);
24480 +       void (*get_regs)(struct net_device *, struct ethtool_regs *, void *);
24481 +       void (*get_wol)(struct net_device *, struct ethtool_wolinfo *);
24482 +       int  (*set_wol)(struct net_device *, struct ethtool_wolinfo *);
24483 +       u32  (*get_msglevel)(struct net_device *);
24484 +       void (*set_msglevel)(struct net_device *, u32);
24485 +       int  (*nway_reset)(struct net_device *);
24486 +       u32  (*get_link)(struct net_device *);
24487 +       int  (*get_eeprom_len)(struct net_device *);
24488 +       int  (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *);
24489 +       int  (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *);
24490 +       int  (*get_coalesce)(struct net_device *, struct ethtool_coalesce *);
24491 +       int  (*set_coalesce)(struct net_device *, struct ethtool_coalesce *);
24492 +       void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *);
24493 +       int  (*set_ringparam)(struct net_device *, struct ethtool_ringparam *);
24494 +       void (*get_pauseparam)(struct net_device *,
24495 +                              struct ethtool_pauseparam*);
24496 +       int  (*set_pauseparam)(struct net_device *,
24497 +                              struct ethtool_pauseparam*);
24498 +       u32  (*get_rx_csum)(struct net_device *);
24499 +       int  (*set_rx_csum)(struct net_device *, u32);
24500 +       u32  (*get_tx_csum)(struct net_device *);
24501 +       int  (*set_tx_csum)(struct net_device *, u32);
24502 +       u32  (*get_sg)(struct net_device *);
24503 +       int  (*set_sg)(struct net_device *, u32);
24504 +       u32  (*get_tso)(struct net_device *);
24505 +       int  (*set_tso)(struct net_device *, u32);
24506 +       int  (*self_test_count)(struct net_device *);
24507 +       void (*self_test)(struct net_device *, struct ethtool_test *, u64 *);
24508 +       void (*get_strings)(struct net_device *, u32 stringset, u8 *);
24509 +       int  (*phys_id)(struct net_device *, u32);
24510 +       int  (*get_stats_count)(struct net_device *);
24511 +       void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *,
24512 +                                 u64 *);
24513 +} *ethtool_ops = NULL;
24514 +
24515 +#undef SET_ETHTOOL_OPS
24516 +#define SET_ETHTOOL_OPS(netdev, ops) (ethtool_ops = (ops))
24517 +
24518 +/*
24519 + * Some useful ethtool_ops methods that are device independent. If we find that
24520 + * all drivers want to do the same thing here, we can turn these into dev_()
24521 + * function calls.
24522 + */
24523 +
24524 +#undef ethtool_op_get_link
24525 +#define ethtool_op_get_link _kc_ethtool_op_get_link
24526 +u32 _kc_ethtool_op_get_link(struct net_device *dev)
24527 +{
24528 +       return netif_carrier_ok(dev) ? 1 : 0;
24529 +}
24530 +
24531 +#undef ethtool_op_get_tx_csum
24532 +#define ethtool_op_get_tx_csum _kc_ethtool_op_get_tx_csum
24533 +u32 _kc_ethtool_op_get_tx_csum(struct net_device *dev)
24534 +{
24535 +#ifdef NETIF_F_IP_CSUM
24536 +       return (dev->features & NETIF_F_IP_CSUM) != 0;
24537 +#else
24538 +       return 0;
24539 +#endif
24540 +}
24541 +
24542 +#undef ethtool_op_set_tx_csum
24543 +#define ethtool_op_set_tx_csum _kc_ethtool_op_set_tx_csum
24544 +int _kc_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
24545 +{
24546 +#ifdef NETIF_F_IP_CSUM
24547 +       if (data)
24548 +#ifdef NETIF_F_IPV6_CSUM
24549 +               dev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
24550 +       else
24551 +               dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
24552 +#else
24553 +               dev->features |= NETIF_F_IP_CSUM;
24554 +       else
24555 +               dev->features &= ~NETIF_F_IP_CSUM;
24556 +#endif
24557 +#endif
24558 +
24559 +       return 0;
24560 +}
24561 +
24562 +#undef ethtool_op_get_sg
24563 +#define ethtool_op_get_sg _kc_ethtool_op_get_sg
24564 +u32 _kc_ethtool_op_get_sg(struct net_device *dev)
24565 +{
24566 +#ifdef NETIF_F_SG
24567 +       return (dev->features & NETIF_F_SG) != 0;
24568 +#else
24569 +       return 0;
24570 +#endif
24571 +}
24572 +
24573 +#undef ethtool_op_set_sg
24574 +#define ethtool_op_set_sg _kc_ethtool_op_set_sg
24575 +int _kc_ethtool_op_set_sg(struct net_device *dev, u32 data)
24576 +{
24577 +#ifdef NETIF_F_SG
24578 +       if (data)
24579 +               dev->features |= NETIF_F_SG;
24580 +       else
24581 +               dev->features &= ~NETIF_F_SG;
24582 +#endif
24583 +
24584 +       return 0;
24585 +}
24586 +
24587 +#undef ethtool_op_get_tso
24588 +#define ethtool_op_get_tso _kc_ethtool_op_get_tso
24589 +u32 _kc_ethtool_op_get_tso(struct net_device *dev)
24590 +{
24591 +#ifdef NETIF_F_TSO
24592 +       return (dev->features & NETIF_F_TSO) != 0;
24593 +#else
24594 +       return 0;
24595 +#endif
24596 +}
24597 +
24598 +#undef ethtool_op_set_tso
24599 +#define ethtool_op_set_tso _kc_ethtool_op_set_tso
24600 +int _kc_ethtool_op_set_tso(struct net_device *dev, u32 data)
24601 +{
24602 +#ifdef NETIF_F_TSO
24603 +       if (data)
24604 +               dev->features |= NETIF_F_TSO;
24605 +       else
24606 +               dev->features &= ~NETIF_F_TSO;
24607 +#endif
24608 +
24609 +       return 0;
24610 +}
24611 +
24612 +/* Handlers for each ethtool command */
24613 +
24614 +static int ethtool_get_settings(struct net_device *dev, void *useraddr)
24615 +{
24616 +       struct ethtool_cmd cmd = { ETHTOOL_GSET };
24617 +       int err;
24618 +
24619 +       if (!ethtool_ops->get_settings)
24620 +               return -EOPNOTSUPP;
24621 +
24622 +       err = ethtool_ops->get_settings(dev, &cmd);
24623 +       if (err < 0)
24624 +               return err;
24625 +
24626 +       if (copy_to_user(useraddr, &cmd, sizeof(cmd)))
24627 +               return -EFAULT;
24628 +       return 0;
24629 +}
24630 +
24631 +static int ethtool_set_settings(struct net_device *dev, void *useraddr)
24632 +{
24633 +       struct ethtool_cmd cmd;
24634 +
24635 +       if (!ethtool_ops->set_settings)
24636 +               return -EOPNOTSUPP;
24637 +
24638 +       if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
24639 +               return -EFAULT;
24640 +
24641 +       return ethtool_ops->set_settings(dev, &cmd);
24642 +}
24643 +
24644 +static int ethtool_get_drvinfo(struct net_device *dev, void *useraddr)
24645 +{
24646 +       struct ethtool_drvinfo info;
24647 +       struct ethtool_ops *ops = ethtool_ops;
24648 +
24649 +       if (!ops->get_drvinfo)
24650 +               return -EOPNOTSUPP;
24651 +
24652 +       memset(&info, 0, sizeof(info));
24653 +       info.cmd = ETHTOOL_GDRVINFO;
24654 +       ops->get_drvinfo(dev, &info);
24655 +
24656 +       if (ops->self_test_count)
24657 +               info.testinfo_len = ops->self_test_count(dev);
24658 +       if (ops->get_stats_count)
24659 +               info.n_stats = ops->get_stats_count(dev);
24660 +       if (ops->get_regs_len)
24661 +               info.regdump_len = ops->get_regs_len(dev);
24662 +       if (ops->get_eeprom_len)
24663 +               info.eedump_len = ops->get_eeprom_len(dev);
24664 +
24665 +       if (copy_to_user(useraddr, &info, sizeof(info)))
24666 +               return -EFAULT;
24667 +       return 0;
24668 +}
24669 +
24670 +static int ethtool_get_regs(struct net_device *dev, char *useraddr)
24671 +{
24672 +       struct ethtool_regs regs;
24673 +       struct ethtool_ops *ops = ethtool_ops;
24674 +       void *regbuf;
24675 +       int reglen, ret;
24676 +
24677 +       if (!ops->get_regs || !ops->get_regs_len)
24678 +               return -EOPNOTSUPP;
24679 +
24680 +       if (copy_from_user(&regs, useraddr, sizeof(regs)))
24681 +               return -EFAULT;
24682 +
24683 +       reglen = ops->get_regs_len(dev);
24684 +       if (regs.len > reglen)
24685 +               regs.len = reglen;
24686 +
24687 +       regbuf = kmalloc(reglen, GFP_USER);
24688 +       if (!regbuf)
24689 +               return -ENOMEM;
24690 +
24691 +       ops->get_regs(dev, &regs, regbuf);
24692 +
24693 +       ret = -EFAULT;
24694 +       if (copy_to_user(useraddr, &regs, sizeof(regs)))
24695 +               goto out;
24696 +       useraddr += offsetof(struct ethtool_regs, data);
24697 +       if (copy_to_user(useraddr, regbuf, regs.len))
24698 +               goto out;
24699 +       ret = 0;
24700 +
24701 +out:
24702 +       kfree(regbuf);
24703 +       return ret;
24704 +}
24705 +
24706 +static int ethtool_get_wol(struct net_device *dev, char *useraddr)
24707 +{
24708 +       struct ethtool_wolinfo wol = { ETHTOOL_GWOL };
24709 +
24710 +       if (!ethtool_ops->get_wol)
24711 +               return -EOPNOTSUPP;
24712 +
24713 +       ethtool_ops->get_wol(dev, &wol);
24714 +
24715 +       if (copy_to_user(useraddr, &wol, sizeof(wol)))
24716 +               return -EFAULT;
24717 +       return 0;
24718 +}
24719 +
24720 +static int ethtool_set_wol(struct net_device *dev, char *useraddr)
24721 +{
24722 +       struct ethtool_wolinfo wol;
24723 +
24724 +       if (!ethtool_ops->set_wol)
24725 +               return -EOPNOTSUPP;
24726 +
24727 +       if (copy_from_user(&wol, useraddr, sizeof(wol)))
24728 +               return -EFAULT;
24729 +
24730 +       return ethtool_ops->set_wol(dev, &wol);
24731 +}
24732 +
24733 +static int ethtool_get_msglevel(struct net_device *dev, char *useraddr)
24734 +{
24735 +       struct ethtool_value edata = { ETHTOOL_GMSGLVL };
24736 +
24737 +       if (!ethtool_ops->get_msglevel)
24738 +               return -EOPNOTSUPP;
24739 +
24740 +       edata.data = ethtool_ops->get_msglevel(dev);
24741 +
24742 +       if (copy_to_user(useraddr, &edata, sizeof(edata)))
24743 +               return -EFAULT;
24744 +       return 0;
24745 +}
24746 +
24747 +static int ethtool_set_msglevel(struct net_device *dev, char *useraddr)
24748 +{
24749 +       struct ethtool_value edata;
24750 +
24751 +       if (!ethtool_ops->set_msglevel)
24752 +               return -EOPNOTSUPP;
24753 +
24754 +       if (copy_from_user(&edata, useraddr, sizeof(edata)))
24755 +               return -EFAULT;
24756 +
24757 +       ethtool_ops->set_msglevel(dev, edata.data);
24758 +       return 0;
24759 +}
24760 +
24761 +static int ethtool_nway_reset(struct net_device *dev)
24762 +{
24763 +       if (!ethtool_ops->nway_reset)
24764 +               return -EOPNOTSUPP;
24765 +
24766 +       return ethtool_ops->nway_reset(dev);
24767 +}
24768 +
24769 +static int ethtool_get_link(struct net_device *dev, void *useraddr)
24770 +{
24771 +       struct ethtool_value edata = { ETHTOOL_GLINK };
24772 +
24773 +       if (!ethtool_ops->get_link)
24774 +               return -EOPNOTSUPP;
24775 +
24776 +       edata.data = ethtool_ops->get_link(dev);
24777 +
24778 +       if (copy_to_user(useraddr, &edata, sizeof(edata)))
24779 +               return -EFAULT;
24780 +       return 0;
24781 +}
24782 +
24783 +static int ethtool_get_eeprom(struct net_device *dev, void *useraddr)
24784 +{
24785 +       struct ethtool_eeprom eeprom;
24786 +       struct ethtool_ops *ops = ethtool_ops;
24787 +       u8 *data;
24788 +       int ret;
24789 +
24790 +       if (!ops->get_eeprom || !ops->get_eeprom_len)
24791 +               return -EOPNOTSUPP;
24792 +
24793 +       if (copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
24794 +               return -EFAULT;
24795 +
24796 +       /* Check for wrap and zero */
24797 +       if (eeprom.offset + eeprom.len <= eeprom.offset)
24798 +               return -EINVAL;
24799 +
24800 +       /* Check for exceeding total eeprom len */
24801 +       if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev))
24802 +               return -EINVAL;
24803 +
24804 +       data = kmalloc(eeprom.len, GFP_USER);
24805 +       if (!data)
24806 +               return -ENOMEM;
24807 +
24808 +       ret = -EFAULT;
24809 +       if (copy_from_user(data, useraddr + sizeof(eeprom), eeprom.len))
24810 +               goto out;
24811 +
24812 +       ret = ops->get_eeprom(dev, &eeprom, data);
24813 +       if (ret)
24814 +               goto out;
24815 +
24816 +       ret = -EFAULT;
24817 +       if (copy_to_user(useraddr, &eeprom, sizeof(eeprom)))
24818 +               goto out;
24819 +       if (copy_to_user(useraddr + sizeof(eeprom), data, eeprom.len))
24820 +               goto out;
24821 +       ret = 0;
24822 +
24823 +out:
24824 +       kfree(data);
24825 +       return ret;
24826 +}
24827 +
24828 +static int ethtool_set_eeprom(struct net_device *dev, void *useraddr)
24829 +{
24830 +       struct ethtool_eeprom eeprom;
24831 +       struct ethtool_ops *ops = ethtool_ops;
24832 +       u8 *data;
24833 +       int ret;
24834 +
24835 +       if (!ops->set_eeprom || !ops->get_eeprom_len)
24836 +               return -EOPNOTSUPP;
24837 +
24838 +       if (copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
24839 +               return -EFAULT;
24840 +
24841 +       /* Check for wrap and zero */
24842 +       if (eeprom.offset + eeprom.len <= eeprom.offset)
24843 +               return -EINVAL;
24844 +
24845 +       /* Check for exceeding total eeprom len */
24846 +       if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev))
24847 +               return -EINVAL;
24848 +
24849 +       data = kmalloc(eeprom.len, GFP_USER);
24850 +       if (!data)
24851 +               return -ENOMEM;
24852 +
24853 +       ret = -EFAULT;
24854 +       if (copy_from_user(data, useraddr + sizeof(eeprom), eeprom.len))
24855 +               goto out;
24856 +
24857 +       ret = ops->set_eeprom(dev, &eeprom, data);
24858 +       if (ret)
24859 +               goto out;
24860 +
24861 +       if (copy_to_user(useraddr + sizeof(eeprom), data, eeprom.len))
24862 +               ret = -EFAULT;
24863 +
24864 +out:
24865 +       kfree(data);
24866 +       return ret;
24867 +}
24868 +
24869 +static int ethtool_get_coalesce(struct net_device *dev, void *useraddr)
24870 +{
24871 +       struct ethtool_coalesce coalesce = { ETHTOOL_GCOALESCE };
24872 +
24873 +       if (!ethtool_ops->get_coalesce)
24874 +               return -EOPNOTSUPP;
24875 +
24876 +       ethtool_ops->get_coalesce(dev, &coalesce);
24877 +
24878 +       if (copy_to_user(useraddr, &coalesce, sizeof(coalesce)))
24879 +               return -EFAULT;
24880 +       return 0;
24881 +}
24882 +
24883 +static int ethtool_set_coalesce(struct net_device *dev, void *useraddr)
24884 +{
24885 +       struct ethtool_coalesce coalesce;
24886 +
24887 +       if (!ethtool_ops->set_coalesce)
24888 +               return -EOPNOTSUPP;
24889 +
24890 +       if (copy_from_user(&coalesce, useraddr, sizeof(coalesce)))
24891 +               return -EFAULT;
24892 +
24893 +       return ethtool_ops->set_coalesce(dev, &coalesce);
24894 +}
24895 +
24896 +static int ethtool_get_ringparam(struct net_device *dev, void *useraddr)
24897 +{
24898 +       struct ethtool_ringparam ringparam = { ETHTOOL_GRINGPARAM };
24899 +
24900 +       if (!ethtool_ops->get_ringparam)
24901 +               return -EOPNOTSUPP;
24902 +
24903 +       ethtool_ops->get_ringparam(dev, &ringparam);
24904 +
24905 +       if (copy_to_user(useraddr, &ringparam, sizeof(ringparam)))
24906 +               return -EFAULT;
24907 +       return 0;
24908 +}
24909 +
24910 +static int ethtool_set_ringparam(struct net_device *dev, void *useraddr)
24911 +{
24912 +       struct ethtool_ringparam ringparam;
24913 +
24914 +       if (!ethtool_ops->set_ringparam)
24915 +               return -EOPNOTSUPP;
24916 +
24917 +       if (copy_from_user(&ringparam, useraddr, sizeof(ringparam)))
24918 +               return -EFAULT;
24919 +
24920 +       return ethtool_ops->set_ringparam(dev, &ringparam);
24921 +}
24922 +
24923 +static int ethtool_get_pauseparam(struct net_device *dev, void *useraddr)
24924 +{
24925 +       struct ethtool_pauseparam pauseparam = { ETHTOOL_GPAUSEPARAM };
24926 +
24927 +       if (!ethtool_ops->get_pauseparam)
24928 +               return -EOPNOTSUPP;
24929 +
24930 +       ethtool_ops->get_pauseparam(dev, &pauseparam);
24931 +
24932 +       if (copy_to_user(useraddr, &pauseparam, sizeof(pauseparam)))
24933 +               return -EFAULT;
24934 +       return 0;
24935 +}
24936 +
24937 +static int ethtool_set_pauseparam(struct net_device *dev, void *useraddr)
24938 +{
24939 +       struct ethtool_pauseparam pauseparam;
24940 +
24941 +       if (!ethtool_ops->set_pauseparam)
24942 +               return -EOPNOTSUPP;
24943 +
24944 +       if (copy_from_user(&pauseparam, useraddr, sizeof(pauseparam)))
24945 +               return -EFAULT;
24946 +
24947 +       return ethtool_ops->set_pauseparam(dev, &pauseparam);
24948 +}
24949 +
24950 +static int ethtool_get_rx_csum(struct net_device *dev, char *useraddr)
24951 +{
24952 +       struct ethtool_value edata = { ETHTOOL_GRXCSUM };
24953 +
24954 +       if (!ethtool_ops->get_rx_csum)
24955 +               return -EOPNOTSUPP;
24956 +
24957 +       edata.data = ethtool_ops->get_rx_csum(dev);
24958 +
24959 +       if (copy_to_user(useraddr, &edata, sizeof(edata)))
24960 +               return -EFAULT;
24961 +       return 0;
24962 +}
24963 +
24964 +static int ethtool_set_rx_csum(struct net_device *dev, char *useraddr)
24965 +{
24966 +       struct ethtool_value edata;
24967 +
24968 +       if (!ethtool_ops->set_rx_csum)
24969 +               return -EOPNOTSUPP;
24970 +
24971 +       if (copy_from_user(&edata, useraddr, sizeof(edata)))
24972 +               return -EFAULT;
24973 +
24974 +       ethtool_ops->set_rx_csum(dev, edata.data);
24975 +       return 0;
24976 +}
24977 +
24978 +static int ethtool_get_tx_csum(struct net_device *dev, char *useraddr)
24979 +{
24980 +       struct ethtool_value edata = { ETHTOOL_GTXCSUM };
24981 +
24982 +       if (!ethtool_ops->get_tx_csum)
24983 +               return -EOPNOTSUPP;
24984 +
24985 +       edata.data = ethtool_ops->get_tx_csum(dev);
24986 +
24987 +       if (copy_to_user(useraddr, &edata, sizeof(edata)))
24988 +               return -EFAULT;
24989 +       return 0;
24990 +}
24991 +
24992 +static int ethtool_set_tx_csum(struct net_device *dev, char *useraddr)
24993 +{
24994 +       struct ethtool_value edata;
24995 +
24996 +       if (!ethtool_ops->set_tx_csum)
24997 +               return -EOPNOTSUPP;
24998 +
24999 +       if (copy_from_user(&edata, useraddr, sizeof(edata)))
25000 +               return -EFAULT;
25001 +
25002 +       return ethtool_ops->set_tx_csum(dev, edata.data);
25003 +}
25004 +
25005 +static int ethtool_get_sg(struct net_device *dev, char *useraddr)
25006 +{
25007 +       struct ethtool_value edata = { ETHTOOL_GSG };
25008 +
25009 +       if (!ethtool_ops->get_sg)
25010 +               return -EOPNOTSUPP;
25011 +
25012 +       edata.data = ethtool_ops->get_sg(dev);
25013 +
25014 +       if (copy_to_user(useraddr, &edata, sizeof(edata)))
25015 +               return -EFAULT;
25016 +       return 0;
25017 +}
25018 +
25019 +static int ethtool_set_sg(struct net_device *dev, char *useraddr)
25020 +{
25021 +       struct ethtool_value edata;
25022 +
25023 +       if (!ethtool_ops->set_sg)
25024 +               return -EOPNOTSUPP;
25025 +
25026 +       if (copy_from_user(&edata, useraddr, sizeof(edata)))
25027 +               return -EFAULT;
25028 +
25029 +       return ethtool_ops->set_sg(dev, edata.data);
25030 +}
25031 +
25032 +static int ethtool_get_tso(struct net_device *dev, char *useraddr)
25033 +{
25034 +       struct ethtool_value edata = { ETHTOOL_GTSO };
25035 +
25036 +       if (!ethtool_ops->get_tso)
25037 +               return -EOPNOTSUPP;
25038 +
25039 +       edata.data = ethtool_ops->get_tso(dev);
25040 +
25041 +       if (copy_to_user(useraddr, &edata, sizeof(edata)))
25042 +               return -EFAULT;
25043 +       return 0;
25044 +}
25045 +
25046 +static int ethtool_set_tso(struct net_device *dev, char *useraddr)
25047 +{
25048 +       struct ethtool_value edata;
25049 +
25050 +       if (!ethtool_ops->set_tso)
25051 +               return -EOPNOTSUPP;
25052 +
25053 +       if (copy_from_user(&edata, useraddr, sizeof(edata)))
25054 +               return -EFAULT;
25055 +
25056 +       return ethtool_ops->set_tso(dev, edata.data);
25057 +}
25058 +
25059 +static int ethtool_self_test(struct net_device *dev, char *useraddr)
25060 +{
25061 +       struct ethtool_test test;
25062 +       struct ethtool_ops *ops = ethtool_ops;
25063 +       u64 *data;
25064 +       int ret;
25065 +
25066 +       if (!ops->self_test || !ops->self_test_count)
25067 +               return -EOPNOTSUPP;
25068 +
25069 +       if (copy_from_user(&test, useraddr, sizeof(test)))
25070 +               return -EFAULT;
25071 +
25072 +       test.len = ops->self_test_count(dev);
25073 +       data = kmalloc(test.len * sizeof(u64), GFP_USER);
25074 +       if (!data)
25075 +               return -ENOMEM;
25076 +
25077 +       ops->self_test(dev, &test, data);
25078 +
25079 +       ret = -EFAULT;
25080 +       if (copy_to_user(useraddr, &test, sizeof(test)))
25081 +               goto out;
25082 +       useraddr += sizeof(test);
25083 +       if (copy_to_user(useraddr, data, test.len * sizeof(u64)))
25084 +               goto out;
25085 +       ret = 0;
25086 +
25087 +out:
25088 +       kfree(data);
25089 +       return ret;
25090 +}
25091 +
25092 +static int ethtool_get_strings(struct net_device *dev, void *useraddr)
25093 +{
25094 +       struct ethtool_gstrings gstrings;
25095 +       struct ethtool_ops *ops = ethtool_ops;
25096 +       u8 *data;
25097 +       int ret;
25098 +
25099 +       if (!ops->get_strings)
25100 +               return -EOPNOTSUPP;
25101 +
25102 +       if (copy_from_user(&gstrings, useraddr, sizeof(gstrings)))
25103 +               return -EFAULT;
25104 +
25105 +       switch (gstrings.string_set) {
25106 +       case ETH_SS_TEST:
25107 +               if (!ops->self_test_count)
25108 +                       return -EOPNOTSUPP;
25109 +               gstrings.len = ops->self_test_count(dev);
25110 +               break;
25111 +       case ETH_SS_STATS:
25112 +               if (!ops->get_stats_count)
25113 +                       return -EOPNOTSUPP;
25114 +               gstrings.len = ops->get_stats_count(dev);
25115 +               break;
25116 +       default:
25117 +               return -EINVAL;
25118 +       }
25119 +
25120 +       data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
25121 +       if (!data)
25122 +               return -ENOMEM;
25123 +
25124 +       ops->get_strings(dev, gstrings.string_set, data);
25125 +
25126 +       ret = -EFAULT;
25127 +       if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
25128 +               goto out;
25129 +       useraddr += sizeof(gstrings);
25130 +       if (copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN))
25131 +               goto out;
25132 +       ret = 0;
25133 +
25134 +out:
25135 +       kfree(data);
25136 +       return ret;
25137 +}
25138 +
25139 +static int ethtool_phys_id(struct net_device *dev, void *useraddr)
25140 +{
25141 +       struct ethtool_value id;
25142 +
25143 +       if (!ethtool_ops->phys_id)
25144 +               return -EOPNOTSUPP;
25145 +
25146 +       if (copy_from_user(&id, useraddr, sizeof(id)))
25147 +               return -EFAULT;
25148 +
25149 +       return ethtool_ops->phys_id(dev, id.data);
25150 +}
25151 +
25152 +static int ethtool_get_stats(struct net_device *dev, void *useraddr)
25153 +{
25154 +       struct ethtool_stats stats;
25155 +       struct ethtool_ops *ops = ethtool_ops;
25156 +       u64 *data;
25157 +       int ret;
25158 +
25159 +       if (!ops->get_ethtool_stats || !ops->get_stats_count)
25160 +               return -EOPNOTSUPP;
25161 +
25162 +       if (copy_from_user(&stats, useraddr, sizeof(stats)))
25163 +               return -EFAULT;
25164 +
25165 +       stats.n_stats = ops->get_stats_count(dev);
25166 +       data = kmalloc(stats.n_stats * sizeof(u64), GFP_USER);
25167 +       if (!data)
25168 +               return -ENOMEM;
25169 +
25170 +       ops->get_ethtool_stats(dev, &stats, data);
25171 +
25172 +       ret = -EFAULT;
25173 +       if (copy_to_user(useraddr, &stats, sizeof(stats)))
25174 +               goto out;
25175 +       useraddr += sizeof(stats);
25176 +       if (copy_to_user(useraddr, data, stats.n_stats * sizeof(u64)))
25177 +               goto out;
25178 +       ret = 0;
25179 +
25180 +out:
25181 +       kfree(data);
25182 +       return ret;
25183 +}
25184 +
25185 +/* The main entry point in this file.  Called from net/core/dev.c */
25186 +
25187 +#define ETHTOOL_OPS_COMPAT
25188 +int ethtool_ioctl(struct ifreq *ifr)
25189 +{
25190 +       struct net_device *dev = __dev_get_by_name(ifr->ifr_name);
25191 +       void *useraddr = (void *) ifr->ifr_data;
25192 +       u32 ethcmd;
25193 +
25194 +       /*
25195 +        * XXX: This can be pushed down into the ethtool_* handlers that
25196 +        * need it.  Keep existing behavior for the moment.
25197 +        */
25198 +       if (!capable(CAP_NET_ADMIN))
25199 +               return -EPERM;
25200 +
25201 +       if (!dev || !netif_device_present(dev))
25202 +               return -ENODEV;
25203 +
25204 +       if (copy_from_user(&ethcmd, useraddr, sizeof (ethcmd)))
25205 +               return -EFAULT;
25206 +
25207 +       switch (ethcmd) {
25208 +       case ETHTOOL_GSET:
25209 +               return ethtool_get_settings(dev, useraddr);
25210 +       case ETHTOOL_SSET:
25211 +               return ethtool_set_settings(dev, useraddr);
25212 +       case ETHTOOL_GDRVINFO:
25213 +               return ethtool_get_drvinfo(dev, useraddr);
25214 +       case ETHTOOL_GREGS:
25215 +               return ethtool_get_regs(dev, useraddr);
25216 +       case ETHTOOL_GWOL:
25217 +               return ethtool_get_wol(dev, useraddr);
25218 +       case ETHTOOL_SWOL:
25219 +               return ethtool_set_wol(dev, useraddr);
25220 +       case ETHTOOL_GMSGLVL:
25221 +               return ethtool_get_msglevel(dev, useraddr);
25222 +       case ETHTOOL_SMSGLVL:
25223 +               return ethtool_set_msglevel(dev, useraddr);
25224 +       case ETHTOOL_NWAY_RST:
25225 +               return ethtool_nway_reset(dev);
25226 +       case ETHTOOL_GLINK:
25227 +               return ethtool_get_link(dev, useraddr);
25228 +       case ETHTOOL_GEEPROM:
25229 +               return ethtool_get_eeprom(dev, useraddr);
25230 +       case ETHTOOL_SEEPROM:
25231 +               return ethtool_set_eeprom(dev, useraddr);
25232 +       case ETHTOOL_GCOALESCE:
25233 +               return ethtool_get_coalesce(dev, useraddr);
25234 +       case ETHTOOL_SCOALESCE:
25235 +               return ethtool_set_coalesce(dev, useraddr);
25236 +       case ETHTOOL_GRINGPARAM:
25237 +               return ethtool_get_ringparam(dev, useraddr);
25238 +       case ETHTOOL_SRINGPARAM:
25239 +               return ethtool_set_ringparam(dev, useraddr);
25240 +       case ETHTOOL_GPAUSEPARAM:
25241 +               return ethtool_get_pauseparam(dev, useraddr);
25242 +       case ETHTOOL_SPAUSEPARAM:
25243 +               return ethtool_set_pauseparam(dev, useraddr);
25244 +       case ETHTOOL_GRXCSUM:
25245 +               return ethtool_get_rx_csum(dev, useraddr);
25246 +       case ETHTOOL_SRXCSUM:
25247 +               return ethtool_set_rx_csum(dev, useraddr);
25248 +       case ETHTOOL_GTXCSUM:
25249 +               return ethtool_get_tx_csum(dev, useraddr);
25250 +       case ETHTOOL_STXCSUM:
25251 +               return ethtool_set_tx_csum(dev, useraddr);
25252 +       case ETHTOOL_GSG:
25253 +               return ethtool_get_sg(dev, useraddr);
25254 +       case ETHTOOL_SSG:
25255 +               return ethtool_set_sg(dev, useraddr);
25256 +       case ETHTOOL_GTSO:
25257 +               return ethtool_get_tso(dev, useraddr);
25258 +       case ETHTOOL_STSO:
25259 +               return ethtool_set_tso(dev, useraddr);
25260 +       case ETHTOOL_TEST:
25261 +               return ethtool_self_test(dev, useraddr);
25262 +       case ETHTOOL_GSTRINGS:
25263 +               return ethtool_get_strings(dev, useraddr);
25264 +       case ETHTOOL_PHYS_ID:
25265 +               return ethtool_phys_id(dev, useraddr);
25266 +       case ETHTOOL_GSTATS:
25267 +               return ethtool_get_stats(dev, useraddr);
25268 +       default:
25269 +               return -EOPNOTSUPP;
25270 +       }
25271 +
25272 +       return -EOPNOTSUPP;
25273 +}
25274 +
25275 +#define mii_if_info _kc_mii_if_info
25276 +struct _kc_mii_if_info {
25277 +       int phy_id;
25278 +       int advertising;
25279 +       int phy_id_mask;
25280 +       int reg_num_mask;
25281 +
25282 +       unsigned int full_duplex : 1;   /* is full duplex? */
25283 +       unsigned int force_media : 1;   /* is autoneg. disabled? */
25284 +
25285 +       struct net_device *dev;
25286 +       int (*mdio_read) (struct net_device *dev, int phy_id, int location);
25287 +       void (*mdio_write) (struct net_device *dev, int phy_id, int location, int val);
25288 +};
25289 +
25290 +struct ethtool_cmd;
25291 +struct mii_ioctl_data;
25292 +
25293 +#undef mii_link_ok
25294 +#define mii_link_ok _kc_mii_link_ok
25295 +#undef mii_nway_restart
25296 +#define mii_nway_restart _kc_mii_nway_restart
25297 +#undef mii_ethtool_gset
25298 +#define mii_ethtool_gset _kc_mii_ethtool_gset
25299 +#undef mii_ethtool_sset
25300 +#define mii_ethtool_sset _kc_mii_ethtool_sset
25301 +#undef mii_check_link
25302 +#define mii_check_link _kc_mii_check_link
25303 +#undef generic_mii_ioctl
25304 +#define generic_mii_ioctl _kc_generic_mii_ioctl
25305 +extern int _kc_mii_link_ok (struct mii_if_info *mii);
25306 +extern int _kc_mii_nway_restart (struct mii_if_info *mii);
25307 +extern int _kc_mii_ethtool_gset(struct mii_if_info *mii,
25308 +                                struct ethtool_cmd *ecmd);
25309 +extern int _kc_mii_ethtool_sset(struct mii_if_info *mii,
25310 +                                struct ethtool_cmd *ecmd);
25311 +extern void _kc_mii_check_link (struct mii_if_info *mii);
25312 +extern int _kc_generic_mii_ioctl(struct mii_if_info *mii_if,
25313 +                                 struct mii_ioctl_data *mii_data, int cmd,
25314 +                                 unsigned int *duplex_changed);
25315 +
25316 +
25317 +struct _kc_pci_dev_ext {
25318 +       struct pci_dev *dev;
25319 +       void *pci_drvdata;
25320 +       struct pci_driver *driver;
25321 +};
25322 +
25323 +struct _kc_net_dev_ext {
25324 +       struct net_device *dev;
25325 +       unsigned int carrier;
25326 +};
25327 +
25328 +
25329 +/**************************************/
25330 +/* mii support */
25331 +
25332 +int _kc_mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
25333 +{
25334 +       struct net_device *dev = mii->dev;
25335 +       u32 advert, bmcr, lpa, nego;
25336 +
25337 +       ecmd->supported =
25338 +           (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
25339 +            SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
25340 +            SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII);
25341 +
25342 +       /* only supports twisted-pair */
25343 +       ecmd->port = PORT_MII;
25344 +
25345 +       /* only supports internal transceiver */
25346 +       ecmd->transceiver = XCVR_INTERNAL;
25347 +
25348 +       /* this isn't fully supported at higher layers */
25349 +       ecmd->phy_address = mii->phy_id;
25350 +
25351 +       ecmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
25352 +       advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE);
25353 +       if (advert & ADVERTISE_10HALF)
25354 +               ecmd->advertising |= ADVERTISED_10baseT_Half;
25355 +       if (advert & ADVERTISE_10FULL)
25356 +               ecmd->advertising |= ADVERTISED_10baseT_Full;
25357 +       if (advert & ADVERTISE_100HALF)
25358 +               ecmd->advertising |= ADVERTISED_100baseT_Half;
25359 +       if (advert & ADVERTISE_100FULL)
25360 +               ecmd->advertising |= ADVERTISED_100baseT_Full;
25361 +
25362 +       bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
25363 +       lpa = mii->mdio_read(dev, mii->phy_id, MII_LPA);
25364 +       if (bmcr & BMCR_ANENABLE) {
25365 +               ecmd->advertising |= ADVERTISED_Autoneg;
25366 +               ecmd->autoneg = AUTONEG_ENABLE;
25367 +
25368 +               nego = mii_nway_result(advert & lpa);
25369 +               if (nego == LPA_100FULL || nego == LPA_100HALF)
25370 +                       ecmd->speed = SPEED_100;
25371 +               else
25372 +                       ecmd->speed = SPEED_10;
25373 +               if (nego == LPA_100FULL || nego == LPA_10FULL) {
25374 +                       ecmd->duplex = DUPLEX_FULL;
25375 +                       mii->full_duplex = 1;
25376 +               } else {
25377 +                       ecmd->duplex = DUPLEX_HALF;
25378 +                       mii->full_duplex = 0;
25379 +               }
25380 +       } else {
25381 +               ecmd->autoneg = AUTONEG_DISABLE;
25382 +
25383 +               ecmd->speed = (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
25384 +               ecmd->duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
25385 +       }
25386 +
25387 +       /* ignore maxtxpkt, maxrxpkt for now */
25388 +
25389 +       return 0;
25390 +}
25391 +
25392 +int _kc_mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
25393 +{
25394 +       struct net_device *dev = mii->dev;
25395 +
25396 +       if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
25397 +               return -EINVAL;
25398 +       if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
25399 +               return -EINVAL;
25400 +       if (ecmd->port != PORT_MII)
25401 +               return -EINVAL;
25402 +       if (ecmd->transceiver != XCVR_INTERNAL)
25403 +               return -EINVAL;
25404 +       if (ecmd->phy_address != mii->phy_id)
25405 +               return -EINVAL;
25406 +       if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE)
25407 +               return -EINVAL;
25408 +
25409 +       /* ignore supported, maxtxpkt, maxrxpkt */
25410 +
25411 +       if (ecmd->autoneg == AUTONEG_ENABLE) {
25412 +               u32 bmcr, advert, tmp;
25413 +
25414 +               if ((ecmd->advertising & (ADVERTISED_10baseT_Half |
25415 +                                         ADVERTISED_10baseT_Full |
25416 +                                         ADVERTISED_100baseT_Half |
25417 +                                         ADVERTISED_100baseT_Full)) == 0)
25418 +                       return -EINVAL;
25419 +
25420 +               /* advertise only what has been requested */
25421 +               advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE);
25422 +               tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
25423 +               if (ecmd->advertising & ADVERTISED_10baseT_Half)
25424 +                       tmp |= ADVERTISE_10HALF;
25425 +               if (ecmd->advertising & ADVERTISED_10baseT_Full)
25426 +                       tmp |= ADVERTISE_10FULL;
25427 +               if (ecmd->advertising & ADVERTISED_100baseT_Half)
25428 +                       tmp |= ADVERTISE_100HALF;
25429 +               if (ecmd->advertising & ADVERTISED_100baseT_Full)
25430 +                       tmp |= ADVERTISE_100FULL;
25431 +               if (advert != tmp) {
25432 +                       mii->mdio_write(dev, mii->phy_id, MII_ADVERTISE, tmp);
25433 +                       mii->advertising = tmp;
25434 +               }
25435 +
25436 +               /* turn on autonegotiation, and force a renegotiate */
25437 +               bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
25438 +               bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
25439 +               mii->mdio_write(dev, mii->phy_id, MII_BMCR, bmcr);
25440 +
25441 +               mii->force_media = 0;
25442 +       } else {
25443 +               u32 bmcr, tmp;
25444 +
25445 +               /* turn off auto negotiation, set speed and duplexity */
25446 +               bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
25447 +               tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX);
25448 +               if (ecmd->speed == SPEED_100)
25449 +                       tmp |= BMCR_SPEED100;
25450 +               if (ecmd->duplex == DUPLEX_FULL) {
25451 +                       tmp |= BMCR_FULLDPLX;
25452 +                       mii->full_duplex = 1;
25453 +               } else
25454 +                       mii->full_duplex = 0;
25455 +               if (bmcr != tmp)
25456 +                       mii->mdio_write(dev, mii->phy_id, MII_BMCR, tmp);
25457 +
25458 +               mii->force_media = 1;
25459 +       }
25460 +       return 0;
25461 +}
25462 +
25463 +int _kc_mii_link_ok (struct mii_if_info *mii)
25464 +{
25465 +       /* first, a dummy read, needed to latch some MII phys */
25466 +       mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR);
25467 +       if (mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR) & BMSR_LSTATUS)
25468 +               return 1;
25469 +       return 0;
25470 +}
25471 +
25472 +int _kc_mii_nway_restart (struct mii_if_info *mii)
25473 +{
25474 +       int bmcr;
25475 +       int r = -EINVAL;
25476 +
25477 +       /* if autoneg is off, it's an error */
25478 +       bmcr = mii->mdio_read(mii->dev, mii->phy_id, MII_BMCR);
25479 +
25480 +       if (bmcr & BMCR_ANENABLE) {
25481 +               bmcr |= BMCR_ANRESTART;
25482 +               mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR, bmcr);
25483 +               r = 0;
25484 +       }
25485 +
25486 +       return r;
25487 +}
25488 +
25489 +void _kc_mii_check_link (struct mii_if_info *mii)
25490 +{
25491 +       int cur_link = mii_link_ok(mii);
25492 +       int prev_link = netif_carrier_ok(mii->dev);
25493 +
25494 +       if (cur_link && !prev_link)
25495 +               netif_carrier_on(mii->dev);
25496 +       else if (prev_link && !cur_link)
25497 +               netif_carrier_off(mii->dev);
25498 +}
25499 +
/*
 * Backported generic MII ioctl handler: services SIOCGMIIPHY, SIOCGMIIREG
 * and SIOCSMIIREG (plus their legacy SIOCDEVPRIVATE aliases) against the
 * PHY described by @mii_if.
 *
 * @mii_if:         MII interface state (phy id and masks, mdio accessors,
 *                  cached duplex/advertising settings)
 * @mii_data:       user-supplied ioctl payload (phy_id, reg_num, val_in/out)
 * @cmd:            the ioctl command being serviced
 * @duplex_chg_out: optional out-parameter; set to 1 when a BMCR write
 *                  changed the recorded duplex mode, 0 otherwise
 *
 * Returns 0 on success, -EPERM if a register write is attempted without
 * CAP_NET_ADMIN, or -EOPNOTSUPP for an unrecognized command.
 */
int _kc_generic_mii_ioctl(struct mii_if_info *mii_if,
                          struct mii_ioctl_data *mii_data, int cmd,
                          unsigned int *duplex_chg_out)
{
	int rc = 0;
	unsigned int duplex_changed = 0;

	if (duplex_chg_out)
		*duplex_chg_out = 0;

	/* Clamp user-supplied ids to the device's valid ranges. */
	mii_data->phy_id &= mii_if->phy_id_mask;
	mii_data->reg_num &= mii_if->reg_num_mask;

	switch(cmd) {
	case SIOCDEVPRIVATE:	/* binary compat, remove in 2.5 */
	case SIOCGMIIPHY:
		/* Report our PHY address, then fall into the register read
		 * so the caller also gets that register's current value. */
		mii_data->phy_id = mii_if->phy_id;
		/* fall through */

	case SIOCDEVPRIVATE + 1:/* binary compat, remove in 2.5 */
	case SIOCGMIIREG:
		mii_data->val_out =
			mii_if->mdio_read(mii_if->dev, mii_data->phy_id,
					  mii_data->reg_num);
		break;

	case SIOCDEVPRIVATE + 2:/* binary compat, remove in 2.5 */
	case SIOCSMIIREG: {
		u16 val = mii_data->val_in;

		/* Writing PHY registers is privileged. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		/* Mirror writes aimed at our own PHY into the cached
		 * mii_if state so it stays consistent with hardware. */
		if (mii_data->phy_id == mii_if->phy_id) {
			switch(mii_data->reg_num) {
			case MII_BMCR: {
				unsigned int new_duplex = 0;
				/* A reset or enabling autoneg ends forced-media
				 * mode; any other BMCR write implies forcing. */
				if (val & (BMCR_RESET|BMCR_ANENABLE))
					mii_if->force_media = 0;
				else
					mii_if->force_media = 1;
				if (mii_if->force_media &&
				    (val & BMCR_FULLDPLX))
					new_duplex = 1;
				if (mii_if->full_duplex != new_duplex) {
					duplex_changed = 1;
					mii_if->full_duplex = new_duplex;
				}
				break;
			}
			case MII_ADVERTISE:
				mii_if->advertising = val;
				break;
			default:
				/* do nothing */
				break;
			}
		}

		/* Perform the actual hardware register write. */
		mii_if->mdio_write(mii_if->dev, mii_data->phy_id,
				   mii_data->reg_num, val);
		break;
	}

	default:
		rc = -EOPNOTSUPP;
		break;
	}

	/* Only report a duplex change on a successful command. */
	if ((rc == 0) && (duplex_chg_out) && (duplex_changed))
		*duplex_chg_out = 1;

	return rc;
}
25574 +