/* tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
18 #include <linux/config.h>
19 #include <linux/version.h>
21 #if (LINUX_VERSION_CODE < 0x020500)
22 #if defined(CONFIG_MODVERSIONS) && defined(MODULE) && ! defined(MODVERSIONS)
24 #include <linux/modversions.h>
27 #include <linux/module.h>
28 #if (LINUX_VERSION_CODE >= 0x20600)
29 #include <linux/moduleparam.h>
31 #include <linux/kernel.h>
32 #include <linux/types.h>
33 #include <linux/compiler.h>
34 #include <linux/slab.h>
35 #include <linux/delay.h>
37 #include <linux/init.h>
38 #include <linux/ioport.h>
39 #include <linux/pci.h>
40 #include <linux/netdevice.h>
41 #include <linux/etherdevice.h>
42 #include <linux/skbuff.h>
43 #include <linux/ethtool.h>
44 #include <linux/mii.h>
45 #include <linux/if_vlan.h>
47 #include <linux/tcp.h>
48 #if (LINUX_VERSION_CODE >= 0x20600)
49 #include <linux/workqueue.h>
51 #include <linux/prefetch.h>
52 #if (LINUX_VERSION_CODE >= 0x020600)
53 #include <linux/dma-mapping.h>
55 #include <linux/bitops.h>
57 #include <net/checksum.h>
59 #include <asm/system.h>
61 #include <asm/byteorder.h>
62 #include <asm/uaccess.h>
65 #include <asm/idprom.h>
66 #include <asm/oplib.h>
70 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
71 #define TG3_VLAN_TAG_USED 1
73 #define TG3_VLAN_TAG_USED 0
77 #define TG3_TSO_SUPPORT 1
79 #define TG3_TSO_SUPPORT 0
84 #define DRV_MODULE_NAME "tg3"
85 #define PFX DRV_MODULE_NAME ": "
86 #define DRV_MODULE_VERSION "3.66f"
87 #define DRV_MODULE_RELDATE "September 1, 2006"
89 #define TG3_DEF_MAC_MODE 0
90 #define TG3_DEF_RX_MODE 0
91 #define TG3_DEF_TX_MODE 0
92 #define TG3_DEF_MSG_ENABLE \
102 /* length of time before we decide the hardware is borked,
103 * and dev->tx_timeout() should be called to fix the problem
105 #define TG3_TX_TIMEOUT (5 * HZ)
107 /* hardware minimum and maximum for a single frame's data payload */
108 #define TG3_MIN_MTU 60
109 #define TG3_MAX_MTU(tp) \
110 ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
112 /* These numbers seem to be hard coded in the NIC firmware somehow.
113 * You can't change the ring sizes, but you can change where you place
114 * them in the NIC onboard memory.
116 #define TG3_RX_RING_SIZE 512
117 #define TG3_DEF_RX_RING_PENDING 200
118 #define TG3_RX_JUMBO_RING_SIZE 256
119 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
121 /* Do not place this n-ring entries value into the tp struct itself,
122 * we really want to expose these constants to GCC so that modulo et
123 * al. operations are done with shifts and masks instead of with
124 * hw multiply/modulo instructions. Another solution would be to
125 * replace things like '% foo' with '& (foo - 1)'.
127 #define TG3_RX_RCB_RING_SIZE(tp) \
128 ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)
130 #define TG3_TX_RING_SIZE 512
131 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
133 #define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
135 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
136 TG3_RX_JUMBO_RING_SIZE)
137 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
138 TG3_RX_RCB_RING_SIZE(tp))
139 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
141 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
143 #define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
144 #define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64)
146 /* minimum number of free TX descriptors required to wake up TX process */
147 #define TG3_TX_WAKEUP_THRESH (TG3_TX_RING_SIZE / 4)
149 /* number of ETHTOOL_GSTATS u64's */
150 #define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
152 #define TG3_NUM_TEST 6
154 static char version[] __devinitdata =
155 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
157 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
158 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
159 MODULE_LICENSE("GPL");
160 MODULE_VERSION(DRV_MODULE_VERSION);
162 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
163 #if (LINUX_VERSION_CODE >= 0x20600)
164 module_param(tg3_debug, int, 0);
165 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
168 static struct pci_device_id tg3_pci_tbl[] = {
169 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
170 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
171 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
172 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
173 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
174 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
175 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
176 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
177 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
178 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
179 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
180 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
181 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
182 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
183 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
184 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
185 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
186 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
187 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
188 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
189 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
190 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
191 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
192 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
193 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
194 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
195 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
196 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
197 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
198 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
199 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
200 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
201 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
202 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
203 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
204 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
205 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
206 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
207 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
208 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
209 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
210 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
211 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
212 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
213 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
214 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
215 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
216 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
217 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
218 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
219 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
220 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
221 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
222 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
223 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
224 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
225 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
226 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
227 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
228 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
229 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
230 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
231 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
232 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
233 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
234 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
235 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
236 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
237 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754,
238 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
239 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M,
240 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
241 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755,
242 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
243 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M,
244 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
245 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786,
246 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
247 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787,
248 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
249 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M,
250 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
251 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714,
252 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
253 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S,
254 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
255 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715,
256 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
257 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S,
258 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
259 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
260 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
261 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
262 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
263 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
264 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
265 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
266 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
267 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
268 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
269 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
270 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
271 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
272 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
273 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
274 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
275 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
276 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
277 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
278 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
282 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
285 const char string[ETH_GSTRING_LEN];
286 } ethtool_stats_keys[TG3_NUM_STATS] = {
289 { "rx_ucast_packets" },
290 { "rx_mcast_packets" },
291 { "rx_bcast_packets" },
293 { "rx_align_errors" },
294 { "rx_xon_pause_rcvd" },
295 { "rx_xoff_pause_rcvd" },
296 { "rx_mac_ctrl_rcvd" },
297 { "rx_xoff_entered" },
298 { "rx_frame_too_long_errors" },
300 { "rx_undersize_packets" },
301 { "rx_in_length_errors" },
302 { "rx_out_length_errors" },
303 { "rx_64_or_less_octet_packets" },
304 { "rx_65_to_127_octet_packets" },
305 { "rx_128_to_255_octet_packets" },
306 { "rx_256_to_511_octet_packets" },
307 { "rx_512_to_1023_octet_packets" },
308 { "rx_1024_to_1522_octet_packets" },
309 { "rx_1523_to_2047_octet_packets" },
310 { "rx_2048_to_4095_octet_packets" },
311 { "rx_4096_to_8191_octet_packets" },
312 { "rx_8192_to_9022_octet_packets" },
319 { "tx_flow_control" },
321 { "tx_single_collisions" },
322 { "tx_mult_collisions" },
324 { "tx_excessive_collisions" },
325 { "tx_late_collisions" },
326 { "tx_collide_2times" },
327 { "tx_collide_3times" },
328 { "tx_collide_4times" },
329 { "tx_collide_5times" },
330 { "tx_collide_6times" },
331 { "tx_collide_7times" },
332 { "tx_collide_8times" },
333 { "tx_collide_9times" },
334 { "tx_collide_10times" },
335 { "tx_collide_11times" },
336 { "tx_collide_12times" },
337 { "tx_collide_13times" },
338 { "tx_collide_14times" },
339 { "tx_collide_15times" },
340 { "tx_ucast_packets" },
341 { "tx_mcast_packets" },
342 { "tx_bcast_packets" },
343 { "tx_carrier_sense_errors" },
347 { "dma_writeq_full" },
348 { "dma_write_prioq_full" },
352 { "rx_threshold_hit" },
354 { "dma_readq_full" },
355 { "dma_read_prioq_full" },
356 { "tx_comp_queue_full" },
358 { "ring_set_send_prod_index" },
359 { "ring_status_update" },
361 { "nic_avoided_irqs" },
362 { "nic_tx_threshold_hit" }
366 const char string[ETH_GSTRING_LEN];
367 } ethtool_test_keys[TG3_NUM_TEST] = {
368 { "nvram test (online) " },
369 { "link test (online) " },
370 { "register test (offline)" },
371 { "memory test (offline)" },
372 { "loopback test (offline)" },
373 { "interrupt test (offline)" },
376 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
378 writel(val, tp->regs + off);
381 static u32 tg3_read32(struct tg3 *tp, u32 off)
383 return (readl(tp->regs + off));
386 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
390 spin_lock_irqsave(&tp->indirect_lock, flags);
391 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
392 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
393 spin_unlock_irqrestore(&tp->indirect_lock, flags);
396 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
398 writel(val, tp->regs + off);
399 readl(tp->regs + off);
402 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
407 spin_lock_irqsave(&tp->indirect_lock, flags);
408 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
409 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
410 spin_unlock_irqrestore(&tp->indirect_lock, flags);
414 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
418 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
419 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
420 TG3_64BIT_REG_LOW, val);
423 if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
424 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
425 TG3_64BIT_REG_LOW, val);
429 spin_lock_irqsave(&tp->indirect_lock, flags);
430 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
431 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
432 spin_unlock_irqrestore(&tp->indirect_lock, flags);
434 /* In indirect mode when disabling interrupts, we also need
435 * to clear the interrupt bit in the GRC local ctrl register.
437 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
439 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
440 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
444 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
449 spin_lock_irqsave(&tp->indirect_lock, flags);
450 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
451 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
452 spin_unlock_irqrestore(&tp->indirect_lock, flags);
456 /* usec_wait specifies the wait time in usec when writing to certain registers
457 * where it is unsafe to read back the register without some delay.
458 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
459 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
461 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
463 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
464 (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
465 /* Non-posted methods */
466 tp->write32(tp, off, val);
469 tg3_write32(tp, off, val);
474 /* Wait again after the read for the posted method to guarantee that
475 * the wait time is met.
481 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
483 tp->write32_mbox(tp, off, val);
484 if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
485 !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
486 tp->read32_mbox(tp, off);
489 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
491 void __iomem *mbox = tp->regs + off;
493 if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
495 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
499 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
500 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
501 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
502 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
503 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
505 #define tw32(reg,val) tp->write32(tp, reg, val)
506 #define tw32_f(reg,val) _tw32_flush(tp,(reg),(val), 0)
507 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
508 #define tr32(reg) tp->read32(tp, reg)
510 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
514 spin_lock_irqsave(&tp->indirect_lock, flags);
515 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
516 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
517 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
519 /* Always leave this as zero. */
520 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
522 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
523 tw32_f(TG3PCI_MEM_WIN_DATA, val);
525 /* Always leave this as zero. */
526 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
528 spin_unlock_irqrestore(&tp->indirect_lock, flags);
531 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
535 spin_lock_irqsave(&tp->indirect_lock, flags);
536 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
537 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
538 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
540 /* Always leave this as zero. */
541 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
543 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
544 *val = tr32(TG3PCI_MEM_WIN_DATA);
546 /* Always leave this as zero. */
547 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
549 spin_unlock_irqrestore(&tp->indirect_lock, flags);
552 static void tg3_disable_ints(struct tg3 *tp)
554 tw32(TG3PCI_MISC_HOST_CTRL,
555 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
556 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
559 static inline void tg3_cond_int(struct tg3 *tp)
561 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
562 (tp->hw_status->status & SD_STATUS_UPDATED))
563 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
565 tw32(HOSTCC_MODE, tp->coalesce_mode |
566 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
569 static void tg3_enable_ints(struct tg3 *tp)
574 tw32(TG3PCI_MISC_HOST_CTRL,
575 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
576 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
577 (tp->last_tag << 24));
578 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
579 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
580 (tp->last_tag << 24));
584 static inline unsigned int tg3_has_work(struct tg3 *tp)
586 struct tg3_hw_status *sblk = tp->hw_status;
587 unsigned int work_exists = 0;
589 /* check for phy events */
590 if (!(tp->tg3_flags &
591 (TG3_FLAG_USE_LINKCHG_REG |
592 TG3_FLAG_POLL_SERDES))) {
593 if (sblk->status & SD_STATUS_LINK_CHG)
596 /* check for RX/TX work to do */
597 if (sblk->idx[0].tx_consumer != tp->tx_cons ||
598 sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
605 * similar to tg3_enable_ints, but it accurately determines whether there
606 * is new work pending and can return without flushing the PIO write
607 * which reenables interrupts
609 static void tg3_restart_ints(struct tg3 *tp)
611 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
615 /* When doing tagged status, this work check is unnecessary.
616 * The last_tag we write above tells the chip which piece of
617 * work we've completed.
619 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
621 tw32(HOSTCC_MODE, tp->coalesce_mode |
622 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
625 static inline void tg3_netif_stop(struct tg3 *tp)
627 tp->dev->trans_start = jiffies; /* prevent tx timeout */
628 netif_poll_disable(tp->dev);
629 netif_tx_disable(tp->dev);
632 static inline void tg3_netif_start(struct tg3 *tp)
634 netif_wake_queue(tp->dev);
635 /* NOTE: unconditional netif_wake_queue is only appropriate
636 * so long as all callers are assured to have free tx slots
637 * (such as after tg3_init_hw)
639 netif_poll_enable(tp->dev);
640 tp->hw_status->status |= SD_STATUS_UPDATED;
644 static void tg3_switch_clocks(struct tg3 *tp)
646 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
649 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
652 orig_clock_ctrl = clock_ctrl;
653 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
654 CLOCK_CTRL_CLKRUN_OENABLE |
656 tp->pci_clock_ctrl = clock_ctrl;
658 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
659 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
660 tw32_wait_f(TG3PCI_CLOCK_CTRL,
661 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
663 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
664 tw32_wait_f(TG3PCI_CLOCK_CTRL,
666 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
668 tw32_wait_f(TG3PCI_CLOCK_CTRL,
669 clock_ctrl | (CLOCK_CTRL_ALTCLK),
672 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
675 #define PHY_BUSY_LOOPS 5000
677 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
683 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
685 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
691 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
692 MI_COM_PHY_ADDR_MASK);
693 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
694 MI_COM_REG_ADDR_MASK);
695 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
697 tw32_f(MAC_MI_COM, frame_val);
699 loops = PHY_BUSY_LOOPS;
702 frame_val = tr32(MAC_MI_COM);
704 if ((frame_val & MI_COM_BUSY) == 0) {
706 frame_val = tr32(MAC_MI_COM);
714 *val = frame_val & MI_COM_DATA_MASK;
718 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
719 tw32_f(MAC_MI_MODE, tp->mi_mode);
726 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
732 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
734 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
738 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
739 MI_COM_PHY_ADDR_MASK);
740 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
741 MI_COM_REG_ADDR_MASK);
742 frame_val |= (val & MI_COM_DATA_MASK);
743 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
745 tw32_f(MAC_MI_COM, frame_val);
747 loops = PHY_BUSY_LOOPS;
750 frame_val = tr32(MAC_MI_COM);
751 if ((frame_val & MI_COM_BUSY) == 0) {
753 frame_val = tr32(MAC_MI_COM);
763 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
764 tw32_f(MAC_MI_MODE, tp->mi_mode);
771 static void tg3_phy_set_wirespeed(struct tg3 *tp)
775 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
778 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
779 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
780 tg3_writephy(tp, MII_TG3_AUX_CTRL,
781 (val | (1 << 15) | (1 << 4)));
784 static int tg3_bmcr_reset(struct tg3 *tp)
789 /* OK, reset it, and poll the BMCR_RESET bit until it
790 * clears or we time out.
792 phy_control = BMCR_RESET;
793 err = tg3_writephy(tp, MII_BMCR, phy_control);
799 err = tg3_readphy(tp, MII_BMCR, &phy_control);
803 if ((phy_control & BMCR_RESET) == 0) {
815 static int tg3_wait_macro_done(struct tg3 *tp)
822 if (!tg3_readphy(tp, 0x16, &tmp32)) {
823 if ((tmp32 & 0x1000) == 0)
833 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
835 static const u32 test_pat[4][6] = {
836 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
837 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
838 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
839 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
843 for (chan = 0; chan < 4; chan++) {
846 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
847 (chan * 0x2000) | 0x0200);
848 tg3_writephy(tp, 0x16, 0x0002);
850 for (i = 0; i < 6; i++)
851 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
854 tg3_writephy(tp, 0x16, 0x0202);
855 if (tg3_wait_macro_done(tp)) {
860 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
861 (chan * 0x2000) | 0x0200);
862 tg3_writephy(tp, 0x16, 0x0082);
863 if (tg3_wait_macro_done(tp)) {
868 tg3_writephy(tp, 0x16, 0x0802);
869 if (tg3_wait_macro_done(tp)) {
874 for (i = 0; i < 6; i += 2) {
877 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
878 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
879 tg3_wait_macro_done(tp)) {
885 if (low != test_pat[chan][i] ||
886 high != test_pat[chan][i+1]) {
887 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
888 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
889 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
899 static int tg3_phy_reset_chanpat(struct tg3 *tp)
903 for (chan = 0; chan < 4; chan++) {
906 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
907 (chan * 0x2000) | 0x0200);
908 tg3_writephy(tp, 0x16, 0x0002);
909 for (i = 0; i < 6; i++)
910 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
911 tg3_writephy(tp, 0x16, 0x0202);
912 if (tg3_wait_macro_done(tp))
919 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
921 u32 reg32, phy9_orig;
922 int retries, do_phy_reset, err;
928 err = tg3_bmcr_reset(tp);
934 /* Disable transmitter and interrupt. */
935 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32))
939 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
941 /* Set full-duplex, 1000 mbps. */
942 tg3_writephy(tp, MII_BMCR,
943 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
945 /* Set to master mode. */
946 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
949 tg3_writephy(tp, MII_TG3_CTRL,
950 (MII_TG3_CTRL_AS_MASTER |
951 MII_TG3_CTRL_ENABLE_AS_MASTER));
953 /* Enable SM_DSP_CLOCK and 6dB. */
954 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
956 /* Block the PHY control access. */
957 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
958 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
960 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
965 err = tg3_phy_reset_chanpat(tp);
969 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
970 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
972 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
973 tg3_writephy(tp, 0x16, 0x0000);
975 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
976 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
977 /* Set Extended packet length bit for jumbo frames */
978 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
981 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
984 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
986 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32)) {
988 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
995 static void tg3_link_report(struct tg3 *);
997 /* This will reset the tigon3 PHY if there is no valid
998 * link unless the FORCE argument is non-zero.
1000 static int tg3_phy_reset(struct tg3 *tp)
1005 err = tg3_readphy(tp, MII_BMSR, &phy_status);
1006 err |= tg3_readphy(tp, MII_BMSR, &phy_status);
1010 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
1011 netif_carrier_off(tp->dev);
1012 tg3_link_report(tp);
1015 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1016 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1017 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1018 err = tg3_phy_reset_5703_4_5(tp);
1024 err = tg3_bmcr_reset(tp);
1029 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1030 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1031 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1032 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1033 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1034 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1035 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1037 if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1038 tg3_writephy(tp, 0x1c, 0x8d68);
1039 tg3_writephy(tp, 0x1c, 0x8d68);
1041 if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1042 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1043 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1044 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1045 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1046 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1047 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1048 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1049 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1051 else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1052 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1053 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1054 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
1055 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1057 /* Set Extended packet length bit (bit 14) on all chips that */
1058 /* support jumbo frames */
1059 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1060 /* Cannot do read-modify-write on 5401 */
1061 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1062 } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1065 /* Set bit 14 with read-modify-write to preserve other bits */
1066 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1067 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1068 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1071 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1072 * jumbo frames transmission.
1074 if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1077 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1078 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1079 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1082 tg3_phy_set_wirespeed(tp);
1086 static void tg3_frob_aux_power(struct tg3 *tp)
1088 struct tg3 *tp_peer = tp;
1090 if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
1093 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1094 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1095 struct net_device *dev_peer;
1097 dev_peer = pci_get_drvdata(tp->pdev_peer);
1098 /* remove_one() may have been run on the peer. */
1102 tp_peer = netdev_priv(dev_peer);
1105 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1106 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1107 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1108 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1109 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1110 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1111 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1112 (GRC_LCLCTRL_GPIO_OE0 |
1113 GRC_LCLCTRL_GPIO_OE1 |
1114 GRC_LCLCTRL_GPIO_OE2 |
1115 GRC_LCLCTRL_GPIO_OUTPUT0 |
1116 GRC_LCLCTRL_GPIO_OUTPUT1),
1120 u32 grc_local_ctrl = 0;
1122 if (tp_peer != tp &&
1123 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1126 /* Workaround to prevent overdrawing Amps. */
1127 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1129 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
1130 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1131 grc_local_ctrl, 100);
1134 /* On 5753 and variants, GPIO2 cannot be used. */
1135 no_gpio2 = tp->nic_sram_data_cfg &
1136 NIC_SRAM_DATA_CFG_NO_GPIO2;
1138 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1139 GRC_LCLCTRL_GPIO_OE1 |
1140 GRC_LCLCTRL_GPIO_OE2 |
1141 GRC_LCLCTRL_GPIO_OUTPUT1 |
1142 GRC_LCLCTRL_GPIO_OUTPUT2;
1144 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1145 GRC_LCLCTRL_GPIO_OUTPUT2);
1147 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1148 grc_local_ctrl, 100);
1150 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1152 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1153 grc_local_ctrl, 100);
1156 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1157 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1158 grc_local_ctrl, 100);
1162 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1163 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1164 if (tp_peer != tp &&
1165 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1168 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1169 (GRC_LCLCTRL_GPIO_OE1 |
1170 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1172 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1173 GRC_LCLCTRL_GPIO_OE1, 100);
1175 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1176 (GRC_LCLCTRL_GPIO_OE1 |
1177 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1182 static int tg3_setup_phy(struct tg3 *, int);
1184 #define RESET_KIND_SHUTDOWN 0
1185 #define RESET_KIND_INIT 1
1186 #define RESET_KIND_SUSPEND 2
1188 static void tg3_write_sig_post_reset(struct tg3 *, int);
1189 static int tg3_halt_cpu(struct tg3 *, u32);
1190 static int tg3_nvram_lock(struct tg3 *);
1191 static void tg3_nvram_unlock(struct tg3 *);
/* Put the copper PHY into its lowest power state.  Serdes (fiber) PHYs
 * are handled elsewhere, hence the early bail-out on TG3_FLG2_PHY_SERDES.
 * NOTE(review): source text is sampled — some statements between the
 * visible lines are not shown here.
 */
1193 static void tg3_power_down_phy(struct tg3 *tp)
1195 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
/* Force the link LEDs off and write the magic AUX_CTRL power value. */
1198 tg3_writephy(tp, MII_TG3_EXT_CTRL, MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1199 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1201 /* The PHY should not be powered down on some chips because
/* 5700, 5704, and 5780-with-MII-serdes must keep the PHY powered —
 * skip the BMCR_PDOWN write for those ASIC revisions.
 */
1204 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1205 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1206 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1207 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
/* Standard MII power-down bit does the actual power-off. */
1209 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
/* Transition the device into the requested PCI power state.
 * Responsibilities visible in this (sampled) body:
 *   - program the PM control register (PME status/enable, state bits),
 *   - on suspend: drop the link to 10/half, hand off to ASF/WOL firmware,
 *     program MAC_MODE for wake-on-LAN, gate the RX/TX/core clocks,
 *   - power the PHY down when neither WOL nor ASF needs it,
 *   - apply the 5750 AX/BX PLL workaround and write the shutdown
 *     signature before committing the new power state.
 * NOTE(review): intermediate lines are missing from this excerpt; the
 * surrounding switch/if structure is not fully shown.
 */
1212 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1215 u16 power_control, power_caps;
1216 int pm = tp->pm_cap;
1218 /* Make sure register accesses (indirect or otherwise)
1219 * will function correctly.
1221 pci_write_config_dword(tp->pdev,
1222 TG3PCI_MISC_HOST_CTRL,
1223 tp->misc_host_ctrl);
1225 pci_read_config_word(tp->pdev,
/* Clear any pending PME status (write-1-to-clear) and the state bits. */
1228 power_control |= PCI_PM_CTRL_PME_STATUS;
1229 power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1233 pci_write_config_word(tp->pdev,
1236 udelay(100); /* Delay after power state change */
1238 /* Switch out of Vaux if it is not a LOM */
1239 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
1240 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
/* Unknown power state requested — warn and (presumably) bail out. */
1257 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1259 tp->dev->name, state);
1263 power_control |= PCI_PM_CTRL_PME_ENABLE;
/* Mask PCI interrupts while the device is being suspended. */
1265 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1266 tw32(TG3PCI_MISC_HOST_CTRL,
1267 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
/* Save the current link config so resume can restore it, then force
 * the copper link down to 10/half for low-power operation.
 */
1269 if (tp->link_config.phy_is_low_power == 0) {
1270 tp->link_config.phy_is_low_power = 1;
1271 tp->link_config.orig_speed = tp->link_config.speed;
1272 tp->link_config.orig_duplex = tp->link_config.duplex;
1273 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1276 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1277 tp->link_config.speed = SPEED_10;
1278 tp->link_config.duplex = DUPLEX_HALF;
1279 tp->link_config.autoneg = AUTONEG_ENABLE;
1280 tg3_setup_phy(tp, 0);
/* Without ASF, poll the firmware mailbox (up to ~200ms) for the magic
 * handshake value before announcing shutdown/WOL intent to the NIC.
 */
1283 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1287 for (i = 0; i < 200; i++) {
1288 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1289 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1291 #if (LINUX_VERSION_CODE < 0x20607)
1292 set_current_state(TASK_UNINTERRUPTIBLE);
1293 schedule_timeout(HZ / 1000);
1299 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1300 WOL_DRV_STATE_SHUTDOWN |
1301 WOL_DRV_WOL | WOL_SET_MAGIC_PKT);
1303 pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
/* WOL path: pick the MAC port mode (MII/GMII/TBI) that matches the
 * PHY type, then enable magic-packet detection if D3cold PME is
 * supported by the PM capability.
 */
1305 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1308 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1309 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1312 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
1313 mac_mode = MAC_MODE_PORT_MODE_GMII;
1315 mac_mode = MAC_MODE_PORT_MODE_MII;
1317 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1318 !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1319 mac_mode |= MAC_MODE_LINK_POLARITY;
1321 mac_mode = MAC_MODE_PORT_MODE_TBI;
1324 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1325 tw32(MAC_LED_CTRL, tp->led_ctrl);
1327 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1328 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1329 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1331 tw32_f(MAC_MODE, mac_mode);
1334 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
/* Clock gating: the exact bits depend on chip family.  5700/5701
 * without 100Mb-WOL can power down the PLL entirely; 5780-class is
 * left alone; everything else disables RX/TX clocks in two steps.
 */
1338 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1339 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1340 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1343 base_val = tp->pci_clock_ctrl;
1344 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1345 CLOCK_CTRL_TXCLK_DISABLE);
1347 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1348 CLOCK_CTRL_PWRDOWN_PLL133, 40);
1349 } else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
1351 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1352 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1353 u32 newbits1, newbits2;
1355 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1356 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1357 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1358 CLOCK_CTRL_TXCLK_DISABLE |
1360 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1361 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1362 newbits1 = CLOCK_CTRL_625_CORE;
1363 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1365 newbits1 = CLOCK_CTRL_ALTCLK;
1366 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1369 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1372 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1375 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1378 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1379 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1380 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1381 CLOCK_CTRL_TXCLK_DISABLE |
1382 CLOCK_CTRL_44MHZ_CORE);
1384 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1387 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1388 tp->pci_clock_ctrl | newbits3, 40);
/* Nothing needs the PHY awake — power it off completely. */
1392 if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1393 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1394 /* Turn off the PHY */
1395 tg3_power_down_phy(tp);
1397 tg3_frob_aux_power(tp);
1399 /* Workaround for unstable PLL clock */
1400 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1401 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX) ) {
1402 u32 val = tr32(0x7d00);
1404 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
/* Halting the RX CPU requires the NVRAM arbitration lock. */
1406 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1409 err = tg3_nvram_lock(tp);
1410 tg3_halt_cpu(tp, RX_CPU_BASE);
1412 tg3_nvram_unlock(tp);
1416 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1418 /* Finally, set the new power state. */
1419 pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1420 udelay(100); /* Delay after power state change */
/* Log the current link status to the kernel log: "down", or the
 * negotiated speed/duplex plus the resolved TX/RX flow-control state.
 * Purely informational — reads tp->link_config and tp->tg3_flags only.
 */
1425 static void tg3_link_report(struct tg3 *tp)
1427 if (!netif_carrier_ok(tp->dev)) {
1428 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
/* Link up: nested ternaries map active_speed to 1000/100/(10) Mbps. */
1430 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1432 (tp->link_config.active_speed == SPEED_1000 ?
1434 (tp->link_config.active_speed == SPEED_100 ?
1436 (tp->link_config.active_duplex == DUPLEX_FULL ?
/* Pause flags were resolved earlier by tg3_setup_flow_control(). */
1439 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1442 (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1443 (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
/* Resolve 802.3x flow control from the local and link-partner
 * advertisements and program MAC_RX_MODE / MAC_TX_MODE accordingly.
 * With PAUSE_AUTONEG set, the standard sym/asym pause resolution table
 * decides TG3_FLAG_RX_PAUSE / TG3_FLAG_TX_PAUSE; otherwise the flags
 * already in tp->tg3_flags are used as-is.  Registers are only
 * rewritten when the computed mode actually changed.
 */
1447 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1449 u32 new_tg3_flags = 0;
1450 u32 old_rx_mode = tp->rx_mode;
1451 u32 old_tx_mode = tp->tx_mode;
1453 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1455 /* Convert 1000BaseX flow control bits to 1000BaseT
1456 * bits before resolving flow control.
1458 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1459 local_adv &= ~(ADVERTISE_PAUSE_CAP |
1460 ADVERTISE_PAUSE_ASYM);
1461 remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1463 if (local_adv & ADVERTISE_1000XPAUSE)
1464 local_adv |= ADVERTISE_PAUSE_CAP;
1465 if (local_adv & ADVERTISE_1000XPSE_ASYM)
1466 local_adv |= ADVERTISE_PAUSE_ASYM;
1467 if (remote_adv & LPA_1000XPAUSE)
1468 remote_adv |= LPA_PAUSE_CAP;
1469 if (remote_adv & LPA_1000XPAUSE_ASYM)
1470 remote_adv |= LPA_PAUSE_ASYM;
/* Pause resolution (IEEE 802.3 Annex 28B style):
 * both sides sym -> RX+TX; local asym-only vs partner asym -> RX only;
 * local asym + partner sym+asym -> TX only.
 */
1473 if (local_adv & ADVERTISE_PAUSE_CAP) {
1474 if (local_adv & ADVERTISE_PAUSE_ASYM) {
1475 if (remote_adv & LPA_PAUSE_CAP)
1477 (TG3_FLAG_RX_PAUSE |
1479 else if (remote_adv & LPA_PAUSE_ASYM)
1481 (TG3_FLAG_RX_PAUSE);
1483 if (remote_adv & LPA_PAUSE_CAP)
1485 (TG3_FLAG_RX_PAUSE |
1488 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1489 if ((remote_adv & LPA_PAUSE_CAP) &&
1490 (remote_adv & LPA_PAUSE_ASYM))
1491 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1494 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1495 tp->tg3_flags |= new_tg3_flags;
/* Autoneg of pause disabled: honor the pre-set flags. */
1497 new_tg3_flags = tp->tg3_flags;
1500 if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1501 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1503 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1505 if (old_rx_mode != tp->rx_mode) {
1506 tw32_f(MAC_RX_MODE, tp->rx_mode);
1509 if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1510 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1512 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1514 if (old_tx_mode != tp->tx_mode) {
1515 tw32_f(MAC_TX_MODE, tp->tx_mode);
/* Decode the PHY's AUX_STAT speed/duplex field into SPEED_*/DUPLEX_*
 * output parameters.  Unrecognized codes yield SPEED_INVALID /
 * DUPLEX_INVALID so the caller can detect a stale or bogus reading.
 */
1519 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1521 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1522 case MII_TG3_AUX_STAT_10HALF:
1524 *duplex = DUPLEX_HALF;
1527 case MII_TG3_AUX_STAT_10FULL:
1529 *duplex = DUPLEX_FULL;
1532 case MII_TG3_AUX_STAT_100HALF:
1534 *duplex = DUPLEX_HALF;
1537 case MII_TG3_AUX_STAT_100FULL:
1539 *duplex = DUPLEX_FULL;
1542 case MII_TG3_AUX_STAT_1000HALF:
1543 *speed = SPEED_1000;
1544 *duplex = DUPLEX_HALF;
1547 case MII_TG3_AUX_STAT_1000FULL:
1548 *speed = SPEED_1000;
1549 *duplex = DUPLEX_FULL;
/* default: unknown encoding — report invalid. */
1553 *speed = SPEED_INVALID;
1554 *duplex = DUPLEX_INVALID;
/* Program the copper PHY's advertisement registers and (re)start
 * autonegotiation according to tp->link_config.  Four cases:
 *   1. low-power mode: advertise 10bT only (plus 100bT if WOL at 100Mb),
 *   2. speed == SPEED_INVALID: advertise everything the chip supports,
 *   3. a specific autoneg advertisement,
 *   4. forced speed/duplex: program BMCR directly and wait for the old
 *      link to drop before applying the new settings.
 * NOTE(review): sampled excerpt — some branch structure is not visible.
 */
1559 static void tg3_phy_copper_begin(struct tg3 *tp)
1564 if (tp->link_config.phy_is_low_power) {
1565 /* Entering low power mode. Disable gigabit and
1566 * 100baseT advertisements.
1568 tg3_writephy(tp, MII_TG3_CTRL, 0);
1570 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1571 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1572 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1573 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1575 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1576 } else if (tp->link_config.speed == SPEED_INVALID) {
1577 tp->link_config.advertising =
1578 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1579 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1580 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1581 ADVERTISED_Autoneg | ADVERTISED_MII);
1583 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1584 tp->link_config.advertising &=
1585 ~(ADVERTISED_1000baseT_Half |
1586 ADVERTISED_1000baseT_Full);
/* Translate ethtool ADVERTISED_* bits into MII register bits. */
1588 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1589 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1590 new_adv |= ADVERTISE_10HALF;
1591 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1592 new_adv |= ADVERTISE_10FULL;
1593 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1594 new_adv |= ADVERTISE_100HALF;
1595 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1596 new_adv |= ADVERTISE_100FULL;
1597 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1599 if (tp->link_config.advertising &
1600 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1602 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1603 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1604 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1605 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
/* 5701 A0/B0 erratum: force master mode when advertising gigabit. */
1606 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1607 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1608 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1609 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1610 MII_TG3_CTRL_ENABLE_AS_MASTER);
1611 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1613 tg3_writephy(tp, MII_TG3_CTRL, 0);
1616 /* Asking for a specific link mode. */
1617 if (tp->link_config.speed == SPEED_1000) {
1618 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1619 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1621 if (tp->link_config.duplex == DUPLEX_FULL)
1622 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1624 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1625 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1626 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1627 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1628 MII_TG3_CTRL_ENABLE_AS_MASTER);
1629 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1631 tg3_writephy(tp, MII_TG3_CTRL, 0);
1633 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1634 if (tp->link_config.speed == SPEED_100) {
1635 if (tp->link_config.duplex == DUPLEX_FULL)
1636 new_adv |= ADVERTISE_100FULL;
1638 new_adv |= ADVERTISE_100HALF;
1640 if (tp->link_config.duplex == DUPLEX_FULL)
1641 new_adv |= ADVERTISE_10FULL;
1643 new_adv |= ADVERTISE_10HALF;
1645 tg3_writephy(tp, MII_ADVERTISE, new_adv);
/* Forced mode: build the BMCR value, then loop (double-reading BMSR
 * to latch status) until the old link goes down before applying it.
 */
1649 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1650 tp->link_config.speed != SPEED_INVALID) {
1651 u32 bmcr, orig_bmcr;
1653 tp->link_config.active_speed = tp->link_config.speed;
1654 tp->link_config.active_duplex = tp->link_config.duplex;
1657 switch (tp->link_config.speed) {
1663 bmcr |= BMCR_SPEED100;
1667 bmcr |= TG3_BMCR_SPEED1000;
1671 if (tp->link_config.duplex == DUPLEX_FULL)
1672 bmcr |= BMCR_FULLDPLX;
1674 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1675 (bmcr != orig_bmcr)) {
1676 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1677 for (i = 0; i < 1500; i++) {
1681 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1682 tg3_readphy(tp, MII_BMSR, &tmp))
1684 if (!(tmp & BMSR_LSTATUS)) {
1689 tg3_writephy(tp, MII_BMCR, bmcr);
/* Autoneg path: kick off (re)negotiation. */
1693 tg3_writephy(tp, MII_BMCR,
1694 BMCR_ANENABLE | BMCR_ANRESTART);
/* Load the DSP fix-up sequence for the BCM5401 PHY: disable tap power
 * management and set the extended-packet-length bit via the indirect
 * DSP_ADDRESS/DSP_RW_PORT register pairs.  Returns 0 on success, or the
 * OR of the tg3_writephy() error results (non-zero on any failure).
 */
1698 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1702 /* Turn off tap power management. */
1703 /* Set Extended packet length bit */
1704 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1706 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1707 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1709 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1710 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1712 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1713 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1715 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1716 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1718 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1719 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
/* Check whether the PHY is currently advertising every mode the chip
 * supports: all 10/100 modes in MII_ADVERTISE, plus both gigabit modes
 * in MII_TG3_CTRL unless the board is 10/100-only.  Used to decide if
 * autoneg must be restarted after leaving low-power mode.  A register
 * read failure or any missing bit makes the check fail.
 */
1726 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1728 u32 adv_reg, all_mask;
1730 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1733 all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1734 ADVERTISE_100HALF | ADVERTISE_100FULL);
1735 if ((adv_reg & all_mask) != all_mask)
1737 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1740 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1743 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1744 MII_TG3_CTRL_ADV_1000_FULL);
1745 if ((tg3_ctrl & all_mask) != all_mask)
/* Bring up / re-evaluate the link on a copper PHY.  Visible stages:
 * clear latched MAC status, apply per-chip PHY workarounds (5401 DSP
 * init, 5701 A0/B0 CRC fix), clear PHY interrupts, read BMSR/AUX_STAT
 * to determine link, speed and duplex, resolve flow control, program
 * MAC_MODE to match, and finally update the carrier state and log it.
 * Many PHY status reads are done twice back-to-back because the MII
 * status bits are latched and only valid on the second read.
 * NOTE(review): sampled excerpt — surrounding declarations and some
 * statements between the visible lines are not shown.
 */
1751 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1753 int current_link_up;
/* Clear all latched link-change status bits before probing. */
1762 (MAC_STATUS_SYNC_CHANGED |
1763 MAC_STATUS_CFG_CHANGED |
1764 MAC_STATUS_MI_COMPLETION |
1765 MAC_STATUS_LNKSTATE_CHANGED));
1768 tp->mi_mode = MAC_MI_MODE_BASE;
1769 tw32_f(MAC_MI_MODE, tp->mi_mode);
1772 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1774 /* Some third-party PHYs need to be reset on link going
1777 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1778 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1779 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1780 netif_carrier_ok(tp->dev)) {
1781 tg3_readphy(tp, MII_BMSR, &bmsr);
1782 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1783 !(bmsr & BMSR_LSTATUS))
/* BCM5401: reload the DSP patch whenever the link has dropped. */
1789 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1790 tg3_readphy(tp, MII_BMSR, &bmsr);
1791 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1792 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1795 if (!(bmsr & BMSR_LSTATUS)) {
1796 err = tg3_init_5401phy_dsp(tp);
1800 tg3_readphy(tp, MII_BMSR, &bmsr);
1801 for (i = 0; i < 1000; i++) {
1803 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1804 (bmsr & BMSR_LSTATUS)) {
/* 5401 B0 at gigabit may need a full PHY reset + DSP reload. */
1810 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1811 !(bmsr & BMSR_LSTATUS) &&
1812 tp->link_config.active_speed == SPEED_1000) {
1813 err = tg3_phy_reset(tp);
1815 err = tg3_init_5401phy_dsp(tp);
1820 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1821 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1822 /* 5701 {A0,B0} CRC bug workaround */
1823 tg3_writephy(tp, 0x15, 0x0a75);
1824 tg3_writephy(tp, 0x1c, 0x8c68);
1825 tg3_writephy(tp, 0x1c, 0x8d68);
1826 tg3_writephy(tp, 0x1c, 0x8c68);
1829 /* Clear pending interrupts... */
1830 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1831 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1833 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1834 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG)
1836 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1838 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1839 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1840 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1841 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1842 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1844 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1847 current_link_up = 0;
1848 current_speed = SPEED_INVALID;
1849 current_duplex = DUPLEX_INVALID;
1851 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1854 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1855 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1856 if (!(val & (1 << 10))) {
1858 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
/* Poll (up to 100 iterations) for link; BMSR is read twice per
 * iteration because the link-status bit is latched.
 */
1864 for (i = 0; i < 100; i++) {
1865 tg3_readphy(tp, MII_BMSR, &bmsr);
1866 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1867 (bmsr & BMSR_LSTATUS))
1872 if (bmsr & BMSR_LSTATUS) {
/* Link is up: wait for AUX_STAT to report a valid speed/duplex. */
1875 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1876 for (i = 0; i < 2000; i++) {
1878 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1883 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
/* Wait for a sane BMCR value (0x7fff can appear transiently). */
1888 for (i = 0; i < 200; i++) {
1889 tg3_readphy(tp, MII_BMCR, &bmcr);
1890 if (tg3_readphy(tp, MII_BMCR, &bmcr))
1892 if (bmcr && bmcr != 0x7fff)
1897 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1898 if (bmcr & BMCR_ANENABLE) {
1899 current_link_up = 1;
1901 /* Force autoneg restart if we are exiting
1904 if (!tg3_copper_is_advertising_all(tp))
1905 current_link_up = 0;
1907 current_link_up = 0;
/* Forced mode only counts as "up" if it matches the request. */
1910 if (!(bmcr & BMCR_ANENABLE) &&
1911 tp->link_config.speed == current_speed &&
1912 tp->link_config.duplex == current_duplex) {
1913 current_link_up = 1;
1915 current_link_up = 0;
1919 tp->link_config.active_speed = current_speed;
1920 tp->link_config.active_duplex = current_duplex;
/* Full-duplex autoneg link: verify pause advertisement and resolve
 * flow control; a non-CAP local advertisement forces a reconfigure.
 */
1923 if (current_link_up == 1 &&
1924 (tp->link_config.active_duplex == DUPLEX_FULL) &&
1925 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1926 u32 local_adv, remote_adv;
1928 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1930 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1932 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1935 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1937 /* If we are not advertising full pause capability,
1938 * something is wrong. Bring the link down and reconfigure.
1940 if (local_adv != ADVERTISE_PAUSE_CAP) {
1941 current_link_up = 0;
1943 tg3_setup_flow_control(tp, local_adv, remote_adv);
/* Link down (or leaving low power): reprogram the PHY and re-test. */
1947 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
1950 tg3_phy_copper_begin(tp);
1952 tg3_readphy(tp, MII_BMSR, &tmp);
1953 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1954 (tmp & BMSR_LSTATUS))
1955 current_link_up = 1;
/* Mirror the negotiated result into MAC_MODE (port mode, duplex,
 * link polarity — the 5700 has special polarity rules).
 */
1958 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1959 if (current_link_up == 1) {
1960 if (tp->link_config.active_speed == SPEED_100 ||
1961 tp->link_config.active_speed == SPEED_10)
1962 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1964 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1966 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1968 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1969 if (tp->link_config.active_duplex == DUPLEX_HALF)
1970 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1972 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1973 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1974 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1975 (current_link_up == 1 &&
1976 tp->link_config.active_speed == SPEED_10))
1977 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1979 if (current_link_up == 1)
1980 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1983 /* ??? Without this setting Netgear GA302T PHY does not
1984 * ??? send/receive packets...
1986 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1987 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1988 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1989 tw32_f(MAC_MI_MODE, tp->mi_mode);
1993 tw32_f(MAC_MODE, tp->mac_mode);
1996 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1997 /* Polled via timer. */
1998 tw32_f(MAC_EVENT, 0);
2000 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
/* 5700 at gigabit on PCI-X/high-speed PCI: extra status clearing and
 * a firmware mailbox handshake are required.
 */
2004 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2005 current_link_up == 1 &&
2006 tp->link_config.active_speed == SPEED_1000 &&
2007 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2008 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2011 (MAC_STATUS_SYNC_CHANGED |
2012 MAC_STATUS_CFG_CHANGED));
2015 NIC_SRAM_FIRMWARE_MBOX,
2016 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2019 if (current_link_up != netif_carrier_ok(tp->dev)) {
2020 if (current_link_up)
2021 netif_carrier_on(tp->dev);
2023 netif_carrier_off(tp->dev);
2024 tg3_link_report(tp);
/* State block for the software 1000BaseX autonegotiation state machine
 * (tg3_fiber_aneg_smachine).  The ANEG_STATE_* values enumerate the
 * IEEE 802.3 Clause 37 arbitration states; MR_* bits mirror the
 * management-register style flags (local control/status plus decoded
 * link-partner abilities); ANEG_CFG_* are bit positions within the
 * received/transmitted config words.
 * NOTE(review): sampled excerpt — some member declarations between the
 * visible lines are not shown.
 */
2030 struct tg3_fiber_aneginfo {
2032 #define ANEG_STATE_UNKNOWN 0
2033 #define ANEG_STATE_AN_ENABLE 1
2034 #define ANEG_STATE_RESTART_INIT 2
2035 #define ANEG_STATE_RESTART 3
2036 #define ANEG_STATE_DISABLE_LINK_OK 4
2037 #define ANEG_STATE_ABILITY_DETECT_INIT 5
2038 #define ANEG_STATE_ABILITY_DETECT 6
2039 #define ANEG_STATE_ACK_DETECT_INIT 7
2040 #define ANEG_STATE_ACK_DETECT 8
2041 #define ANEG_STATE_COMPLETE_ACK_INIT 9
2042 #define ANEG_STATE_COMPLETE_ACK 10
2043 #define ANEG_STATE_IDLE_DETECT_INIT 11
2044 #define ANEG_STATE_IDLE_DETECT 12
2045 #define ANEG_STATE_LINK_OK 13
2046 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
2047 #define ANEG_STATE_NEXT_PAGE_WAIT 15
/* Flag word bits: local control/status ... */
2050 #define MR_AN_ENABLE 0x00000001
2051 #define MR_RESTART_AN 0x00000002
2052 #define MR_AN_COMPLETE 0x00000004
2053 #define MR_PAGE_RX 0x00000008
2054 #define MR_NP_LOADED 0x00000010
2055 #define MR_TOGGLE_TX 0x00000020
/* ... and decoded link-partner advertisement bits. */
2056 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
2057 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
2058 #define MR_LP_ADV_SYM_PAUSE 0x00000100
2059 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
2060 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2061 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2062 #define MR_LP_ADV_NEXT_PAGE 0x00001000
2063 #define MR_TOGGLE_RX 0x00002000
2064 #define MR_NP_RX 0x00004000
2066 #define MR_LINK_OK 0x80000000
2068 unsigned long link_time, cur_time;
2070 u32 ability_match_cfg;
2071 int ability_match_count;
2073 char ability_match, idle_match, ack_match;
2075 u32 txconfig, rxconfig;
/* Bit layout of the 16-bit TX/RX config words. */
2076 #define ANEG_CFG_NP 0x00000080
2077 #define ANEG_CFG_ACK 0x00000040
2078 #define ANEG_CFG_RF2 0x00000020
2079 #define ANEG_CFG_RF1 0x00000010
2080 #define ANEG_CFG_PS2 0x00000001
2081 #define ANEG_CFG_PS1 0x00008000
2082 #define ANEG_CFG_HD 0x00004000
2083 #define ANEG_CFG_FD 0x00002000
2084 #define ANEG_CFG_INVAL 0x00001f06
/* State-machine return codes (ANEG_DONE/ANEG_OK presumably defined in
 * a line not shown here — TODO confirm against full source).
 */
2089 #define ANEG_TIMER_ENAB 2
2090 #define ANEG_FAILED -1
2092 #define ANEG_STATE_SETTLE_TIME 10000
/* One tick of the software Clause 37 autonegotiation state machine for
 * fiber links.  Reads the received config word from MAC_RX_AUTO_NEG,
 * tracks ability/ack matching, and drives MAC_TX_AUTO_NEG / MAC_MODE
 * (SEND_CONFIGS) through the ABILITY_DETECT -> ACK_DETECT ->
 * COMPLETE_ACK -> IDLE_DETECT -> LINK_OK progression.  Returns a
 * scheduling hint (e.g. ANEG_TIMER_ENAB to be called again after a
 * timer interval).  NOTE(review): sampled excerpt — some statements
 * (ret initialization, ack_match updates, etc.) are not shown.
 */
2094 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2095 struct tg3_fiber_aneginfo *ap)
2097 unsigned long delta;
2101 if (ap->state == ANEG_STATE_UNKNOWN) {
2105 ap->ability_match_cfg = 0;
2106 ap->ability_match_count = 0;
2107 ap->ability_match = 0;
/* Sample the incoming config word; "ability match" requires seeing
 * the same non-zero word more than once in a row.
 */
2113 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2114 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2116 if (rx_cfg_reg != ap->ability_match_cfg) {
2117 ap->ability_match_cfg = rx_cfg_reg;
2118 ap->ability_match = 0;
2119 ap->ability_match_count = 0;
2121 if (++ap->ability_match_count > 1) {
2122 ap->ability_match = 1;
2123 ap->ability_match_cfg = rx_cfg_reg;
2126 if (rx_cfg_reg & ANEG_CFG_ACK)
/* No config word received: reset the match tracking. */
2134 ap->ability_match_cfg = 0;
2135 ap->ability_match_count = 0;
2136 ap->ability_match = 0;
2142 ap->rxconfig = rx_cfg_reg;
2146 case ANEG_STATE_UNKNOWN:
2147 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2148 ap->state = ANEG_STATE_AN_ENABLE;
2151 case ANEG_STATE_AN_ENABLE:
2152 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2153 if (ap->flags & MR_AN_ENABLE) {
2156 ap->ability_match_cfg = 0;
2157 ap->ability_match_count = 0;
2158 ap->ability_match = 0;
2162 ap->state = ANEG_STATE_RESTART_INIT;
2164 ap->state = ANEG_STATE_DISABLE_LINK_OK;
2168 case ANEG_STATE_RESTART_INIT:
2169 ap->link_time = ap->cur_time;
2170 ap->flags &= ~(MR_NP_LOADED);
/* Send a break-link (all-zero config) to restart negotiation. */
2172 tw32(MAC_TX_AUTO_NEG, 0);
2173 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2174 tw32_f(MAC_MODE, tp->mac_mode);
2177 ret = ANEG_TIMER_ENAB;
2178 ap->state = ANEG_STATE_RESTART;
2181 case ANEG_STATE_RESTART:
2182 delta = ap->cur_time - ap->link_time;
2183 if (delta > ANEG_STATE_SETTLE_TIME) {
2184 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2186 ret = ANEG_TIMER_ENAB;
2190 case ANEG_STATE_DISABLE_LINK_OK:
2194 case ANEG_STATE_ABILITY_DETECT_INIT:
2195 ap->flags &= ~(MR_TOGGLE_TX);
/* Advertise full-duplex + symmetric pause in our config word. */
2196 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2197 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2198 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2199 tw32_f(MAC_MODE, tp->mac_mode);
2202 ap->state = ANEG_STATE_ABILITY_DETECT;
2205 case ANEG_STATE_ABILITY_DETECT:
2206 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2207 ap->state = ANEG_STATE_ACK_DETECT_INIT;
2211 case ANEG_STATE_ACK_DETECT_INIT:
2212 ap->txconfig |= ANEG_CFG_ACK;
2213 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2214 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2215 tw32_f(MAC_MODE, tp->mac_mode);
2218 ap->state = ANEG_STATE_ACK_DETECT;
2221 case ANEG_STATE_ACK_DETECT:
2222 if (ap->ack_match != 0) {
2223 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2224 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2225 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2227 ap->state = ANEG_STATE_AN_ENABLE;
2229 } else if (ap->ability_match != 0 &&
2230 ap->rxconfig == 0) {
2231 ap->state = ANEG_STATE_AN_ENABLE;
2235 case ANEG_STATE_COMPLETE_ACK_INIT:
2236 if (ap->rxconfig & ANEG_CFG_INVAL) {
/* Decode the partner's config word into MR_LP_ADV_* flag bits. */
2240 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2241 MR_LP_ADV_HALF_DUPLEX |
2242 MR_LP_ADV_SYM_PAUSE |
2243 MR_LP_ADV_ASYM_PAUSE |
2244 MR_LP_ADV_REMOTE_FAULT1 |
2245 MR_LP_ADV_REMOTE_FAULT2 |
2246 MR_LP_ADV_NEXT_PAGE |
2249 if (ap->rxconfig & ANEG_CFG_FD)
2250 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2251 if (ap->rxconfig & ANEG_CFG_HD)
2252 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2253 if (ap->rxconfig & ANEG_CFG_PS1)
2254 ap->flags |= MR_LP_ADV_SYM_PAUSE;
2255 if (ap->rxconfig & ANEG_CFG_PS2)
2256 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2257 if (ap->rxconfig & ANEG_CFG_RF1)
2258 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2259 if (ap->rxconfig & ANEG_CFG_RF2)
2260 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2261 if (ap->rxconfig & ANEG_CFG_NP)
2262 ap->flags |= MR_LP_ADV_NEXT_PAGE;
2264 ap->link_time = ap->cur_time;
2266 ap->flags ^= (MR_TOGGLE_TX);
2267 if (ap->rxconfig & 0x0008)
2268 ap->flags |= MR_TOGGLE_RX;
2269 if (ap->rxconfig & ANEG_CFG_NP)
2270 ap->flags |= MR_NP_RX;
2271 ap->flags |= MR_PAGE_RX;
2273 ap->state = ANEG_STATE_COMPLETE_ACK;
2274 ret = ANEG_TIMER_ENAB;
2277 case ANEG_STATE_COMPLETE_ACK:
/* Partner stopped sending configs — restart from AN_ENABLE. */
2278 if (ap->ability_match != 0 &&
2279 ap->rxconfig == 0) {
2280 ap->state = ANEG_STATE_AN_ENABLE;
2283 delta = ap->cur_time - ap->link_time;
2284 if (delta > ANEG_STATE_SETTLE_TIME) {
2285 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2286 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2288 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2289 !(ap->flags & MR_NP_RX)) {
2290 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2298 case ANEG_STATE_IDLE_DETECT_INIT:
2299 ap->link_time = ap->cur_time;
/* Stop sending config words and wait for idle. */
2300 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2301 tw32_f(MAC_MODE, tp->mac_mode);
2304 ap->state = ANEG_STATE_IDLE_DETECT;
2305 ret = ANEG_TIMER_ENAB;
2308 case ANEG_STATE_IDLE_DETECT:
2309 if (ap->ability_match != 0 &&
2310 ap->rxconfig == 0) {
2311 ap->state = ANEG_STATE_AN_ENABLE;
2314 delta = ap->cur_time - ap->link_time;
2315 if (delta > ANEG_STATE_SETTLE_TIME) {
2316 /* XXX another gem from the Broadcom driver :( */
2317 ap->state = ANEG_STATE_LINK_OK;
2321 case ANEG_STATE_LINK_OK:
2322 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2326 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2327 /* ??? unimplemented */
2330 case ANEG_STATE_NEXT_PAGE_WAIT:
2331 /* ??? unimplemented */
/* Run the software fiber autonegotiation to completion: initialize a
 * tg3_fiber_aneginfo, crank tg3_fiber_aneg_smachine() for up to 195000
 * ticks, then stop sending configs.  The final MR_* flags are returned
 * through *flags; the function's return value is non-zero on a
 * successful negotiation (AN complete, link OK, partner full-duplex).
 */
2342 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2345 struct tg3_fiber_aneginfo aninfo;
2346 int status = ANEG_FAILED;
/* Quiesce the TX config word, then re-enable config sending in GMII
 * port mode for the duration of the negotiation.
 */
2350 tw32_f(MAC_TX_AUTO_NEG, 0);
2352 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2353 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2356 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2359 memset(&aninfo, 0, sizeof(aninfo));
2360 aninfo.flags |= MR_AN_ENABLE;
2361 aninfo.state = ANEG_STATE_UNKNOWN;
2362 aninfo.cur_time = 0;
2364 while (++tick < 195000) {
2365 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2366 if (status == ANEG_DONE || status == ANEG_FAILED)
2372 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2373 tw32_f(MAC_MODE, tp->mac_mode);
2376 *flags = aninfo.flags;
2378 if (status == ANEG_DONE &&
2379 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2380 MR_LP_ADV_FULL_DUPLEX)))
/* Initialize the external BCM8002 SERDES PHY via raw register writes
 * (register numbers/values come from the vendor's init sequence).
 * Skipped when the device is already initialized and in sync, to avoid
 * disturbing a working link.  The bare for-loops are crude udelay-style
 * waits for reset completion and signal stabilization.
 */
2386 static void tg3_init_bcm8002(struct tg3 *tp)
2388 u32 mac_status = tr32(MAC_STATUS);
2391 /* Reset when initting first time or we have a link. */
2392 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2393 !(mac_status & MAC_STATUS_PCS_SYNCED))
2396 /* Set PLL lock range. */
2397 tg3_writephy(tp, 0x16, 0x8007);
2400 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2402 /* Wait for reset to complete. */
2403 /* XXX schedule_timeout() ... */
2404 for (i = 0; i < 500; i++)
2407 /* Config mode; select PMA/Ch 1 regs. */
2408 tg3_writephy(tp, 0x10, 0x8411);
2410 /* Enable auto-lock and comdet, select txclk for tx. */
2411 tg3_writephy(tp, 0x11, 0x0a10);
2413 tg3_writephy(tp, 0x18, 0x00a0);
2414 tg3_writephy(tp, 0x16, 0x41ff);
2416 /* Assert and deassert POR. */
2417 tg3_writephy(tp, 0x13, 0x0400);
2419 tg3_writephy(tp, 0x13, 0x0000);
2421 tg3_writephy(tp, 0x11, 0x0a50);
2423 tg3_writephy(tp, 0x11, 0x0a10);
2425 /* Wait for signal to stabilize */
2426 /* XXX schedule_timeout() ... */
2427 for (i = 0; i < 15000; i++)
2430 /* Deselect the channel register so we can read the PHYID
2433 tg3_writephy(tp, 0x10, 0x8011);
/* Fiber link setup using the hardware SG_DIG autonegotiation engine.
 * Handles both forced mode (disable the AN engine, declare link up on
 * PCS sync) and autoneg mode (program the expected SG_DIG_CTRL word,
 * poll SG_DIG_STATUS ~200ms for completion, decode the partner's pause
 * bits, with a parallel-detection fallback).  The serdes_cfg dance is
 * a 5704 A0/A1 workaround.  Returns 1 if the link came up, else 0.
 * NOTE(review): sampled excerpt — some statements are not shown.
 */
2436 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2438 u32 sg_dig_ctrl, sg_dig_status;
2439 u32 serdes_cfg, expected_sg_dig_ctrl;
2440 int workaround, port_a;
2441 int current_link_up;
2444 expected_sg_dig_ctrl = 0;
2447 current_link_up = 0;
2449 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2450 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2452 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2455 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2456 /* preserve bits 20-23 for voltage regulator */
2457 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2460 sg_dig_ctrl = tr32(SG_DIG_CTRL);
/* Forced mode: if the AN engine (bit 31) is on, turn it off. */
2462 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2463 if (sg_dig_ctrl & (1 << 31)) {
2465 u32 val = serdes_cfg;
2471 tw32_f(MAC_SERDES_CFG, val);
2473 tw32_f(SG_DIG_CTRL, 0x01388400);
2475 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2476 tg3_setup_flow_control(tp, 0, 0);
2477 current_link_up = 1;
2482 /* Want auto-negotiation. */
2483 expected_sg_dig_ctrl = 0x81388400;
2485 /* Pause capability */
2486 expected_sg_dig_ctrl |= (1 << 11);
2488 /* Asymettric pause */
2489 expected_sg_dig_ctrl |= (1 << 12);
2491 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2494 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2495 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2497 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2499 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2500 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2501 MAC_STATUS_SIGNAL_DET)) {
2504 /* Giver time to negotiate (~200ms) */
2505 for (i = 0; i < 40000; i++) {
2506 sg_dig_status = tr32(SG_DIG_STATUS);
2507 if (sg_dig_status & (0x3))
2511 mac_status = tr32(MAC_STATUS);
/* AN completed (bit 1) with PCS sync: resolve pause from the
 * partner bits (19=sym, 20=asym) and declare the link up.
 */
2513 if ((sg_dig_status & (1 << 1)) &&
2514 (mac_status & MAC_STATUS_PCS_SYNCED)) {
2515 u32 local_adv, remote_adv;
2517 local_adv = ADVERTISE_PAUSE_CAP;
2519 if (sg_dig_status & (1 << 19))
2520 remote_adv |= LPA_PAUSE_CAP;
2521 if (sg_dig_status & (1 << 20))
2522 remote_adv |= LPA_PAUSE_ASYM;
2524 tg3_setup_flow_control(tp, local_adv, remote_adv);
2525 current_link_up = 1;
2526 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2527 } else if (!(sg_dig_status & (1 << 1))) {
2528 if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2529 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2532 u32 val = serdes_cfg;
2539 tw32_f(MAC_SERDES_CFG, val);
2542 tw32_f(SG_DIG_CTRL, 0x01388400);
2545 /* Link parallel detection - link is up */
2546 /* only if we have PCS_SYNC and not */
2547 /* receiving config code words */
2548 mac_status = tr32(MAC_STATUS);
2549 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2550 !(mac_status & MAC_STATUS_RCVD_CFG)) {
2551 tg3_setup_flow_control(tp, 0, 0);
2552 current_link_up = 1;
2554 goto restart_autoneg;
2560 return current_link_up;
/* Fiber link setup without the hardware AN engine.  Requires PCS sync.
 * With autoneg enabled it runs the software state machine
 * (fiber_autoneg), resolves flow control from the returned MR_LP_ADV_*
 * pause flags, then waits for the sync/config-changed status to settle;
 * with autoneg disabled it simply forces a 1000FD link.  Returns 1 if
 * the link is up, else 0.
 */
2563 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2565 int current_link_up = 0;
2567 if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2568 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2572 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2576 if (fiber_autoneg(tp, &flags)) {
2577 u32 local_adv, remote_adv;
2579 local_adv = ADVERTISE_PAUSE_CAP;
2581 if (flags & MR_LP_ADV_SYM_PAUSE)
2582 remote_adv |= LPA_PAUSE_CAP;
2583 if (flags & MR_LP_ADV_ASYM_PAUSE)
2584 remote_adv |= LPA_PAUSE_ASYM;
2586 tg3_setup_flow_control(tp, local_adv, remote_adv);
2588 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2589 current_link_up = 1;
/* Let the latched sync/config-changed bits settle (up to 30 tries). */
2591 for (i = 0; i < 30; i++) {
2594 (MAC_STATUS_SYNC_CHANGED |
2595 MAC_STATUS_CFG_CHANGED));
2597 if ((tr32(MAC_STATUS) &
2598 (MAC_STATUS_SYNC_CHANGED |
2599 MAC_STATUS_CFG_CHANGED)) == 0)
/* Parallel detection: synced and no config words means link up. */
2603 mac_status = tr32(MAC_STATUS);
2604 if (current_link_up == 0 &&
2605 (mac_status & MAC_STATUS_PCS_SYNCED) &&
2606 !(mac_status & MAC_STATUS_RCVD_CFG))
2607 current_link_up = 1;
2609 /* Forcing 1000FD link up. */
2610 current_link_up = 1;
2611 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2613 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2618 return current_link_up;
2621 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2624 u16 orig_active_speed;
2625 u8 orig_active_duplex;
2627 int current_link_up;
2631 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2632 TG3_FLAG_TX_PAUSE));
2633 orig_active_speed = tp->link_config.active_speed;
2634 orig_active_duplex = tp->link_config.active_duplex;
2636 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2637 netif_carrier_ok(tp->dev) &&
2638 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2639 mac_status = tr32(MAC_STATUS);
2640 mac_status &= (MAC_STATUS_PCS_SYNCED |
2641 MAC_STATUS_SIGNAL_DET |
2642 MAC_STATUS_CFG_CHANGED |
2643 MAC_STATUS_RCVD_CFG);
2644 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2645 MAC_STATUS_SIGNAL_DET)) {
2646 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2647 MAC_STATUS_CFG_CHANGED));
2652 tw32_f(MAC_TX_AUTO_NEG, 0);
2654 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2655 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2656 tw32_f(MAC_MODE, tp->mac_mode);
2659 if (tp->phy_id == PHY_ID_BCM8002)
2660 tg3_init_bcm8002(tp);
2662 /* Enable link change event even when serdes polling. */
2663 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2666 current_link_up = 0;
2667 mac_status = tr32(MAC_STATUS);
2669 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2670 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2672 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2674 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2675 tw32_f(MAC_MODE, tp->mac_mode);
2678 tp->hw_status->status =
2679 (SD_STATUS_UPDATED |
2680 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2682 for (i = 0; i < 100; i++) {
2683 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2684 MAC_STATUS_CFG_CHANGED));
2686 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2687 MAC_STATUS_CFG_CHANGED)) == 0)
2691 mac_status = tr32(MAC_STATUS);
2692 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2693 current_link_up = 0;
2694 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2695 tw32_f(MAC_MODE, (tp->mac_mode |
2696 MAC_MODE_SEND_CONFIGS));
2698 tw32_f(MAC_MODE, tp->mac_mode);
2702 if (current_link_up == 1) {
2703 tp->link_config.active_speed = SPEED_1000;
2704 tp->link_config.active_duplex = DUPLEX_FULL;
2705 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2706 LED_CTRL_LNKLED_OVERRIDE |
2707 LED_CTRL_1000MBPS_ON));
2709 tp->link_config.active_speed = SPEED_INVALID;
2710 tp->link_config.active_duplex = DUPLEX_INVALID;
2711 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2712 LED_CTRL_LNKLED_OVERRIDE |
2713 LED_CTRL_TRAFFIC_OVERRIDE));
2716 if (current_link_up != netif_carrier_ok(tp->dev)) {
2717 if (current_link_up)
2718 netif_carrier_on(tp->dev);
2720 netif_carrier_off(tp->dev);
2721 tg3_link_report(tp);
2724 tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2726 if (orig_pause_cfg != now_pause_cfg ||
2727 orig_active_speed != tp->link_config.active_speed ||
2728 orig_active_duplex != tp->link_config.active_duplex)
2729 tg3_link_report(tp);
2735 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2737 int current_link_up, err = 0;
2742 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2743 tw32_f(MAC_MODE, tp->mac_mode);
2749 (MAC_STATUS_SYNC_CHANGED |
2750 MAC_STATUS_CFG_CHANGED |
2751 MAC_STATUS_MI_COMPLETION |
2752 MAC_STATUS_LNKSTATE_CHANGED));
2758 current_link_up = 0;
2759 current_speed = SPEED_INVALID;
2760 current_duplex = DUPLEX_INVALID;
2762 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2763 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2764 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2765 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2766 bmsr |= BMSR_LSTATUS;
2768 bmsr &= ~BMSR_LSTATUS;
2771 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2773 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2774 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2775 /* do nothing, just check for link up at the end */
2776 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2779 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2780 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2781 ADVERTISE_1000XPAUSE |
2782 ADVERTISE_1000XPSE_ASYM |
2785 /* Always advertise symmetric PAUSE just like copper */
2786 new_adv |= ADVERTISE_1000XPAUSE;
2788 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2789 new_adv |= ADVERTISE_1000XHALF;
2790 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2791 new_adv |= ADVERTISE_1000XFULL;
2793 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2794 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2795 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2796 tg3_writephy(tp, MII_BMCR, bmcr);
2798 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2799 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2800 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2807 bmcr &= ~BMCR_SPEED1000;
2808 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2810 if (tp->link_config.duplex == DUPLEX_FULL)
2811 new_bmcr |= BMCR_FULLDPLX;
2813 if (new_bmcr != bmcr) {
2814 /* BMCR_SPEED1000 is a reserved bit that needs
2815 * to be set on write.
2817 new_bmcr |= BMCR_SPEED1000;
2819 /* Force a linkdown */
2820 if (netif_carrier_ok(tp->dev)) {
2823 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2824 adv &= ~(ADVERTISE_1000XFULL |
2825 ADVERTISE_1000XHALF |
2827 tg3_writephy(tp, MII_ADVERTISE, adv);
2828 tg3_writephy(tp, MII_BMCR, bmcr |
2832 netif_carrier_off(tp->dev);
2834 tg3_writephy(tp, MII_BMCR, new_bmcr);
2836 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2837 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2838 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2840 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2841 bmsr |= BMSR_LSTATUS;
2843 bmsr &= ~BMSR_LSTATUS;
2845 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2849 if (bmsr & BMSR_LSTATUS) {
2850 current_speed = SPEED_1000;
2851 current_link_up = 1;
2852 if (bmcr & BMCR_FULLDPLX)
2853 current_duplex = DUPLEX_FULL;
2855 current_duplex = DUPLEX_HALF;
2857 if (bmcr & BMCR_ANENABLE) {
2858 u32 local_adv, remote_adv, common;
2860 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
2861 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
2862 common = local_adv & remote_adv;
2863 if (common & (ADVERTISE_1000XHALF |
2864 ADVERTISE_1000XFULL)) {
2865 if (common & ADVERTISE_1000XFULL)
2866 current_duplex = DUPLEX_FULL;
2868 current_duplex = DUPLEX_HALF;
2870 tg3_setup_flow_control(tp, local_adv,
2874 current_link_up = 0;
2878 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2879 if (tp->link_config.active_duplex == DUPLEX_HALF)
2880 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2882 tw32_f(MAC_MODE, tp->mac_mode);
2885 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2887 tp->link_config.active_speed = current_speed;
2888 tp->link_config.active_duplex = current_duplex;
2890 if (current_link_up != netif_carrier_ok(tp->dev)) {
2891 if (current_link_up)
2892 netif_carrier_on(tp->dev);
2894 netif_carrier_off(tp->dev);
2895 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2897 tg3_link_report(tp);
2902 static void tg3_serdes_parallel_detect(struct tg3 *tp)
2904 if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
2905 /* Give autoneg time to complete. */
2906 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2909 if (!netif_carrier_ok(tp->dev) &&
2910 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2913 tg3_readphy(tp, MII_BMCR, &bmcr);
2914 if (bmcr & BMCR_ANENABLE) {
2917 /* Select shadow register 0x1f */
2918 tg3_writephy(tp, 0x1c, 0x7c00);
2919 tg3_readphy(tp, 0x1c, &phy1);
2921 /* Select expansion interrupt status register */
2922 tg3_writephy(tp, 0x17, 0x0f01);
2923 tg3_readphy(tp, 0x15, &phy2);
2924 tg3_readphy(tp, 0x15, &phy2);
2926 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
2927 /* We have signal detect and not receiving
2928 * config code words, link is up by parallel
2932 bmcr &= ~BMCR_ANENABLE;
2933 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
2934 tg3_writephy(tp, MII_BMCR, bmcr);
2935 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
2939 else if (netif_carrier_ok(tp->dev) &&
2940 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
2941 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2944 /* Select expansion interrupt status register */
2945 tg3_writephy(tp, 0x17, 0x0f01);
2946 tg3_readphy(tp, 0x15, &phy2);
2950 /* Config code words received, turn on autoneg. */
2951 tg3_readphy(tp, MII_BMCR, &bmcr);
2952 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
2954 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2960 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2964 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2965 err = tg3_setup_fiber_phy(tp, force_reset);
2966 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2967 err = tg3_setup_fiber_mii_phy(tp, force_reset);
2969 err = tg3_setup_copper_phy(tp, force_reset);
2972 if (tp->link_config.active_speed == SPEED_1000 &&
2973 tp->link_config.active_duplex == DUPLEX_HALF)
2974 tw32(MAC_TX_LENGTHS,
2975 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2976 (6 << TX_LENGTHS_IPG_SHIFT) |
2977 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2979 tw32(MAC_TX_LENGTHS,
2980 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2981 (6 << TX_LENGTHS_IPG_SHIFT) |
2982 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2984 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2985 if (netif_carrier_ok(tp->dev)) {
2986 tw32(HOSTCC_STAT_COAL_TICKS,
2987 tp->coal.stats_block_coalesce_usecs);
2989 tw32(HOSTCC_STAT_COAL_TICKS, 0);
2996 /* This is called whenever we suspect that the system chipset is re-
2997 * ordering the sequence of MMIO to the tx send mailbox. The symptom
2998 * is bogus tx completions. We try to recover by setting the
2999 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
3002 static void tg3_tx_recover(struct tg3 *tp)
3004 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
3005 tp->write32_tx_mbox == tg3_write_indirect_mbox);
3007 printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
3008 "mapped I/O cycles to the network device, attempting to "
3009 "recover. Please report the problem to the driver maintainer "
3010 "and include system chipset information.\n", tp->dev->name);
3012 spin_lock(&tp->lock);
3013 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
3014 spin_unlock(&tp->lock);
3017 static inline u32 tg3_tx_avail(struct tg3 *tp)
3020 return (tp->tx_pending -
3021 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3024 /* Tigon3 never reports partial packet sends. So we do not
3025 * need special logic to handle SKBs that have not had all
3026 * of their frags sent yet, like SunGEM does.
3028 static void tg3_tx(struct tg3 *tp)
3030 u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
3031 u32 sw_idx = tp->tx_cons;
3033 while (sw_idx != hw_idx) {
3034 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
3035 struct sk_buff *skb = ri->skb;
3038 if (unlikely(skb == NULL)) {
3043 pci_unmap_single(tp->pdev,
3044 pci_unmap_addr(ri, mapping),
3050 sw_idx = NEXT_TX(sw_idx);
3052 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3053 ri = &tp->tx_buffers[sw_idx];
3054 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
3057 pci_unmap_page(tp->pdev,
3058 pci_unmap_addr(ri, mapping),
3059 skb_shinfo(skb)->frags[i].size,
3062 sw_idx = NEXT_TX(sw_idx);
3067 if (unlikely(tx_bug)) {
3073 tp->tx_cons = sw_idx;
3075 /* Need to make the tx_cons update visible to tg3_start_xmit()
3076 * before checking for netif_queue_stopped(). Without the
3077 * memory barrier, there is a small possibility that tg3_start_xmit()
3078 * will miss it and cause the queue to be stopped forever.
3082 if (unlikely(netif_queue_stopped(tp->dev) &&
3083 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))) {
3084 netif_tx_lock(tp->dev);
3085 if (netif_queue_stopped(tp->dev) &&
3086 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))
3087 netif_wake_queue(tp->dev);
3088 netif_tx_unlock(tp->dev);
3092 /* Returns size of skb allocated or < 0 on error.
3094 * We only need to fill in the address because the other members
3095 * of the RX descriptor are invariant, see tg3_init_rings.
3097 * Note the purposeful assymetry of cpu vs. chip accesses. For
3098 * posting buffers we only dirty the first cache line of the RX
3099 * descriptor (containing the address). Whereas for the RX status
3100 * buffers the cpu only reads the last cacheline of the RX descriptor
3101 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3103 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3104 int src_idx, u32 dest_idx_unmasked)
3106 struct tg3_rx_buffer_desc *desc;
3107 struct ring_info *map, *src_map;
3108 struct sk_buff *skb;
3110 int skb_size, dest_idx;
3113 switch (opaque_key) {
3114 case RXD_OPAQUE_RING_STD:
3115 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3116 desc = &tp->rx_std[dest_idx];
3117 map = &tp->rx_std_buffers[dest_idx];
3119 src_map = &tp->rx_std_buffers[src_idx];
3120 skb_size = tp->rx_pkt_buf_sz;
3123 case RXD_OPAQUE_RING_JUMBO:
3124 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3125 desc = &tp->rx_jumbo[dest_idx];
3126 map = &tp->rx_jumbo_buffers[dest_idx];
3128 src_map = &tp->rx_jumbo_buffers[src_idx];
3129 skb_size = RX_JUMBO_PKT_BUF_SZ;
3136 /* Do not overwrite any of the map or rp information
3137 * until we are sure we can commit to a new buffer.
3139 * Callers depend upon this behavior and assume that
3140 * we leave everything unchanged if we fail.
3142 skb = netdev_alloc_skb(tp->dev, skb_size);
3146 skb_reserve(skb, tp->rx_offset);
3148 mapping = pci_map_single(tp->pdev, skb->data,
3149 skb_size - tp->rx_offset,
3150 PCI_DMA_FROMDEVICE);
3153 pci_unmap_addr_set(map, mapping, mapping);
3155 if (src_map != NULL)
3156 src_map->skb = NULL;
3158 desc->addr_hi = ((u64)mapping >> 32);
3159 desc->addr_lo = ((u64)mapping & 0xffffffff);
3164 /* We only need to move over in the address because the other
3165 * members of the RX descriptor are invariant. See notes above
3166 * tg3_alloc_rx_skb for full details.
3168 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3169 int src_idx, u32 dest_idx_unmasked)
3171 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3172 struct ring_info *src_map, *dest_map;
3175 switch (opaque_key) {
3176 case RXD_OPAQUE_RING_STD:
3177 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3178 dest_desc = &tp->rx_std[dest_idx];
3179 dest_map = &tp->rx_std_buffers[dest_idx];
3180 src_desc = &tp->rx_std[src_idx];
3181 src_map = &tp->rx_std_buffers[src_idx];
3184 case RXD_OPAQUE_RING_JUMBO:
3185 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3186 dest_desc = &tp->rx_jumbo[dest_idx];
3187 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3188 src_desc = &tp->rx_jumbo[src_idx];
3189 src_map = &tp->rx_jumbo_buffers[src_idx];
3196 dest_map->skb = src_map->skb;
3197 pci_unmap_addr_set(dest_map, mapping,
3198 pci_unmap_addr(src_map, mapping));
3199 dest_desc->addr_hi = src_desc->addr_hi;
3200 dest_desc->addr_lo = src_desc->addr_lo;
3202 src_map->skb = NULL;
3205 #if TG3_VLAN_TAG_USED
3206 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3208 return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3212 /* The RX ring scheme is composed of multiple rings which post fresh
3213 * buffers to the chip, and one special ring the chip uses to report
3214 * status back to the host.
3216 * The special ring reports the status of received packets to the
3217 * host. The chip does not write into the original descriptor the
3218 * RX buffer was obtained from. The chip simply takes the original
3219 * descriptor as provided by the host, updates the status and length
3220 * field, then writes this into the next status ring entry.
3222 * Each ring the host uses to post buffers to the chip is described
3223 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
3224 * it is first placed into the on-chip ram. When the packet's length
3225 * is known, it walks down the TG3_BDINFO entries to select the ring.
3226 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
3227 * which is within the range of the new packet's length is chosen.
3229 * The "separate ring for rx status" scheme may sound queer, but it makes
3230 * sense from a cache coherency perspective. If only the host writes
3231 * to the buffer post rings, and only the chip writes to the rx status
3232 * rings, then cache lines never move beyond shared-modified state.
3233 * If both the host and chip were to write into the same ring, cache line
3234 * eviction could occur since both entities want it in an exclusive state.
3236 static int tg3_rx(struct tg3 *tp, int budget)
3238 u32 work_mask, rx_std_posted = 0;
3239 u32 sw_idx = tp->rx_rcb_ptr;
3243 hw_idx = tp->hw_status->idx[0].rx_producer;
3245 * We need to order the read of hw_idx and the read of
3246 * the opaque cookie.
3251 while (sw_idx != hw_idx && budget > 0) {
3252 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3254 struct sk_buff *skb;
3255 dma_addr_t dma_addr;
3256 u32 opaque_key, desc_idx, *post_ptr;
3258 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3259 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3260 if (opaque_key == RXD_OPAQUE_RING_STD) {
3261 dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3263 skb = tp->rx_std_buffers[desc_idx].skb;
3264 post_ptr = &tp->rx_std_ptr;
3266 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3267 dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3269 skb = tp->rx_jumbo_buffers[desc_idx].skb;
3270 post_ptr = &tp->rx_jumbo_ptr;
3273 goto next_pkt_nopost;
3276 work_mask |= opaque_key;
3278 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3279 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3281 tg3_recycle_rx(tp, opaque_key,
3282 desc_idx, *post_ptr);
3284 /* Other statistics kept track of by card. */
3285 tp->net_stats.rx_dropped++;
3289 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3291 if (len > RX_COPY_THRESHOLD
3292 && tp->rx_offset == 2
3293 /* rx_offset != 2 iff this is a 5701 card running
3294 * in PCI-X mode [see tg3_get_invariants()] */
3298 skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3299 desc_idx, *post_ptr);
3303 pci_unmap_single(tp->pdev, dma_addr,
3304 skb_size - tp->rx_offset,
3305 PCI_DMA_FROMDEVICE);
3309 struct sk_buff *copy_skb;
3311 tg3_recycle_rx(tp, opaque_key,
3312 desc_idx, *post_ptr);
3314 copy_skb = netdev_alloc_skb(tp->dev, len + 2);
3315 if (copy_skb == NULL)
3316 goto drop_it_no_recycle;
3318 skb_reserve(copy_skb, 2);
3319 skb_put(copy_skb, len);
3320 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3321 memcpy(copy_skb->data, skb->data, len);
3322 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3324 /* We'll reuse the original ring buffer. */
3328 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3329 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3330 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3331 >> RXD_TCPCSUM_SHIFT) == 0xffff))
3332 skb->ip_summed = CHECKSUM_UNNECESSARY;
3334 skb->ip_summed = CHECKSUM_NONE;
3336 skb->protocol = eth_type_trans(skb, tp->dev);
3337 #if TG3_VLAN_TAG_USED
3338 if (tp->vlgrp != NULL &&
3339 desc->type_flags & RXD_FLAG_VLAN) {
3340 tg3_vlan_rx(tp, skb,
3341 desc->err_vlan & RXD_VLAN_MASK);
3344 netif_receive_skb(skb);
3346 tp->dev->last_rx = jiffies;
3353 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
3354 u32 idx = *post_ptr % TG3_RX_RING_SIZE;
3356 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
3357 TG3_64BIT_REG_LOW, idx);
3358 work_mask &= ~RXD_OPAQUE_RING_STD;
3363 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
3365 /* Refresh hw_idx to see if there is new work */
3366 if (sw_idx == hw_idx) {
3367 hw_idx = tp->hw_status->idx[0].rx_producer;
3372 /* ACK the status ring. */
3373 tp->rx_rcb_ptr = sw_idx;
3374 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3376 /* Some platforms need to sync memory here */
3379 /* Refill RX ring(s). */
3380 if (work_mask & RXD_OPAQUE_RING_STD) {
3381 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3382 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3385 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3386 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3387 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3395 static int tg3_poll(struct net_device *netdev, int *budget)
3397 struct tg3 *tp = netdev_priv(netdev);
3398 struct tg3_hw_status *sblk = tp->hw_status;
3401 /* handle link change and other phy events */
3402 if (!(tp->tg3_flags &
3403 (TG3_FLAG_USE_LINKCHG_REG |
3404 TG3_FLAG_POLL_SERDES))) {
3405 if (sblk->status & SD_STATUS_LINK_CHG) {
3406 sblk->status = SD_STATUS_UPDATED |
3407 (sblk->status & ~SD_STATUS_LINK_CHG);
3408 spin_lock(&tp->lock);
3409 tg3_setup_phy(tp, 0);
3410 spin_unlock(&tp->lock);
3414 /* run TX completion thread */
3415 if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3417 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) {
3418 netif_rx_complete(netdev);
3419 schedule_work(&tp->reset_task);
3424 /* run RX thread, within the bounds set by NAPI.
3425 * All RX "locking" is done by ensuring outside
3426 * code synchronizes with dev->poll()
3428 if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
3429 int orig_budget = *budget;
3432 if (orig_budget > netdev->quota)
3433 orig_budget = netdev->quota;
3435 work_done = tg3_rx(tp, orig_budget);
3437 *budget -= work_done;
3438 netdev->quota -= work_done;
3441 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
3442 tp->last_tag = sblk->status_tag;
3445 sblk->status &= ~SD_STATUS_UPDATED;
3447 /* if no more work, tell net stack and NIC we're done */
3448 done = !tg3_has_work(tp);
3450 netif_rx_complete(netdev);
3451 tg3_restart_ints(tp);
3454 return (done ? 0 : 1);
3457 static void tg3_irq_quiesce(struct tg3 *tp)
3459 BUG_ON(tp->irq_sync);
3464 #if (LINUX_VERSION_CODE >= 0x2051c)
3465 synchronize_irq(tp->pdev->irq);
3471 static inline int tg3_irq_sync(struct tg3 *tp)
3473 return tp->irq_sync;
3476 /* Fully shutdown all tg3 driver activity elsewhere in the system.
3477 * If irq_sync is non-zero, then the IRQ handler must be synchronized
3478 * with as well. Most of the time, this is not necessary except when
3479 * shutting down the device.
3481 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3484 tg3_irq_quiesce(tp);
3485 spin_lock_bh(&tp->lock);
3488 static inline void tg3_full_unlock(struct tg3 *tp)
3490 spin_unlock_bh(&tp->lock);
3493 /* One-shot MSI handler - Chip automatically disables interrupt
3494 * after sending MSI so driver doesn't have to do it.
3496 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id, struct pt_regs *regs)
3498 struct net_device *dev = dev_id;
3499 struct tg3 *tp = netdev_priv(dev);
3501 prefetch(tp->hw_status);
3502 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3504 if (likely(!tg3_irq_sync(tp)))
3505 netif_rx_schedule(dev); /* schedule NAPI poll */
3510 /* MSI ISR - No need to check for interrupt sharing and no need to
3511 * flush status block and interrupt mailbox. PCI ordering rules
3512 * guarantee that MSI will arrive after the status block.
3514 static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
3516 struct net_device *dev = dev_id;
3517 struct tg3 *tp = netdev_priv(dev);
3519 prefetch(tp->hw_status);
3520 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3522 * Writing any value to intr-mbox-0 clears PCI INTA# and
3523 * chip-internal interrupt pending events.
3524 * Writing non-zero to intr-mbox-0 additional tells the
3525 * NIC to stop sending us irqs, engaging "in-intr-handler"
3528 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3529 if (likely(!tg3_irq_sync(tp)))
3530 netif_rx_schedule(dev); /* schedule NAPI poll */
3532 return IRQ_RETVAL(1);
3535 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3537 struct net_device *dev = dev_id;
3538 struct tg3 *tp = netdev_priv(dev);
3539 struct tg3_hw_status *sblk = tp->hw_status;
3540 unsigned int handled = 1;
3542 /* In INTx mode, it is possible for the interrupt to arrive at
3543 * the CPU before the status block posted prior to the interrupt.
3544 * Reading the PCI State register will confirm whether the
3545 * interrupt is ours and will flush the status block.
3547 if ((sblk->status & SD_STATUS_UPDATED) ||
3548 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3550 * Writing any value to intr-mbox-0 clears PCI INTA# and
3551 * chip-internal interrupt pending events.
3552 * Writing non-zero to intr-mbox-0 additional tells the
3553 * NIC to stop sending us irqs, engaging "in-intr-handler"
3556 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3558 if (tg3_irq_sync(tp))
3560 sblk->status &= ~SD_STATUS_UPDATED;
3561 if (likely(tg3_has_work(tp))) {
3562 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3563 netif_rx_schedule(dev); /* schedule NAPI poll */
3565 /* No work, shared interrupt perhaps? re-enable
3566 * interrupts, and flush that PCI write
3568 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3571 } else { /* shared interrupt */
3575 return IRQ_RETVAL(handled);
3578 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
3580 struct net_device *dev = dev_id;
3581 struct tg3 *tp = netdev_priv(dev);
3582 struct tg3_hw_status *sblk = tp->hw_status;
3583 unsigned int handled = 1;
3585 /* In INTx mode, it is possible for the interrupt to arrive at
3586 * the CPU before the status block posted prior to the interrupt.
3587 * Reading the PCI State register will confirm whether the
3588 * interrupt is ours and will flush the status block.
3590 if ((sblk->status_tag != tp->last_tag) ||
3591 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3593 * writing any value to intr-mbox-0 clears PCI INTA# and
3594 * chip-internal interrupt pending events.
3595 * writing non-zero to intr-mbox-0 additional tells the
3596 * NIC to stop sending us irqs, engaging "in-intr-handler"
3599 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3601 if (tg3_irq_sync(tp))
3603 if (netif_rx_schedule_prep(dev)) {
3604 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3605 /* Update last_tag to mark that this status has been
3606 * seen. Because interrupt may be shared, we may be
3607 * racing with tg3_poll(), so only update last_tag
3608 * if tg3_poll() is not scheduled.
3610 tp->last_tag = sblk->status_tag;
3611 __netif_rx_schedule(dev);
3613 } else { /* shared interrupt */
3617 return IRQ_RETVAL(handled);
3620 /* ISR for interrupt test */
3621 static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3622 struct pt_regs *regs)
3624 struct net_device *dev = dev_id;
3625 struct tg3 *tp = netdev_priv(dev);
3626 struct tg3_hw_status *sblk = tp->hw_status;
3628 if ((sblk->status & SD_STATUS_UPDATED) ||
3629 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3630 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3632 return IRQ_RETVAL(1);
3634 return IRQ_RETVAL(0);
3637 static int tg3_init_hw(struct tg3 *, int);
3638 static int tg3_halt(struct tg3 *, int, int);
3640 /* Restart hardware after configuration changes, self-test, etc.
3641 * Invoked with tp->lock held.
3643 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
3647 err = tg3_init_hw(tp, reset_phy);
3649 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
3650 "aborting.\n", tp->dev->name);
3651 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
3652 tg3_full_unlock(tp);
3653 del_timer_sync(&tp->timer);
3655 netif_poll_enable(tp->dev);
3657 tg3_full_lock(tp, 0);
3662 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
3663 static void tg3_poll_controller(struct net_device *dev)
3665 struct tg3 *tp = netdev_priv(dev);
3667 #if defined(RED_HAT_LINUX_KERNEL) && (LINUX_VERSION_CODE < 0x20600)
3669 tg3_interrupt(tp->pdev->irq, dev, NULL);
3670 if (dev->poll_list.prev) {
3673 tg3_poll(dev, &budget);
3678 tg3_interrupt(tp->pdev->irq, dev, NULL);
3682 static void tg3_reset_task(void *_data)
3684 struct tg3 *tp = _data;
3685 unsigned int restart_timer;
3687 tg3_full_lock(tp, 0);
3688 tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;
3690 if (!netif_running(tp->dev)) {
3691 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3692 tg3_full_unlock(tp);
3696 tg3_full_unlock(tp);
3700 tg3_full_lock(tp, 1);
3702 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3703 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3705 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
3706 tp->write32_tx_mbox = tg3_write32_tx_mbox;
3707 tp->write32_rx_mbox = tg3_write_flush_reg32;
3708 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
3709 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
3712 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3713 if (tg3_init_hw(tp, 1))
3716 tg3_netif_start(tp);
3719 mod_timer(&tp->timer, jiffies + 1);
3722 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3724 tg3_full_unlock(tp);
3727 static void tg3_tx_timeout(struct net_device *dev)
3729 struct tg3 *tp = netdev_priv(dev);
3731 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3734 schedule_work(&tp->reset_task);
3737 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3738 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3740 u32 base = (u32) mapping & 0xffffffff;
3742 return ((base > 0xffffdcc0) &&
3743 (base + len + 8 < base));
3746 /* Test for DMA addresses > 40-bit */
3747 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
3750 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
3751 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
3752 return (((u64) mapping + len) > DMA_40BIT_MASK);
3759 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3761 /* Workaround 4GB and 40-bit hardware DMA bugs. */
3762 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3763 u32 last_plus_one, u32 *start,
3764 u32 base_flags, u32 mss)
3766 struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3767 dma_addr_t new_addr = 0;
3774 /* New SKB is guaranteed to be linear. */
3776 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3778 /* Make sure new skb does not cross any 4G boundaries.
3779 * Drop the packet if it does.
3781 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3783 dev_kfree_skb(new_skb);
3786 tg3_set_txd(tp, entry, new_addr, new_skb->len,
3787 base_flags, 1 | (mss << 1));
3788 *start = NEXT_TX(entry);
3792 /* Now clean up the sw ring entries. */
3794 while (entry != last_plus_one) {
3798 len = skb_headlen(skb);
3800 len = skb_shinfo(skb)->frags[i-1].size;
3801 pci_unmap_single(tp->pdev,
3802 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3803 len, PCI_DMA_TODEVICE);
3805 tp->tx_buffers[entry].skb = new_skb;
3806 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3808 tp->tx_buffers[entry].skb = NULL;
3810 entry = NEXT_TX(entry);
3819 static void tg3_set_txd(struct tg3 *tp, int entry,
3820 dma_addr_t mapping, int len, u32 flags,
3823 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3824 int is_end = (mss_and_is_end & 0x1);
3825 u32 mss = (mss_and_is_end >> 1);
3829 flags |= TXD_FLAG_END;
3830 if (flags & TXD_FLAG_VLAN) {
3831 vlan_tag = flags >> 16;
3834 vlan_tag |= (mss << TXD_MSS_SHIFT);
3836 txd->addr_hi = ((u64) mapping >> 32);
3837 txd->addr_lo = ((u64) mapping & 0xffffffff);
3838 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3839 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3842 /* hard_start_xmit for devices that don't have any bugs and
3843 * support TG3_FLG2_HW_TSO_2 only.
/* Fast-path transmit: maps the skb head and each page fragment onto
 * consecutive tx descriptors, then kicks the NIC's producer mailbox.
 * Returns NETDEV_TX_OK, or NETDEV_TX_BUSY when the ring is full.
 * NOTE(review): several lines are elided in this view.
 */
3845 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3847 struct tg3 *tp = netdev_priv(dev);
3849 u32 len, entry, base_flags, mss;
3851 len = skb_headlen(skb);
3853 /* We are running in BH disabled context with netif_tx_lock
3854 * and TX reclaim runs via tp->poll inside of a software
3855 * interrupt. Furthermore, IRQ processing runs lockless so we have
3856 * no IRQ context deadlocks to worry about either. Rejoice!
/* Ring full: should not happen while the queue is awake, hence the
 * loud error below; stop the queue and push back on the stack. */
3858 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3859 if (!netif_queue_stopped(dev)) {
3860 netif_stop_queue(dev);
3862 /* This is a hard error, log it. */
3863 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3864 "queue awake!\n", dev->name);
3866 return NETDEV_TX_BUSY;
3869 entry = tp->tx_prod;
3871 #if TG3_TSO_SUPPORT != 0
/* TSO setup: compute header lengths and fold them into the MSS field
 * the hardware expects; fails over to a linearized skb if the header
 * is cloned and cannot be expanded. */
3873 if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3874 (mss = skb_shinfo(skb)->gso_size) != 0) {
3875 int tcp_opt_len, ip_tcp_len;
3877 if (skb_header_cloned(skb) &&
3878 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3884 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
3885 mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
3889 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3890 ip_tcp_len = (skb->nh.iph->ihl * 4) +
3891 sizeof(struct tcphdr);
/* Hardware recomputes the IP checksum/total length for each segment. */
3893 skb->nh.iph->check = 0;
3894 skb->nh.iph->tot_len = htons(mss + ip_tcp_len +
3896 mss |= (ip_tcp_len + tcp_opt_len) << 9;
3899 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3900 TXD_FLAG_CPU_POST_DMA);
3902 skb->h.th->check = 0;
3905 else if (skb->ip_summed == CHECKSUM_HW)
3906 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3909 if (skb->ip_summed == CHECKSUM_HW)
3910 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3912 #if TG3_VLAN_TAG_USED
/* VLAN tag travels in the upper 16 bits of base_flags (see tg3_set_txd). */
3913 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3914 base_flags |= (TXD_FLAG_VLAN |
3915 (vlan_tx_tag_get(skb) << 16));
3918 /* Queue skb data, a.k.a. the main skb fragment. */
3919 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3921 tp->tx_buffers[entry].skb = skb;
3922 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
/* END bit set only when there are no page fragments to follow. */
3924 tg3_set_txd(tp, entry, mapping, len, base_flags,
3925 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3927 entry = NEXT_TX(entry);
3929 /* Now loop through additional data fragments, and queue them. */
3930 if (skb_shinfo(skb)->nr_frags > 0) {
3931 unsigned int i, last;
3933 last = skb_shinfo(skb)->nr_frags - 1;
3934 for (i = 0; i <= last; i++) {
3935 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3938 mapping = pci_map_page(tp->pdev,
3941 len, PCI_DMA_TODEVICE);
/* Only the head entry keeps the skb pointer; frags are NULL so tx
 * completion unmaps but does not free them individually. */
3943 tp->tx_buffers[entry].skb = NULL;
3944 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3946 tg3_set_txd(tp, entry, mapping, len,
3947 base_flags, (i == last) | (mss << 1));
3949 entry = NEXT_TX(entry);
3953 /* Some platforms need to sync memory here */
3956 /* Packets are ready, update Tx producer idx local and on card. */
3957 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3959 tp->tx_prod = entry;
/* Stop early when a max-fragment packet could no longer fit; re-wake
 * immediately if reclaim already freed enough descriptors. */
3960 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
3961 netif_stop_queue(dev);
3962 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)
3963 netif_wake_queue(tp->dev);
3966 #if TG3_TSO_SUPPORT != 0
3971 dev->trans_start = jiffies;
3973 return NETDEV_TX_OK;
3977 #if TG3_TSO_SUPPORT != 0
/* Forward declaration: tg3_tso_bug() re-submits each software-built
 * segment through the bug-workaround xmit path below. */
3979 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
3981 /* Use GSO to workaround a rare TSO bug that may be triggered when the
3982 * TSO header is greater than 80 bytes.
3984 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
3986 struct sk_buff *segs, *nskb;
3988 /* Estimate the number of fragments in the worst case */
3989 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
3990 netif_stop_queue(tp->dev);
3991 return NETDEV_TX_BUSY;
/* Segment in software with TSO masked off, then transmit each
 * resulting skb individually. */
3994 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
3995 if (unlikely(IS_ERR(segs)))
3996 goto tg3_tso_bug_end;
4002 tg3_start_xmit_dma_bug(nskb, tp->dev);
4008 return NETDEV_TX_OK;
4013 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
4014 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
/* Like tg3_start_xmit(), but additionally tests every DMA mapping for
 * the 4GB-crossing and 40-bit address hardware bugs; when a mapping
 * would trip a bug the whole packet is re-queued linearized via
 * tigon3_dma_hwbug_workaround().  NOTE(review): lines elided in this
 * view.
 */
4016 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4018 struct tg3 *tp = netdev_priv(dev);
4020 u32 len, entry, base_flags, mss;
4021 int would_hit_hwbug;
4023 len = skb_headlen(skb);
4025 /* We are running in BH disabled context with netif_tx_lock
4026 * and TX reclaim runs via tp->poll inside of a software
4027 * interrupt. Furthermore, IRQ processing runs lockless so we have
4028 * no IRQ context deadlocks to worry about either. Rejoice!
4030 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4031 if (!netif_queue_stopped(dev)) {
4032 netif_stop_queue(dev);
4034 /* This is a hard error, log it. */
4035 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4036 "queue awake!\n", dev->name);
4038 return NETDEV_TX_BUSY;
4041 entry = tp->tx_prod;
4043 if (skb->ip_summed == CHECKSUM_HW)
4044 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4045 #if TG3_TSO_SUPPORT != 0
4047 if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
4048 (mss = skb_shinfo(skb)->gso_size) != 0) {
4049 int tcp_opt_len, ip_tcp_len, hdr_len;
4051 if (skb_header_cloned(skb) &&
4052 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4057 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
4058 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
4060 hdr_len = ip_tcp_len + tcp_opt_len;
/* Rare TSO hardware bug when headers exceed 80 bytes: divert to the
 * software-GSO workaround path. */
4062 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
4063 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_1_BUG))
4064 return (tg3_tso_bug(tp, skb));
4067 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4068 TXD_FLAG_CPU_POST_DMA);
4070 skb->nh.iph->check = 0;
4071 skb->nh.iph->tot_len = htons(mss + hdr_len);
/* HW TSO computes the TCP checksum itself; firmware TSO needs the
 * pseudo-header checksum seeded by the driver. */
4072 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
4073 skb->h.th->check = 0;
4074 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
4078 ~csum_tcpudp_magic(skb->nh.iph->saddr,
/* Encode IP/TCP option lengths where the chip variant expects them:
 * in the mss field (HW TSO / 5705) or in base_flags. */
4083 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
4084 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
4085 if (tcp_opt_len || skb->nh.iph->ihl > 5) {
4088 tsflags = ((skb->nh.iph->ihl - 5) +
4089 (tcp_opt_len >> 2));
4090 mss |= (tsflags << 11);
4093 if (tcp_opt_len || skb->nh.iph->ihl > 5) {
4096 tsflags = ((skb->nh.iph->ihl - 5) +
4097 (tcp_opt_len >> 2));
4098 base_flags |= tsflags << 12;
4105 #if TG3_VLAN_TAG_USED
4106 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4107 base_flags |= (TXD_FLAG_VLAN |
4108 (vlan_tx_tag_get(skb) << 16));
4111 /* Queue skb data, a.k.a. the main skb fragment. */
4112 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4114 tp->tx_buffers[entry].skb = skb;
4115 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4117 would_hit_hwbug = 0;
/* Descriptors are still written normally; the hwbug flag just marks
 * the packet for rewrite after all mappings are known. */
4119 if (tg3_4g_overflow_test(mapping, len))
4120 would_hit_hwbug = 1;
4122 tg3_set_txd(tp, entry, mapping, len, base_flags,
4123 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4125 entry = NEXT_TX(entry);
4127 /* Now loop through additional data fragments, and queue them. */
4128 if (skb_shinfo(skb)->nr_frags > 0) {
4129 unsigned int i, last;
4131 last = skb_shinfo(skb)->nr_frags - 1;
4132 for (i = 0; i <= last; i++) {
4133 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4136 mapping = pci_map_page(tp->pdev,
4139 len, PCI_DMA_TODEVICE);
4141 tp->tx_buffers[entry].skb = NULL;
4142 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4144 if (tg3_4g_overflow_test(mapping, len))
4145 would_hit_hwbug = 1;
4147 if (tg3_40bit_overflow_test(tp, mapping, len))
4148 would_hit_hwbug = 1;
/* Firmware TSO carries the MSS only in the first descriptor; HW TSO
 * repeats it in every descriptor. */
4150 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4151 tg3_set_txd(tp, entry, mapping, len,
4152 base_flags, (i == last)|(mss << 1));
4154 tg3_set_txd(tp, entry, mapping, len,
4155 base_flags, (i == last));
4157 entry = NEXT_TX(entry);
/* Rewind to the packet's first descriptor and rewrite the whole
 * packet as one bounce-buffered descriptor. */
4161 if (would_hit_hwbug) {
4162 u32 last_plus_one = entry;
4165 start = entry - 1 - skb_shinfo(skb)->nr_frags;
4166 start &= (TG3_TX_RING_SIZE - 1);
4168 /* If the workaround fails due to memory/mapping
4169 * failure, silently drop this packet.
4171 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
4172 &start, base_flags, mss))
4178 /* Some platforms need to sync memory here */
4181 /* Packets are ready, update Tx producer idx local and on card. */
4182 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4184 tp->tx_prod = entry;
4185 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4186 netif_stop_queue(dev);
4187 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)
4188 netif_wake_queue(tp->dev);
4194 dev->trans_start = jiffies;
4196 return NETDEV_TX_OK;
/* Record a new MTU and adjust jumbo-frame related flags.  Jumbo MTUs
 * enable the jumbo rx ring; on 5780-class chips they also force TSO
 * off (the two are mutually exclusive there).  Does not touch the
 * hardware itself -- the caller resets/restarts the chip as needed.
 */
4199 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4204 if (new_mtu > ETH_DATA_LEN) {
4205 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4206 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4207 #if TG3_TSO_SUPPORT != 0
4208 ethtool_op_set_tso(dev, 0);
4212 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
/* Standard MTU: restore TSO capability (5780 class) and disable the
 * jumbo ring. */
4214 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4215 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4216 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
/* net_device MTU-change handler.  Validates the requested MTU, then --
 * if the interface is up -- halts the chip, applies the new MTU and
 * restarts the hardware under the full lock.
 */
4220 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4222 struct tg3 *tp = netdev_priv(dev);
4225 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
/* Interface down: just record the MTU; it takes effect on next open. */
4228 if (!netif_running(dev)) {
4229 /* We'll just catch it later when the
4232 tg3_set_mtu(dev, tp, new_mtu);
4238 tg3_full_lock(tp, 1);
4240 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4242 tg3_set_mtu(dev, tp, new_mtu);
4244 err = tg3_restart_hw(tp, 0);
4247 tg3_netif_start(tp);
4249 tg3_full_unlock(tp);
4254 /* Free up pending packets in all rx/tx rings.
4256 * The chip has been shut down and the driver detached from
4257 * the networking, so no interrupts or new tx packets will
4258 * end up in the driver. tp->{tx,}lock is not held and we are not
4259 * in an interrupt context and thus may sleep.
4261 static void tg3_free_rings(struct tg3 *tp)
4263 struct ring_info *rxp;
/* Standard rx ring: unmap and free every posted buffer. */
4266 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4267 rxp = &tp->rx_std_buffers[i];
4269 if (rxp->skb == NULL)
4271 pci_unmap_single(tp->pdev,
4272 pci_unmap_addr(rxp, mapping),
4273 tp->rx_pkt_buf_sz - tp->rx_offset,
4274 PCI_DMA_FROMDEVICE);
4275 dev_kfree_skb_any(rxp->skb);
/* Jumbo rx ring: same cleanup with the jumbo buffer size. */
4279 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4280 rxp = &tp->rx_jumbo_buffers[i];
4282 if (rxp->skb == NULL)
4284 pci_unmap_single(tp->pdev,
4285 pci_unmap_addr(rxp, mapping),
4286 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
4287 PCI_DMA_FROMDEVICE);
4288 dev_kfree_skb_any(rxp->skb);
/* Tx ring: each packet occupies one entry for the linear head plus
 * one per page fragment; walk them all before freeing the skb once. */
4292 for (i = 0; i < TG3_TX_RING_SIZE; ) {
4293 struct tx_ring_info *txp;
4294 struct sk_buff *skb;
4297 txp = &tp->tx_buffers[i];
4305 pci_unmap_single(tp->pdev,
4306 pci_unmap_addr(txp, mapping),
4313 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
4314 txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
4315 pci_unmap_page(tp->pdev,
4316 pci_unmap_addr(txp, mapping),
4317 skb_shinfo(skb)->frags[j].size,
4322 dev_kfree_skb_any(skb);
4326 /* Initialize tx/rx rings for packet processing.
4328 * The chip has been shut down and the driver detached from
4329 * the networking, so no interrupts or new tx packets will
4330 * end up in the driver. tp->{tx,}lock are held and thus
4333 static int tg3_init_rings(struct tg3 *tp)
4337 /* Free up all the SKBs. */
4340 /* Zero out all descriptors. */
4341 memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
4342 memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
4343 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
4344 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
/* 5780-class chips use the standard ring with jumbo-sized buffers
 * instead of a separate jumbo ring. */
4346 tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
4347 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
4348 (tp->dev->mtu > ETH_DATA_LEN))
4349 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
4351 /* Initialize invariants of the rings, we only set this
4352 * stuff once. This works because the card does not
4353 * write into the rx buffer posting rings.
4355 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4356 struct tg3_rx_buffer_desc *rxd;
4358 rxd = &tp->rx_std[i];
4359 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
4361 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
/* Opaque field lets the completion path identify ring + index. */
4362 rxd->opaque = (RXD_OPAQUE_RING_STD |
4363 (i << RXD_OPAQUE_INDEX_SHIFT));
4366 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4367 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4368 struct tg3_rx_buffer_desc *rxd;
4370 rxd = &tp->rx_jumbo[i];
4371 rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
4373 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
4375 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
4376 (i << RXD_OPAQUE_INDEX_SHIFT));
4380 /* Now allocate fresh SKBs for each rx ring. */
/* Allocation failures are tolerated: run with a smaller ring rather
 * than failing the bring-up outright. */
4381 for (i = 0; i < tp->rx_pending; i++) {
4382 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
4383 printk(KERN_WARNING PFX
4384 "%s: Using a smaller RX standard ring, "
4385 "only %d out of %d buffers were allocated "
4387 tp->dev->name, i, tp->rx_pending);
4395 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4396 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4397 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
4399 printk(KERN_WARNING PFX
4400 "%s: Using a smaller RX jumbo ring, "
4401 "only %d out of %d buffers were "
4402 "allocated successfully.\n",
4403 tp->dev->name, i, tp->rx_jumbo_pending);
4408 tp->rx_jumbo_pending = i;
/* Release all DMA-consistent ring/status/stats memory and the sw
 * ring-info array allocated by tg3_alloc_consistent().  Pointers are
 * NULLed so a second call is harmless.
4417 * Must not be invoked with interrupt sources disabled and
4418 * the hardware shut down.
4420 static void tg3_free_consistent(struct tg3 *tp)
/* One kmalloc covers std/jumbo rx info and tx info (see allocation). */
4422 kfree(tp->rx_std_buffers);
4423 tp->rx_std_buffers = NULL;
4425 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4426 tp->rx_std, tp->rx_std_mapping);
4430 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4431 tp->rx_jumbo, tp->rx_jumbo_mapping);
4432 tp->rx_jumbo = NULL;
4435 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4436 tp->rx_rcb, tp->rx_rcb_mapping);
4440 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4441 tp->tx_ring, tp->tx_desc_mapping);
4444 if (tp->hw_status) {
4445 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4446 tp->hw_status, tp->status_mapping);
4447 tp->hw_status = NULL;
4450 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4451 tp->hw_stats, tp->stats_mapping);
4452 tp->hw_stats = NULL;
/* Allocate the sw ring-info array (one kmalloc carved into std rx,
 * jumbo rx and tx sections) and all DMA-consistent rings plus the
 * hardware status block and statistics block.  On any failure, frees
 * everything allocated so far via tg3_free_consistent() and returns
 * an error (elided here).
4457 * Must not be invoked with interrupt sources disabled and
4458 * the hardware shut down. Can sleep.
4460 static int tg3_alloc_consistent(struct tg3 *tp)
4462 tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
4464 TG3_RX_JUMBO_RING_SIZE)) +
4465 (sizeof(struct tx_ring_info) *
4468 if (!tp->rx_std_buffers)
4471 memset(tp->rx_std_buffers, 0,
4472 (sizeof(struct ring_info) *
4474 TG3_RX_JUMBO_RING_SIZE)) +
4475 (sizeof(struct tx_ring_info) *
/* Carve the single allocation into its three sections. */
4478 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4479 tp->tx_buffers = (struct tx_ring_info *)
4480 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4482 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4483 &tp->rx_std_mapping);
4487 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4488 &tp->rx_jumbo_mapping);
4493 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4494 &tp->rx_rcb_mapping);
4498 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4499 &tp->tx_desc_mapping);
4503 tp->hw_status = pci_alloc_consistent(tp->pdev,
4505 &tp->status_mapping);
4509 tp->hw_stats = pci_alloc_consistent(tp->pdev,
4510 sizeof(struct tg3_hw_stats),
4511 &tp->stats_mapping);
4515 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4516 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
/* Error path: undo all partial allocations. */
4521 tg3_free_consistent(tp);
/* Upper bound on register-poll iterations used by the stop/abort paths. */
4525 #define MAX_WAIT_CNT 1000
4527 /* To stop a block, clear the enable bit and poll till it
4528 * clears. tp->lock is held.
4530 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4535 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4542 /* We can't enable/disable these bits of the
4543 * 5705/5750, just say success.
/* Poll until the hardware acknowledges the disable. */
4556 for (i = 0; i < MAX_WAIT_CNT; i++) {
4559 if ((val & enable_bit) == 0)
/* Timed out; report unless the caller asked for silence. */
4563 if (i == MAX_WAIT_CNT && !silent) {
4564 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4565 "ofs=%lx enable_bit=%x\n",
4573 /* tp->lock is held. */
/* Quiesce the chip: disable interrupts, stop the rx path, then shut
 * down each functional block in dependency order (receive, send, DMA,
 * host coalescing, buffer/memory managers), clearing the status and
 * stats blocks at the end.  Returns an OR of tg3_stop_block() results.
 */
4574 static int tg3_abort_hw(struct tg3 *tp, int silent)
4578 tg3_disable_ints(tp);
4580 tp->rx_mode &= ~RX_MODE_ENABLE;
4581 tw32_f(MAC_RX_MODE, tp->rx_mode);
/* Receive-side blocks first so no new work reaches the send side. */
4584 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4585 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4586 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4587 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4588 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4589 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4591 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4592 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4593 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4594 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4595 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4596 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4597 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
4599 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4600 tw32_f(MAC_MODE, tp->mac_mode);
4603 tp->tx_mode &= ~TX_MODE_ENABLE;
4604 tw32_f(MAC_TX_MODE, tp->tx_mode);
/* Wait for the MAC transmitter to drain. */
4606 for (i = 0; i < MAX_WAIT_CNT; i++) {
4608 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4611 if (i >= MAX_WAIT_CNT) {
4612 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4613 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4614 tp->dev->name, tr32(MAC_TX_MODE));
4618 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
4619 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4620 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
/* Pulse the flow-through queue reset. */
4622 tw32(FTQ_RESET, 0xffffffff);
4623 tw32(FTQ_RESET, 0x00000000);
4625 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4626 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
4629 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4631 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4636 /* tp->lock is held. */
/* Acquire the NVRAM software arbitration semaphore (shared with the
 * chip firmware).  Recursive: only the first acquisition touches the
 * hardware; nesting is tracked in nvram_lock_cnt.  No-op when the
 * device has no NVRAM.
 */
4637 static int tg3_nvram_lock(struct tg3 *tp)
4639 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4642 if (tp->nvram_lock_cnt == 0) {
4643 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
/* Poll (bounded) for the arbitration grant. */
4644 for (i = 0; i < 8000; i++) {
4645 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
/* Timed out: withdraw the request rather than leave it pending. */
4650 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4654 tp->nvram_lock_cnt++;
4659 /* tp->lock is held. */
/* Release one level of the NVRAM arbitration lock; the hardware
 * semaphore is only dropped when the nesting count reaches zero. */
4660 static void tg3_nvram_unlock(struct tg3 *tp)
4662 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4663 if (tp->nvram_lock_cnt > 0)
4664 tp->nvram_lock_cnt--;
4665 if (tp->nvram_lock_cnt == 0)
4666 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4670 /* tp->lock is held. */
/* Set the NVRAM access-enable bit on 5750+ chips whose NVRAM is not
 * firmware-protected; other chips need no explicit enable. */
4671 static void tg3_enable_nvram_access(struct tg3 *tp)
4673 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4674 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4675 u32 nvaccess = tr32(NVRAM_ACCESS);
4677 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4681 /* tp->lock is held. */
/* Mirror of tg3_enable_nvram_access(): clear the access-enable bit on
 * 5750+ non-protected-NVRAM chips. */
4682 static void tg3_disable_nvram_access(struct tg3 *tp)
4684 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4685 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4686 u32 nvaccess = tr32(NVRAM_ACCESS);
4688 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4692 /* tp->lock is held. */
/* Signal the management firmware (new ASF handshake) that a reset of
 * the given kind (init/shutdown/suspend) is about to start, after
 * writing the magic value to the firmware mailbox. */
4693 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4695 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4696 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4698 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4700 case RESET_KIND_INIT:
4701 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4705 case RESET_KIND_SHUTDOWN:
4706 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4710 case RESET_KIND_SUSPEND:
4711 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4721 /* tp->lock is held. */
/* Signal the management firmware (new ASF handshake) that a reset of
 * the given kind has completed. */
4722 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4724 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4726 case RESET_KIND_INIT:
4727 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4728 DRV_STATE_START_DONE);
4731 case RESET_KIND_SHUTDOWN:
4732 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4733 DRV_STATE_UNLOAD_DONE);
4742 /* tp->lock is held. */
/* Legacy-ASF equivalent of the pre/post reset signalling: write the
 * driver-state mailbox when ASF is enabled. */
4743 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4745 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4747 case RESET_KIND_INIT:
4748 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4752 case RESET_KIND_SHUTDOWN:
4753 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4757 case RESET_KIND_SUSPEND:
4758 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
/* Poll the firmware mailbox until the boot code writes back the
 * inverted magic value, indicating firmware initialization finished.
 * A timeout is not treated as an error (some boards ship without
 * firmware) but is logged once. */
4768 static int tg3_poll_fw(struct tg3 *tp)
4773 /* Wait for firmware initialization to complete. */
4774 for (i = 0; i < 100000; i++) {
4775 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4776 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4781 /* Chip might not be fitted with firmware. Some Sun onboard
4782 * parts are configured like that. So don't signal the timeout
4783 * of the above loop as an error, but do report the lack of
4784 * running firmware once.
4787 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
4788 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
4790 printk(KERN_INFO PFX "%s: No firmware running.\n",
/* Forward declaration; defined just below. */
4797 static void tg3_stop_fw(struct tg3 *);
4799 /* tp->lock is held. */
/* Perform a full core-clock chip reset and bring the chip back to a
 * usable post-reset state: restore PCI config space, MSI enable,
 * memory arbiter, GRC mode, MAC mode, then wait for firmware and
 * re-probe the ASF configuration.  This sequence is extremely
 * order-sensitive; see the inline comments.  NOTE(review): many lines
 * are elided in this view.
 */
4800 static int tg3_chip_reset(struct tg3 *tp)
4803 void (*write_op)(struct tg3 *, u32, u32);
4808 /* No matching tg3_nvram_unlock() after this because
4809 * chip reset below will undo the nvram lock.
4811 tp->nvram_lock_cnt = 0;
4813 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
4814 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
4815 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
4816 tw32(GRC_FASTBOOT_PC, 0);
4819 * We must avoid the readl() that normally takes place.
4820 * It locks machines, causes machine checks, and other
4821 * fun things. So, temporarily disable the 5701
4822 * hardware workaround, while we do the reset.
4824 write_op = tp->write32;
4825 if (write_op == tg3_write_flush_reg32)
4826 tp->write32 = tg3_write32;
4829 val = GRC_MISC_CFG_CORECLK_RESET;
4831 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4832 if (tr32(0x7e2c) == 0x60) {
4835 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4836 tw32(GRC_MISC_CFG, (1 << 29));
4841 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4842 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
/* This write triggers the actual core-clock reset. */
4843 tw32(GRC_MISC_CFG, val);
4845 /* restore 5701 hardware bug workaround write method */
4846 tp->write32 = write_op;
4848 /* Unfortunately, we have to delay before the PCI read back.
4849 * Some 575X chips even will not respond to a PCI cfg access
4850 * when the reset command is given to the chip.
4852 * How do these hardware designers expect things to work
4853 * properly if the PCI write is posted for a long period
4854 * of time? It is always necessary to have some method by
4855 * which a register read back can occur to push the write
4856 * out which does the reset.
4858 * For most tg3 variants the trick below was working.
4863 /* Flush PCI posted writes. The normal MMIO registers
4864 * are inaccessible at this time so this is the only
4865 * way to make this reliably (actually, this is no longer
4866 * the case, see above). I tried to use indirect
4867 * register read/write but this upset some 5701 variants.
4869 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4873 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4874 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4878 /* Wait for link training to complete. */
4879 for (i = 0; i < 5000; i++)
4882 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4883 pci_write_config_dword(tp->pdev, 0xc4,
4884 cfg_val | (1 << 15));
4886 /* Set PCIE max payload size and clear error status. */
4887 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4890 /* Re-enable indirect register accesses. */
4891 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4892 tp->misc_host_ctrl);
4894 /* Set MAX PCI retry to zero. */
4895 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4896 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4897 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4898 val |= PCISTATE_RETRY_SAME_DMA;
4899 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
/* Older kernels take the saved-state buffer as an argument. */
4901 #if (LINUX_VERSION_CODE < 0x2060a)
4902 pci_restore_state(tp->pdev, tp->pci_cfg_state);
4904 pci_restore_state(tp->pdev);
4907 /* Make sure PCI-X relaxed ordering bit is clear. */
4908 pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4909 val &= ~PCIX_CAPS_RELAXED_ORDERING;
4910 pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4912 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4915 /* Chip reset on 5780 will reset MSI enable bit,
4916 * so need to restore it.
4918 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4921 pci_read_config_word(tp->pdev,
4922 tp->msi_cap + PCI_MSI_FLAGS,
4924 pci_write_config_word(tp->pdev,
4925 tp->msi_cap + PCI_MSI_FLAGS,
4926 ctrl | PCI_MSI_FLAGS_ENABLE);
4927 val = tr32(MSGINT_MODE);
4928 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
4931 val = tr32(MEMARB_MODE);
4932 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4935 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
/* Chip-rev specific magic-register fixups after reset. */
4937 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4939 tw32(0x5000, 0x400);
4942 tw32(GRC_MODE, tp->grc_mode);
4944 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4945 u32 val = tr32(0xc4);
4947 tw32(0xc4, val | (1 << 15));
4950 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4951 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4952 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4953 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4954 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4955 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
/* Restore the MAC port mode matching the PHY type. */
4958 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4959 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4960 tw32_f(MAC_MODE, tp->mac_mode);
4961 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4962 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
4963 tw32_f(MAC_MODE, tp->mac_mode);
4965 tw32_f(MAC_MODE, 0);
4968 err = tg3_poll_fw(tp);
4972 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4973 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4974 u32 val = tr32(0x7c00);
4976 tw32(0x7c00, val | (1 << 25));
4979 /* Reprobe ASF enable state. */
4980 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4981 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4982 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4983 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4986 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4987 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4988 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4989 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4990 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4997 /* tp->lock is held. */
/* If ASF management firmware is running, ask it to pause via the
 * firmware command mailbox + RX CPU event, then poll (bounded) for
 * the firmware's acknowledgement. */
4998 static void tg3_stop_fw(struct tg3 *tp)
5000 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5004 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
5005 val = tr32(GRC_RX_CPU_EVENT);
5007 tw32(GRC_RX_CPU_EVENT, val);
5009 /* Wait for RX cpu to ACK the event. */
5010 for (i = 0; i < 100; i++) {
5011 if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
5018 /* tp->lock is held. */
/* Full orderly shutdown: signal firmware, quiesce the hardware
 * (tg3_abort_hw), reset the chip, then signal firmware again with
 * the legacy and post-reset handshakes. */
5019 static int tg3_halt(struct tg3 *tp, int kind, int silent)
5025 tg3_write_sig_pre_reset(tp, kind);
5027 tg3_abort_hw(tp, silent);
5028 err = tg3_chip_reset(tp);
5030 tg3_write_sig_legacy(tp, kind);
5031 tg3_write_sig_post_reset(tp, kind);
5039 #define TG3_FW_RELEASE_MAJOR 0x0
5040 #define TG3_FW_RELASE_MINOR 0x0
5041 #define TG3_FW_RELEASE_FIX 0x0
5042 #define TG3_FW_START_ADDR 0x08000000
5043 #define TG3_FW_TEXT_ADDR 0x08000000
5044 #define TG3_FW_TEXT_LEN 0x9c0
5045 #define TG3_FW_RODATA_ADDR 0x080009c0
5046 #define TG3_FW_RODATA_LEN 0x60
5047 #define TG3_FW_DATA_ADDR 0x08000a40
5048 #define TG3_FW_DATA_LEN 0x20
5049 #define TG3_FW_SBSS_ADDR 0x08000a60
5050 #define TG3_FW_SBSS_LEN 0xc
5051 #define TG3_FW_BSS_ADDR 0x08000a70
5052 #define TG3_FW_BSS_LEN 0x10
5054 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
5055 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
5056 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
5057 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
5058 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
5059 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
5060 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
5061 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
5062 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
5063 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
5064 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
5065 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
5066 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
5067 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
5068 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
5069 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
5070 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5071 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
5072 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
5073 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
5074 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5075 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
5076 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
5077 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5078 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5079 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5081 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
5082 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5083 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5084 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5085 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
5086 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
5087 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
5088 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
5089 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5090 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5091 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
5092 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5093 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5094 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5095 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
5096 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
5097 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
5098 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
5099 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
5100 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
5101 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
5102 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
5103 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
5104 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
5105 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
5106 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
5107 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
5108 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
5109 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
5110 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
5111 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
5112 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
5113 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
5114 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
5115 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
5116 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
5117 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
5118 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
5119 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
5120 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
5121 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
5122 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
5123 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
5124 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
5125 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
5126 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
5127 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
5128 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
5129 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
5130 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
5131 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
5132 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
5133 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
5134 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
5135 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
5136 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
5137 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
5138 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
5139 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
5140 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
5141 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
5142 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
5143 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
5144 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
5145 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
5148 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
5149 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
5150 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
5151 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5152 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
5156 #if 0 /* All zeros, don't eat up space with it. */
5157 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
5158 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5159 0x00000000, 0x00000000, 0x00000000, 0x00000000
/* Per-CPU scratch RAM windows used when loading firmware:
 * 16KB (0x4000 bytes) each for the RX and TX embedded CPUs. */
5163 #define RX_CPU_SCRATCH_BASE	0x30000
5164 #define RX_CPU_SCRATCH_SIZE	0x04000
5165 #define TX_CPU_SCRATCH_BASE	0x34000
5166 #define TX_CPU_SCRATCH_SIZE	0x04000
5168 /* tp->lock is held. */
/*
 * tg3_halt_cpu - stop one of the chip's on-board CPUs.
 * @tp: driver private state
 * @offset: register base of the CPU to halt (RX_CPU_BASE or TX_CPU_BASE)
 *
 * Repeatedly requests CPU_MODE_HALT and polls the mode register (up to
 * 10000 iterations) until the halt bit reads back set, then clears the
 * firmware's NVRAM arbitration request.
 * NOTE(review): sampled listing -- the loop bodies' break/udelay lines,
 * the timeout error return and closing braces fall between the numbered
 * lines shown here.
 */
5169 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
/* 5705-and-newer parts have no separate TX CPU, so halting it is a driver bug. */
5173 	BUG_ON(offset == TX_CPU_BASE &&
5174 	    (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
5176 	if (offset == RX_CPU_BASE) {
/* RX CPU: each iteration clears CPU_STATE, writes the halt request,
 * and re-reads the mode register to see whether the halt took. */
5177 		for (i = 0; i < 10000; i++) {
/* 0xffffffff presumably acks/clears all pending CPU_STATE bits -- TODO confirm. */
5178 			tw32(offset + CPU_STATE, 0xffffffff);
5179 			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5180 			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
/* TX CPU path: issue one halt request up front (tw32_f looks like the
 * flushing-write variant of tw32 -- confirm in tg3.h), then poll. */
5184 		tw32(offset + CPU_STATE, 0xffffffff);
5185 		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
5188 	for (i = 0; i < 10000; i++) {
5189 		tw32(offset + CPU_STATE, 0xffffffff);
5190 		tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5191 		if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
/* Poll exhausted: report which CPU failed to halt. */
5197 		printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
5200 		       (offset == RX_CPU_BASE ? "RX" : "TX"));
5204 	/* Clear firmware's nvram arbitration. */
5205 	if (tp->tg3_flags & TG3_FLAG_NVRAM)
5206 		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
/* Firmware segment descriptor consumed by tg3_load_firmware_cpu().
 * Each *_base is the firmware's link address (only the low 16 bits are
 * used to index scratch RAM) and each *_len is the segment's byte length.
 * NOTE(review): the struct header and the u32 *text_data/rodata_data/
 * data_data payload-pointer members fall outside this sampled listing. */
5211 	unsigned int text_base;
5212 	unsigned int text_len;
5214 	unsigned int rodata_base;
5215 	unsigned int rodata_len;
5217 	unsigned int data_base;
5218 	unsigned int data_len;
5222 /* tp->lock is held. */
/*
 * tg3_load_firmware_cpu - copy a firmware image into a CPU's scratch RAM.
 * @tp: driver private state
 * @cpu_base: register base of the target CPU (RX_CPU_BASE or TX_CPU_BASE)
 * @cpu_scratch_base: start of that CPU's scratch memory window
 * @cpu_scratch_size: size of the scratch window, in bytes
 * @info: segment addresses, lengths and payload pointers for the image
 *
 * Takes the NVRAM lock (bootcode may still be running), halts the CPU,
 * zeroes the scratch window, then writes the text/rodata/data segments
 * word by word.  A NULL payload pointer in @info loads zeros for that
 * segment.
 * NOTE(review): sampled listing -- error-return paths, the remaining
 * index expressions of the write_op() calls and the closing brace fall
 * between the numbered lines shown here.
 */
5223 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5224 				 int cpu_scratch_size, struct fw_info *info)
5226 	int err, lock_err, i;
5227 	void (*write_op)(struct tg3 *, u32, u32);
/* As in tg3_halt_cpu(): 5705+ chips have no TX CPU to load. */
5229 	if (cpu_base == TX_CPU_BASE &&
5230 	    (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5231 		printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5232 		       "TX cpu firmware on %s which is 5705.\n",
/* 5705+ parts are written via direct memory writes; older parts go
 * through the indirect register window. */
5237 	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5238 		write_op = tg3_write_mem;
5240 		write_op = tg3_write_indirect_reg32;
5242 	/* It is possible that bootcode is still loading at this point.
5243 	 * Get the nvram lock first before halting the cpu.
5245 	lock_err = tg3_nvram_lock(tp);
5246 	err = tg3_halt_cpu(tp, cpu_base);
5248 		tg3_nvram_unlock(tp);
/* Clear the whole scratch window, then keep the CPU halted while loading. */
5252 	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5253 		write_op(tp, cpu_scratch_base + i, 0);
5254 	tw32(cpu_base + CPU_STATE, 0xffffffff);
5255 	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
/* .text segment: low 16 bits of the link address index the scratch RAM;
 * a NULL text_data pointer loads zeros. */
5256 	for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5257 		write_op(tp, (cpu_scratch_base +
5258 			      (info->text_base & 0xffff) +
5261 			      info->text_data[i] : 0));
/* .rodata segment, same addressing scheme. */
5262 	for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5263 		write_op(tp, (cpu_scratch_base +
5264 			      (info->rodata_base & 0xffff) +
5266 			     (info->rodata_data ?
5267 			      info->rodata_data[i] : 0));
/* .data segment (e.g. the 5701 A0 fix passes data_data == NULL). */
5268 	for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5269 		write_op(tp, (cpu_scratch_base +
5270 			      (info->data_base & 0xffff) +
5273 			      info->data_data[i] : 0));
5281 /* tp->lock is held. */
/*
 * tg3_load_5701_a0_firmware_fix - load replacement firmware into both
 * on-board CPUs (appears to be a workaround for 5701 A0 silicon --
 * TODO confirm against callers).
 * @tp: driver private state
 *
 * Builds a struct fw_info from the tg3Fw* image, loads it into the RX
 * and TX CPU scratch areas, then starts only the RX CPU: set its PC to
 * the firmware entry point, verify the PC took (up to 5 retries), and
 * finally clear CPU_MODE to release it from halt.
 * NOTE(review): sampled listing -- error checks after each load call,
 * the retry 'break'/delay lines and the closing brace fall between the
 * numbered lines shown here.
 */
5282 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5284 	struct fw_info info;
5287 	info.text_base = TG3_FW_TEXT_ADDR;
5288 	info.text_len = TG3_FW_TEXT_LEN;
5289 	info.text_data = &tg3FwText[0];
5290 	info.rodata_base = TG3_FW_RODATA_ADDR;
5291 	info.rodata_len = TG3_FW_RODATA_LEN;
5292 	info.rodata_data = &tg3FwRodata[0];
5293 	info.data_base = TG3_FW_DATA_ADDR;
/* NULL: the .data image is all zeros (see the #if 0'd tg3FwData above),
 * so the loader's zero-fill path is used instead. */
5294 	info.data_len = TG3_FW_DATA_LEN;
5295 	info.data_data = NULL;
5297 	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5298 				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5303 	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5304 				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5309 	/* Now startup only the RX cpu. */
5310 	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5311 	tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
/* Retry until the PC register reads back the firmware entry address. */
5313 	for (i = 0; i < 5; i++) {
5314 		if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5316 		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5317 		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
5318 		tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
/* PC never latched: report observed vs. expected program counter. */
5322 		printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5323 		       "to set RX CPU PC, is %08x should be %08x\n",
5324 		       tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
/* Release the RX CPU from halt (clear all CPU_MODE bits). */
5328 	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5329 	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
5334 #if TG3_TSO_SUPPORT != 0
/* Layout of the TSO firmware image, version 1.6.0 (per the MAJOR/MINOR/FIX
 * values below): link addresses and byte lengths of each segment, used to
 * build a struct fw_info for tg3_load_firmware_cpu(). */
5336 #define TG3_TSO_FW_RELEASE_MAJOR	0x1
/* "RELASE" is a historical misspelling; kept as-is since other code may
 * reference it by this name. */
5337 #define TG3_TSO_FW_RELASE_MINOR		0x6
5338 #define TG3_TSO_FW_RELEASE_FIX		0x0
5339 #define TG3_TSO_FW_START_ADDR		0x08000000
5340 #define TG3_TSO_FW_TEXT_ADDR		0x08000000
5341 #define TG3_TSO_FW_TEXT_LEN		0x1aa0
5342 #define TG3_TSO_FW_RODATA_ADDR		0x08001aa0
5343 #define TG3_TSO_FW_RODATA_LEN		0x60
5344 #define TG3_TSO_FW_DATA_ADDR		0x08001b20
5345 #define TG3_TSO_FW_DATA_LEN		0x30
5346 #define TG3_TSO_FW_SBSS_ADDR		0x08001b50
5347 #define TG3_TSO_FW_SBSS_LEN		0x2c
5348 #define TG3_TSO_FW_BSS_ADDR		0x08001b80
5349 #define TG3_TSO_FW_BSS_LEN		0x894
5351 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5352 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5353 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5354 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5355 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5356 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5357 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5358 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5359 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5360 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5361 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5362 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5363 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5364 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5365 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5366 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5367 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5368 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5369 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5370 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5371 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5372 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5373 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5374 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5375 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5376 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5377 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5378 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5379 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5380 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5381 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5382 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5383 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5384 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5385 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5386 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5387 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5388 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5389 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5390 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5391 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5392 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5393 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5394 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5395 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5396 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5397 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5398 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5399 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5400 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5401 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5402 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5403 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5404 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5405 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5406 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5407 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5408 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5409 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5410 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5411 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5412 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5413 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5414 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5415 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5416 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5417 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5418 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5419 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5420 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5421 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5422 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5423 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5424 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5425 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5426 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5427 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5428 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5429 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5430 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5431 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5432 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5433 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5434 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5435 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5436 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5437 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5438 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5439 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5440 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5441 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5442 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5443 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5444 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5445 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5446 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5447 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5448 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5449 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5450 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5451 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5452 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5453 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5454 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5455 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5456 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5457 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5458 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5459 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5460 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5461 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5462 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5463 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5464 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5465 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5466 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5467 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5468 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5469 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5470 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5471 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5472 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5473 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5474 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5475 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5476 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5477 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5478 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5479 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5480 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5481 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5482 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5483 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5484 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5485 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5486 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5487 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5488 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5489 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5490 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5491 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5492 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5493 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5494 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5495 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5496 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5497 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5498 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5499 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5500 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5501 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5502 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5503 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5504 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5505 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5506 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5507 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5508 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5509 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5510 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5511 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5512 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5513 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5514 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5515 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5516 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5517 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5518 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5519 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5520 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5521 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5522 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5523 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5524 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5525 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5526 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5527 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5528 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5529 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5530 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5531 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5532 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5533 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5534 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5535 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5536 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5537 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5538 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5539 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5540 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5541 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5542 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5543 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5544 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5545 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5546 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5547 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5548 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5549 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5550 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5551 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5552 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5553 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5554 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5555 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5556 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5557 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5558 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5559 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5560 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5561 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5562 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5563 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5564 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5565 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5566 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5567 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5568 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5569 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5570 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5571 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5572 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5573 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5574 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5575 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5576 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5577 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5578 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5579 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5580 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5581 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5582 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5583 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5584 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5585 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5586 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5587 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5588 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5589 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5590 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5591 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5592 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5593 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5594 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5595 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5596 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5597 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5598 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5599 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5600 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5601 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5602 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5603 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5604 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5605 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5606 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5607 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5608 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5609 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5610 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5611 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5612 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5613 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5614 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5615 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5616 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5617 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5618 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5619 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5620 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5621 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5622 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5623 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5624 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5625 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5626 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5627 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5628 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5629 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5630 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5631 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5632 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5633 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5634 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5635 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5638 static u32 tg3TsoFwRodata[] = {
5639 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5640 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5641 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5642 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5646 static u32 tg3TsoFwData[] = {
5647 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5648 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5652 /* 5705 needs a special version of the TSO firmware. */
/* Layout of the 5705-specific TSO firmware image, version 1.2.0 (per the
 * MAJOR/MINOR/FIX values below); same segment scheme as the TG3_TSO_FW_*
 * constants above.  "RELASE" misspelling kept for name compatibility. */
5653 #define TG3_TSO5_FW_RELEASE_MAJOR	0x1
5654 #define TG3_TSO5_FW_RELASE_MINOR	0x2
5655 #define TG3_TSO5_FW_RELEASE_FIX		0x0
5656 #define TG3_TSO5_FW_START_ADDR		0x00010000
5657 #define TG3_TSO5_FW_TEXT_ADDR		0x00010000
5658 #define TG3_TSO5_FW_TEXT_LEN		0xe90
5659 #define TG3_TSO5_FW_RODATA_ADDR		0x00010e90
5660 #define TG3_TSO5_FW_RODATA_LEN		0x50
5661 #define TG3_TSO5_FW_DATA_ADDR		0x00010f00
5662 #define TG3_TSO5_FW_DATA_LEN		0x20
5663 #define TG3_TSO5_FW_SBSS_ADDR		0x00010f20
5664 #define TG3_TSO5_FW_SBSS_LEN		0x28
5665 #define TG3_TSO5_FW_BSS_ADDR		0x00010f50
5666 #define TG3_TSO5_FW_BSS_LEN		0x88
5668 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5669 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5670 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5671 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5672 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5673 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5674 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5675 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5676 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5677 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5678 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5679 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5680 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5681 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5682 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5683 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5684 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5685 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5686 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5687 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5688 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5689 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5690 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5691 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5692 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5693 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5694 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5695 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5696 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5697 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5698 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5699 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5700 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5701 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5702 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5703 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5704 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5705 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5706 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5707 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5708 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5709 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5710 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5711 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5712 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5713 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5714 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5715 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5716 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5717 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5718 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5719 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5720 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5721 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5722 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5723 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5724 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5725 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5726 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5727 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5728 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5729 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5730 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5731 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5732 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5733 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5734 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5735 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5736 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5737 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5738 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5739 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5740 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5741 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5742 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5743 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5744 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5745 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5746 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5747 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5748 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5749 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5750 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5751 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5752 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5753 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5754 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5755 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5756 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5757 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5758 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5759 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5760 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5761 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5762 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5763 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5764 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5765 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5766 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5767 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5768 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5769 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5770 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5771 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5772 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5773 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5774 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5775 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5776 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5777 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5778 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5779 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5780 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5781 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5782 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5783 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5784 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5785 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5786 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5787 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5788 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5789 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5790 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5791 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5792 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5793 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5794 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5795 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5796 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5797 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5798 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5799 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5800 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5801 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5802 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5803 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5804 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5805 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5806 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5807 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5808 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5809 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5810 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5811 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5812 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5813 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5814 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5815 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5816 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5817 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5818 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5819 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5820 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5821 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5822 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5823 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5824 0x00000000, 0x00000000, 0x00000000,
5827 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
5828 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5829 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
5830 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5831 0x00000000, 0x00000000, 0x00000000,
5834 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
5835 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
5836 0x00000000, 0x00000000, 0x00000000,
5839 /* tp->lock is held. */
/*
 * Download the TSO firmware into the NIC's on-chip CPU and start it.
 *
 * Chips with hardware TSO (TG3_FLG2_HW_TSO) need no firmware at all.
 * 5705-class parts run the firmware on the RX CPU out of the MBUF-pool
 * scratch area in NIC SRAM; all other TSO-capable parts use the TX CPU
 * and its dedicated scratch memory.  After the image is loaded the CPU
 * is taken out of halt with its PC pointed at the firmware text base,
 * verified by reading CPU_PC back (up to 5 retries).
 *
 * Returns 0 on success, negative errno on failure.
 * NOTE(review): several lines of the original function (declarations of
 * err/i, the early returns, and closing braces) are absent from this
 * extract; the code below is not compilable as shown.
 */
5840 static int tg3_load_tso_firmware(struct tg3 *tp)
5842 struct fw_info info;
5843 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
/* Hardware TSO engines need no downloadable firmware. */
5846 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
/* 5705: run firmware on the RX CPU; steal the top of the MBUF pool
 * for text/rodata/data plus the sbss/bss working areas. */
5849 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5850 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5851 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5852 info.text_data = &tg3Tso5FwText[0];
5853 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5854 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5855 info.rodata_data = &tg3Tso5FwRodata[0];
5856 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5857 info.data_len = TG3_TSO5_FW_DATA_LEN;
5858 info.data_data = &tg3Tso5FwData[0];
5859 cpu_base = RX_CPU_BASE;
5860 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5861 cpu_scratch_size = (info.text_len +
5864 TG3_TSO5_FW_SBSS_LEN +
5865 TG3_TSO5_FW_BSS_LEN);
/* All other TSO-capable chips: run firmware on the TX CPU using its
 * dedicated scratch memory (tg3TsoFw* image defined elsewhere). */
5867 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5868 info.text_len = TG3_TSO_FW_TEXT_LEN;
5869 info.text_data = &tg3TsoFwText[0];
5870 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5871 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5872 info.rodata_data = &tg3TsoFwRodata[0];
5873 info.data_base = TG3_TSO_FW_DATA_ADDR;
5874 info.data_len = TG3_TSO_FW_DATA_LEN;
5875 info.data_data = &tg3TsoFwData[0];
5876 cpu_base = TX_CPU_BASE;
5877 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5878 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5881 err = tg3_load_firmware_cpu(tp, cpu_base,
5882 cpu_scratch_base, cpu_scratch_size,
5887 /* Now startup the cpu. */
5888 tw32(cpu_base + CPU_STATE, 0xffffffff);
5889 tw32_f(cpu_base + CPU_PC, info.text_base);
/* Verify the CPU picked up the new PC; re-halt and retry a few times
 * before giving up. */
5891 for (i = 0; i < 5; i++) {
5892 if (tr32(cpu_base + CPU_PC) == info.text_base)
5894 tw32(cpu_base + CPU_STATE, 0xffffffff);
5895 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
5896 tw32_f(cpu_base + CPU_PC, info.text_base);
5900 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
5901 "to set CPU PC, is %08x should be %08x\n",
5902 tp->dev->name, tr32(cpu_base + CPU_PC),
/* PC verified: clear CPU state and release it from halt (MODE = 0). */
5906 tw32(cpu_base + CPU_STATE, 0xffffffff);
5907 tw32_f(cpu_base + CPU_MODE, 0x00000000);
5911 #endif /* TG3_TSO_SUPPORT != 0 */
5913 /* tp->lock is held. */
/*
 * Program the device's current MAC address (tp->dev->dev_addr) into the
 * chip's MAC address registers and derive the TX backoff seed from it.
 *
 * The 6-byte address is split into a 2-byte high word and 4-byte low
 * word and written to all four primary address slots; 5703/5704 parts
 * additionally carry 12 extended perfect-match slots which are filled
 * with the same address.
 */
5914 static void __tg3_set_mac_addr(struct tg3 *tp)
5916 u32 addr_high, addr_low;
5919 addr_high = ((tp->dev->dev_addr[0] << 8) |
5920 tp->dev->dev_addr[1]);
5921 addr_low = ((tp->dev->dev_addr[2] << 24) |
5922 (tp->dev->dev_addr[3] << 16) |
5923 (tp->dev->dev_addr[4] << 8) |
5924 (tp->dev->dev_addr[5] << 0));
/* Fill all four primary MAC address slots (8 bytes of register space
 * per slot: HIGH then LOW). */
5925 for (i = 0; i < 4; i++) {
5926 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5927 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
/* 5703/5704 have 12 extra perfect-match address slots. */
5930 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5931 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5932 for (i = 0; i < 12; i++) {
5933 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5934 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
/* Seed the transmit backoff state machine from the byte sum of the
 * MAC address so different NICs back off differently. */
5938 addr_high = (tp->dev->dev_addr[0] +
5939 tp->dev->dev_addr[1] +
5940 tp->dev->dev_addr[2] +
5941 tp->dev->dev_addr[3] +
5942 tp->dev->dev_addr[4] +
5943 tp->dev->dev_addr[5]) &
5944 TX_BACKOFF_SEED_MASK;
5945 tw32(MAC_TX_BACKOFF_SEED, addr_high);
/*
 * net_device set_mac_address handler.
 *
 * Copies the new address from the sockaddr into dev->dev_addr after
 * validating it.  If the interface is not running, nothing more is
 * needed (the address is programmed at open time).  With ASF enabled
 * the whole chip is reset under the full lock so the ASF firmware can
 * re-learn its MAC addresses; otherwise the address registers are
 * reprogrammed directly under tp->lock.
 * NOTE(review): the early `return` statements and the err declaration
 * from the original function are missing in this extract.
 */
5948 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5950 struct tg3 *tp = netdev_priv(dev);
5951 struct sockaddr *addr = p;
/* Reject multicast/zero addresses. */
5954 if (!is_valid_ether_addr(addr->sa_data))
5957 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
/* Hardware not up yet: the copy above is sufficient. */
5959 if (!netif_running(dev))
5962 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5963 /* Reset chip so that ASF can re-init any MAC addresses it
5967 tg3_full_lock(tp, 1);
5969 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5970 err = tg3_restart_hw(tp, 0);
5972 tg3_netif_start(tp);
5973 tg3_full_unlock(tp);
/* No ASF: just rewrite the MAC address registers. */
5975 spin_lock_bh(&tp->lock);
5976 __tg3_set_mac_addr(tp);
5977 spin_unlock_bh(&tp->lock);
5983 /* tp->lock is held. */
/*
 * Write one TG3_BDINFO structure in NIC SRAM: the 64-bit host DMA
 * address of a ring (split into high/low 32-bit words), its
 * maxlen/flags word, and — on pre-5705 chips only — the ring's
 * location in NIC SRAM.
 * NOTE(review): the tg3_write_mem(...) call heads for these argument
 * lists are missing from this extract; only the continuation lines of
 * each call are visible.
 */
5984 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5985 dma_addr_t mapping, u32 maxlen_flags,
5989 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5990 ((u64) mapping >> 32));
5992 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5993 ((u64) mapping & 0xffffffff));
5995 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
/* NIC-side ring address only exists on pre-5705 chips. */
5998 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6000 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6004 static void __tg3_set_rx_mode(struct net_device *);
/*
 * Push ethtool interrupt-coalescing parameters into the host coalescing
 * engine registers.  The *_irq variants and the statistics-block tick
 * register only exist on pre-5705 chips, hence the 5705_PLUS guards.
 * Caller holds tp->lock.
 */
6005 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
6007 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6008 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6009 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6010 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6011 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6012 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6013 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6015 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6016 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6017 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6018 u32 val = ec->stats_block_coalesce_usecs;
/* With no carrier, the original forces val to 0 before writing
 * (line missing from this extract) so stats DMA is quiesced. */
6020 if (!netif_carrier_ok(tp->dev))
6023 tw32(HOSTCC_STAT_COAL_TICKS, val);
6027 /* tp->lock is held. */
6028 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6030 u32 val, rdmac_mode;
6033 tg3_disable_ints(tp);
6037 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
6039 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
6040 tg3_abort_hw(tp, 1);
6043 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) && reset_phy)
6046 err = tg3_chip_reset(tp);
6050 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
6052 /* This works around an issue with Athlon chipsets on
6053 * B3 tigon3 silicon. This bit has no effect on any
6054 * other revision. But do not set this on PCI Express
6057 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
6058 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
6059 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6061 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6062 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
6063 val = tr32(TG3PCI_PCISTATE);
6064 val |= PCISTATE_RETRY_SAME_DMA;
6065 tw32(TG3PCI_PCISTATE, val);
6068 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
6069 /* Enable some hw fixes. */
6070 val = tr32(TG3PCI_MSI_DATA);
6071 val |= (1 << 26) | (1 << 28) | (1 << 29);
6072 tw32(TG3PCI_MSI_DATA, val);
6075 /* Descriptor ring init may make accesses to the
6076 * NIC SRAM area to setup the TX descriptors, so we
6077 * can only do this after the hardware has been
6078 * successfully reset.
6080 err = tg3_init_rings(tp);
6084 /* This value is determined during the probe time DMA
6085 * engine test, tg3_test_dma.
6087 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6089 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6090 GRC_MODE_4X_NIC_SEND_RINGS |
6091 GRC_MODE_NO_TX_PHDR_CSUM |
6092 GRC_MODE_NO_RX_PHDR_CSUM);
6093 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
6095 /* Pseudo-header checksum is done by hardware logic and not
6096 * the offload processers, so make the chip do the pseudo-
6097 * header checksums on receive. For transmit it is more
6098 * convenient to do the pseudo-header checksum in software
6099 * as Linux does that on transmit for us in all cases.
6101 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
6105 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6107 /* Setup the timer prescalar register. Clock is always 66Mhz. */
6108 val = tr32(GRC_MISC_CFG);
6110 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6111 tw32(GRC_MISC_CFG, val);
6113 /* Initialize MBUF/DESC pool. */
6114 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6116 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6117 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6118 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6119 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6121 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6122 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6123 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6125 #if TG3_TSO_SUPPORT != 0
6126 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6129 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6130 TG3_TSO5_FW_RODATA_LEN +
6131 TG3_TSO5_FW_DATA_LEN +
6132 TG3_TSO5_FW_SBSS_LEN +
6133 TG3_TSO5_FW_BSS_LEN);
6134 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6135 tw32(BUFMGR_MB_POOL_ADDR,
6136 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6137 tw32(BUFMGR_MB_POOL_SIZE,
6138 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6142 if (tp->dev->mtu <= ETH_DATA_LEN) {
6143 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6144 tp->bufmgr_config.mbuf_read_dma_low_water);
6145 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6146 tp->bufmgr_config.mbuf_mac_rx_low_water);
6147 tw32(BUFMGR_MB_HIGH_WATER,
6148 tp->bufmgr_config.mbuf_high_water);
6150 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6151 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6152 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6153 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6154 tw32(BUFMGR_MB_HIGH_WATER,
6155 tp->bufmgr_config.mbuf_high_water_jumbo);
6157 tw32(BUFMGR_DMA_LOW_WATER,
6158 tp->bufmgr_config.dma_low_water);
6159 tw32(BUFMGR_DMA_HIGH_WATER,
6160 tp->bufmgr_config.dma_high_water);
6162 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6163 for (i = 0; i < 2000; i++) {
6164 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6169 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6174 /* Setup replenish threshold. */
6175 val = tp->rx_pending / 8;
6178 else if (val > tp->rx_std_max_post)
6179 val = tp->rx_std_max_post;
6181 tw32(RCVBDI_STD_THRESH, val);
6183 /* Initialize TG3_BDINFO's at:
6184 * RCVDBDI_STD_BD: standard eth size rx ring
6185 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
6186 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
6189 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
6190 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
6191 * ring attribute flags
6192 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
6194 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6195 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6197 * The size of each ring is fixed in the firmware, but the location is
6200 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6201 ((u64) tp->rx_std_mapping >> 32));
6202 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6203 ((u64) tp->rx_std_mapping & 0xffffffff));
6204 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6205 NIC_SRAM_RX_BUFFER_DESC);
6207 /* Don't even try to program the JUMBO/MINI buffer descriptor
6210 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6211 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6212 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6214 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6215 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6217 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6218 BDINFO_FLAGS_DISABLED);
6220 /* Setup replenish threshold. */
6221 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6223 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6224 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6225 ((u64) tp->rx_jumbo_mapping >> 32));
6226 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6227 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6228 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6229 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6230 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6231 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6233 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6234 BDINFO_FLAGS_DISABLED);
6239 /* There is only one send ring on 5705/5750, no need to explicitly
6240 * disable the others.
6242 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6243 /* Clear out send RCB ring in SRAM. */
6244 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6245 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6246 BDINFO_FLAGS_DISABLED);
6251 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6252 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6254 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6255 tp->tx_desc_mapping,
6256 (TG3_TX_RING_SIZE <<
6257 BDINFO_FLAGS_MAXLEN_SHIFT),
6258 NIC_SRAM_TX_BUFFER_DESC);
6260 /* There is only one receive return ring on 5705/5750, no need
6261 * to explicitly disable the others.
6263 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6264 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6265 i += TG3_BDINFO_SIZE) {
6266 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6267 BDINFO_FLAGS_DISABLED);
6272 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6274 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6276 (TG3_RX_RCB_RING_SIZE(tp) <<
6277 BDINFO_FLAGS_MAXLEN_SHIFT),
6280 tp->rx_std_ptr = tp->rx_pending;
6281 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6284 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6285 tp->rx_jumbo_pending : 0;
6286 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6289 /* Initialize MAC address and backoff seed. */
6290 __tg3_set_mac_addr(tp);
6292 /* MTU + ethernet header + FCS + optional VLAN tag */
6293 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6295 /* The slot time is changed by tg3_setup_phy if we
6296 * run at gigabit with half duplex.
6298 tw32(MAC_TX_LENGTHS,
6299 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6300 (6 << TX_LENGTHS_IPG_SHIFT) |
6301 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6303 /* Receive rules. */
6304 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6305 tw32(RCVLPC_CONFIG, 0x0181);
6307 /* Calculate RDMAC_MODE setting early, we need it to determine
6308 * the RCVLPC_STATE_ENABLE mask.
6310 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6311 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6312 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6313 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6314 RDMAC_MODE_LNGREAD_ENAB);
6315 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6316 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
6318 /* If statement applies to 5705 and 5750 PCI devices only */
6319 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6320 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6321 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6322 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6323 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6324 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6325 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6326 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6327 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6328 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6332 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6333 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6335 #if TG3_TSO_SUPPORT != 0
6336 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6337 rdmac_mode |= (1 << 27);
6340 /* Receive/send statistics. */
6341 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6342 val = tr32(RCVLPC_STATS_ENABLE);
6343 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6344 tw32(RCVLPC_STATS_ENABLE, val);
6345 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6346 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6347 val = tr32(RCVLPC_STATS_ENABLE);
6348 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6349 tw32(RCVLPC_STATS_ENABLE, val);
6351 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6353 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6354 tw32(SNDDATAI_STATSENAB, 0xffffff);
6355 tw32(SNDDATAI_STATSCTRL,
6356 (SNDDATAI_SCTRL_ENABLE |
6357 SNDDATAI_SCTRL_FASTUPD));
6359 /* Setup host coalescing engine. */
6360 tw32(HOSTCC_MODE, 0);
6361 for (i = 0; i < 2000; i++) {
6362 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6367 __tg3_set_coalesce(tp, &tp->coal);
6369 /* set status block DMA address */
6370 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6371 ((u64) tp->status_mapping >> 32));
6372 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6373 ((u64) tp->status_mapping & 0xffffffff));
6375 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6376 /* Status/statistics block address. See tg3_timer,
6377 * the tg3_periodic_fetch_stats call there, and
6378 * tg3_get_stats to see how this works for 5705/5750 chips.
6380 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6381 ((u64) tp->stats_mapping >> 32));
6382 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6383 ((u64) tp->stats_mapping & 0xffffffff));
6384 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6385 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6388 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6390 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6391 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6392 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6393 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6395 /* Clear statistics/status block in chip, and status block in ram. */
6396 for (i = NIC_SRAM_STATS_BLK;
6397 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6399 tg3_write_mem(tp, i, 0);
6402 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6404 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6405 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6406 /* reset to prevent losing 1st rx packet intermittently */
6407 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6411 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6412 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6413 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6416 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6417 * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
6418 * register to preserve the GPIO settings for LOMs. The GPIOs,
6419 * whether used as inputs or outputs, are set by boot code after
6422 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
6425 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
6426 GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
6428 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6429 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6430 GRC_LCLCTRL_GPIO_OUTPUT3;
6432 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6433 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6435 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6437 /* GPIO1 must be driven high for eeprom write protect */
6438 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6439 GRC_LCLCTRL_GPIO_OUTPUT1);
6441 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6444 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6447 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6448 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6452 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6453 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6454 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6455 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6456 WDMAC_MODE_LNGREAD_ENAB);
6458 /* If statement applies to 5705 and 5750 PCI devices only */
6459 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6460 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6461 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6462 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
6463 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6464 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6466 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6467 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6468 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6469 val |= WDMAC_MODE_RX_ACCEL;
6473 /* Enable host coalescing bug fix */
6474 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6475 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787))
6478 tw32_f(WDMAC_MODE, val);
6481 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
6482 val = tr32(TG3PCI_X_CAPS);
6483 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6484 val &= ~PCIX_CAPS_BURST_MASK;
6485 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6486 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6487 val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
6488 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6489 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6490 val |= (tp->split_mode_max_reqs <<
6491 PCIX_CAPS_SPLIT_SHIFT);
6493 tw32(TG3PCI_X_CAPS, val);
6496 tw32_f(RDMAC_MODE, rdmac_mode);
6499 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6500 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6501 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6502 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6503 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6504 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6505 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6506 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6507 #if TG3_TSO_SUPPORT != 0
6508 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6509 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6511 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6512 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6514 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6515 err = tg3_load_5701_a0_firmware_fix(tp);
6520 #if TG3_TSO_SUPPORT != 0
6521 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6522 err = tg3_load_tso_firmware(tp);
6528 tp->tx_mode = TX_MODE_ENABLE;
6529 tw32_f(MAC_TX_MODE, tp->tx_mode);
6532 tp->rx_mode = RX_MODE_ENABLE;
6533 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6534 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6536 tw32_f(MAC_RX_MODE, tp->rx_mode);
6539 if (tp->link_config.phy_is_low_power) {
6540 tp->link_config.phy_is_low_power = 0;
6541 tp->link_config.speed = tp->link_config.orig_speed;
6542 tp->link_config.duplex = tp->link_config.orig_duplex;
6543 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6546 tp->mi_mode = MAC_MI_MODE_BASE;
6547 tw32_f(MAC_MI_MODE, tp->mi_mode);
6550 tw32(MAC_LED_CTRL, tp->led_ctrl);
6552 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6553 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6554 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6557 tw32_f(MAC_RX_MODE, tp->rx_mode);
6560 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6561 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6562 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6563 /* Set drive transmission level to 1.2V */
6564 /* only if the signal pre-emphasis bit is not set */
6565 val = tr32(MAC_SERDES_CFG);
6568 tw32(MAC_SERDES_CFG, val);
6570 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6571 tw32(MAC_SERDES_CFG, 0x616000);
6574 /* Prevent chip from dropping frames when flow control
6577 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6579 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6580 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6581 /* Use hardware link auto-negotiation */
6582 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6585 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6586 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6589 tmp = tr32(SERDES_RX_CTRL);
6590 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6591 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6592 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6593 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6596 err = tg3_setup_phy(tp, reset_phy);
6600 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6603 /* Clear CRC stats. */
6604 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6605 tg3_writephy(tp, 0x1e, tmp | 0x8000);
6606 tg3_readphy(tp, 0x14, &tmp);
6610 __tg3_set_rx_mode(tp->dev);
6612 /* Initialize receive rules. */
6613 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
6614 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6615 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
6616 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6618 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6619 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6623 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6627 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
6629 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
6631 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
6633 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
6635 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
6637 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
6639 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
6641 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
6643 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
6645 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
6647 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
6649 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
6651 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
6653 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
6661 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6666 /* Called at device open time to get the chip ready for
6667  * packet processing. Invoked with tp->lock held.
/* NOTE(review): interior lines (error checks between calls, braces) are
 * missing from this extraction; comments describe visible statements only. */
6669 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
6673 	/* Force the chip into D0. */
6674 	err = tg3_set_power_state(tp, PCI_D0);
/* Select core clocks before touching chip memory. */
6678 	tg3_switch_clocks(tp);
/* Reset the indirect memory window base before full hardware init. */
6680 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6682 	err = tg3_reset_hw(tp, reset_phy);
/* Accumulate the 32-bit hardware counter at REG into the 64-bit software
 * counter PSTAT (a low/high pair): add into .low and carry into .high when
 * .low wraps (detected by .low < the value just added). */
6688 #define TG3_STAT_ADD32(PSTAT, REG) \
6689 do {	u32 __val = tr32(REG); \
6690 	(PSTAT)->low += __val; \
6691 	if ((PSTAT)->low < __val) \
6692 		(PSTAT)->high += 1; \
/* Fold the chip's 32-bit MAC TX/RX and receive-list-placement statistics
 * counters into the 64-bit counters in tp->hw_stats.  Skipped while the
 * link is down (netif_carrier_ok check). */
6695 static void tg3_periodic_fetch_stats(struct tg3 *tp)
6697 	struct tg3_hw_stats *sp = tp->hw_stats;
6699 	if (!netif_carrier_ok(tp->dev))
6702 	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
6703 	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
6704 	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
6705 	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
6706 	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
6707 	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
6708 	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
6709 	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
6710 	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
6711 	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
6712 	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
6713 	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
6714 	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
6716 	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
6717 	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
6718 	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
6719 	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
6720 	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
6721 	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
6722 	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
6723 	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
6724 	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
6725 	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
6726 	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
6727 	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
6728 	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
6729 	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
6731 	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
6732 	TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
6733 	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
/* Periodic driver timer.  Runs every tp->timer_offset jiffies with
 * tp->lock held: works around the non-tagged-status IRQ race, detects a
 * stalled write-DMA engine (schedules reset_task), polls link state once
 * per second, and sends the ASF firmware heartbeat every two seconds.
 * NOTE(review): several interior lines are missing from this extraction. */
6736 static void tg3_timer(unsigned long __opaque)
6738 	struct tg3 *tp = (struct tg3 *) __opaque;
6743 	spin_lock(&tp->lock);
6745 	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6746 		/* All of this garbage is because when using non-tagged
6747 		 * IRQ status the mailbox/status_block protocol the chip
6748 		 * uses with the cpu is race prone.
6750 		if (tp->hw_status->status & SD_STATUS_UPDATED) {
6751 			tw32(GRC_LOCAL_CTRL,
6752 			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
6754 			tw32(HOSTCC_MODE, tp->coalesce_mode |
6755 			     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
/* Write DMA engine no longer enabled => chip wedged; hand off to reset_task. */
6758 		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
6759 			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
6760 			spin_unlock(&tp->lock);
6761 			schedule_work(&tp->reset_task);
6766 	/* This part only runs once per second. */
6767 	if (!--tp->timer_counter) {
6768 		if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6769 			tg3_periodic_fetch_stats(tp);
6771 		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
6775 			mac_stat = tr32(MAC_STATUS);
6778 			if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
6779 				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
6781 			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
6785 				tg3_setup_phy(tp, 0);
6786 		} else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
6787 			u32 mac_stat = tr32(MAC_STATUS);
6790 			if (netif_carrier_ok(tp->dev) &&
6791 			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
6794 			if (! netif_carrier_ok(tp->dev) &&
6795 			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
6796 					 MAC_STATUS_SIGNAL_DET))) {
6802 					      ~MAC_MODE_PORT_MODE_MASK));
6804 				tw32_f(MAC_MODE, tp->mac_mode);
6806 				tg3_setup_phy(tp, 0);
6808 		} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
6809 			tg3_serdes_parallel_detect(tp);
6811 		tp->timer_counter = tp->timer_multiplier;
6814 	/* Heartbeat is only sent once every 2 seconds. */
6815 	if (!--tp->asf_counter) {
6816 		if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6819 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
6820 				      FWCMD_NICDRV_ALIVE_DETECT);
6821 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
6822 			/* 5 seconds timeout */
6823 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
6824 			val = tr32(GRC_RX_CPU_EVENT);
6826 			tw32(GRC_RX_CPU_EVENT, val);
6828 		tp->asf_counter = tp->asf_multiplier;
6831 	spin_unlock(&tp->lock);
/* Re-arm ourselves for the next tick. */
6834 	tp->timer.expires = jiffies + tp->timer_offset;
6835 	add_timer(&tp->timer);
/* Request the device interrupt, picking the ISR and IRQ flags from the
 * current mode: MSI gets a non-shared line (IRQF_SAMPLE_RANDOM only),
 * INTx uses a shared line; tagged-status hardware gets the tagged ISR.
 * NOTE(review): the fn assignments for the MSI branches are missing from
 * this extraction.  Returns the request_irq() result (0 on success). */
6838 static int tg3_request_irq(struct tg3 *tp)
6840 	irqreturn_t (*fn)(int, void *, struct pt_regs *);
6841 	unsigned long flags;
6842 	struct net_device *dev = tp->dev;
6844 	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6846 		if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
6848 		flags = IRQF_SAMPLE_RANDOM;
6851 		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6852 			fn = tg3_interrupt_tagged;
6853 		flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
6855 	return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
/* Verify that the chip can actually raise an interrupt on the current
 * line: temporarily install tg3_test_isr, force a coalescing-now event,
 * then poll the interrupt mailbox up to 5 times before restoring the
 * normal ISR via tg3_request_irq().
 * NOTE(review): return paths are missing from this extraction. */
6858 static int tg3_test_interrupt(struct tg3 *tp)
6860 	struct net_device *dev = tp->dev;
6864 	if (!netif_running(dev))
6867 	tg3_disable_ints(tp);
6869 	free_irq(tp->pdev->irq, dev);
/* Install the dedicated test ISR in place of the normal handler. */
6871 	err = request_irq(tp->pdev->irq, tg3_test_isr,
6872 			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
6876 	tp->hw_status->status &= ~SD_STATUS_UPDATED;
6877 	tg3_enable_ints(tp);
/* Force an immediate host-coalescing interrupt. */
6879 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
6882 	for (i = 0; i < 5; i++) {
6883 		int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
6888 #if (LINUX_VERSION_CODE < 0x20607)
6889 		set_current_state(TASK_UNINTERRUPTIBLE);
6890 		schedule_timeout(10);
6896 	tg3_disable_ints(tp);
6898 	free_irq(tp->pdev->irq, dev);
/* Put the real ISR back regardless of the test outcome. */
6900 	err = tg3_request_irq(tp);
6911 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
6912  * successfully restored
/* NOTE(review): several error-path lines are missing from this extraction. */
6914 static int tg3_test_msi(struct tg3 *tp)
6916 	struct net_device *dev = tp->dev;
6920 	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
6923 	/* Turn off SERR reporting in case MSI terminates with Master
6926 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
6927 	pci_write_config_word(tp->pdev, PCI_COMMAND,
6928 			      pci_cmd & ~PCI_COMMAND_SERR);
6930 	err = tg3_test_interrupt(tp);
/* Restore the original PCI command word (SERR bit included). */
6932 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6937 	/* other failures */
6941 	/* MSI test failed, go back to INTx mode */
6942 	printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
6943 	       "switching to INTx mode. Please report this failure to "
6944 	       "the PCI maintainer and include system chipset information.\n",
6947 	free_irq(tp->pdev->irq, dev);
6948 #ifdef CONFIG_PCI_MSI
6949 	pci_disable_msi(tp->pdev);
6952 	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6954 	err = tg3_request_irq(tp);
6958 	/* Need to reset the chip because the MSI cycle may have terminated
6959 	 * with Master Abort.
6961 	tg3_full_lock(tp, 1);
6963 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6964 	err = tg3_init_hw(tp, 1);
6966 	tg3_full_unlock(tp);
6969 		free_irq(tp->pdev->irq, dev);
/* net_device open handler: power the chip to D0, allocate the consistent
 * (DMA) rings, optionally enable MSI (only on 5750+ non-AX/BX parts with
 * tagged status), request the IRQ, initialize the hardware, start the
 * periodic timer, validate MSI delivery, then enable interrupts and the
 * TX queue.  NOTE(review): error-path lines are missing from this
 * extraction; comments describe visible statements only. */
6974 static int tg3_open(struct net_device *dev)
6976 	struct tg3 *tp = netdev_priv(dev);
6979 	tg3_full_lock(tp, 0);
6981 	err = tg3_set_power_state(tp, PCI_D0);
6985 	tg3_disable_ints(tp);
6986 	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6988 	tg3_full_unlock(tp);
6990 	/* The placement of this call is tied
6991 	 * to the setup and use of Host TX descriptors.
6993 	err = tg3_alloc_consistent(tp);
6997 #ifdef CONFIG_PCI_MSI
/* MSI only on 5750-plus, excluding 5750 AX/BX revs and single-port 5714. */
6998 	if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6999 	    (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
7000 	    (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) &&
7001 	    !((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) &&
7002 	      (tp->pdev_peer == tp->pdev))) {
7003 		/* All MSI supporting chips should support tagged
7004 		 * status.  Assert that this is the case.
7006 		if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7007 			printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
7008 			       "Not using MSI.\n", tp->dev->name);
7009 		} else if (pci_enable_msi(tp->pdev) == 0) {
7012 			msi_mode = tr32(MSGINT_MODE);
7013 			tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
7014 			tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
7018 	err = tg3_request_irq(tp);
/* IRQ request failed: undo MSI enable and free the DMA rings. */
7021 		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7022 #ifdef CONFIG_PCI_MSI
7023 			pci_disable_msi(tp->pdev);
7025 			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7027 		tg3_free_consistent(tp);
7031 	tg3_full_lock(tp, 0);
7033 	err = tg3_init_hw(tp, 1);
7035 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
/* Timer period: 1s with tagged status, 100ms otherwise; ASF heartbeat
 * counter is twice the per-second multiplier (fires every 2 seconds). */
7038 		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7039 			tp->timer_offset = HZ;
7041 			tp->timer_offset = HZ / 10;
7043 		BUG_ON(tp->timer_offset > HZ);
7044 		tp->timer_counter = tp->timer_multiplier =
7045 			(HZ / tp->timer_offset);
7046 		tp->asf_counter = tp->asf_multiplier =
7047 			((HZ / tp->timer_offset) * 2);
7049 		init_timer(&tp->timer);
7050 		tp->timer.expires = jiffies + tp->timer_offset;
7051 		tp->timer.data = (unsigned long) tp;
7052 		tp->timer.function = tg3_timer;
7055 	tg3_full_unlock(tp);
7058 		free_irq(tp->pdev->irq, dev);
7059 		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7060 #ifdef CONFIG_PCI_MSI
7061 			pci_disable_msi(tp->pdev);
7063 			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7065 		tg3_free_consistent(tp);
/* Confirm MSI actually delivers before committing to it. */
7069 	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7070 		err = tg3_test_msi(tp);
7073 			tg3_full_lock(tp, 0);
7075 			if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7076 #ifdef CONFIG_PCI_MSI
7077 				pci_disable_msi(tp->pdev);
7079 				tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7081 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7083 			tg3_free_consistent(tp);
7085 			tg3_full_unlock(tp);
7090 	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7091 		if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
/* 0x7c04 bit 29: undocumented register tweak for one-shot MSI mode —
 * meaning of the bit not visible here; verify against chip docs. */
7092 			u32 val = tr32(0x7c04);
7094 			tw32(0x7c04, val | (1 << 29));
7099 	tg3_full_lock(tp, 0);
7101 	add_timer(&tp->timer);
7102 	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
7103 	tg3_enable_ints(tp);
7105 	tg3_full_unlock(tp);
7107 	netif_start_queue(dev);
/* Debug-only helper: dump PCI status, every control block's MODE/STATUS
 * registers, host-coalescing addresses, NIC SRAM ring control blocks,
 * the host status/statistics blocks, and the first few NIC-side TX/RX
 * descriptors to the kernel log.  Purely reads; no side effects beyond
 * register reads.  Deliberately non-static (see commented-out "static"). */
7113 /*static*/ void tg3_dump_state(struct tg3 *tp)
7115 	u32 val32, val32_2, val32_3, val32_4, val32_5;
7119 	pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
7120 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
7121 	printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
7125 	printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
7126 	       tr32(MAC_MODE), tr32(MAC_STATUS));
7127 	printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
7128 	       tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
7129 	printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
7130 	       tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
7131 	printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
7132 	       tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
7134 	/* Send data initiator control block */
7135 	printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
7136 	       tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
7137 	printk("       SNDDATAI_STATSCTRL[%08x]\n",
7138 	       tr32(SNDDATAI_STATSCTRL));
7140 	/* Send data completion control block */
7141 	printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
7143 	/* Send BD ring selector block */
7144 	printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
7145 	       tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
7147 	/* Send BD initiator control block */
7148 	printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
7149 	       tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
7151 	/* Send BD completion control block */
7152 	printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
7154 	/* Receive list placement control block */
7155 	printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
7156 	       tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
7157 	printk("       RCVLPC_STATSCTRL[%08x]\n",
7158 	       tr32(RCVLPC_STATSCTRL));
7160 	/* Receive data and receive BD initiator control block */
7161 	printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
7162 	       tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
7164 	/* Receive data completion control block */
7165 	printk("DEBUG: RCVDCC_MODE[%08x]\n",
7168 	/* Receive BD initiator control block */
7169 	printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
7170 	       tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
7172 	/* Receive BD completion control block */
7173 	printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
7174 	       tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
7176 	/* Receive list selector control block */
7177 	printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
7178 	       tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
7180 	/* Mbuf cluster free block */
7181 	printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
7182 	       tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
7184 	/* Host coalescing control block */
7185 	printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
7186 	       tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
7187 	printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
7188 	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7189 	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7190 	printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
7191 	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7192 	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7193 	printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
7194 	       tr32(HOSTCC_STATS_BLK_NIC_ADDR));
7195 	printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
7196 	       tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
7198 	/* Memory arbiter control block */
7199 	printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
7200 	       tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
7202 	/* Buffer manager control block */
7203 	printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
7204 	       tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
7205 	printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
7206 	       tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
7207 	printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
7208 	       "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
7209 	       tr32(BUFMGR_DMA_DESC_POOL_ADDR),
7210 	       tr32(BUFMGR_DMA_DESC_POOL_SIZE));
7212 	/* Read DMA control block */
7213 	printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
7214 	       tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
7216 	/* Write DMA control block */
7217 	printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
7218 	       tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
7220 	/* DMA completion block */
7221 	printk("DEBUG: DMAC_MODE[%08x]\n",
7225 	printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
7226 	       tr32(GRC_MODE), tr32(GRC_MISC_CFG));
7227 	printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
7228 	       tr32(GRC_LOCAL_CTRL));
7231 	printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
7232 	       tr32(RCVDBDI_JUMBO_BD + 0x0),
7233 	       tr32(RCVDBDI_JUMBO_BD + 0x4),
7234 	       tr32(RCVDBDI_JUMBO_BD + 0x8),
7235 	       tr32(RCVDBDI_JUMBO_BD + 0xc));
7236 	printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
7237 	       tr32(RCVDBDI_STD_BD + 0x0),
7238 	       tr32(RCVDBDI_STD_BD + 0x4),
7239 	       tr32(RCVDBDI_STD_BD + 0x8),
7240 	       tr32(RCVDBDI_STD_BD + 0xc));
7241 	printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
7242 	       tr32(RCVDBDI_MINI_BD + 0x0),
7243 	       tr32(RCVDBDI_MINI_BD + 0x4),
7244 	       tr32(RCVDBDI_MINI_BD + 0x8),
7245 	       tr32(RCVDBDI_MINI_BD + 0xc));
/* Ring control blocks read back from NIC SRAM. */
7247 	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
7248 	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
7249 	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
7250 	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
7251 	printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
7252 	       val32, val32_2, val32_3, val32_4);
7254 	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
7255 	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
7256 	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
7257 	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
7258 	printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
7259 	       val32, val32_2, val32_3, val32_4);
7261 	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
7262 	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
7263 	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
7264 	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
7265 	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
7266 	printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
7267 	       val32, val32_2, val32_3, val32_4, val32_5);
7269 	/* SW status block */
7270 	printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
7271 	       tp->hw_status->status,
7272 	       tp->hw_status->status_tag,
7273 	       tp->hw_status->rx_jumbo_consumer,
7274 	       tp->hw_status->rx_consumer,
7275 	       tp->hw_status->rx_mini_consumer,
7276 	       tp->hw_status->idx[0].rx_producer,
7277 	       tp->hw_status->idx[0].tx_consumer);
7279 	/* SW statistics block */
7280 	printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
7281 	       ((u32 *)tp->hw_stats)[0],
7282 	       ((u32 *)tp->hw_stats)[1],
7283 	       ((u32 *)tp->hw_stats)[2],
7284 	       ((u32 *)tp->hw_stats)[3]);
7287 	printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
7288 	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
7289 	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
7290 	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
7291 	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
7293 	/* NIC side send descriptors. */
7294 	for (i = 0; i < 6; i++) {
7297 		txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
7298 			+ (i * sizeof(struct tg3_tx_buffer_desc));
7299 		printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
7301 		       readl(txd + 0x0), readl(txd + 0x4),
7302 		       readl(txd + 0x8), readl(txd + 0xc));
7305 	/* NIC side RX descriptors. */
7306 	for (i = 0; i < 6; i++) {
7309 		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
7310 			+ (i * sizeof(struct tg3_rx_buffer_desc));
7311 		printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
7313 		       readl(rxd + 0x0), readl(rxd + 0x4),
7314 		       readl(rxd + 0x8), readl(rxd + 0xc));
7315 		rxd += (4 * sizeof(u32));
7316 		printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
7318 		       readl(rxd + 0x0), readl(rxd + 0x4),
7319 		       readl(rxd + 0x8), readl(rxd + 0xc));
7322 	for (i = 0; i < 6; i++) {
7325 		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
7326 			+ (i * sizeof(struct tg3_rx_buffer_desc));
7327 		printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
7329 		       readl(rxd + 0x0), readl(rxd + 0x4),
7330 		       readl(rxd + 0x8), readl(rxd + 0xc));
7331 		rxd += (4 * sizeof(u32));
7332 		printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
7334 		       readl(rxd + 0x0), readl(rxd + 0x4),
7335 		       readl(rxd + 0x8), readl(rxd + 0xc));
7340 static struct net_device_stats *tg3_get_stats(struct net_device *);
7341 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
/* net_device close handler: wait out any in-flight reset_task, stop the
 * TX queue and timer, halt the chip under the full lock, release the
 * IRQ (and MSI vector), snapshot statistics into *_prev so they survive
 * the ring teardown, free DMA memory, and drop the chip to D3hot. */
7343 static int tg3_close(struct net_device *dev)
7345 	struct tg3 *tp = netdev_priv(dev);
7347 	/* Calling flush_scheduled_work() may deadlock because
7348 	 * linkwatch_event() may be on the workqueue and it will try to get
7349 	 * the rtnl_lock which we are holding.
/* Busy-wait (with short sleeps) for reset_task to finish instead. */
7351 	while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK) {
7352 #if (LINUX_VERSION_CODE < 0x20607)
7353 		set_current_state(TASK_UNINTERRUPTIBLE);
7354 		schedule_timeout(1);
7360 	netif_stop_queue(dev);
7362 	del_timer_sync(&tp->timer);
7364 	tg3_full_lock(tp, 1);
7369 	tg3_disable_ints(tp);
7371 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7374 		~(TG3_FLAG_INIT_COMPLETE |
7375 		  TG3_FLAG_GOT_SERDES_FLOWCTL);
7377 	tg3_full_unlock(tp);
7379 	free_irq(tp->pdev->irq, dev);
7380 	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7381 #ifdef CONFIG_PCI_MSI
7382 		pci_disable_msi(tp->pdev);
7384 		tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
/* Preserve cumulative stats across close/open cycles. */
7387 	memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
7388 	       sizeof(tp->net_stats_prev));
7389 	memcpy(&tp->estats_prev, tg3_get_estats(tp),
7390 	       sizeof(tp->estats_prev));
7392 	tg3_free_consistent(tp);
7394 	tg3_set_power_state(tp, PCI_D3hot);
7396 	netif_carrier_off(tp->dev);
/* Return a tg3 64-bit (high/low pair) statistic as unsigned long; on
 * 32-bit kernels the pair is combined into a u64 first.
 * NOTE(review): the non-32-bit branch and return are not visible here. */
7401 static inline unsigned long get_stat64(tg3_stat64_t *val)
7405 #if (BITS_PER_LONG == 32)
7408 	ret = ((u64)val->high << 32) | ((u64)val->low);
/* Return the cumulative RX CRC error count.  On 5700/5701 copper parts
 * the count is read from the PHY (shadow register 0x14, enabled via bit
 * 0x8000 of register 0x1e) and accumulated in tp->phy_crc_errors; all
 * other chips report it via the hardware rx_fcs_errors statistic. */
7413 static unsigned long calc_crc_errors(struct tg3 *tp)
7415 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
7417 	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7418 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7419 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
7422 		spin_lock_bh(&tp->lock);
7423 		if (!tg3_readphy(tp, 0x1e, &val)) {
7424 			tg3_writephy(tp, 0x1e, val | 0x8000);
7425 			tg3_readphy(tp, 0x14, &val);
7428 		spin_unlock_bh(&tp->lock);
7430 		tp->phy_crc_errors += val;
7432 		return tp->phy_crc_errors;
7435 	return get_stat64(&hw_stats->rx_fcs_errors);
/* estats->member = snapshot taken at last close (old_estats) plus the
 * live hardware counter — keeps ethtool stats monotonic across open/close. */
7438 #define ESTAT_ADD(member) \
7439 	estats->member = old_estats->member + \
7440 			 get_stat64(&hw_stats->member)
/* Build the full ethtool statistics block in tp->estats by adding every
 * live hardware counter to its pre-close snapshot (see ESTAT_ADD).
 * NOTE(review): the return statement is not visible in this extraction;
 * callers (tg3_close) treat the result as a struct tg3_ethtool_stats *. */
7442 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
7444 	struct tg3_ethtool_stats *estats = &tp->estats;
7445 	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
7446 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
7451 	ESTAT_ADD(rx_octets);
7452 	ESTAT_ADD(rx_fragments);
7453 	ESTAT_ADD(rx_ucast_packets);
7454 	ESTAT_ADD(rx_mcast_packets);
7455 	ESTAT_ADD(rx_bcast_packets);
7456 	ESTAT_ADD(rx_fcs_errors);
7457 	ESTAT_ADD(rx_align_errors);
7458 	ESTAT_ADD(rx_xon_pause_rcvd);
7459 	ESTAT_ADD(rx_xoff_pause_rcvd);
7460 	ESTAT_ADD(rx_mac_ctrl_rcvd);
7461 	ESTAT_ADD(rx_xoff_entered);
7462 	ESTAT_ADD(rx_frame_too_long_errors);
7463 	ESTAT_ADD(rx_jabbers);
7464 	ESTAT_ADD(rx_undersize_packets);
7465 	ESTAT_ADD(rx_in_length_errors);
7466 	ESTAT_ADD(rx_out_length_errors);
7467 	ESTAT_ADD(rx_64_or_less_octet_packets);
7468 	ESTAT_ADD(rx_65_to_127_octet_packets);
7469 	ESTAT_ADD(rx_128_to_255_octet_packets);
7470 	ESTAT_ADD(rx_256_to_511_octet_packets);
7471 	ESTAT_ADD(rx_512_to_1023_octet_packets);
7472 	ESTAT_ADD(rx_1024_to_1522_octet_packets);
7473 	ESTAT_ADD(rx_1523_to_2047_octet_packets);
7474 	ESTAT_ADD(rx_2048_to_4095_octet_packets);
7475 	ESTAT_ADD(rx_4096_to_8191_octet_packets);
7476 	ESTAT_ADD(rx_8192_to_9022_octet_packets);
7478 	ESTAT_ADD(tx_octets);
7479 	ESTAT_ADD(tx_collisions);
7480 	ESTAT_ADD(tx_xon_sent);
7481 	ESTAT_ADD(tx_xoff_sent);
7482 	ESTAT_ADD(tx_flow_control);
7483 	ESTAT_ADD(tx_mac_errors);
7484 	ESTAT_ADD(tx_single_collisions);
7485 	ESTAT_ADD(tx_mult_collisions);
7486 	ESTAT_ADD(tx_deferred);
7487 	ESTAT_ADD(tx_excessive_collisions);
7488 	ESTAT_ADD(tx_late_collisions);
7489 	ESTAT_ADD(tx_collide_2times);
7490 	ESTAT_ADD(tx_collide_3times);
7491 	ESTAT_ADD(tx_collide_4times);
7492 	ESTAT_ADD(tx_collide_5times);
7493 	ESTAT_ADD(tx_collide_6times);
7494 	ESTAT_ADD(tx_collide_7times);
7495 	ESTAT_ADD(tx_collide_8times);
7496 	ESTAT_ADD(tx_collide_9times);
7497 	ESTAT_ADD(tx_collide_10times);
7498 	ESTAT_ADD(tx_collide_11times);
7499 	ESTAT_ADD(tx_collide_12times);
7500 	ESTAT_ADD(tx_collide_13times);
7501 	ESTAT_ADD(tx_collide_14times);
7502 	ESTAT_ADD(tx_collide_15times);
7503 	ESTAT_ADD(tx_ucast_packets);
7504 	ESTAT_ADD(tx_mcast_packets);
7505 	ESTAT_ADD(tx_bcast_packets);
7506 	ESTAT_ADD(tx_carrier_sense_errors);
7507 	ESTAT_ADD(tx_discards);
7508 	ESTAT_ADD(tx_errors);
7510 	ESTAT_ADD(dma_writeq_full);
7511 	ESTAT_ADD(dma_write_prioq_full);
7512 	ESTAT_ADD(rxbds_empty);
7513 	ESTAT_ADD(rx_discards);
7514 	ESTAT_ADD(rx_errors);
7515 	ESTAT_ADD(rx_threshold_hit);
7517 	ESTAT_ADD(dma_readq_full);
7518 	ESTAT_ADD(dma_read_prioq_full);
7519 	ESTAT_ADD(tx_comp_queue_full);
7521 	ESTAT_ADD(ring_set_send_prod_index);
7522 	ESTAT_ADD(ring_status_update);
7523 	ESTAT_ADD(nic_irqs);
7524 	ESTAT_ADD(nic_avoided_irqs);
7525 	ESTAT_ADD(nic_tx_threshold_hit);
/* net_device get_stats handler: map the hardware statistics block onto
 * struct net_device_stats, adding each value to the pre-close snapshot
 * (net_stats_prev) so counts are monotonic across open/close cycles.
 * NOTE(review): the return statement is not visible in this extraction. */
7530 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
7532 	struct tg3 *tp = netdev_priv(dev);
7533 	struct net_device_stats *stats = &tp->net_stats;
7534 	struct net_device_stats *old_stats = &tp->net_stats_prev;
7535 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
7540 	stats->rx_packets = old_stats->rx_packets +
7541 		get_stat64(&hw_stats->rx_ucast_packets) +
7542 		get_stat64(&hw_stats->rx_mcast_packets) +
7543 		get_stat64(&hw_stats->rx_bcast_packets);
7545 	stats->tx_packets = old_stats->tx_packets +
7546 		get_stat64(&hw_stats->tx_ucast_packets) +
7547 		get_stat64(&hw_stats->tx_mcast_packets) +
7548 		get_stat64(&hw_stats->tx_bcast_packets);
7550 	stats->rx_bytes = old_stats->rx_bytes +
7551 		get_stat64(&hw_stats->rx_octets);
7552 	stats->tx_bytes = old_stats->tx_bytes +
7553 		get_stat64(&hw_stats->tx_octets);
7555 	stats->rx_errors = old_stats->rx_errors +
7556 		get_stat64(&hw_stats->rx_errors);
7557 	stats->tx_errors = old_stats->tx_errors +
7558 		get_stat64(&hw_stats->tx_errors) +
7559 		get_stat64(&hw_stats->tx_mac_errors) +
7560 		get_stat64(&hw_stats->tx_carrier_sense_errors) +
7561 		get_stat64(&hw_stats->tx_discards);
7563 	stats->multicast = old_stats->multicast +
7564 		get_stat64(&hw_stats->rx_mcast_packets);
7565 	stats->collisions = old_stats->collisions +
7566 		get_stat64(&hw_stats->tx_collisions);
7568 	stats->rx_length_errors = old_stats->rx_length_errors +
7569 		get_stat64(&hw_stats->rx_frame_too_long_errors) +
7570 		get_stat64(&hw_stats->rx_undersize_packets);
7572 	stats->rx_over_errors = old_stats->rx_over_errors +
7573 		get_stat64(&hw_stats->rxbds_empty);
7574 	stats->rx_frame_errors = old_stats->rx_frame_errors +
7575 		get_stat64(&hw_stats->rx_align_errors);
7576 	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
7577 		get_stat64(&hw_stats->tx_discards);
7578 	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
7579 		get_stat64(&hw_stats->tx_carrier_sense_errors);
/* CRC errors may need a PHY read on 5700/5701; delegated to helper. */
7581 	stats->rx_crc_errors = old_stats->rx_crc_errors +
7582 		calc_crc_errors(tp);
7584 	stats->rx_missed_errors = old_stats->rx_missed_errors +
7585 		get_stat64(&hw_stats->rx_discards);
/* Compute a 32-bit CRC over buf[0..len-1], bit by bit (inner loop over
 * the 8 bits of each byte).  Used to hash multicast addresses for the
 * MAC hash filter registers.
 * NOTE(review): the CRC register updates inside the loops are missing
 * from this extraction — polynomial/byte-order details not visible. */
7590 static inline u32 calc_crc(unsigned char *buf, int len)
7598 	for (j = 0; j < len; j++) {
7601 		for (k = 0; k < 8; k++) {
/* Set all four MAC multicast hash registers to all-ones (accept every
 * multicast frame) or all-zeros (reject every multicast frame). */
7615 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7617 	/* accept or reject all multicast frames */
7618 	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7619 	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7620 	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7621 	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
/* Program the MAC RX mode (promiscuous / VLAN-tag stripping) and the
 * multicast hash filter from dev->flags and the device multicast list.
 * Caller must hold the appropriate tp locks (called from tg3_set_rx_mode
 * and from tg3_reset_hw). */
7624 static void __tg3_set_rx_mode(struct net_device *dev)
7626 	struct tg3 *tp = netdev_priv(dev);
7629 	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
7630 				  RX_MODE_KEEP_VLAN_TAG);
7632 	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
7635 #if TG3_VLAN_TAG_USED
7637 	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7638 		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7640 	/* By definition, VLAN is disabled always in this
7643 	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7644 		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7647 	if (dev->flags & IFF_PROMISC) {
7648 		/* Promiscuous mode. */
7649 		rx_mode |= RX_MODE_PROMISC;
7650 	} else if (dev->flags & IFF_ALLMULTI) {
7651 		/* Accept all multicast. */
7652 		tg3_set_multi (tp, 1);
7653 	} else if (dev->mc_count < 1) {
7654 		/* Reject all multicast. */
7655 		tg3_set_multi (tp, 0);
7657 		/* Accept one or more multicast(s). */
7658 		struct dev_mc_list *mclist;
7660 		u32 mc_filter[4] = { 0, };
/* Hash each multicast address into one bit of the 128-bit filter:
 * bits 5-6 of the CRC pick the register, low 5 bits pick the bit. */
7665 		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
7666 		     i++, mclist = mclist->next) {
7668 			crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
7670 			regidx = (bit & 0x60) >> 5;
7672 			mc_filter[regidx] |= (1 << bit);
7675 		tw32(MAC_HASH_REG_0, mc_filter[0]);
7676 		tw32(MAC_HASH_REG_1, mc_filter[1]);
7677 		tw32(MAC_HASH_REG_2, mc_filter[2]);
7678 		tw32(MAC_HASH_REG_3, mc_filter[3]);
/* Only touch the hardware RX mode register when it actually changed. */
7681 	if (rx_mode != tp->rx_mode) {
7682 		tp->rx_mode = rx_mode;
7683 		tw32_f(MAC_RX_MODE, rx_mode);
/* net_device set_multicast_list handler: take the full lock and apply
 * the RX mode; no-op when the interface is down. */
7688 static void tg3_set_rx_mode(struct net_device *dev)
7690 	struct tg3 *tp = netdev_priv(dev);
7692 	if (!netif_running(dev))
7695 	tg3_full_lock(tp, 0);
7696 	__tg3_set_rx_mode(dev);
7697 	tg3_full_unlock(tp);
/* ethtool get_regs_len handler: register dump is a fixed 32 KB. */
7700 #define TG3_REGDUMP_LEN		(32 * 1024)
7702 static int tg3_get_regs_len(struct net_device *dev)
7704 	return TG3_REGDUMP_LEN;
/* ethtool get_regs handler: zero the 32 KB buffer, then copy selected
 * register ranges into it at their native offsets (GET_REG32_LOOP /
 * GET_REG32_1 position p at the register's offset inside the dump).
 * Bails out while the PHY is in low-power state. */
7707 static void tg3_get_regs(struct net_device *dev,
7708 		struct ethtool_regs *regs, void *_p)
7711 	struct tg3 *tp = netdev_priv(dev);
7717 	memset(p, 0, TG3_REGDUMP_LEN);
7719 	if (tp->link_config.phy_is_low_power)
7722 	tg3_full_lock(tp, 0);
7724 #define __GET_REG32(reg)	(*(p)++ = tr32(reg))
7725 #define GET_REG32_LOOP(base,len)		\
7726 do {	p = (u32 *)(orig_p + (base));		\
7727 	for (i = 0; i < len; i += 4)		\
7728 		__GET_REG32((base) + i);	\
7730 #define GET_REG32_1(reg)	\
7731 do {	p = (u32 *)(orig_p + (reg));	\
7732 	__GET_REG32((reg));		\
7735 	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
7736 	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
7737 	GET_REG32_LOOP(MAC_MODE, 0x4f0);
7738 	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
7739 	GET_REG32_1(SNDDATAC_MODE);
7740 	GET_REG32_LOOP(SNDBDS_MODE, 0x80);
7741 	GET_REG32_LOOP(SNDBDI_MODE, 0x48);
7742 	GET_REG32_1(SNDBDC_MODE);
7743 	GET_REG32_LOOP(RCVLPC_MODE, 0x20);
7744 	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
7745 	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
7746 	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
7747 	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
7748 	GET_REG32_1(RCVDCC_MODE);
7749 	GET_REG32_LOOP(RCVBDI_MODE, 0x20);
7750 	GET_REG32_LOOP(RCVCC_MODE, 0x14);
7751 	GET_REG32_LOOP(RCVLSC_MODE, 0x08);
7752 	GET_REG32_1(MBFREE_MODE);
7753 	GET_REG32_LOOP(HOSTCC_MODE, 0x100);
7754 	GET_REG32_LOOP(MEMARB_MODE, 0x10);
7755 	GET_REG32_LOOP(BUFMGR_MODE, 0x58);
7756 	GET_REG32_LOOP(RDMAC_MODE, 0x08);
7757 	GET_REG32_LOOP(WDMAC_MODE, 0x08);
7758 	GET_REG32_1(RX_CPU_MODE);
7759 	GET_REG32_1(RX_CPU_STATE);
7760 	GET_REG32_1(RX_CPU_PGMCTR);
7761 	GET_REG32_1(RX_CPU_HWBKPT);
7762 	GET_REG32_1(TX_CPU_MODE);
7763 	GET_REG32_1(TX_CPU_STATE);
7764 	GET_REG32_1(TX_CPU_PGMCTR);
7765 	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
7766 	GET_REG32_LOOP(FTQ_RESET, 0x120);
7767 	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
7768 	GET_REG32_1(DMAC_MODE);
7769 	GET_REG32_LOOP(GRC_MODE, 0x4c);
/* NVRAM registers only exist/make sense when the NVRAM interface is used. */
7770 	if (tp->tg3_flags & TG3_FLAG_NVRAM)
7771 		GET_REG32_LOOP(NVRAM_CMD, 0x24);
7774 #undef GET_REG32_LOOP
7777 	tg3_full_unlock(tp);
7780 #if (LINUX_VERSION_CODE >= 0x20418)
/* ethtool get_eeprom_len: report the detected NVRAM size in bytes
 * (filled in elsewhere by the NVRAM sizing code).
 */
7781 static int tg3_get_eeprom_len(struct net_device *dev)
7783 	struct tg3 *tp = netdev_priv(dev);
7785 	return tp->nvram_size;
7789 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7790 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
7792 #ifdef ETHTOOL_GEEPROM
/* ethtool get_eeprom: read an arbitrary (offset, len) byte range out of
 * NVRAM.  NVRAM can only be read in aligned 32-bit words, so the range
 * is handled in three parts: an unaligned head, whole middle words, and
 * an unaligned tail.  Words are converted with cpu_to_le32() so the
 * byte order of the returned buffer matches NVRAM byte order.
 */
7793 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7795 	struct tg3 *tp = netdev_priv(dev);
7798 	u32 i, offset, len, val, b_offset, b_count;
	/* Refuse NVRAM access while the PHY/chip is powered down. */
7800 	if (tp->link_config.phy_is_low_power)
7803 	offset = eeprom->offset;
7807 	eeprom->magic = TG3_EEPROM_MAGIC;
7810 	/* adjustments to start on required 4 byte boundary */
7811 	b_offset = offset & 3;
7812 	b_count = 4 - b_offset;
7813 	if (b_count > len) {
7814 		/* i.e. offset=1 len=2 */
7817 	ret = tg3_nvram_read(tp, offset-b_offset, &val);
7820 	val = cpu_to_le32(val);
	/* Copy only the requested bytes out of the aligned word. */
7821 	memcpy(data, ((char*)&val) + b_offset, b_count);
7824 	eeprom->len += b_count;
7827 	/* read bytes upto the last 4 byte boundary */
7828 	pd = &data[eeprom->len];
7829 	for (i = 0; i < (len - (len & 3)); i += 4) {
7830 		ret = tg3_nvram_read(tp, offset + i, &val);
7835 		val = cpu_to_le32(val);
7836 		memcpy(pd + i, &val, 4);
7841 	/* read last bytes not ending on 4 byte boundary */
7842 	pd = &data[eeprom->len];
7844 	b_offset = offset + len - b_count;
7845 	ret = tg3_nvram_read(tp, b_offset, &val);
7848 	val = cpu_to_le32(val);
7849 	memcpy(pd, ((char*)&val), b_count);
7850 	eeprom->len += b_count;
7856 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
7858 #ifdef ETHTOOL_SEEPROM
/* ethtool set_eeprom: write an arbitrary (offset, len) byte range into
 * NVRAM.  Writes must be whole aligned 32-bit words, so when the range
 * starts or ends unaligned the surrounding words are read back first
 * (read-modify-write) and the user data is merged into a temporary
 * buffer before tg3_nvram_write_block() programs it.
 */
7859 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7861 	struct tg3 *tp = netdev_priv(dev);
7863 	u32 offset, len, b_offset, odd_len, start, end;
	/* No NVRAM writes while powered down. */
7866 	if (tp->link_config.phy_is_low_power)
	/* Magic mismatch means the caller's image is not for this device. */
7869 	if (eeprom->magic != TG3_EEPROM_MAGIC)
7872 	offset = eeprom->offset;
7875 	if ((b_offset = (offset & 3))) {
7876 		/* adjustments to start on required 4 byte boundary */
7877 		ret = tg3_nvram_read(tp, offset-b_offset, &start);
7880 		start = cpu_to_le32(start);
7889 		/* adjustments to end on required 4 byte boundary */
	/* Round the write length up to a whole number of words. */
7891 		len = (len + 3) & ~3;
7892 		ret = tg3_nvram_read(tp, offset+len-4, &end);
7895 		end = cpu_to_le32(end);
7899 	if (b_offset || odd_len) {
7900 		buf = kmalloc(len, GFP_KERNEL);
	/* Preserve the untouched bytes of the first and last words... */
7904 		memcpy(buf, &start, 4);
7906 			memcpy(buf+len-4, &end, 4);
	/* ...then overlay the caller's payload at its byte offset. */
7907 		memcpy(buf + b_offset, data, eeprom->len);
7910 	ret = tg3_nvram_write_block(tp, offset, len, buf);
/* ethtool get_settings: report supported/advertised modes, port type,
 * and (when the interface is up) the currently active speed/duplex.
 * Serdes (fibre) parts report FIBRE-only capabilities; copper parts
 * additionally report 10/100 modes, and gigabit modes unless the part
 * is flagged 10/100-only.
 */
7919 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7921 	struct tg3 *tp = netdev_priv(dev);
7923 	cmd->supported = (SUPPORTED_Autoneg);
7925 	if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7926 		cmd->supported |= (SUPPORTED_1000baseT_Half |
7927 				SUPPORTED_1000baseT_Full);
7929 	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
7930 		cmd->supported |= (SUPPORTED_100baseT_Half |
7931 				  SUPPORTED_100baseT_Full |
7932 				  SUPPORTED_10baseT_Half |
7933 				  SUPPORTED_10baseT_Full |
7935 		cmd->port = PORT_TP;
7937 		cmd->supported |= SUPPORTED_FIBRE;
7938 		cmd->port = PORT_FIBRE;
7941 	cmd->advertising = tp->link_config.advertising;
	/* active_speed/duplex are only meaningful while the link is managed. */
7942 	if (netif_running(dev)) {
7943 		cmd->speed = tp->link_config.active_speed;
7944 		cmd->duplex = tp->link_config.active_duplex;
7946 	cmd->phy_address = PHY_ADDR;
7947 	cmd->transceiver = 0;
7948 	cmd->autoneg = tp->link_config.autoneg;
/* ethtool set_settings: validate and apply the requested link
 * parameters, then renegotiate via tg3_setup_phy() if the device is up.
 * Serdes (fibre) parts only accept gigabit advertisement bits and can
 * only be forced to SPEED_1000; copper parts cannot *force* gigabit
 * (autoneg is required for 1000BASE-T), and 10/100-only parts cannot
 * select SPEED_1000 at all.
 */
7954 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7956 	struct tg3 *tp = netdev_priv(dev);
7958 	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
7959 		/* These are the only valid advertisement bits allowed. */
7960 		if (cmd->autoneg == AUTONEG_ENABLE &&
7961 		    (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7962 					  ADVERTISED_1000baseT_Full |
7963 					  ADVERTISED_Autoneg |
7966 		/* Fiber can only do SPEED_1000.  */
7967 		else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7968 			 (cmd->speed != SPEED_1000))
7970 	/* Copper cannot force SPEED_1000.  */
7971 	} else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7972 		   (cmd->speed == SPEED_1000))
7974 	else if ((cmd->speed == SPEED_1000) &&
	/* Bug fix: TG3_FLAG_10_100_ONLY lives in tg3_flags, not tg3_flags2
	 * (see tg3_get_settings and tg3_run_loopback, which both test
	 * tg3_flags).  Testing it against tg3_flags2 checked an unrelated
	 * bit, so the "no gigabit on 10/100-only parts" guard misfired.
	 */
7975 		 (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7978 	tg3_full_lock(tp, 0);
7980 	tp->link_config.autoneg = cmd->autoneg;
7981 	if (cmd->autoneg == AUTONEG_ENABLE) {
	/* Autoneg: keep the advertised mask, let the PHY pick speed/duplex. */
7982 		tp->link_config.advertising = cmd->advertising;
7983 		tp->link_config.speed = SPEED_INVALID;
7984 		tp->link_config.duplex = DUPLEX_INVALID;
	/* Forced mode: pin speed/duplex, no advertisement. */
7986 		tp->link_config.advertising = 0;
7987 		tp->link_config.speed = cmd->speed;
7988 		tp->link_config.duplex = cmd->duplex;
7991 	if (netif_running(dev))
7992 		tg3_setup_phy(tp, 1);
7994 	tg3_full_unlock(tp);
/* ethtool get_drvinfo: fill in driver name, version, firmware version
 * and the PCI bus address of the device.
 */
7999 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
8001 	struct tg3 *tp = netdev_priv(dev);
8003 	strcpy(info->driver, DRV_MODULE_NAME);
8004 	strcpy(info->version, DRV_MODULE_VERSION);
8005 	strcpy(info->fw_version, tp->fw_ver);
8006 	strcpy(info->bus_info, pci_name(tp->pdev));
/* ethtool get_wol: the hardware supports magic-packet wake only;
 * report whether it is currently enabled.  No SecureOn password
 * support, so sopass is cleared.
 */
8009 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8011 	struct tg3 *tp = netdev_priv(dev);
8013 	wol->supported = WAKE_MAGIC;
8015 	if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
8016 		wol->wolopts = WAKE_MAGIC;
8017 	memset(&wol->sopass, 0, sizeof(wol->sopass));
/* ethtool set_wol: accept only WAKE_MAGIC, and reject it on serdes
 * parts that lack WoL capability.  Only toggles the driver flag here;
 * the hardware is actually armed at suspend/power-down time.
 */
8020 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8022 	struct tg3 *tp = netdev_priv(dev);
	/* Any wake option other than magic packet is unsupported. */
8024 	if (wol->wolopts & ~WAKE_MAGIC)
8026 	if ((wol->wolopts & WAKE_MAGIC) &&
8027 	    tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
8028 	    !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
	/* Flag word is shared with other contexts; update it under tp->lock. */
8031 	spin_lock_bh(&tp->lock);
8032 	if (wol->wolopts & WAKE_MAGIC)
8033 		tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
8035 		tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
8036 	spin_unlock_bh(&tp->lock);
/* ethtool get_msglevel: return the driver's message-enable bitmask. */
8041 static u32 tg3_get_msglevel(struct net_device *dev)
8043 	struct tg3 *tp = netdev_priv(dev);
8044 	return tp->msg_enable;
/* ethtool set_msglevel: store the new message-enable bitmask. */
8047 static void tg3_set_msglevel(struct net_device *dev, u32 value)
8049 	struct tg3 *tp = netdev_priv(dev);
8050 	tp->msg_enable = value;
8053 #if TG3_TSO_SUPPORT != 0
/* ethtool set_tso: enable/disable TSO.  Rejected outright on chips
 * without TSO capability; on HW_TSO_2 parts the IPv6 TSO feature bit
 * is kept in sync with the requested state before delegating the
 * generic feature-flag update to ethtool_op_set_tso().
 */
8054 static int tg3_set_tso(struct net_device *dev, u32 value)
8056 	struct tg3 *tp = netdev_priv(dev);
8058 	if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8063 	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) {
8065 			dev->features |= NETIF_F_TSO6;
8067 			dev->features &= ~NETIF_F_TSO6;
8069 	return ethtool_op_set_tso(dev, value);
/* ethtool nway_reset: restart autonegotiation on the copper PHY.
 * Not applicable to serdes parts or while the interface is down.
 */
8073 static int tg3_nway_reset(struct net_device *dev)
8075 	struct tg3 *tp = netdev_priv(dev);
8079 	if (!netif_running(dev))
8082 	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8085 	spin_lock_bh(&tp->lock);
	/* First BMCR read is a throwaway; only the second (checked) read
	 * is trusted — presumably to flush a stale latched value.
	 * NOTE(review): intent inferred from the double read; confirm.
	 */
8087 	tg3_readphy(tp, MII_BMCR, &bmcr);
8088 	if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
8089 	    ((bmcr & BMCR_ANENABLE) ||
8090 	     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
8091 		tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
8095 	spin_unlock_bh(&tp->lock);
/* ethtool get_ringparam: report hardware ring-size limits and the
 * currently configured pending counts.  Jumbo values are only
 * meaningful when the jumbo ring is enabled; mini rings are unused.
 */
8100 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8102 	struct tg3 *tp = netdev_priv(dev);
8104 	ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
8105 	ering->rx_mini_max_pending = 0;
8106 	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8107 		ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
8109 		ering->rx_jumbo_max_pending = 0;
8111 	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
8113 	ering->rx_pending = tp->rx_pending;
8114 	ering->rx_mini_pending = 0;
8115 	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8116 		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
8118 		ering->rx_jumbo_pending = 0;
8120 	ering->tx_pending = tp->tx_pending;
/* ethtool set_ringparam: validate the requested ring sizes, store
 * them, and if the interface is running, halt and restart the
 * hardware so the new ring sizes take effect.
 */
8123 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8125 	struct tg3 *tp = netdev_priv(dev);
8126 	int irq_sync = 0, err = 0;
8128 	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
8129 	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
8130 	    (ering->tx_pending > TG3_TX_RING_SIZE - 1))
8133 	if (netif_running(dev)) {
8138 	tg3_full_lock(tp, irq_sync);
8140 	tp->rx_pending = ering->rx_pending;
	/* Some chips cap the standard RX ring at 64 entries. */
8142 	if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
8143 	    tp->rx_pending > 63)
8144 		tp->rx_pending = 63;
8145 	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
8146 	tp->tx_pending = ering->tx_pending;
	/* Rings are sized at init time, so a live device must be reset. */
8148 	if (netif_running(dev)) {
8149 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8150 		err = tg3_restart_hw(tp, 1);
8152 			tg3_netif_start(tp);
8155 	tg3_full_unlock(tp);
/* ethtool get_pauseparam: report flow-control autoneg and the current
 * RX/TX pause enables from the driver flag word.
 */
8160 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8162 	struct tg3 *tp = netdev_priv(dev);
8164 	epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8165 	epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
8166 	epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
/* ethtool set_pauseparam: update the pause-autoneg and RX/TX pause
 * flags, then halt and restart the hardware (if running) so the MAC
 * picks up the new flow-control configuration.
 */
8169 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8171 	struct tg3 *tp = netdev_priv(dev);
8172 	int irq_sync = 0, err = 0;
8174 	if (netif_running(dev)) {
8179 	tg3_full_lock(tp, irq_sync);
8181 	if (epause->autoneg)
8182 		tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8184 		tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
8185 	if (epause->rx_pause)
8186 		tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
8188 		tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
8189 	if (epause->tx_pause)
8190 		tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
8192 		tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
	/* Flow control is programmed at init time; reset a live device. */
8194 	if (netif_running(dev)) {
8195 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8196 		err = tg3_restart_hw(tp, 1);
8198 			tg3_netif_start(tp);
8201 	tg3_full_unlock(tp);
/* ethtool get_rx_csum: report whether RX checksum offload is enabled. */
8206 static u32 tg3_get_rx_csum(struct net_device *dev)
8208 	struct tg3 *tp = netdev_priv(dev);
8209 	return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
/* ethtool set_rx_csum: toggle RX checksum offload.  Parts with broken
 * checksum hardware take a separate early path (elided here).
 */
8212 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8214 	struct tg3 *tp = netdev_priv(dev);
8216 	if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
	/* Flag word is shared; flip the bit under tp->lock. */
8222 	spin_lock_bh(&tp->lock);
8224 		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8226 		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8227 	spin_unlock_bh(&tp->lock);
8232 #if (LINUX_VERSION_CODE >= 0x20418)
/* ethtool set_tx_csum: toggle TX checksum offload.  5755/5787 parts
 * support hardware checksum for all protocols and use the HW-csum
 * helper (a local copy on older kernels); other parts use the plain
 * IP-csum helper.  Broken-checksum parts take an early path (elided).
 */
8233 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8235 	struct tg3 *tp = netdev_priv(dev);
8237 	if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8243 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8244 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8245 #if (LINUX_VERSION_CODE >= 0x20418) && (LINUX_VERSION_CODE < 0x2060c)
8246 		tg3_set_tx_hw_csum(dev, data);
8248 		ethtool_op_set_tx_hw_csum(dev, data);
8251 		ethtool_op_set_tx_csum(dev, data);
/* ethtool get_stats_count: number of exported statistics entries. */
8257 static int tg3_get_stats_count (struct net_device *dev)
8259 	return TG3_NUM_STATS;
/* ethtool self_test_count: number of self-test result slots. */
8262 static int tg3_get_test_count (struct net_device *dev)
8264 	return TG3_NUM_TEST;
/* ethtool get_strings: copy the statistics or self-test name tables
 * into the caller's buffer, selected by stringset.
 *
 * Bug fix: both memcpy sources had been corrupted by an HTML-entity
 * round-trip — "&e" became "&eth;" and was decoded to the Unicode
 * character "ð", yielding the non-identifier "ðtool_stats_keys".
 * Restored to &ethtool_stats_keys / &ethtool_test_keys.
 */
8267 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
8269 	switch (stringset) {
8271 		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8274 		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8277 		WARN_ON(1);	/* we need a WARN() */
/* ethtool phys_id: blink the port LEDs to identify the adapter.
 * Alternates between "all LEDs forced on" and "all LEDs forced off"
 * every half second for the requested duration, then restores the
 * chip's normal LED control value.
 */
8282 static int tg3_phys_id(struct net_device *dev, u32 data)
8284 	struct tg3 *tp = netdev_priv(dev);
8287 	if (!netif_running(tp->dev))
	/* Two half-second phases per second of requested blink time. */
8293 	for (i = 0; i < (data * 2); i++) {
8295 			tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8296 					   LED_CTRL_1000MBPS_ON |
8297 					   LED_CTRL_100MBPS_ON |
8298 					   LED_CTRL_10MBPS_ON |
8299 					   LED_CTRL_TRAFFIC_OVERRIDE |
8300 					   LED_CTRL_TRAFFIC_BLINK |
8301 					   LED_CTRL_TRAFFIC_LED);
8304 			tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8305 					   LED_CTRL_TRAFFIC_OVERRIDE);
	/* Interruptible half-second sleep; old kernels lack msleep. */
8306 #if (LINUX_VERSION_CODE < 0x20609)
8307 		set_current_state(TASK_INTERRUPTIBLE);
8308 		if (schedule_timeout(HZ / 2))
8310 		if (msleep_interruptible(500))
8314 	tw32(MAC_LED_CTRL, tp->led_ctrl);
/* ethtool get_ethtool_stats: snapshot the driver's estats block into
 * the caller's u64 array.
 */
8318 static void tg3_get_ethtool_stats (struct net_device *dev,
8319 				   struct ethtool_stats *estats, u64 *tmp_stats)
8321 	struct tg3 *tp = netdev_priv(dev);
8322 	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
8325 #define NVRAM_TEST_SIZE 0x100
8326 #define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
/* Self-test: verify NVRAM content integrity.  Reads the whole test
 * region into a temporary buffer, then validates it with either a
 * simple byte checksum (selfboot-format images, magic 0xa5xxxxxx) or
 * CRCs over the bootstrap and manufacturing blocks (standard images).
 */
8328 static int tg3_test_nvram(struct tg3 *tp)
8330 	u32 *buf, csum, magic;
8331 	int i, j, err = 0, size;
8333 	if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
	/* Pick the region size from the image format found at offset 0. */
8336 	if (magic == TG3_EEPROM_MAGIC)
8337 		size = NVRAM_TEST_SIZE;
8338 	else if ((magic & 0xff000000) == 0xa5000000) {
8339 		if ((magic & 0xe00000) == 0x200000)
8340 			size = NVRAM_SELFBOOT_FORMAT1_SIZE;
8346 	buf = kmalloc(size, GFP_KERNEL);
8351 	for (i = 0, j = 0; i < size; i += 4, j++) {
8354 		if ((err = tg3_nvram_read(tp, i, &val)) != 0)
8356 		buf[j] = cpu_to_le32(val);
8361 	/* Selfboot format */
8362 	if ((cpu_to_be32(buf[0]) & 0xff000000) == 0xa5000000) {
8363 		u8 *buf8 = (u8 *) buf, csum8 = 0;
8365 		for (i = 0; i < size; i++)
8377 	/* Bootstrap checksum at offset 0x10 */
8378 	csum = calc_crc((unsigned char *) buf, 0x10);
8379 	if(csum != cpu_to_le32(buf[0x10/4]))
8382 	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
8383 	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
8384 	if (csum != cpu_to_le32(buf[0xfc/4]))
8394 #define TG3_SERDES_TIMEOUT_SEC 2
8395 #define TG3_COPPER_TIMEOUT_SEC 7
/* Self-test: wait for link-up, polling netif_carrier once per second.
 * Serdes parts get a 2 s budget, copper 7 s (autoneg is slower).
 */
8397 static int tg3_test_link(struct tg3 *tp)
8401 	if (!netif_running(tp->dev))
8404 	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
8405 		max = TG3_SERDES_TIMEOUT_SEC;
8407 		max = TG3_COPPER_TIMEOUT_SEC;
8409 	for (i = 0; i < max; i++) {
8410 		if (netif_carrier_ok(tp->dev))
	/* Interruptible one-second sleep; old kernels lack msleep. */
8413 #if (LINUX_VERSION_CODE < 0x20609)
8414 		set_current_state(TASK_INTERRUPTIBLE);
8415 		if (schedule_timeout(HZ))
8417 		if (msleep_interruptible(1000))
8425 /* Only test the commonly used registers */
/* Self-test: for each entry in reg_tbl, save the register, write all
 * zeros and then all ones through its read/write mask, and verify that
 * read-only bits never change and read/write bits take the written
 * value.  Entries are filtered by chip family via the TG3_FL_* flags.
 * The original value is restored whether the test passes or fails.
 */
8426 static int tg3_test_registers(struct tg3 *tp)
8428 	int i, is_5705, is_5750;
8429 	u32 offset, read_mask, write_mask, val, save_val, read_val;
/* Per-entry chip-family applicability flags for reg_tbl. */
8433 #define TG3_FL_5705	0x1
8434 #define TG3_FL_NOT_5705	0x2
8435 #define TG3_FL_NOT_5788	0x4
8436 #define TG3_FL_NOT_5750	0x8
	/* Table entries: { offset, flags, read_mask, write_mask };
	 * terminated by offset == 0xffff.
	 */
8440 		/* MAC Control Registers */
8441 		{ MAC_MODE, TG3_FL_NOT_5705,
8442 			0x00000000, 0x00ef6f8c },
8443 		{ MAC_MODE, TG3_FL_5705,
8444 			0x00000000, 0x01ef6b8c },
8445 		{ MAC_STATUS, TG3_FL_NOT_5705,
8446 			0x03800107, 0x00000000 },
8447 		{ MAC_STATUS, TG3_FL_5705,
8448 			0x03800100, 0x00000000 },
8449 		{ MAC_ADDR_0_HIGH, 0x0000,
8450 			0x00000000, 0x0000ffff },
8451 		{ MAC_ADDR_0_LOW, 0x0000,
8452 		       	0x00000000, 0xffffffff },
8453 		{ MAC_RX_MTU_SIZE, 0x0000,
8454 			0x00000000, 0x0000ffff },
8455 		{ MAC_TX_MODE, 0x0000,
8456 			0x00000000, 0x00000070 },
8457 		{ MAC_TX_LENGTHS, 0x0000,
8458 			0x00000000, 0x00003fff },
8459 		{ MAC_RX_MODE, TG3_FL_NOT_5705,
8460 			0x00000000, 0x000007fc },
8461 		{ MAC_RX_MODE, TG3_FL_5705,
8462 			0x00000000, 0x000007dc },
8463 		{ MAC_HASH_REG_0, 0x0000,
8464 			0x00000000, 0xffffffff },
8465 		{ MAC_HASH_REG_1, 0x0000,
8466 			0x00000000, 0xffffffff },
8467 		{ MAC_HASH_REG_2, 0x0000,
8468 			0x00000000, 0xffffffff },
8469 		{ MAC_HASH_REG_3, 0x0000,
8470 			0x00000000, 0xffffffff },
8472 		/* Receive Data and Receive BD Initiator Control Registers. */
8473 		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
8474 			0x00000000, 0xffffffff },
8475 		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
8476 			0x00000000, 0xffffffff },
8477 		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
8478 			0x00000000, 0x00000003 },
8479 		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
8480 			0x00000000, 0xffffffff },
8481 		{ RCVDBDI_STD_BD+0, 0x0000,
8482 			0x00000000, 0xffffffff },
8483 		{ RCVDBDI_STD_BD+4, 0x0000,
8484 			0x00000000, 0xffffffff },
8485 		{ RCVDBDI_STD_BD+8, 0x0000,
8486 			0x00000000, 0xffff0002 },
8487 		{ RCVDBDI_STD_BD+0xc, 0x0000,
8488 			0x00000000, 0xffffffff },
8490 		/* Receive BD Initiator Control Registers. */
8491 		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
8492 			0x00000000, 0xffffffff },
8493 		{ RCVBDI_STD_THRESH, TG3_FL_5705,
8494 			0x00000000, 0x000003ff },
8495 		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
8496 			0x00000000, 0xffffffff },
8498 		/* Host Coalescing Control Registers. */
8499 		{ HOSTCC_MODE, TG3_FL_NOT_5705,
8500 			0x00000000, 0x00000004 },
8501 		{ HOSTCC_MODE, TG3_FL_5705,
8502 			0x00000000, 0x000000f6 },
8503 		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
8504 			0x00000000, 0xffffffff },
8505 		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
8506 			0x00000000, 0x000003ff },
8507 		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
8508 			0x00000000, 0xffffffff },
8509 		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
8510 			0x00000000, 0x000003ff },
8511 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
8512 			0x00000000, 0xffffffff },
8513 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8514 			0x00000000, 0x000000ff },
8515 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
8516 			0x00000000, 0xffffffff },
8517 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8518 			0x00000000, 0x000000ff },
8519 		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
8520 			0x00000000, 0xffffffff },
8521 		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
8522 			0x00000000, 0xffffffff },
8523 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8524 			0x00000000, 0xffffffff },
8525 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8526 			0x00000000, 0x000000ff },
8527 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8528 			0x00000000, 0xffffffff },
8529 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8530 			0x00000000, 0x000000ff },
8531 		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
8532 			0x00000000, 0xffffffff },
8533 		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
8534 			0x00000000, 0xffffffff },
8535 		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
8536 			0x00000000, 0xffffffff },
8537 		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
8538 			0x00000000, 0xffffffff },
8539 		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
8540 			0x00000000, 0xffffffff },
8541 		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
8542 			0xffffffff, 0x00000000 },
8543 		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
8544 			0xffffffff, 0x00000000 },
8546 		/* Buffer Manager Control Registers. */
8547 		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
8548 			0x00000000, 0x007fff80 },
8549 		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
8550 			0x00000000, 0x007fffff },
8551 		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
8552 			0x00000000, 0x0000003f },
8553 		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
8554 			0x00000000, 0x000001ff },
8555 		{ BUFMGR_MB_HIGH_WATER, 0x0000,
8556 			0x00000000, 0x000001ff },
8557 		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
8558 			0xffffffff, 0x00000000 },
8559 		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
8560 			0xffffffff, 0x00000000 },
8562 		/* Mailbox Registers */
8563 		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
8564 			0x00000000, 0x000001ff },
8565 		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
8566 			0x00000000, 0x000001ff },
8567 		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
8568 			0x00000000, 0x000007ff },
8569 		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
8570 			0x00000000, 0x000001ff },
	/* Sentinel: end of table. */
8572 		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
8577 	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
8579 		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
8583 	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
	/* Skip table entries that don't apply to this chip family. */
8584 		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
8587 		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
8590 		if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
8591 		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
8594 		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
8597 		offset = (u32) reg_tbl[i].offset;
8598 		read_mask = reg_tbl[i].read_mask;
8599 		write_mask = reg_tbl[i].write_mask;
8601 		/* Save the original register content */
8602 		save_val = tr32(offset);
8604 		/* Determine the read-only value. */
8605 		read_val = save_val & read_mask;
8607 		/* Write zero to the register, then make sure the read-only bits
8608 		 * are not changed and the read/write bits are all zeros.
8614 		/* Test the read-only and read/write bits. */
8615 		if (((val & read_mask) != read_val) || (val & write_mask))
8618 		/* Write ones to all the bits defined by RdMask and WrMask, then
8619 		 * make sure the read-only bits are not changed and the
8620 		 * read/write bits are all ones.
8622 		tw32(offset, read_mask | write_mask);
8626 		/* Test the read-only bits. */
8627 		if ((val & read_mask) != read_val)
8630 		/* Test the read/write bits. */
8631 		if ((val & write_mask) != write_mask)
	/* Restore the original value after a successful entry. */
8634 		tw32(offset, save_val);
	/* Failure path: log, restore the register, and bail out. */
8640 	printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
8641 	tw32(offset, save_val);
/* Self-test helper: write each test pattern to every word of the
 * internal-memory window [offset, offset+len) and read it back,
 * failing on the first mismatch.
 */
8645 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
8647 	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
8651 	for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
8652 		for (j = 0; j < len; j += 4) {
8655 			tg3_write_mem(tp, offset + j, test_pattern[i]);
8656 			tg3_read_mem(tp, offset + j, &val);
8657 			if (val != test_pattern[i])
/* Self-test: exercise the chip's internal memory regions.  The set of
 * {offset, len} regions differs per chip family (570x vs 5705-class vs
 * 5755/5787); each region is verified with tg3_do_mem_test().  Tables
 * are terminated by offset == 0xffffffff.
 */
8664 static int tg3_test_memory(struct tg3 *tp)
8666 	static struct mem_entry {
8669 	} mem_tbl_570x[] = {
8670 		{ 0x00000000, 0x00b50},
8671 		{ 0x00002000, 0x1c000},
8672 		{ 0xffffffff, 0x00000}
8673 	}, mem_tbl_5705[] = {
8674 		{ 0x00000100, 0x0000c},
8675 		{ 0x00000200, 0x00008},
8676 		{ 0x00004000, 0x00800},
8677 		{ 0x00006000, 0x01000},
8678 		{ 0x00008000, 0x02000},
8679 		{ 0x00010000, 0x0e000},
8680 		{ 0xffffffff, 0x00000}
8681 	}, mem_tbl_5755[] = {
8682 		{ 0x00000200, 0x00008},
8683 		{ 0x00004000, 0x00800},
8684 		{ 0x00006000, 0x00800},
8685 		{ 0x00008000, 0x02000},
8686 		{ 0x00010000, 0x0c000},
8687 		{ 0xffffffff, 0x00000}
8689 	struct mem_entry *mem_tbl;
	/* Pick the region table matching this chip generation. */
8693 	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
8694 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8695 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8696 			mem_tbl = mem_tbl_5755;
8698 			mem_tbl = mem_tbl_5705;
8700 		mem_tbl = mem_tbl_570x;
8702 	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
8703 		if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
8704 		    mem_tbl[i].len)) != 0)
8711 #define TG3_MAC_LOOPBACK 0
8712 #define TG3_PHY_LOOPBACK 1
/* Self-test helper: run one loopback pass in either MAC-internal or
 * PHY loopback mode.  Builds a single test frame (our MAC address +
 * an incrementing byte pattern), transmits it, polls the status block
 * until the TX and RX indices advance, then verifies the received
 * descriptor and payload match what was sent.  Returns 0 on success.
 */
8714 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
8716 	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
8718 	struct sk_buff *skb, *rx_skb;
8721 	int num_pkts, tx_len, rx_len, i, err;
8722 	struct tg3_rx_buffer_desc *desc;
8724 	if (loopback_mode == TG3_MAC_LOOPBACK) {
8725 		/* HW errata - mac loopback fails in some cases on 5780.
8726 		 * Normal traffic and PHY loopback are not affected by
8729 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
	/* MAC loopback: wrap frames inside the MAC, no PHY involved. */
8732 		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8733 			   MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY;
8734 		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
8735 			mac_mode |= MAC_MODE_PORT_MODE_MII;
8737 			mac_mode |= MAC_MODE_PORT_MODE_GMII;
8738 		tw32(MAC_MODE, mac_mode);
8739 	} else if (loopback_mode == TG3_PHY_LOOPBACK) {
	/* PHY loopback: force full duplex at the part's top speed. */
8742 		val = BMCR_LOOPBACK | BMCR_FULLDPLX;
8743 		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
8744 			val |= BMCR_SPEED100;
8746 			val |= BMCR_SPEED1000;
8748 		tg3_writephy(tp, MII_BMCR, val);
8751 		/* reset to prevent losing 1st rx packet intermittently */
8752 		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
8753 			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8755 			tw32_f(MAC_RX_MODE, tp->rx_mode);
8757 		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8758 			   MAC_MODE_LINK_POLARITY;
8759 		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
8760 			mac_mode |= MAC_MODE_PORT_MODE_MII;
8762 			mac_mode |= MAC_MODE_PORT_MODE_GMII;
	/* 5401 PHY quirk: different polarity/LED setup in loopback. */
8763 		if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
8764 			mac_mode &= ~MAC_MODE_LINK_POLARITY;
8765 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
8766 				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8768 		tw32(MAC_MODE, mac_mode);
	/* Build the test frame: dest = our own MAC, then a byte ramp. */
8776 	skb = netdev_alloc_skb(tp->dev, tx_len);
8780 	tx_data = skb_put(skb, tx_len);
8781 	memcpy(tx_data, tp->dev->dev_addr, 6);
8782 	memset(tx_data + 6, 0x0, 8);
8784 	tw32(MAC_RX_MTU_SIZE, tx_len + 4);
8786 	for (i = 14; i < tx_len; i++)
8787 		tx_data[i] = (u8) (i & 0xff);
8789 	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
8791 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	/* Remember the RX producer index so we can detect our frame. */
8796 	rx_start_idx = tp->hw_status->idx[0].rx_producer;
8800 	tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
8805 	/* Some platforms need to sync memory here */
8808 	tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
8810 	tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
	/* Poll up to 50 times for the frame to complete the round trip. */
8814 	for (i = 0; i < 50; i++) {
8815 		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8820 		tx_idx = tp->hw_status->idx[0].tx_consumer;
8821 		rx_idx = tp->hw_status->idx[0].rx_producer;
8822 		if ((tx_idx == tp->tx_prod) &&
8823 		    (rx_idx == (rx_start_idx + num_pkts)))
8827 	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
8830 	if (tx_idx != tp->tx_prod)
8833 	if (rx_idx != rx_start_idx + num_pkts)
	/* Validate the received descriptor: ring, errors, and length. */
8836 	desc = &tp->rx_rcb[rx_start_idx];
8837 	desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
8838 	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
8839 	if (opaque_key != RXD_OPAQUE_RING_STD)
8842 	if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
8843 	    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
8846 	rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
8847 	if (rx_len != tx_len)
8850 	rx_skb = tp->rx_std_buffers[desc_idx].skb;
8852 	map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
8853 	pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
	/* Verify the payload byte ramp survived the loopback intact. */
8855 	for (i = 14; i < tx_len; i++) {
8856 		if (*(rx_skb->data + i) != (u8) (i & 0xff))
8861 	/* tg3_free_rings will unmap and free the rx_skb */
8866 #define TG3_MAC_LOOPBACK_FAILED 1
8867 #define TG3_PHY_LOOPBACK_FAILED 2
8868 #define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
8869 TG3_PHY_LOOPBACK_FAILED)
/* Self-test: reset the hardware and run MAC loopback, plus PHY
 * loopback on non-serdes parts.  Returns a bitmask of the
 * TG3_*_LOOPBACK_FAILED flags (0 on full success).
 */
8871 static int tg3_test_loopback(struct tg3 *tp)
8875 	if (!netif_running(tp->dev))
8876 		return TG3_LOOPBACK_FAILED;
8878 	err = tg3_reset_hw(tp, 1);
8880 		return TG3_LOOPBACK_FAILED;
8882 	if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8883 		err |= TG3_MAC_LOOPBACK_FAILED;
	/* Serdes parts have no PHY to loop back through. */
8884 	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8885 		if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8886 			err |= TG3_PHY_LOOPBACK_FAILED;
/* ethtool self_test entry point: run the online tests (NVRAM, link)
 * always, and the offline tests (registers, memory, loopback,
 * interrupt) when ETH_TEST_FL_OFFLINE is requested.  Offline testing
 * halts the chip first and restarts it afterwards.  Results go into
 * data[]; any failure also sets ETH_TEST_FL_FAILED.
 */
8892 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8895 	struct tg3 *tp = netdev_priv(dev);
	/* Power the chip up if it was put into a low-power state. */
8897 	if (tp->link_config.phy_is_low_power)
8898 		tg3_set_power_state(tp, PCI_D0);
8900 	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
8902 	if (tg3_test_nvram(tp) != 0) {
8903 		etest->flags |= ETH_TEST_FL_FAILED;
8906 	if (tg3_test_link(tp) != 0) {
8907 		etest->flags |= ETH_TEST_FL_FAILED;
8910 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
8911 		int err, irq_sync = 0;
8913 		if (netif_running(dev)) {
8918 		tg3_full_lock(tp, irq_sync);
	/* Quiesce the chip and its on-board CPUs before poking at it. */
8920 		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
8921 		err = tg3_nvram_lock(tp);
8922 		tg3_halt_cpu(tp, RX_CPU_BASE);
8923 		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8924 			tg3_halt_cpu(tp, TX_CPU_BASE);
8926 			tg3_nvram_unlock(tp);
8928 		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
8931 		if (tg3_test_registers(tp) != 0) {
8932 			etest->flags |= ETH_TEST_FL_FAILED;
8935 		if (tg3_test_memory(tp) != 0) {
8936 			etest->flags |= ETH_TEST_FL_FAILED;
8939 		if ((data[4] = tg3_test_loopback(tp)) != 0)
8940 			etest->flags |= ETH_TEST_FL_FAILED;
	/* Interrupt test needs the lock dropped (it waits for an IRQ). */
8942 		tg3_full_unlock(tp);
8944 		if (tg3_test_interrupt(tp) != 0) {
8945 			etest->flags |= ETH_TEST_FL_FAILED;
8949 		tg3_full_lock(tp, 0);
	/* Bring the device back to its pre-test operational state. */
8951 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8952 		if (netif_running(dev)) {
8953 			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8954 			if (!tg3_restart_hw(tp, 1))
8955 				tg3_netif_start(tp);
8958 		tg3_full_unlock(tp);
8960 	if (tp->link_config.phy_is_low_power)
8961 		tg3_set_power_state(tp, PCI_D3hot);
/* net_device ioctl handler: implements the MII ioctls (get PHY id,
 * read register, write register) against the tg3 PHY.  Serdes parts
 * have no PHY and reject the register accesses; writes additionally
 * require CAP_NET_ADMIN.
 */
8965 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8967 #if (LINUX_VERSION_CODE >= 0x020607)
8968 	struct mii_ioctl_data *data = if_mii(ifr);
8970 	struct mii_ioctl_data *data = (struct mii_ioctl_data *) &ifr->ifr_ifru;
8972 	struct tg3 *tp = netdev_priv(dev);
8977 		data->phy_id = PHY_ADDR;
8983 		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8984 			break;			/* We have no PHY */
8986 		if (tp->link_config.phy_is_low_power)
	/* PHY accesses are serialized under tp->lock. */
8989 		spin_lock_bh(&tp->lock);
8990 		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
8991 		spin_unlock_bh(&tp->lock);
8993 		data->val_out = mii_regval;
8999 		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9000 			break;			/* We have no PHY */
9002 		if (!capable(CAP_NET_ADMIN))
9005 		if (tp->link_config.phy_is_low_power)
9008 		spin_lock_bh(&tp->lock);
9009 		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
9010 		spin_unlock_bh(&tp->lock);
9021 #if TG3_VLAN_TAG_USED
/* VLAN hook: record the new vlan_group and reprogram the RX mode so
 * the chip's VLAN-tag stripping matches, restarting the netif queue
 * afterwards if the device was running.
 */
9022 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
9024 	struct tg3 *tp = netdev_priv(dev);
9026 	if (netif_running(dev))
9029 	tg3_full_lock(tp, 0);
9033 	/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
9034 	__tg3_set_rx_mode(dev);
9036 	tg3_full_unlock(tp);
9038 	if (netif_running(dev))
9039 		tg3_netif_start(tp);
/* VLAN hook: drop the per-VID device entry from the vlan_group under
 * the full lock, restarting the netif queue afterwards if running.
 */
9042 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
9044 	struct tg3 *tp = netdev_priv(dev);
9046 	if (netif_running(dev))
9049 	tg3_full_lock(tp, 0);
9051 		tp->vlgrp->vlan_devices[vid] = NULL;
9052 	tg3_full_unlock(tp);
9054 	if (netif_running(dev))
9055 		tg3_netif_start(tp);
/* ethtool get_coalesce: copy out the driver's cached coalescing
 * parameters.
 */
9059 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9061 	struct tg3 *tp = netdev_priv(dev);
9063 	memcpy(ec, &tp->coal, sizeof(*ec));
/* ethtool set_coalesce: range-check the requested interrupt coalescing
 * parameters against chip limits (the irq-tick and stats-tick knobs
 * exist only on pre-5705 parts, so their limits are 0 on 5705+), copy
 * the supported fields into tp->coal, and program the hardware if the
 * device is running.
 */
9067 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9069 	struct tg3 *tp = netdev_priv(dev);
9070 	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
9071 	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
9073 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
9074 		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
9075 		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
9076 		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
9077 		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
9080 	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
9081 	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
9082 	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
9083 	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
9084 	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
9085 	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
9086 	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
9087 	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
9088 	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
9089 	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
9092 	/* No rx interrupts will be generated if both are zero */
9093 	if ((ec->rx_coalesce_usecs == 0) &&
9094 	    (ec->rx_max_coalesced_frames == 0))
9097 	/* No tx interrupts will be generated if both are zero */
9098 	if ((ec->tx_coalesce_usecs == 0) &&
9099 	    (ec->tx_max_coalesced_frames == 0))
9102 	/* Only copy relevant parameters, ignore all others. */
9103 	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
9104 	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
9105 	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
9106 	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
9107 	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
9108 	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
9109 	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
9110 	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
9111 	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
9113 	if (netif_running(dev)) {
9114 		tg3_full_lock(tp, 0);
9115 		__tg3_set_coalesce(tp, &tp->coal);
9116 		tg3_full_unlock(tp);
/* ethtool operations table.  Optional entries are conditional on the
 * kernel version / ethtool feature macros available at build time.
 */
9121 static struct ethtool_ops tg3_ethtool_ops = {
9122 	.get_settings		= tg3_get_settings,
9123 	.set_settings		= tg3_set_settings,
9124 	.get_drvinfo		= tg3_get_drvinfo,
9125 	.get_regs_len		= tg3_get_regs_len,
9126 	.get_regs		= tg3_get_regs,
9127 	.get_wol		= tg3_get_wol,
9128 	.set_wol		= tg3_set_wol,
9129 	.get_msglevel		= tg3_get_msglevel,
9130 	.set_msglevel		= tg3_set_msglevel,
9131 	.nway_reset		= tg3_nway_reset,
9132 	.get_link		= ethtool_op_get_link,
9133 #if (LINUX_VERSION_CODE >= 0x20418)
9134 	.get_eeprom_len		= tg3_get_eeprom_len,
9136 #ifdef ETHTOOL_GEEPROM
9137 	.get_eeprom		= tg3_get_eeprom,
9139 #ifdef ETHTOOL_SEEPROM
9140 	.set_eeprom		= tg3_set_eeprom,
9142 	.get_ringparam		= tg3_get_ringparam,
9143 	.set_ringparam		= tg3_set_ringparam,
9144 	.get_pauseparam		= tg3_get_pauseparam,
9145 	.set_pauseparam		= tg3_set_pauseparam,
9146 	.get_rx_csum		= tg3_get_rx_csum,
9147 	.set_rx_csum		= tg3_set_rx_csum,
9148 	.get_tx_csum		= ethtool_op_get_tx_csum,
9149 #if (LINUX_VERSION_CODE >= 0x20418)
9150 	.set_tx_csum		= tg3_set_tx_csum,
9152 	.get_sg			= ethtool_op_get_sg,
9153 	.set_sg			= ethtool_op_set_sg,
9154 #if TG3_TSO_SUPPORT != 0
9155 	.get_tso		= ethtool_op_get_tso,
9156 	.set_tso		= tg3_set_tso,
9158 	.self_test_count	= tg3_get_test_count,
9159 	.self_test		= tg3_self_test,
9160 	.get_strings		= tg3_get_strings,
9161 	.phys_id		= tg3_phys_id,
9162 	.get_stats_count	= tg3_get_stats_count,
9163 	.get_ethtool_stats	= tg3_get_ethtool_stats,
9164 	.get_coalesce		= tg3_get_coalesce,
9165 	.set_coalesce		= tg3_set_coalesce,
9166 #ifdef ETHTOOL_GPERMADDR
9167 	.get_perm_addr		= ethtool_op_get_perm_addr,
/* Determine the size of a legacy (non-flash) EEPROM part by probing
 * for the address at which the validation signature wraps around.
 * On any read failure we leave the default EEPROM_CHIP_SIZE in place.
 */
static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val, magic;

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
		return;

	/* Only a standard-magic or self-boot (0xa5xxxxxx) image can be sized
	 * this way; anything else keeps the default size.
	 */
	if ((magic != TG3_EEPROM_MAGIC) && ((magic & 0xff000000) != 0xa5000000))
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x10;

	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
			return;

		if (val == magic)
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}
/* Determine NVRAM size.  For self-boot images fall back to the probing
 * method in tg3_get_eeprom_size(); otherwise read the size word the
 * bootcode stores at offset 0xf0 (upper 16 bits = size in KB), with a
 * final fallback of 128KB.
 */
static void __devinit tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if (tg3_nvram_read_swab(tp, 0, &val) != 0)
		return;

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			tp->nvram_size = (val >> 16) * 1024;
			return;
		}
	}
	tp->nvram_size = 0x20000;
}
/* Decode NVRAM_CFG1 for pre-5752 chips: record whether the part is a
 * flash device, its JEDEC vendor, page size, and whether it is buffered.
 */
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
	}
	else {
		/* Non-flash part: turn off compatibility bypass mode. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
			case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
				break;
			case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
				break;
			case FLASH_VENDOR_ATMEL_EEPROM:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
				break;
			case FLASH_VENDOR_ST:
				tp->nvram_jedecnum = JEDEC_ST;
				tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
				break;
			case FLASH_VENDOR_SAIFUN:
				tp->nvram_jedecnum = JEDEC_SAIFUN;
				tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
				break;
			case FLASH_VENDOR_SST_SMALL:
			case FLASH_VENDOR_SST_LARGE:
				tp->nvram_jedecnum = JEDEC_SST;
				tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
				break;
		}
	}
	else {
		/* Older chips: assume a buffered Atmel AT45DB0X1B part. */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
	}
}
/* Decode NVRAM_CFG1 for 5752-class chips: vendor/JEDEC id, flash vs.
 * eeprom, TPM write-protection, and page size.
 */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
		case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			break;
	}

	if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
		switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
			case FLASH_5752PAGE_SIZE_256:
				tp->nvram_pagesize = 256;
				break;
			case FLASH_5752PAGE_SIZE_512:
				tp->nvram_pagesize = 512;
				break;
			case FLASH_5752PAGE_SIZE_1K:
				tp->nvram_pagesize = 1024;
				break;
			case FLASH_5752PAGE_SIZE_2K:
				tp->nvram_pagesize = 2048;
				break;
			case FLASH_5752PAGE_SIZE_4K:
				tp->nvram_pagesize = 4096;
				break;
			case FLASH_5752PAGE_SIZE_264:
				tp->nvram_pagesize = 264;
				break;
		}
	}
	else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
/* Decode NVRAM_CFG1 for 5755-class chips.  Unlike the 5752 path, the
 * page size is implied by the vendor selection rather than read from a
 * separate field.
 */
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ:
		case FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

			/* EEPROM part: disable compatibility bypass. */
			nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
			tw32(NVRAM_CFG1, nvcfg1);
			break;
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_5755VENDOR_ATMEL_FLASH_1:
		case FLASH_5755VENDOR_ATMEL_FLASH_2:
		case FLASH_5755VENDOR_ATMEL_FLASH_3:
		case FLASH_5755VENDOR_ATMEL_FLASH_4:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 264;
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 256;
			break;
	}
}
/* Decode NVRAM_CFG1 for 5787-class chips.  Same structure as the 5755
 * path, with additional Micro(chip) EEPROM vendor codes.
 */
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
		case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
		case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
		case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

			/* EEPROM part: disable compatibility bypass. */
			nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
			tw32(NVRAM_CFG1, nvcfg1);
			break;
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_5755VENDOR_ATMEL_FLASH_1:
		case FLASH_5755VENDOR_ATMEL_FLASH_2:
		case FLASH_5755VENDOR_ATMEL_FLASH_3:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 264;
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 256;
			break;
	}
}
9418 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
9419 static void __devinit tg3_nvram_init(struct tg3 *tp)
9423 tw32_f(GRC_EEPROM_ADDR,
9424 (EEPROM_ADDR_FSM_RESET |
9425 (EEPROM_DEFAULT_CLOCK_PERIOD <<
9426 EEPROM_ADDR_CLKPERD_SHIFT)));
9428 /* XXX schedule_timeout() ... */
9429 for (j = 0; j < 100; j++)
9432 /* Enable seeprom accesses. */
9433 tw32_f(GRC_LOCAL_CTRL,
9434 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9437 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9438 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
9439 tp->tg3_flags |= TG3_FLAG_NVRAM;
9441 if (tg3_nvram_lock(tp)) {
9442 printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
9443 "tg3_nvram_init failed.\n", tp->dev->name);
9446 tg3_enable_nvram_access(tp);
9448 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9449 tg3_get_5752_nvram_info(tp);
9450 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9451 tg3_get_5755_nvram_info(tp);
9452 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
9453 tg3_get_5787_nvram_info(tp);
9455 tg3_get_nvram_info(tp);
9457 tg3_get_nvram_size(tp);
9459 tg3_disable_nvram_access(tp);
9460 tg3_nvram_unlock(tp);
9463 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9465 tg3_get_eeprom_size(tp);
/* Read one 32-bit word from the serial EEPROM via the GRC_EEPROM_ADDR
 * state machine.  @offset must be dword-aligned and within the address
 * mask.  Returns 0 on success, -EINVAL on a bad offset, -EBUSY on a
 * timeout waiting for the read to complete.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
				       u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK ||
	    (offset % 4) != 0)
		return -EINVAL;

	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Poll for completion (up to ~1s). */
	for (i = 0; i < 10000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		udelay(100);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	*val = tr32(GRC_EEPROM_DATA);
	return 0;
}
/* Maximum poll iterations waiting for an NVRAM command to complete. */
#define NVRAM_CMD_TIMEOUT 10000

/* Issue @nvram_cmd to the NVRAM controller and poll NVRAM_CMD_DONE.
 * Returns 0 on completion, -EBUSY on timeout.
 */
static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		udelay(10);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			udelay(10);
			break;
		}
	}
	if (i == NVRAM_CMD_TIMEOUT) {
		return -EBUSY;
	}
	return 0;
}
/* Translate a linear NVRAM offset into the physical address expected by
 * buffered Atmel flash parts, which address by (page << PAGE_POS) + byte
 * offset within the page.  All other parts use the address unchanged.
 */
static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
{
	if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
	    (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
	    (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr / tp->nvram_pagesize) <<
			ATMEL_AT45DB0X1B_PAGE_POS) +
		       (addr % tp->nvram_pagesize);

	return addr;
}
/* Inverse of tg3_nvram_phys_addr(): convert a buffered-Atmel physical
 * address back into a linear NVRAM offset.
 */
static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
{
	if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
	    (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
	    (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
			tp->nvram_pagesize) +
		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));

	return addr;
}
/* Read one 32-bit word from NVRAM at @offset.  Falls back to the
 * EEPROM state machine on chips without an NVRAM interface; otherwise
 * takes the NVRAM lock, issues a single-word read command, and returns
 * the byte-swapped data register.  Returns 0 on success or a negative
 * errno.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = swab32(tr32(NVRAM_RDDATA));

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
/* As tg3_nvram_read(), but byte-swap the result (yielding big-endian
 * NVRAM contents in host order).  *val is written even on error.
 */
static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
{
	int err;
	u32 tmp;

	err = tg3_nvram_read(tp, offset, &tmp);
	*val = swab32(tmp);
	return err;
}
/* Write @len bytes (dword-aligned) from @buf to the serial EEPROM via
 * the GRC_EEPROM state machine, one 32-bit word at a time, polling for
 * completion after each word.  Returns 0 on success, -EBUSY on timeout.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				   u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr, data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		tw32(GRC_EEPROM_DATA, cpu_to_le32(data));

		/* Clear any stale COMPLETE status before starting. */
		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		for (j = 0; j < 10000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			udelay(100);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
/* offset and length are dword aligned */
/* Write to unbuffered flash.  These parts require a full
 * read-modify-erase-write cycle per page: read the page into a bounce
 * buffer, merge in the caller's data, erase the page, then rewrite it
 * word by word.  Returns 0 on success or a negative errno.
 */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		/* Start of the page containing the current offset. */
		phy_addr = offset & ~pagemask;

		for (j = 0; j < pagesize; j += 4) {
			if ((ret = tg3_nvram_read(tp, phy_addr + j,
						(u32 *) (tmp + j))))
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		/* Merge caller data into the page image. */
		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Write the merged page back, word by word. */
		for (j = 0; j < pagesize; j += 4) {
			u32 data;

			data = *((u32 *) (tmp + j));
			tw32(NVRAM_WRDATA, cpu_to_be32(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
				break;
		}
		if (ret)
			break;
	}

	/* Re-assert write-disable regardless of the outcome above. */
	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
/* offset and length are dword aligned */
/* Write to buffered flash or eeprom, one word per command, flagging
 * FIRST/LAST at page and transfer boundaries.  ST parts on pre-5752
 * chips additionally need a write-enable command at the start of each
 * page.  Returns 0 on success or a negative errno.
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 data, page_off, phy_addr, nvram_cmd;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, cpu_to_be32(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		if ((page_off == 0) || (i == 0))
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))

				break;
		}
		if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
9782 /* offset and length are dword aligned */
9783 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
9787 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9788 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
9789 ~GRC_LCLCTRL_GPIO_OUTPUT1);
9793 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
9794 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
9799 ret = tg3_nvram_lock(tp);
9803 tg3_enable_nvram_access(tp);
9804 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
9805 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
9806 tw32(NVRAM_WRITE1, 0x406);
9808 grc_mode = tr32(GRC_MODE);
9809 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
9811 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
9812 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9814 ret = tg3_nvram_write_block_buffered(tp, offset, len,
9818 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
9822 grc_mode = tr32(GRC_MODE);
9823 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
9825 tg3_disable_nvram_access(tp);
9826 tg3_nvram_unlock(tp);
9829 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9830 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
/* Maps a board's PCI subsystem (vendor, device) pair to its PHY id, for
 * boards whose eeprom carries no usable PHY information.  phy_id == 0
 * denotes a fiber/serdes board with no copper PHY.
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },		    /* BCM95700A9 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },		    /* BCM95701A7 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

	/* 3com boards. */
	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
	{ PCI_VENDOR_ID_3COM, 0x1004, 0 },		/* 3C996SX */
	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

	/* DELL boards. */
	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

	/* Compaq boards. */
	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
	{ PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },		  /* CHANGELING */
	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

	/* IBM boards. */
	{ PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};
9880 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
9884 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
9885 if ((subsys_id_to_phy_id[i].subsys_vendor ==
9886 tp->pdev->subsystem_vendor) &&
9887 (subsys_id_to_phy_id[i].subsys_devid ==
9888 tp->pdev->subsystem_device))
9889 return &subsys_id_to_phy_id[i];
/* Pull hardware configuration out of the chip's SRAM config area
 * (written there by the bootcode from eeprom): PHY id, serdes vs.
 * copper, LED mode, write-protect, ASF and WOL capabilities.  When no
 * valid SRAM signature is found, conservative defaults set at the top
 * of the function remain in effect.
 */
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;
	u16 pmcsr;

	/* On some early chips the SRAM cannot be accessed in D3hot state,
	 * so need make sure we're in D0.
	 */
	pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
#if (LINUX_VERSION_CODE < 0x20607)
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(HZ / 1000);
#else
	msleep(1);
#endif

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	tp->phy_id = PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device by default. */
	tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		/* CFG_2 only exists on newer chips with sane version info. */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			/* Repack the SRAM encoding into the driver's
			 * internal PHY-id layout.
			 */
			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
				tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
			else
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}

		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		}

		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP)
			tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
		else
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
		if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
			tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;

		if (cfg2 & (1 << 17))
			tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
	}
}
/* Probe and identify the PHY.  Preference order: the id read live from
 * MII registers (unless ASF firmware owns the PHY), then the id already
 * extracted from eeprom by tg3_get_eeprom_hw_cfg(), and finally the
 * hardcoded subsystem-id table.  For copper PHYs without link, reset
 * the PHY and program the default advertisement.  Returns 0 or a
 * negative errno.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* Reading the PHY ID register can conflict with ASF
	 * firwmare access to the PHY hardware.
	 */
	err = 0;
	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
		hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
	}

	if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == PHY_ID_BCM8002)
			tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		else
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
	} else {
		if (tp->phy_id != PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			if (!tp->phy_id ||
			    tp->phy_id == PHY_ID_BCM8002)
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}
	}

	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		u32 bmsr, adv_reg, tg3_ctrl;

		/* Read BMSR twice: the link-status bit is latched-low. */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_100HALF | ADVERTISE_100FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		tg3_ctrl = 0;
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
				    MII_TG3_CTRL_ADV_1000_FULL);
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
					     MII_TG3_CTRL_ENABLE_AS_MASTER);
		}

		if (!tg3_copper_is_advertising_all(tp)) {
			tg3_writephy(tp, MII_ADVERTISE, adv_reg);

			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
				tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
		tg3_phy_set_wirespeed(tp);

		tg3_writephy(tp, MII_ADVERTISE, adv_reg);
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
			tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
	}

skip_phy_reset:
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;
	}

	if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
		err = tg3_init_5401phy_dsp(tp);
	}

	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
		tp->link_config.advertising =
			(ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg |
			 ADVERTISED_FIBRE);
	if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
		tp->link_config.advertising &=
			~(ADVERTISED_1000baseT_Half |
			  ADVERTISED_1000baseT_Full);

	return err;
}
/* Extract the board part number from the VPD area.  The VPD image is
 * fetched either from NVRAM (offset 0x100, standard-magic images) or
 * through the PCI VPD capability, then parsed for the read-only
 * keyword block's "PN" field.  On any failure the part number is set
 * to "none".
 */
static void __devinit tg3_read_partno(struct tg3 *tp)
{
	unsigned char vpd_data[256];
	int i;
	u32 magic;

	if (tg3_nvram_read_swab(tp, 0x0, &magic))
		goto out_not_found;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < 256; i += 4) {
			u32 tmp;

			if (tg3_nvram_read(tp, 0x100 + i, &tmp))
				goto out_not_found;

			vpd_data[i + 0] = ((tmp >>  0) & 0xff);
			vpd_data[i + 1] = ((tmp >>  8) & 0xff);
			vpd_data[i + 2] = ((tmp >> 16) & 0xff);
			vpd_data[i + 3] = ((tmp >> 24) & 0xff);
		}
	} else {
		int vpd_cap;

		vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
		for (i = 0; i < 256; i += 4) {
			u32 tmp, j = 0;
			u16 tmp16;

			pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
					      i);
			/* Poll bit 15 of PCI_VPD_ADDR for read completion. */
			while (j++ < 100) {
				pci_read_config_word(tp->pdev, vpd_cap +
						     PCI_VPD_ADDR, &tmp16);
				if (tmp16 & 0x8000)
					break;
#if (LINUX_VERSION_CODE < 0x20607)
				set_current_state(TASK_UNINTERRUPTIBLE);
				schedule_timeout(1);
#else
				msleep(1);
#endif
			}
			if (!(tmp16 & 0x8000))
				goto out_not_found;

			pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
					      &tmp);
			tmp = cpu_to_le32(tmp);
			memcpy(&vpd_data[i], &tmp, 4);
		}
	}

	/* Now parse and find the part number. */
	for (i = 0; i < 256; ) {
		unsigned char val = vpd_data[i];
		int block_end;

		/* Skip identifier-string (0x82) and RW (0x91) resources. */
		if (val == 0x82 || val == 0x91) {
			i = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
			continue;
		}

		if (val != 0x90)
			goto out_not_found;

		block_end = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
		i += 3;
		while (i < block_end) {
			if (vpd_data[i + 0] == 'P' &&
			    vpd_data[i + 1] == 'N') {
				int partno_len = vpd_data[i + 2];

				if (partno_len > 24)
					goto out_not_found;

				memcpy(tp->board_part_number,
				       &vpd_data[i + 3],
				       partno_len);

				/* Success. */
				return;
			}
			i += 3 + vpd_data[i + 2];
		}

		/* Part number not found. */
		goto out_not_found;
	}

out_not_found:
	strcpy(tp->board_part_number, "none");
}
/* Read the bootcode firmware version string out of NVRAM into
 * tp->fw_ver.  Only images with the standard magic and a version
 * descriptor (top bits 0x0c000000) are parsed; otherwise tp->fw_ver is
 * left untouched.
 */
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val, offset, start;

	if (tg3_nvram_read_swab(tp, 0, &val))
		return;

	if (val != TG3_EEPROM_MAGIC)
		return;

	if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
	    tg3_nvram_read_swab(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);
	if (tg3_nvram_read_swab(tp, offset, &val))
		return;

	if ((val & 0xfc000000) == 0x0c000000) {
		u32 ver_offset, addr;
		int i;

		if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
		    tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
			return;

		if (val != 0)
			return;

		addr = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			if (tg3_nvram_read(tp, addr + i, &val))
				return;

			val = cpu_to_le32(val);
			memcpy(tp->fw_ver + i, &val, 4);
		}
	}
}
10311 static int __devinit tg3_get_invariants(struct tg3 *tp)
10313 #if (LINUX_VERSION_CODE >= 0x2060a)
10314 static struct pci_device_id write_reorder_chipsets[] = {
10315 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10316 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
10317 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10318 PCI_DEVICE_ID_AMD_8131_BRIDGE) },
10319 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
10320 PCI_DEVICE_ID_VIA_8385_0) },
10325 u32 cacheline_sz_reg;
10326 u32 pci_state_reg, grc_misc_cfg;
10331 /* Force memory write invalidate off. If we leave it on,
10332 * then on 5700_BX chips we have to enable a workaround.
10333 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
10334 * to match the cacheline size. The Broadcom driver have this
10335 * workaround but turns MWI off all the times so never uses
10336 * it. This seems to suggest that the workaround is insufficient.
10338 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10339 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
10340 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10342 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
10343 * has the register indirect write enable bit set before
10344 * we try to access any of the MMIO registers. It is also
10345 * critical that the PCI-X hw workaround situation is decided
10346 * before that as well.
10348 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10351 tp->pci_chip_rev_id = (misc_ctrl_reg >>
10352 MISC_HOST_CTRL_CHIPREV_SHIFT);
10354 /* Wrong chip ID in 5752 A0. This code can be removed later
10355 * as A0 is not in production.
10357 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
10358 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
10360 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
10361 * we need to disable memory and use config. cycles
10362 * only to access all registers. The 5702/03 chips
10363 * can mistakenly decode the special cycles from the
10364 * ICH chipsets as memory write cycles, causing corruption
10365 * of register and memory space. Only certain ICH bridges
10366 * will drive special cycles with non-zero data during the
10367 * address phase which can fall within the 5703's address
10368 * range. This is not an ICH bug as the PCI spec allows
10369 * non-zero address during special cycles. However, only
10370 * these ICH bridges are known to drive non-zero addresses
10371 * during special cycles.
10373 * Since special cycles do not cross PCI bridges, we only
10374 * enable this workaround if the 5703 is on the secondary
10375 * bus of these ICH bridges.
10377 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
10378 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
10379 static struct tg3_dev_id {
10383 } ich_chipsets[] = {
10384 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
10386 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
10388 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
10390 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
10394 struct tg3_dev_id *pci_id = &ich_chipsets[0];
10395 struct pci_dev *bridge = NULL;
10397 while (pci_id->vendor != 0) {
10398 bridge = pci_get_device(pci_id->vendor, pci_id->device,
10404 if (pci_id->rev != PCI_ANY_ID) {
10407 pci_read_config_byte(bridge, PCI_REVISION_ID,
10409 if (rev > pci_id->rev)
10412 if (bridge->subordinate &&
10413 (bridge->subordinate->number ==
10414 tp->pdev->bus->number)) {
10416 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
10417 pci_dev_put(bridge);
10423 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
10424 * DMA addresses > 40-bit. This bridge may have other additional
10425 * 57xx devices behind it in some 4-port NIC designs for example.
10426 * Any tg3 device found behind the bridge will also need the 40-bit
10429 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10430 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10431 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
10432 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10433 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
10436 struct pci_dev *bridge = NULL;
10439 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
10440 PCI_DEVICE_ID_SERVERWORKS_EPB,
10442 if (bridge && bridge->subordinate &&
10443 (bridge->subordinate->number <=
10444 tp->pdev->bus->number) &&
10445 (bridge->subordinate->subordinate >=
10446 tp->pdev->bus->number)) {
10447 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10448 pci_dev_put(bridge);
10454 /* Initialize misc host control in PCI block. */
10455 tp->misc_host_ctrl |= (misc_ctrl_reg &
10456 MISC_HOST_CTRL_CHIPREV);
10457 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10458 tp->misc_host_ctrl);
10460 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10461 &cacheline_sz_reg);
10463 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
10464 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
10465 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
10466 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
10468 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10469 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10470 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10471 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10472 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10473 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
10475 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
10476 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
10477 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
10479 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
10480 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10481 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
10482 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
10483 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
10485 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 |
10486 TG3_FLG2_HW_TSO_1_BUG;
10487 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10489 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
10490 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_1_BUG;
10494 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
10495 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
10496 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
10497 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
10498 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787)
10499 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
10501 if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
10502 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
10504 /* If we have an AMD 762 or VIA K8T800 chipset, write
10505 * reordering to the mailbox registers done by the host
10506 * controller can cause major troubles. We read back from
10507 * every mailbox register write to force the writes to be
10508 * posted to the chip in order.
10510 #if (LINUX_VERSION_CODE < 0x2060a)
10511 if ((pci_find_device(PCI_VENDOR_ID_AMD,
10512 PCI_DEVICE_ID_AMD_FE_GATE_700C, NULL) ||
10513 pci_find_device(PCI_VENDOR_ID_AMD,
10514 PCI_DEVICE_ID_AMD_8131_BRIDGE, NULL) ||
10515 pci_find_device(PCI_VENDOR_ID_VIA,
10516 PCI_DEVICE_ID_VIA_8385_0, NULL)) &&
10518 if (pci_dev_present(write_reorder_chipsets) &&
10520 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10521 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
10523 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10524 tp->pci_lat_timer < 64) {
10525 tp->pci_lat_timer = 64;
10527 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
10528 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
10529 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
10530 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
10532 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10536 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10539 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
10540 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
10542 /* If this is a 5700 BX chipset, and we are in PCI-X
10543 * mode, enable register write workaround.
10545 * The workaround is to use indirect register accesses
10546 * for all chip writes not to mailbox registers.
10548 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
10552 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10554 /* The chip can have it's power management PCI config
10555 * space registers clobbered due to this bug.
10556 * So explicitly force the chip into D0 here.
10558 pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10560 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
10561 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
10562 pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10565 /* Also, force SERR#/PERR# in PCI command. */
10566 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10567 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
10568 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10572 /* 5700 BX chips need to have their TX producer index mailboxes
10573 * written twice to workaround a bug.
10575 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
10576 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
10578 /* Back to back register writes can cause problems on this chip,
10579 * the workaround is to read back all reg writes except those to
10580 * mailbox regs. See tg3_write_indirect_reg32().
10582 * PCI Express 5750_A0 rev chips need this workaround too.
10584 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10585 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
10586 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
10587 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
10589 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
10590 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
10591 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
10592 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
10594 /* Chip-specific fixup from Broadcom driver */
10595 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
10596 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
10597 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
10598 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
10601 /* Default fast path register access methods */
10602 tp->read32 = tg3_read32;
10603 tp->write32 = tg3_write32;
10604 tp->read32_mbox = tg3_read32;
10605 tp->write32_mbox = tg3_write32;
10606 tp->write32_tx_mbox = tg3_write32;
10607 tp->write32_rx_mbox = tg3_write32;
10609 /* Various workaround register access methods */
10610 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
10611 tp->write32 = tg3_write_indirect_reg32;
10612 else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
10613 tp->write32 = tg3_write_flush_reg32;
10615 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
10616 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
10617 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10618 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
10619 tp->write32_rx_mbox = tg3_write_flush_reg32;
10622 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
10623 tp->read32 = tg3_read_indirect_reg32;
10624 tp->write32 = tg3_write_indirect_reg32;
10625 tp->read32_mbox = tg3_read_indirect_mbox;
10626 tp->write32_mbox = tg3_write_indirect_mbox;
10627 tp->write32_tx_mbox = tg3_write_indirect_mbox;
10628 tp->write32_rx_mbox = tg3_write_indirect_mbox;
10633 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10634 pci_cmd &= ~PCI_COMMAND_MEMORY;
10635 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10638 if (tp->write32 == tg3_write_indirect_reg32 ||
10639 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10640 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10641 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
10642 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
10644 /* Get eeprom hw config before calling tg3_set_power_state().
10645 * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
10646 * determined before calling tg3_set_power_state() so that
10647 * we know whether or not to switch out of Vaux power.
10648 * When the flag is set, it means that GPIO1 is used for eeprom
10649 * write protect and also implies that it is a LOM where GPIOs
10650 * are not used to switch power.
10652 tg3_get_eeprom_hw_cfg(tp);
10654 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
10655 * GPIO1 driven high will bring 5700's external PHY out of reset.
10656 * It is also used as eeprom write protect on LOMs.
10658 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
10659 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10660 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
10661 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10662 GRC_LCLCTRL_GPIO_OUTPUT1);
10663 /* Unused GPIO3 must be driven as output on 5752 because there
10664 * are no pull-up resistors on unused GPIO pins.
10666 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10667 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
10669 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10670 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
10672 /* Force the chip into D0. */
10673 err = tg3_set_power_state(tp, PCI_D0);
10675 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
10676 pci_name(tp->pdev));
10680 /* 5700 B0 chips do not support checksumming correctly due
10681 * to hardware bugs.
10683 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
10684 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
10686 /* Derive initial jumbo mode from MTU assigned in
10687 * ether_setup() via the alloc_etherdev() call
10689 if (tp->dev->mtu > ETH_DATA_LEN &&
10690 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10691 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
10693 /* Determine WakeOnLan speed to use. */
10694 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10695 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10696 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
10697 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
10698 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
10700 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
10703 /* A few boards don't want Ethernet@WireSpeed phy feature */
10704 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10705 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
10706 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
10707 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
10708 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
10709 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
10711 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
10712 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
10713 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
10714 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
10715 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
10717 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10718 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10719 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10720 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
10722 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
10725 tp->coalesce_mode = 0;
10726 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
10727 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
10728 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
10730 /* Initialize MAC MI mode, polling disabled. */
10731 tw32_f(MAC_MI_MODE, tp->mi_mode);
10734 /* Initialize data/descriptor byte/word swapping. */
10735 val = tr32(GRC_MODE);
10736 val &= GRC_MODE_HOST_STACKUP;
10737 tw32(GRC_MODE, val | tp->grc_mode);
10739 tg3_switch_clocks(tp);
10741 /* Clear this out for sanity. */
10742 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10744 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10746 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
10747 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
10748 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
10750 if (chiprevid == CHIPREV_ID_5701_A0 ||
10751 chiprevid == CHIPREV_ID_5701_B0 ||
10752 chiprevid == CHIPREV_ID_5701_B2 ||
10753 chiprevid == CHIPREV_ID_5701_B5) {
10754 void __iomem *sram_base;
10756 /* Write some dummy words into the SRAM status block
10757 * area, see if it reads back correctly. If the return
10758 * value is bad, force enable the PCIX workaround.
10760 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
10762 writel(0x00000000, sram_base);
10763 writel(0x00000000, sram_base + 4);
10764 writel(0xffffffff, sram_base + 4);
10765 if (readl(sram_base) != 0x00000000)
10766 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10771 tg3_nvram_init(tp);
10773 grc_misc_cfg = tr32(GRC_MISC_CFG);
10774 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
10776 /* Broadcom's driver says that CIOBE multisplit has a bug */
10778 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10779 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
10780 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
10781 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
10784 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10785 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
10786 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
10787 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
10789 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10790 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
10791 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
10792 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
10793 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
10794 HOSTCC_MODE_CLRTICK_TXBD);
10796 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
10797 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10798 tp->misc_host_ctrl);
10801 /* these are limited to 10/100 only */
10802 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10803 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
10804 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10805 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10806 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
10807 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
10808 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
10809 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10810 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
10811 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
10812 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
10814 err = tg3_phy_probe(tp);
10816 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
10817 pci_name(tp->pdev), err);
10818 /* ... but do not return immediately ... */
10821 tg3_read_partno(tp);
10822 tg3_read_fw_ver(tp);
10824 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
10825 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10827 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10828 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
10830 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10833 /* 5700 {AX,BX} chips have a broken status block link
10834 * change bit implementation, so we must use the
10835 * status register in those cases.
10837 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10838 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
10840 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
10842 /* The led_ctrl is set during tg3_phy_probe, here we might
10843 * have to force the link status polling mechanism based
10844 * upon subsystem IDs.
10846 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10847 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
10848 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
10849 TG3_FLAG_USE_LINKCHG_REG);
10852 /* For all SERDES we poll the MAC status register. */
10853 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10854 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
10856 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
10858 /* All chips before 5787 can get confused if TX buffers
10859 * straddle the 4GB address boundary in some cases.
10861 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10862 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10863 tp->dev->hard_start_xmit = tg3_start_xmit;
10865 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
10868 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
10869 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
10872 tp->rx_std_max_post = TG3_RX_RING_SIZE;
10874 /* Increment the rx prod index on the rx std ring by at most
10875 * 8 for these chips to workaround hw errata.
10877 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10878 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10879 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10880 tp->rx_std_max_post = 8;
10882 /* By default, disable wake-on-lan. User can change this
10883 * using ETHTOOL_SWOL.
10885 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
10890 #ifdef CONFIG_SPARC64
/* Read this NIC's MAC address from the SPARC OpenFirmware device tree:
 * the "local-mac-address" property hanging off this PCI device's PROM
 * node.  On success the address becomes both the current (dev_addr)
 * and permanent (perm_addr) address.
 *
 * NOTE(review): this extract is missing lines — the opening brace, the
 * `len` declaration, the trailing of_get_property() argument, and the
 * return paths.  Presumably it returns 0 on success and non-zero when
 * the property is absent so the caller falls back to other sources —
 * confirm against the full source.
 */
10891 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
10893 	struct net_device *dev = tp->dev;
10894 	struct pci_dev *pdev = tp->pdev;
	/* sysdata on sparc64 carries the PROM node cookie. */
10895 	struct pcidev_cookie *pcp = pdev->sysdata;
10898 	unsigned char *addr;
10901 	addr = of_get_property(pcp->prom_node, "local-mac-address",
	/* Only accept a well-formed 6-byte Ethernet address. */
10903 	if (addr && len == 6) {
10904 		memcpy(dev->dev_addr, addr, 6);
10905 		memcpy(dev->perm_addr, dev->dev_addr, 6);
/* Last-resort MAC address source on SPARC: copy the machine-wide
 * IDPROM Ethernet address into both the current and permanent
 * addresses of this interface.
 *
 * NOTE(review): the function braces and return statement are missing
 * from this extract.
 */
10912 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
10914 	struct net_device *dev = tp->dev;
10916 	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
10917 	memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
/* Determine the device MAC address, trying sources in priority order:
 *   1) SPARC OpenFirmware property (sparc64 builds only),
 *   2) the bootcode's SRAM MAC-address mailbox,
 *   3) NVRAM at `mac_offset`,
 *   4) the MAC_ADDR_0_HIGH/LOW hardware registers,
 * with the SPARC IDPROM as the final fallback if nothing valid is found.
 *
 * NOTE(review): lines are missing from this extract — the `mac_offset`
 * initialization, `addr_ok` declaration, several early returns and
 * closing braces.  Comments below annotate only the visible code.
 */
10922 static int __devinit tg3_get_device_address(struct tg3 *tp)
10924 	struct net_device *dev = tp->dev;
10925 	u32 hi, lo, mac_offset;
10928 #ifdef CONFIG_SPARC64
	/* A successful PROM lookup supersedes every other source. */
10929 	if (!tg3_get_macaddr_sparc(tp))
	/* Dual-MAC parts (5704 and 5780-class): the second PCI function
	 * stores its address elsewhere in SRAM/NVRAM; the offset-selection
	 * and NVRAM-reset handshake here is only partially visible.
	 */
10934 	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
10935 	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10936 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
10938 		if (tg3_nvram_lock(tp))
10939 			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
10941 			tg3_nvram_unlock(tp);
10944 	/* First try to get it from MAC address mailbox. */
10945 	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	/* 0x484b is ASCII "HK" — the signature bootcode writes when the
	 * mailbox holds a valid address.
	 */
10946 	if ((hi >> 16) == 0x484b) {
10947 		dev->dev_addr[0] = (hi >> 8) & 0xff;
10948 		dev->dev_addr[1] = (hi >> 0) & 0xff;
10950 		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
10951 		dev->dev_addr[2] = (lo >> 24) & 0xff;
10952 		dev->dev_addr[3] = (lo >> 16) & 0xff;
10953 		dev->dev_addr[4] = (lo >> 8) & 0xff;
10954 		dev->dev_addr[5] = (lo >> 0) & 0xff;
10956 		/* Some old bootcode may report a 0 MAC address in SRAM */
10957 		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
10960 	/* Next, try NVRAM. */
	/* NVRAM stores the address with a different byte order than the
	 * SRAM mailbox — note the swapped shift pattern below.
	 */
10961 	if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
10962 	    !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
10963 		dev->dev_addr[0] = ((hi >> 16) & 0xff);
10964 		dev->dev_addr[1] = ((hi >> 24) & 0xff);
10965 		dev->dev_addr[2] = ((lo >> 0) & 0xff);
10966 		dev->dev_addr[3] = ((lo >> 8) & 0xff);
10967 		dev->dev_addr[4] = ((lo >> 16) & 0xff);
10968 		dev->dev_addr[5] = ((lo >> 24) & 0xff);
10970 	/* Finally just fetch it out of the MAC control regs. */
10972 	hi = tr32(MAC_ADDR_0_HIGH);
10973 	lo = tr32(MAC_ADDR_0_LOW);
10975 	dev->dev_addr[5] = lo & 0xff;
10976 	dev->dev_addr[4] = (lo >> 8) & 0xff;
10977 	dev->dev_addr[3] = (lo >> 16) & 0xff;
10978 	dev->dev_addr[2] = (lo >> 24) & 0xff;
10979 	dev->dev_addr[1] = hi & 0xff;
10980 	dev->dev_addr[0] = (hi >> 8) & 0xff;
	/* Nothing usable found in hardware; fall back to the IDPROM on
	 * SPARC (error path for other arches is missing from this extract).
	 */
10984 	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
10985 #ifdef CONFIG_SPARC64
10986 		if (!tg3_get_default_macaddr_sparc(tp))
	/* Mirror the chosen address as the permanent address when the
	 * ethtool permanent-address API exists in this kernel.
	 */
10991 #ifdef ETHTOOL_GPERMADDR
10992 	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
10997 #define BOUNDARY_SINGLE_CACHELINE 1
10998 #define BOUNDARY_MULTI_CACHELINE 2
/* Fold DMA read/write boundary bits into `val` (the DMA_RW_CTRL image)
 * based on the host cache line size, the bus flavor (PCI-X, PCI
 * Express, or conventional PCI — each uses a different bit encoding),
 * and a per-architecture "goal" of bursting within one cache line or
 * across several.  Returns the updated register image.
 *
 * NOTE(review): this extract is badly truncated — the `goal`/`byte`
 * declarations, all `case`/`default` labels and `break`s inside the
 * switches, the `#else`/`#endif` arms, and the final return are
 * missing.  The visible lines only show the per-size value selection.
 */
11000 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
11002 	int cacheline_size;
11006 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	/* A zero cache-line-size register presumably maps to the 1024
	 * fallback, otherwise units of 4 bytes — selection line missing.
	 */
11008 		cacheline_size = 1024;
11010 		cacheline_size = (int) byte * 4;
11012 	/* On 5703 and later chips, the boundary bits have no
11015 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11016 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
11017 	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
	/* Platform policy: big-iron RISC hosts prefer multi-cacheline
	 * bursts; sparc64/alpha controllers disconnect at line boundaries.
	 */
11020 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
11021 	goal = BOUNDARY_MULTI_CACHELINE;
11023 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
11024 	goal = BOUNDARY_SINGLE_CACHELINE;
11033 	/* PCI controllers on most RISC systems tend to disconnect
11034 	 * when a device tries to burst across a cache-line boundary.
11035 	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
11037 	 * Unfortunately, for PCI-E there are only limited
11038 	 * write-side controls for this, and thus for reads
11039 	 * we will still get the disconnects. We'll also waste
11040 	 * these PCI cycles for both read and write for chips
11041 	 * other than 5700 and 5701 which do not implement the
	/* PCI-X encoding: boundaries chosen per cache-line size. */
11044 	if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11045 	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
11046 		switch (cacheline_size) {
11051 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
11052 				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
11053 					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
11055 				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11056 					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11061 			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
11062 				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
11066 			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11067 				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
	/* PCIe encoding: only write-side boundary control exists. */
11070 	} else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11071 		switch (cacheline_size) {
11075 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
11076 				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11077 				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
11083 			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11084 			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
	/* Conventional PCI encoding. */
11088 		switch (cacheline_size) {
11090 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
11091 				val |= (DMA_RWCTRL_READ_BNDRY_16 |
11092 					DMA_RWCTRL_WRITE_BNDRY_16);
11097 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
11098 				val |= (DMA_RWCTRL_READ_BNDRY_32 |
11099 					DMA_RWCTRL_WRITE_BNDRY_32);
11104 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
11105 				val |= (DMA_RWCTRL_READ_BNDRY_64 |
11106 					DMA_RWCTRL_WRITE_BNDRY_64);
11111 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
11112 				val |= (DMA_RWCTRL_READ_BNDRY_128 |
11113 					DMA_RWCTRL_WRITE_BNDRY_128);
11118 			val |= (DMA_RWCTRL_READ_BNDRY_256 |
11119 				DMA_RWCTRL_WRITE_BNDRY_256);
11122 			val |= (DMA_RWCTRL_READ_BNDRY_512 |
11123 				DMA_RWCTRL_WRITE_BNDRY_512);
11127 			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
11128 				DMA_RWCTRL_WRITE_BNDRY_1024);
/* Push one host buffer through the chip's read- or write-DMA engine:
 * build an internal buffer descriptor pointing at `buf`/`buf_dma`,
 * write it into the NIC SRAM descriptor pool via PCI config-space
 * memory-window cycles, enqueue it on the appropriate FTQ, and poll
 * the completion FIFO.  `to_device` != 0 selects the read-DMA engine
 * (host -> NIC); 0 selects write-DMA (NIC -> host).
 *
 * NOTE(review): this extract is missing lines — the `i`/`val`/`ret`
 * declarations, the `if (to_device)`/`else` split around the two
 * engine setups and FTQ enqueues, the udelay in the poll loop, and
 * the return paths.
 */
11137 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
11139 	struct tg3_internal_buffer_desc test_desc;
11140 	u32 sram_dma_descs;
11143 	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
	/* Quiesce: clear completion FIFOs and both DMA engines' status. */
11145 	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
11146 	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
11147 	tw32(RDMAC_STATUS, 0);
11148 	tw32(WDMAC_STATUS, 0);
11150 	tw32(BUFMGR_MODE, 0);
11151 	tw32(FTQ_RESET, 0);
	/* Descriptor targets NIC SRAM mbuf area 0x2100 for `size` bytes. */
11153 	test_desc.addr_hi = ((u64) buf_dma) >> 32;
11154 	test_desc.addr_lo = buf_dma & 0xffffffff;
11155 	test_desc.nic_mbuf = 0x00002100;
11156 	test_desc.len = size;
11159 	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
11160 	 * the *second* time the tg3 driver was getting loaded after an
11163 	 * Broadcom tells me:
11164 	 *   ...the DMA engine is connected to the GRC block and a DMA
11165 	 *   reset may affect the GRC block in some unpredictable way...
11166 	 *   The behavior of resets to individual blocks has not been tested.
11168 	 * Broadcom noted the GRC reset will also reset all sub-components.
	/* Read-DMA path (to_device): completion queue 13, service queue 2. */
11171 		test_desc.cqid_sqid = (13 << 8) | 2;
11173 		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
	/* Write-DMA path: completion queue 16, service queue 7. */
11176 		test_desc.cqid_sqid = (16 << 8) | 7;
11178 		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
11181 	test_desc.flags = 0x00000005;
	/* Copy the descriptor word-by-word into SRAM through the PCI
	 * config-space memory window, then close the window.
	 */
11183 	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
11186 		val = *(((u32 *)&test_desc) + i);
11187 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
11188 				       sram_dma_descs + (i * sizeof(u32)));
11189 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
11191 	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	/* Kick the chosen DMA engine by enqueueing the descriptor. */
11194 		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
11196 		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
	/* Bounded poll (40 iterations) for our descriptor to appear on
	 * the completion FIFO.
	 */
11200 	for (i = 0; i < 40; i++) {
11204 			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
11206 			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
11207 		if ((val & 0xffff) == sram_dma_descs) {
11218 #define TEST_BUFFER_SIZE 0x2000
/* Choose and verify the chip's DMA read/write control settings.
 * Computes an initial dma_rwctrl value (command codes, watermarks,
 * boundary bits) from bus type and ASIC revision, then — on 5700/5701
 * only — runs a write/read DMA loop-back over a test buffer to detect
 * the write-DMA boundary bug, tightening the write boundary to 16
 * bytes if corruption is observed.  Returns 0 on success.
 *
 * NOTE(review): this extract is missing lines — `ret`/`i`/`p`/`val`
 * declarations, several `else` arms and closing braces, the retry
 * `goto`s, labels, and all `return` statements.
 */
11220 static int __devinit tg3_test_dma(struct tg3 *tp)
11222 	dma_addr_t buf_dma;
11223 	u32 *buf, saved_dma_rwctrl;
	/* DMA-coherent scratch buffer for the loop-back test. */
11226 	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
	/* Baseline: PCI write command 0x7, read command 0x6. */
11232 	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
11233 			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
11235 	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
	/* Watermark bits differ per bus flavor and ASIC; the magic
	 * constants below come from Broadcom's reference values.
	 */
11237 	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11238 		/* DMA read watermark not used on PCIE */
11239 		tp->dma_rwctrl |= 0x00180000;
11240 	} else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
11241 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
11242 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
11243 			tp->dma_rwctrl |= 0x003f0000;
11245 			tp->dma_rwctrl |= 0x003f000f;
11247 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
11248 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
11249 			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
11251 			/* If the 5704 is behind the EPB bridge, we can
11252 			 * do the less restrictive ONE_DMA workaround for
11253 			 * better performance.
11255 			if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
11256 			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
11257 				tp->dma_rwctrl |= 0x8000;
11258 			else if (ccval == 0x6 || ccval == 0x7)
11259 				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
11261 			/* Set bit 23 to enable PCIX hw bug fix */
11262 			tp->dma_rwctrl |= 0x009f0000;
11263 		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
11264 			/* 5780 always in PCIX mode */
11265 			tp->dma_rwctrl |= 0x00144000;
11266 		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11267 			/* 5714 always in PCIX mode */
11268 			tp->dma_rwctrl |= 0x00148000;
11270 			tp->dma_rwctrl |= 0x001b000f;
	/* 5703/5704: low nibble of dma_rwctrl must be cleared. */
11274 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
11275 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
11276 		tp->dma_rwctrl &= 0xfffffff0;
11278 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11279 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
11280 		/* Remove this if it causes problems for some boards. */
11281 		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
11283 		/* On 5700/5701 chips, we need to set this bit.
11284 		 * Otherwise the chip will issue cacheline transactions
11285 		 * to streamable DMA memory with not all the byte
11286 		 * enables turned on.  This is an error on several
11287 		 * RISC PCI controllers, in particular sparc64.
11289 		 * On 5703/5704 chips, this bit has been reassigned
11290 		 * a different meaning.  In particular, it is used
11291 		 * on those chips to enable a PCI-X workaround.
11293 		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
11296 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11299 	/* Unneeded, already done by tg3_get_invariants. */
11300 	tg3_switch_clocks(tp);
	/* Only 5700/5701 exhibit the write-DMA bug; everyone else is
	 * done here (the early-exit jump itself is missing from this
	 * extract).
	 */
11304 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11305 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
11308 	/* It is best to perform DMA test with maximum write burst size
11309 	 * to expose the 5700/5701 write DMA bug.
11311 	saved_dma_rwctrl = tp->dma_rwctrl;
11312 	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11313 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	/* Fill the buffer with a known pattern (pattern line missing). */
11318 	for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
11321 	/* Send the buffer to the chip. */
11322 	ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
11324 		printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
11329 	/* validate data reached card RAM correctly. */
11330 	for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
11332 		tg3_read_mem(tp, 0x2100 + (i*4), &val);
11333 		if (le32_to_cpu(val) != p[i]) {
11334 			printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
11335 			/* ret = -ENODEV here? */
11340 	/* Now read it back. */
11341 	ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
11343 		printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
	/* Verify the round-trip; on mismatch, fall back to a 16-byte
	 * write boundary and presumably retry (retry goto missing).
	 */
11349 	for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
11353 		if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
11354 		    DMA_RWCTRL_WRITE_BNDRY_16) {
11355 			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11356 			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
11357 			tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11360 			printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
	/* Full buffer verified clean. */
11366 	if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
11372 		if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
11373 		    DMA_RWCTRL_WRITE_BNDRY_16) {
11374 #if (LINUX_VERSION_CODE >= 0x2060a)
11375 			static struct pci_device_id dma_wait_state_chipsets[] = {
11376 				{ PCI_DEVICE(PCI_VENDOR_ID_APPLE,
11377 					     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
11382 			/* DMA test passed without adjusting DMA boundary,
11383 			 * now look for chipsets that are known to expose the
11384 			 * DMA bug without failing the test.
11386 #if (LINUX_VERSION_CODE < 0x2060a)
11387 			if (pci_find_device(PCI_VENDOR_ID_APPLE,
11388 				PCI_DEVICE_ID_APPLE_UNI_N_PCI15, NULL))
11390 			if (pci_dev_present(dma_wait_state_chipsets))
11393 				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11394 				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
11397 			/* Safe to use the calculated DMA boundary. */
11398 			tp->dma_rwctrl = saved_dma_rwctrl;
11400 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11404 	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
11409 static void __devinit tg3_init_link_config(struct tg3 *tp)
11411 tp->link_config.advertising =
11412 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11413 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11414 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
11415 ADVERTISED_Autoneg | ADVERTISED_MII);
11416 tp->link_config.speed = SPEED_INVALID;
11417 tp->link_config.duplex = DUPLEX_INVALID;
11418 tp->link_config.autoneg = AUTONEG_ENABLE;
11419 tp->link_config.active_speed = SPEED_INVALID;
11420 tp->link_config.active_duplex = DUPLEX_INVALID;
11421 tp->link_config.phy_is_low_power = 0;
11422 tp->link_config.orig_speed = SPEED_INVALID;
11423 tp->link_config.orig_duplex = DUPLEX_INVALID;
11424 tp->link_config.orig_autoneg = AUTONEG_INVALID;
/* Initialize the buffer-manager watermark configuration cached in
 * tp->bufmgr_config.  5705-and-newer parts have a smaller on-chip
 * mbuf pool and use the reduced *_5705 defaults (with 5780-class
 * jumbo values); older chips use the original full-size defaults.
 * The DMA low/high watermarks are common to all chips.
 *
 * NOTE(review): the `} else {` separating the two branches and the
 * branch-closing brace are missing from this extract.
 */
11427 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
11429 	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11430 		tp->bufmgr_config.mbuf_read_dma_low_water =
11431 			DEFAULT_MB_RDMA_LOW_WATER_5705;
11432 		tp->bufmgr_config.mbuf_mac_rx_low_water =
11433 			DEFAULT_MB_MACRX_LOW_WATER_5705;
11434 		tp->bufmgr_config.mbuf_high_water =
11435 			DEFAULT_MB_HIGH_WATER_5705;
11437 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11438 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
11439 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11440 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
11441 		tp->bufmgr_config.mbuf_high_water_jumbo =
11442 			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	/* Pre-5705 chips: full-size defaults (else arm missing here). */
11444 		tp->bufmgr_config.mbuf_read_dma_low_water =
11445 			DEFAULT_MB_RDMA_LOW_WATER;
11446 		tp->bufmgr_config.mbuf_mac_rx_low_water =
11447 			DEFAULT_MB_MACRX_LOW_WATER;
11448 		tp->bufmgr_config.mbuf_high_water =
11449 			DEFAULT_MB_HIGH_WATER;
11451 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11452 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
11453 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11454 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
11455 		tp->bufmgr_config.mbuf_high_water_jumbo =
11456 			DEFAULT_MB_HIGH_WATER_JUMBO;
	/* Common to all chip generations. */
11459 	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
11460 	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
11463 static char * __devinit tg3_phy_string(struct tg3 *tp)
11465 switch (tp->phy_id & PHY_ID_MASK) {
11466 case PHY_ID_BCM5400: return "5400";
11467 case PHY_ID_BCM5401: return "5401";
11468 case PHY_ID_BCM5411: return "5411";
11469 case PHY_ID_BCM5701: return "5701";
11470 case PHY_ID_BCM5703: return "5703";
11471 case PHY_ID_BCM5704: return "5704";
11472 case PHY_ID_BCM5705: return "5705";
11473 case PHY_ID_BCM5750: return "5750";
11474 case PHY_ID_BCM5752: return "5752";
11475 case PHY_ID_BCM5714: return "5714";
11476 case PHY_ID_BCM5780: return "5780";
11477 case PHY_ID_BCM5755: return "5755";
11478 case PHY_ID_BCM5787: return "5787";
11479 case PHY_ID_BCM8002: return "8002/serdes";
11480 case 0: return "serdes";
11481 default: return "unknown";
/* Format a human-readable description of the bus the NIC sits on into
 * the caller-supplied buffer `str` and return it: "PCI Express", or
 * "PCIX:<speed>"/"PCI:<speed>" followed by ":32-bit" or ":64-bit".
 * PCI-X speed is decoded from the low 5 bits of TG3PCI_CLOCK_CTRL
 * (with the 5704 CIOBE board forced to 133MHz); plain PCI speed comes
 * from the PCI_HIGH_SPEED flag.
 *
 * NOTE(review): the `return str;` statements and the `} else {` arms
 * are missing from this extract.  Caller must supply a buffer large
 * enough for the longest string (presumably sized by the caller —
 * confirm at call sites).
 */
11485 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
11487 	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11488 		strcpy(str, "PCI Express");
11490 	} else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
11491 		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
11493 		strcpy(str, "PCIX:");
11495 		if ((clock_ctrl == 7) ||
11496 		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
11497 		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
11498 			strcat(str, "133MHz");
11499 		else if (clock_ctrl == 0)
11500 			strcat(str, "33MHz");
11501 		else if (clock_ctrl == 2)
11502 			strcat(str, "50MHz");
11503 		else if (clock_ctrl == 4)
11504 			strcat(str, "66MHz");
11505 		else if (clock_ctrl == 6)
11506 			strcat(str, "100MHz");
	/* Conventional PCI (else arm missing from this extract). */
11508 		strcpy(str, "PCI:");
11509 		if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
11510 			strcat(str, "66MHz");
11512 			strcat(str, "33MHz");
	/* Bus width suffix applies to PCI/PCI-X variants. */
11514 	if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
11515 		strcat(str, ":32-bit");
11517 		strcat(str, ":64-bit");
/* For dual-port chips (5704/5714), find the PCI function that is the
 * other port of the same physical device: scan all functions in our
 * slot for a device other than ourselves.
 */
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
	struct pci_dev *peer;
	/* devfn of function 0 in our slot; low 3 bits select the function. */
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
/* Initialize tp->coal with the driver's default interrupt-coalescing
 * parameters; this is what is reported to userspace via
 * ETHTOOL_GCOALESCE.
 */
static void __devinit tg3_init_coal(struct tg3 *tp)
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	/* If the host coalescing engine clears tick counters on BD
	 * events, substitute the CLRTCKS variants of the tick defaults.
	 */
	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;

	/* Zero the *_irq and stats-block parameters on 5705 and newer
	 * parts (must come after the CLRTCKS overrides above).
	 */
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
/* PCI probe entry point.  Enables the device, maps BAR 0, allocates
 * and configures the net_device, probes chip capabilities, chooses the
 * DMA masks, runs the DMA engine self-test and registers the network
 * interface.  Returns 0 on success or a negative errno; error paths
 * unwind through the err_out_* labels at the bottom.
 */
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
	static int tg3_version_printed = 0;	/* print driver banner only once */
	unsigned long tg3reg_base, tg3reg_len;	/* BAR 0 base and length */
	struct net_device *dev;
	int i, err, pm_cap;
	u64 dma_mask, persist_dma_mask;	/* streaming vs. coherent DMA masks */

	if (tg3_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
		printk(KERN_ERR PFX "Cannot enable PCI device, "

	/* BAR 0 must be a memory-mapped register window. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find proper PCI device "
		       "base address, aborting.\n");
		goto err_out_disable_pdev;

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		goto err_out_disable_pdev;

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
		printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
		goto err_out_free_res;

	tg3reg_base = pci_resource_start(pdev, 0);
	tg3reg_len = pci_resource_len(pdev, 0);

	dev = alloc_etherdev(sizeof(*tp));
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		goto err_out_free_res;

	SET_MODULE_OWNER(dev);
#if (LINUX_VERSION_CODE >= 0x20419)
	SET_NETDEV_DEV(dev, &pdev->dev);

#if TG3_VLAN_TAG_USED
	/* Advertise hardware VLAN tag insert/strip when 802.1Q support
	 * is configured in (see TG3_VLAN_TAG_USED at the top of file).
	 */
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = tg3_vlan_rx_register;
	dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;

	tp = netdev_priv(dev);
	tp->pm_cap = pm_cap;
	tp->mac_mode = TG3_DEF_MAC_MODE;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	tp->mi_mode = MAC_MI_MODE_BASE;
	/* Module parameter tg3_debug overrides the default msg level. */
	tp->msg_enable = tg3_debug;
	tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping. DMA data byte swapping is controlled in the GRC_MODE
	 * register.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;

	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task, tp);

	tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
	if (tp->regs == 0UL) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		goto err_out_free_dev;

	tg3_init_link_config(tp);

	/* Default ring sizes; can be changed later via ethtool. */
	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
	tp->tx_pending = TG3_DEF_TX_RING_PENDING;

	/* Hook up the netdevice operations. */
	dev->open = tg3_open;
	dev->stop = tg3_close;
	dev->get_stats = tg3_get_stats;
	dev->set_multicast_list = tg3_set_rx_mode;
	dev->set_mac_address = tg3_set_mac_addr;
	dev->do_ioctl = tg3_ioctl;
	dev->tx_timeout = tg3_tx_timeout;
	dev->poll = tg3_poll;
	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->change_mtu = tg3_change_mtu;
	dev->irq = pdev->irq;
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = tg3_poll_controller;

	err = tg3_get_invariants(tp);
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		goto err_out_iounmap;

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
		persist_dma_mask = dma_mask = DMA_32BIT_MASK;
	else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
		persist_dma_mask = dma_mask = DMA_40BIT_MASK;
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_64BIT_MASK;
		persist_dma_mask = dma_mask = DMA_64BIT_MASK;

	/* Configure DMA attributes. */
	if (dma_mask > DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, dma_mask);
			dev->features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
				printk(KERN_ERR PFX "Unable to obtain 64 bit "
				       "DMA for consistent allocations\n");
				goto err_out_iounmap;
	/* Fall back to a 32-bit mask if the wide mask was refused. */
	if (err || dma_mask == DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
			printk(KERN_ERR PFX "No usable DMA configuration, "
			goto err_out_iounmap;

	tg3_init_bufmgr_config(tp);

#if TG3_TSO_SUPPORT != 0
	/* Decide TSO capability: hardware TSO always qualifies;
	 * firmware TSO is excluded on 5700/5701/5705-A0 and when ASF
	 * firmware is active.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		dev->features |= NETIF_F_TSO;
		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
			dev->features |= NETIF_F_TSO6;

	/* 5705-A1 without TSO on a slow bus gets a shallower RX ring. */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
		tp->rx_pending = 63;

	/* Dual-port chips: locate the other port's pci_dev. */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
		tp->pdev_peer = tg3_find_peer(tp);

	err = tg3_get_device_address(tp);
		printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
		goto err_out_iounmap;

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
#if (LINUX_VERSION_CODE < 0x2060a)
		pci_save_state(tp->pdev, tp->pci_cfg_state);
		pci_save_state(tp->pdev);
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	err = tg3_test_dma(tp);
		printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
		goto err_out_iounmap;

	/* Tigon3 can do ipv4 only... and some chips have buggy
	 * checksumming.
	 */
	if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
			dev->features |= NETIF_F_HW_CSUM;
			dev->features |= NETIF_F_IP_CSUM;
		dev->features |= NETIF_F_SG;
		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;

	/* flow control autonegotiation is default behavior */
	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;

	/* Now that we have fully setup the chip, save away a snapshot
	 * of the PCI config space. We need to restore this after
	 * GRC_MISC_CFG core clock resets and some resume events.
	 */
#if (LINUX_VERSION_CODE < 0x2060a)
	pci_save_state(tp->pdev, tp->pci_cfg_state);
	pci_save_state(tp->pdev);

	err = register_netdev(dev);
		printk(KERN_ERR PFX "Cannot register net device, "
		goto err_out_iounmap;

	pci_set_drvdata(pdev, dev);

	/* Announce the adapter: part number, revision, PHY, bus and MAC. */
	printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
	       tp->board_part_number,
	       tp->pci_chip_rev_id,
	       tg3_phy_string(tp),
	       tg3_bus_string(tp, str),
	       (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");

	for (i = 0; i < 6; i++)
		printk("%2.2x%c", dev->dev_addr[i],
		       i == 5 ? '\n' : ':');

	printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
	       "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
	       (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
	       (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
	       (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
	       (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
	       (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
	printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
	       dev->name, tp->dma_rwctrl,
	       (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
	        (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));

	/* Link state is unknown until the first poll; start carrier-off. */
	netif_carrier_off(tp->dev);

	/* Error unwind path. */
#if (LINUX_VERSION_CODE >= 0x20418)
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
/* PCI remove entry point: flush any pending reset work, unregister the
 * net_device and release all PCI resources acquired in tg3_init_one().
 */
static void __devexit tg3_remove_one(struct pci_dev *pdev)
	struct net_device *dev = pci_get_drvdata(pdev);

		struct tg3 *tp = netdev_priv(dev);

		/* Make sure tg3_reset_task() is not running before we
		 * tear the device down.
		 */
#if (LINUX_VERSION_CODE >= 0x20600)
		flush_scheduled_work();
		unregister_netdev(dev);
#if (LINUX_VERSION_CODE >= 0x20418)
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
/* PM suspend handler: quiesce the NIC (stop NAPI, timer and interrupts),
 * halt the chip, and drop it into the requested low-power state.  On
 * older kernels the target state arrives as a raw u32 instead of a
 * pm_message_t, hence the version-dependent prototype.
 */
#if (LINUX_VERSION_CODE < 0x2060b)
static int tg3_suspend(struct pci_dev *pdev, u32 state)
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);

	/* Nothing to quiesce if the interface is down. */
	if (!netif_running(dev))

#if (LINUX_VERSION_CODE >= 0x20600)
	flush_scheduled_work();
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

#if (LINUX_VERSION_CODE < 0x2060b)
	err = tg3_set_power_state(tp, state);
	err = tg3_set_power_state(tp, pci_choose_state(pdev, state));

	/* Power-state change failed: bring the device back up so the
	 * interface keeps working.
	 */
		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		if (tg3_restart_hw(tp, 1))

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

		tg3_full_unlock(tp);
/* PM resume handler: restore PCI config space, return the chip to full
 * power (D0), restart the hardware and re-arm the driver timer.
 */
static int tg3_resume(struct pci_dev *pdev)
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);

	/* Nothing to restore if the interface was down at suspend. */
	if (!netif_running(dev))

#if (LINUX_VERSION_CODE < 0x2060a)
	pci_restore_state(tp->pdev, tp->pci_cfg_state);
	pci_restore_state(tp->pdev);

	err = tg3_set_power_state(tp, PCI_D0);

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	err = tg3_restart_hw(tp, 1);

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);
/* PCI driver glue: device table, probe/remove and power-management hooks. */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
/* Module init: register the PCI driver; probing happens per device. */
static int __init tg3_init(void)
	return pci_module_init(&tg3_driver);
/* Module exit: unregister the PCI driver, removing all bound devices. */
static void __exit tg3_cleanup(void)
	pci_unregister_driver(&tg3_driver);
/* Wire the module entry/exit points into the kernel module loader. */
module_init(tg3_init);
module_exit(tg3_cleanup);